/* cache-tree.c (as of commit b8990fb, "midx: use midx in approximate_object_count") */
#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"

#ifndef DEBUG
#define DEBUG 0
#endif

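/*
 * Allocate an empty cache_tree.  entry_count is set to -1, which is
 * how an "invalid" level (no tree object computed yet) is marked
 * throughout this file.
 */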
struct cache_tree *cache_tree(void)
{
        struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
        it->entry_count = -1;
        return it;
}

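/*
 * Recursively free a cache_tree and all of its subtrees, then clear
 * the caller's pointer.
 */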
void cache_tree_free(struct cache_tree **it_p)
{
        int i;
        struct cache_tree *it = *it_p;

        if (!it)
                return;
        for (i = 0; i < it->subtree_nr; i++)
                if (it->down[i]) {
                        cache_tree_free(&it->down[i]->cache_tree);
                        free(it->down[i]);
                }
        free(it->down);
        free(it);
        *it_p = NULL;
}

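/*
 * Compare two subtree names: shorter names sort first, names of equal
 * length are compared bytewise.  This is the order kept in the down[]
 * array.
 */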
static int subtree_name_cmp(const char *one, int onelen,
                            const char *two, int twolen)
{
        if (onelen < twolen)
                return -1;
        if (twolen < onelen)
                return 1;
        return memcmp(one, two, onelen);
}

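/*
 * Binary-search it->down[] for the named subtree.  Returns its index
 * when found, or -pos-1 where pos is the insertion point that keeps
 * the array sorted.
 */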
static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
        struct cache_tree_sub **down = it->down;
        int lo, hi;
        lo = 0;
        hi = it->subtree_nr;
        while (lo < hi) {
                int mi = lo + (hi - lo) / 2;
                struct cache_tree_sub *mdl = down[mi];
                int cmp = subtree_name_cmp(path, pathlen,
                                           mdl->name, mdl->namelen);
                if (!cmp)
                        return mi;
                if (cmp < 0)
                        hi = mi;
                else
                        lo = mi + 1;
        }
        return -lo-1;
}

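/*
 * Look up the immediate subtree with the given name.  With "create"
 * set, a missing subtree is allocated and spliced into the sorted
 * down[] array at the position reported by subtree_pos().
 */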
static struct cache_tree_sub *find_subtree(struct cache_tree *it,
                                           const char *path,
                                           int pathlen,
                                           int create)
{
        struct cache_tree_sub *down;
        int pos = subtree_pos(it, path, pathlen);
        if (0 <= pos)
                return it->down[pos];
        if (!create)
                return NULL;

        pos = -pos-1;
        ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
        it->subtree_nr++;

        FLEX_ALLOC_MEM(down, name, path, pathlen);
        down->cache_tree = NULL;
        down->namelen = pathlen;

        if (pos < it->subtree_nr)
                MOVE_ARRAY(it->down + pos + 1, it->down + pos,
                           it->subtree_nr - pos - 1);
        it->down[pos] = down;
        return down;
}

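/*
 * Find or create the immediate subtree named "path" under "it"
 * (create-on-miss wrapper around find_subtree()).
 */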
struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
        int pathlen = strlen(path);
        return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
        /* a/b/c
         * ==> invalidate self
         * ==> find "a", have it invalidate "b/c"
         * a
         * ==> invalidate self
         * ==> if "a" exists as a subtree, remove it.
         */
        const char *slash;
        int namelen;
        struct cache_tree_sub *down;

#if DEBUG
        fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

        if (!it)
                return 0;
        slash = strchrnul(path, '/');
        namelen = slash - path;
        it->entry_count = -1;
        if (!*slash) {
                int pos;
                pos = subtree_pos(it, path, namelen);
                if (0 <= pos) {
                        cache_tree_free(&it->down[pos]->cache_tree);
                        free(it->down[pos]);
                        /* 0 1 2 3 4 5
                         *       ^     ^subtree_nr = 6
                         *       pos
                         * move 4 and 5 up one place (2 entries)
                         * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
                         */
                        MOVE_ARRAY(it->down + pos, it->down + pos + 1,
                                   it->subtree_nr - pos - 1);
                        it->subtree_nr--;
                }
                return 1;
        }
        down = find_subtree(it, path, namelen, 0);
        if (down)
                do_invalidate_path(down->cache_tree, slash + 1);
        return 1;
}

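/*
 * Invalidate the cache-tree levels along "path" and mark the index as
 * changed so the invalidation is written out with it.
 */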
void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
        if (do_invalidate_path(istate->cache_tree, path))
                istate->cache_changed |= CACHE_TREE_CHANGED;
}

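/*
 * Check that the index can be written out as trees: no unmerged
 * (higher-stage) entries, and no "path" appearing alongside
 * "path/file".  Returns 0 if OK, -1 otherwise.
 */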
static int verify_cache(struct cache_entry **cache,
                        int entries, int flags)
{
        int i, funny;
        int silent = flags & WRITE_TREE_SILENT;

        /* Verify that the tree is merged */
        funny = 0;
        for (i = 0; i < entries; i++) {
                const struct cache_entry *ce = cache[i];
                if (ce_stage(ce)) {
                        if (silent)
                                return -1;
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "%s: unmerged (%s)\n",
                                ce->name, oid_to_hex(&ce->oid));
                }
        }
        if (funny)
                return -1;

        /* Also verify that the cache does not have path and path/file
         * at the same time.  At this point we know the cache has only
         * stage 0 entries.
         */
        funny = 0;
        for (i = 0; i < entries - 1; i++) {
                /* path/file always comes after path because of the way
                 * the cache is sorted.  Also path can appear only once,
                 * which means conflicting one would immediately follow.
                 */
                const char *this_name = cache[i]->name;
                const char *next_name = cache[i+1]->name;
                int this_len = strlen(this_name);
                if (this_len < strlen(next_name) &&
                    strncmp(this_name, next_name, this_len) == 0 &&
                    next_name[this_len] == '/') {
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "You have both %s and %s\n",
                                this_name, next_name);
                }
        }
        if (funny)
                return -1;
        return 0;
}

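/*
 * Compact it->down[], freeing any subtree whose "used" flag was left
 * unset by the caller's scan of the index.
 */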
static void discard_unused_subtrees(struct cache_tree *it)
{
        struct cache_tree_sub **down = it->down;
        int nr = it->subtree_nr;
        int dst, src;
        for (dst = src = 0; src < nr; src++) {
                struct cache_tree_sub *s = down[src];
                if (s->used)
                        down[dst++] = s;
                else {
                        cache_tree_free(&s->cache_tree);
                        free(s);
                        it->subtree_nr--;
                }
        }
}

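/*
 * A cache-tree is fully valid when this level and every subtree below
 * it carry a valid entry_count and their tree objects exist in the
 * object store.
 */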
int cache_tree_fully_valid(struct cache_tree *it)
{
        int i;
        if (!it)
                return 0;
        if (it->entry_count < 0 || !has_sha1_file(it->oid.hash))
                return 0;
        for (i = 0; i < it->subtree_nr; i++) {
                if (!cache_tree_fully_valid(it->down[i]->cache_tree))
                        return 0;
        }
        return 1;
}

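/*
 * Recompute the tree object for one level of the cache-tree: scan the
 * index entries under "base", update subtrees recursively, then hash
 * the resulting tree (and write it, unless WRITE_TREE_DRY_RUN or
 * WRITE_TREE_REPAIR is in effect).  A level that is already valid is
 * returned as-is.  Returns the number of index entries covered by this
 * level, or a negative value on error; *skip_count reports the
 * CE_REMOVE entries (here and in subtrees) that will not appear in the
 * on-disk index and are therefore excluded from entry_count.
 */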
static int update_one(struct cache_tree *it,
                      struct cache_entry **cache,
                      int entries,
                      const char *base,
                      int baselen,
                      int *skip_count,
                      int flags)
{
        struct strbuf buffer;
        int missing_ok = flags & WRITE_TREE_MISSING_OK;
        int dryrun = flags & WRITE_TREE_DRY_RUN;
        int repair = flags & WRITE_TREE_REPAIR;
        int to_invalidate = 0;
        int i;

        assert(!(dryrun && repair));

        *skip_count = 0;

        if (0 <= it->entry_count && has_sha1_file(it->oid.hash))
                return it->entry_count;

        /*
         * We first scan for subtrees and update them; we start by
         * marking existing subtrees -- the ones that are unmarked
         * should not be in the result.
         */
        for (i = 0; i < it->subtree_nr; i++)
                it->down[i]->used = 0;

        /*
         * Find the subtrees and update them.
         */
        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub;
                const char *path, *slash;
                int pathlen, sublen, subcnt, subskip;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (!slash) {
                        i++;
                        continue;
                }
                /*
                 * a/bbb/c (base = a/, slash = /c)
                 * ==>
                 * path+baselen = bbb/c, sublen = 3
                 */
                sublen = slash - (path + baselen);
                sub = find_subtree(it, path + baselen, sublen, 1);
                if (!sub->cache_tree)
                        sub->cache_tree = cache_tree();
                subcnt = update_one(sub->cache_tree,
                                    cache + i, entries - i,
                                    path,
                                    baselen + sublen + 1,
                                    &subskip,
                                    flags);
                if (subcnt < 0)
                        return subcnt;
                if (!subcnt)
                        die("index cache-tree records empty sub-tree");
                i += subcnt;
                sub->count = subcnt; /* to be used in the next loop */
                *skip_count += subskip;
                sub->used = 1;
        }

        discard_unused_subtrees(it);

        /*
         * Then write out the tree object for this level.
         */
        strbuf_init(&buffer, 8192);

        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub = NULL;
                const char *path, *slash;
                int pathlen, entlen;
                const struct object_id *oid;
                unsigned mode;
                int expected_missing = 0;
                int contains_ita = 0;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (slash) {
                        entlen = slash - (path + baselen);
                        sub = find_subtree(it, path + baselen, entlen, 0);
                        if (!sub)
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
                        oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        contains_ita = sub->cache_tree->entry_count < 0;
                        if (contains_ita) {
                                to_invalidate = 1;
                                expected_missing = 1;
                        }
                }
                else {
                        oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }

                if (is_null_oid(oid) ||
                    (mode != S_IFGITLINK && !missing_ok && !has_object_file(oid))) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
                                mode, oid_to_hex(oid), entlen+baselen, path);
                }

                /*
                 * CE_REMOVE entries are removed before the index is
                 * written to disk. Skip them to remain consistent
                 * with the future on-disk index.
                 */
                if (ce->ce_flags & CE_REMOVE) {
                        *skip_count = *skip_count + 1;
                        continue;
                }

                /*
                 * CE_INTENT_TO_ADD entries exist on on-disk index but
                 * they are not part of generated trees. Invalidate up
                 * to root to force cache-tree users to read elsewhere.
                 */
                if (!sub && ce_intent_to_add(ce)) {
                        to_invalidate = 1;
                        continue;
                }

                /*
                 * "sub" can be an empty tree if all subentries are i-t-a.
                 */
                if (contains_ita && is_empty_tree_oid(oid))
                        continue;

                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
                strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
                        mode, entlen, path + baselen);
#endif
        }

        if (repair) {
                struct object_id oid;
                hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
                if (has_object_file(&oid))
                        oidcpy(&it->oid, &oid);
                else
                        to_invalidate = 1;
        } else if (dryrun) {
                hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
        } else if (write_object_file(buffer.buf, buffer.len, tree_type,
                                     &it->oid)) {
                strbuf_release(&buffer);
                return -1;
        }

        strbuf_release(&buffer);
        it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG
        fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
                it->entry_count, it->subtree_nr,
                oid_to_hex(&it->oid));
#endif
        return i;
}

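/*
 * Verify the index and bring the whole cache-tree up to date, writing
 * any missing tree objects.  Returns 0 on success, or a negative value
 * if the index is unmerged or otherwise cannot be written as trees.
 */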
int cache_tree_update(struct index_state *istate, int flags)
{
        struct cache_tree *it = istate->cache_tree;
        struct cache_entry **cache = istate->cache;
        int entries = istate->cache_nr;
        int skip, i = verify_cache(cache, entries, flags);

        if (i)
                return i;
        i = update_one(it, cache, entries, "", 0, &skip, flags);
        if (i < 0)
                return i;
        istate->cache_changed |= CACHE_TREE_CHANGED;
        return 0;
}

static void write_one(struct strbuf *buffer, struct cache_tree *it,
                      const char *path, int pathlen)
{
        int i;

        /* One "cache-tree" entry consists of the following:
         * path (NUL terminated)
         * entry_count, subtree_nr ("%d %d\n")
         * tree-sha1 (missing if invalid)
         * subtree_nr "cache-tree" entries for subtrees.
         */
        strbuf_grow(buffer, pathlen + 100);
        strbuf_add(buffer, path, pathlen);
        strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
                        pathlen, path, it->entry_count, it->subtree_nr,
                        oid_to_hex(&it->oid));
        else
                fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
                        pathlen, path, it->subtree_nr);
#endif

        if (0 <= it->entry_count) {
                strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
                if (i) {
                        struct cache_tree_sub *prev = it->down[i-1];
                        if (subtree_name_cmp(down->name, down->namelen,
                                             prev->name, prev->namelen) <= 0)
                                die("fatal - unsorted cache subtree");
                }
                write_one(buffer, down->cache_tree, down->name, down->namelen);
        }
}

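/*
 * Serialize the whole cache-tree, starting at the unnamed root, using
 * the per-entry format described in write_one() above.
 */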
void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
        write_one(sb, root, "", 0);
}

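/*
 * Parse one serialized cache-tree entry (and, recursively, its
 * subtrees) from the buffer, advancing *buffer and *size_p past the
 * consumed data.  Returns NULL on truncated or malformed input.
 */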
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
        const char *buf = *buffer;
        unsigned long size = *size_p;
        const char *cp;
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;
        const unsigned rawsz = the_hash_algo->rawsz;

        it = NULL;
        /* skip name, but make sure name exists */
        while (size && *buf) {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        it = cache_tree();

        cp = buf;
        it->entry_count = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        cp = ep;
        subtree_nr = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        while (size && *buf && *buf != '\n') {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
                if (size < rawsz)
                        goto free_return;
                oidread(&it->oid, (const unsigned char *)buf);
                buf += rawsz;
                size -= rawsz;
        }

#if DEBUG
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
                        *buffer, it->entry_count, subtree_nr,
                        oid_to_hex(&it->oid));
        else
                fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
                        *buffer, subtree_nr);
#endif

        /*
         * Just a heuristic -- we do not add directories that often but
         * we do not want to have to extend it immediately when we do,
         * hence +2.
         */
        it->subtree_alloc = subtree_nr + 2;
        it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
        for (i = 0; i < subtree_nr; i++) {
                /* read each subtree */
                struct cache_tree *sub;
                struct cache_tree_sub *subtree;
                const char *name = buf;

                sub = read_one(&buf, &size);
                if (!sub)
                        goto free_return;
                subtree = cache_tree_sub(it, name);
                subtree->cache_tree = sub;
        }
        if (subtree_nr != it->subtree_nr)
                die("cache-tree: internal error");
        *buffer = buf;
        *size_p = size;
        return it;

 free_return:
        cache_tree_free(&it);
        return NULL;
}

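/*
 * Deserialize a complete cache-tree; the data must start with the
 * empty-named root entry.
 */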
struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
        if (buffer[0])
                return NULL; /* not the whole tree */
        return read_one(&buffer, &size);
}

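/*
 * Walk down the cache-tree following "path" one component at a time;
 * returns the cache_tree for that directory, or NULL if no such
 * subtree is recorded.
 */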
static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
        if (!it)
                return NULL;
        while (*path) {
                const char *slash;
                struct cache_tree_sub *sub;

                slash = strchrnul(path, '/');
                /*
                 * Between path and slash is the name of the subtree
                 * to look for.
                 */
                sub = find_subtree(it, path, slash - path, 0);
                if (!sub)
                        return NULL;
                it = sub->cache_tree;

                path = slash;
                while (*path == '/')
                        path++;
        }
        return it;
}

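/*
 * Read the index from "index_path" under the index lock, make sure its
 * cache-tree is fully valid (updating it and writing the index back if
 * it is not), and report the tree object id for "prefix" (or for the
 * root when prefix is NULL) in *oid.  Returns 0 or one of the
 * WRITE_TREE_* error codes.
 */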
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
        int entries, was_valid;
        struct lock_file lock_file = LOCK_INIT;
        int ret = 0;

        hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

        entries = read_index_from(index_state, index_path, get_git_dir());
        if (entries < 0) {
                ret = WRITE_TREE_UNREADABLE_INDEX;
                goto out;
        }
        if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
                cache_tree_free(&index_state->cache_tree);

        if (!index_state->cache_tree)
                index_state->cache_tree = cache_tree();

        was_valid = cache_tree_fully_valid(index_state->cache_tree);
        if (!was_valid) {
                if (cache_tree_update(index_state, flags) < 0) {
                        ret = WRITE_TREE_UNMERGED_INDEX;
                        goto out;
                }
                write_locked_index(index_state, &lock_file, COMMIT_LOCK);
                /* Not being able to write is fine -- we are only interested
                 * in updating the cache-tree part, and if the next caller
                 * ends up using the old index with unupdated cache-tree part
                 * it misses the work we did here, but that is just a
                 * performance penalty and not a big deal.
                 */
        }

        if (prefix) {
                struct cache_tree *subtree;
                subtree = cache_tree_find(index_state->cache_tree, prefix);
                if (!subtree) {
                        ret = WRITE_TREE_PREFIX_ERROR;
                        goto out;
                }
                oidcpy(oid, &subtree->oid);
        }
        else
                oidcpy(oid, &index_state->cache_tree->oid);

out:
        rollback_lock_file(&lock_file);
        return ret;
}

int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix)
{
        return write_index_as_tree(oid, &the_index, get_index_file(), flags, prefix);
}

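/*
 * Fill in a cache_tree (and, recursively, its subtrees) directly from
 * an existing tree object, recording each level's object id and entry
 * count.
 */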
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
        struct tree_desc desc;
        struct name_entry entry;
        int cnt;

        oidcpy(&it->oid, &tree->object.oid);
        init_tree_desc(&desc, tree->buffer, tree->size);
        cnt = 0;
        while (tree_entry(&desc, &entry)) {
                if (!S_ISDIR(entry.mode))
                        cnt++;
                else {
                        struct cache_tree_sub *sub;
                        struct tree *subtree = lookup_tree(entry.oid);
                        if (!subtree->object.parsed)
                                parse_tree(subtree);
                        sub = cache_tree_sub(it, entry.path);
                        sub->cache_tree = cache_tree();
                        prime_cache_tree_rec(sub->cache_tree, subtree);
                        cnt += sub->cache_tree->entry_count;
                }
        }
        it->entry_count = cnt;
}

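/*
 * Throw away the index's current cache-tree, rebuild it from the given
 * tree, and mark the index as changed.
 */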
void prime_cache_tree(struct index_state *istate, struct tree *tree)
{
        cache_tree_free(&istate->cache_tree);
        istate->cache_tree = cache_tree();
        prime_cache_tree_rec(istate->cache_tree, tree);
        istate->cache_changed |= CACHE_TREE_CHANGED;
}

/*
 * find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
                                                         struct traverse_info *info)
{
        struct cache_tree *our_parent;

        if (!info->prev)
                return root;
        our_parent = find_cache_tree_from_traversal(root, info->prev);
        return cache_tree_find(our_parent, info->name.path);
}

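/*
 * During a tree traversal, check whether the cache-tree already
 * records a valid tree with the same object id as "ent"; if so, return
 * the number of index entries it covers, otherwise return 0.
 */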
int cache_tree_matches_traversal(struct cache_tree *root,
                                 struct name_entry *ent,
                                 struct traverse_info *info)
{
        struct cache_tree *it;

        it = find_cache_tree_from_traversal(root, info);
        it = cache_tree_find(it, ent->path);
        if (it && it->entry_count > 0 && !oidcmp(ent->oid, &it->oid))
                return it->entry_count;
        return 0;
}

int update_main_cache_tree(int flags)
{
        if (!the_index.cache_tree)
                the_index.cache_tree = cache_tree();
        return cache_tree_update(&the_index, flags);
}