/* cache-tree.c on commit "Merge branch 'jc/denoise-rm-to-resolve'" (5e9d978) */
#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "object-store.h"
#include "replace-object.h"

#ifndef DEBUG_CACHE_TREE
#define DEBUG_CACHE_TREE 0
#endif

struct cache_tree *cache_tree(void)
{
        struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
        it->entry_count = -1;
        return it;
}

void cache_tree_free(struct cache_tree **it_p)
{
        int i;
        struct cache_tree *it = *it_p;

        if (!it)
                return;
        for (i = 0; i < it->subtree_nr; i++)
                if (it->down[i]) {
                        cache_tree_free(&it->down[i]->cache_tree);
                        free(it->down[i]);
                }
        free(it->down);
        free(it);
        *it_p = NULL;
}

static int subtree_name_cmp(const char *one, int onelen,
                            const char *two, int twolen)
{
        if (onelen < twolen)
                return -1;
        if (twolen < onelen)
                return 1;
        return memcmp(one, two, onelen);
}

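/*
 * Binary search for "path" in it->down[], which is kept sorted by
 * subtree_name_cmp().  Returns the index when the subtree is found;
 * otherwise returns -(insertion point)-1, so the caller can both
 * detect a miss and know where a new entry would belong.
 */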
static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
        struct cache_tree_sub **down = it->down;
        int lo, hi;
        lo = 0;
        hi = it->subtree_nr;
        while (lo < hi) {
                int mi = lo + (hi - lo) / 2;
                struct cache_tree_sub *mdl = down[mi];
                int cmp = subtree_name_cmp(path, pathlen,
                                           mdl->name, mdl->namelen);
                if (!cmp)
                        return mi;
                if (cmp < 0)
                        hi = mi;
                else
                        lo = mi + 1;
        }
        return -lo-1;
}

static struct cache_tree_sub *find_subtree(struct cache_tree *it,
                                           const char *path,
                                           int pathlen,
                                           int create)
{
        struct cache_tree_sub *down;
        int pos = subtree_pos(it, path, pathlen);
        if (0 <= pos)
                return it->down[pos];
        if (!create)
                return NULL;

        pos = -pos-1;
        ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
        it->subtree_nr++;

        FLEX_ALLOC_MEM(down, name, path, pathlen);
        down->cache_tree = NULL;
        down->namelen = pathlen;

        if (pos < it->subtree_nr)
                MOVE_ARRAY(it->down + pos + 1, it->down + pos,
                           it->subtree_nr - pos - 1);
        it->down[pos] = down;
        return down;
}

struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
        int pathlen = strlen(path);
        return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
        /* a/b/c
         * ==> invalidate self
         * ==> find "a", have it invalidate "b/c"
         * a
         * ==> invalidate self
         * ==> if "a" exists as a subtree, remove it.
         */
        const char *slash;
        int namelen;
        struct cache_tree_sub *down;

#if DEBUG_CACHE_TREE
        fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

        if (!it)
                return 0;
        slash = strchrnul(path, '/');
        namelen = slash - path;
        it->entry_count = -1;
        if (!*slash) {
                int pos;
                pos = subtree_pos(it, path, namelen);
                if (0 <= pos) {
                        cache_tree_free(&it->down[pos]->cache_tree);
                        free(it->down[pos]);
                        /* 0 1 2 3 4 5
                         *       ^     ^subtree_nr = 6
                         *       pos
                         * move 4 and 5 up one place (2 entries)
                         * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
                         */
                        MOVE_ARRAY(it->down + pos, it->down + pos + 1,
                                   it->subtree_nr - pos - 1);
                        it->subtree_nr--;
                }
                return 1;
        }
        down = find_subtree(it, path, namelen, 0);
        if (down)
                do_invalidate_path(down->cache_tree, slash + 1);
        return 1;
}

void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
        if (do_invalidate_path(istate->cache_tree, path))
                istate->cache_changed |= CACHE_TREE_CHANGED;
}

static int verify_cache(struct cache_entry **cache,
                        int entries, int flags)
{
        int i, funny;
        int silent = flags & WRITE_TREE_SILENT;

        /* Verify that the tree is merged */
        funny = 0;
        for (i = 0; i < entries; i++) {
                const struct cache_entry *ce = cache[i];
                if (ce_stage(ce)) {
                        if (silent)
                                return -1;
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "%s: unmerged (%s)\n",
                                ce->name, oid_to_hex(&ce->oid));
                }
        }
        if (funny)
                return -1;

        /* Also verify that the cache does not have path and path/file
         * at the same time.  At this point we know the cache has only
         * stage 0 entries.
         */
        funny = 0;
        for (i = 0; i < entries - 1; i++) {
                /* path/file always comes after path because of the way
                 * the cache is sorted.  Also path can appear only once,
                 * which means a conflicting one would immediately follow.
                 */
                const char *this_name = cache[i]->name;
                const char *next_name = cache[i+1]->name;
                int this_len = strlen(this_name);
                if (this_len < strlen(next_name) &&
                    strncmp(this_name, next_name, this_len) == 0 &&
                    next_name[this_len] == '/') {
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "You have both %s and %s\n",
                                this_name, next_name);
                }
        }
        if (funny)
                return -1;
        return 0;
}

static void discard_unused_subtrees(struct cache_tree *it)
{
        struct cache_tree_sub **down = it->down;
        int nr = it->subtree_nr;
        int dst, src;
        for (dst = src = 0; src < nr; src++) {
                struct cache_tree_sub *s = down[src];
                if (s->used)
                        down[dst++] = s;
                else {
                        cache_tree_free(&s->cache_tree);
                        free(s);
                        it->subtree_nr--;
                }
        }
}

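/*
 * Returns 1 only when this node and every subtree below it are valid
 * (entry_count >= 0) and their tree objects exist in the object store.
 */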
int cache_tree_fully_valid(struct cache_tree *it)
{
        int i;
        if (!it)
                return 0;
        if (it->entry_count < 0 || !has_object_file(&it->oid))
                return 0;
        for (i = 0; i < it->subtree_nr; i++) {
                if (!cache_tree_fully_valid(it->down[i]->cache_tree))
                        return 0;
        }
        return 1;
}

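/*
 * Recursively compute and (unless dry-running) write the tree object
 * for the level of the index covered by "base".  Returns the number of
 * index entries consumed at this level, or a negative value on error;
 * *skip_count reports how many of those entries were CE_REMOVE entries
 * that will not appear in the written tree.
 */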
static int update_one(struct cache_tree *it,
                      struct cache_entry **cache,
                      int entries,
                      const char *base,
                      int baselen,
                      int *skip_count,
                      int flags)
{
        struct strbuf buffer;
        int missing_ok = flags & WRITE_TREE_MISSING_OK;
        int dryrun = flags & WRITE_TREE_DRY_RUN;
        int repair = flags & WRITE_TREE_REPAIR;
        int to_invalidate = 0;
        int i;

        assert(!(dryrun && repair));

        *skip_count = 0;

        if (0 <= it->entry_count && has_object_file(&it->oid))
                return it->entry_count;

        /*
         * We first scan for subtrees and update them; we start by
         * marking existing subtrees -- the ones that are unmarked
         * should not be in the result.
         */
        for (i = 0; i < it->subtree_nr; i++)
                it->down[i]->used = 0;

        /*
         * Find the subtrees and update them.
         */
        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub;
                const char *path, *slash;
                int pathlen, sublen, subcnt, subskip;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (!slash) {
                        i++;
                        continue;
                }
                /*
                 * a/bbb/c (base = a/, slash = /c)
                 * ==>
                 * path+baselen = bbb/c, sublen = 3
                 */
                sublen = slash - (path + baselen);
                sub = find_subtree(it, path + baselen, sublen, 1);
                if (!sub->cache_tree)
                        sub->cache_tree = cache_tree();
                subcnt = update_one(sub->cache_tree,
                                    cache + i, entries - i,
                                    path,
                                    baselen + sublen + 1,
                                    &subskip,
                                    flags);
                if (subcnt < 0)
                        return subcnt;
                if (!subcnt)
                        die("index cache-tree records empty sub-tree");
                i += subcnt;
                sub->count = subcnt; /* to be used in the next loop */
                *skip_count += subskip;
                sub->used = 1;
        }

        discard_unused_subtrees(it);

        /*
         * Then write out the tree object for this level.
         */
        strbuf_init(&buffer, 8192);

        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub = NULL;
                const char *path, *slash;
                int pathlen, entlen;
                const struct object_id *oid;
                unsigned mode;
                int expected_missing = 0;
                int contains_ita = 0;
                int ce_missing_ok;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (slash) {
                        entlen = slash - (path + baselen);
                        sub = find_subtree(it, path + baselen, entlen, 0);
                        if (!sub)
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
                        oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        contains_ita = sub->cache_tree->entry_count < 0;
                        if (contains_ita) {
                                to_invalidate = 1;
                                expected_missing = 1;
                        }
                }
                else {
                        oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }

                ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
                        (repository_format_partial_clone &&
                         ce_skip_worktree(ce));
                if (is_null_oid(oid) ||
                    (!ce_missing_ok && !has_object_file(oid))) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
                                mode, oid_to_hex(oid), entlen+baselen, path);
                }

                /*
                 * CE_REMOVE entries are removed before the index is
                 * written to disk. Skip them to remain consistent
                 * with the future on-disk index.
                 */
                if (ce->ce_flags & CE_REMOVE) {
                        *skip_count = *skip_count + 1;
                        continue;
                }

                /*
                 * CE_INTENT_TO_ADD entries exist in the on-disk index but
                 * are not part of generated trees. Invalidate up to the
                 * root to force cache-tree users to read elsewhere.
                 */
                if (!sub && ce_intent_to_add(ce)) {
                        to_invalidate = 1;
                        continue;
                }

                /*
                 * "sub" can be an empty tree if all subentries are i-t-a.
                 */
                if (contains_ita && is_empty_tree_oid(oid))
                        continue;

                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
                strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG_CACHE_TREE
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
                        mode, entlen, path + baselen);
#endif
        }

        if (repair) {
                struct object_id oid;
                hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
                if (has_object_file(&oid))
                        oidcpy(&it->oid, &oid);
                else
                        to_invalidate = 1;
        } else if (dryrun) {
                hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
        } else if (write_object_file(buffer.buf, buffer.len, tree_type,
                                     &it->oid)) {
                strbuf_release(&buffer);
                return -1;
        }

        strbuf_release(&buffer);
        it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG_CACHE_TREE
        fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
                it->entry_count, it->subtree_nr,
                oid_to_hex(&it->oid));
#endif
        return i;
}

int cache_tree_update(struct index_state *istate, int flags)
{
        struct cache_tree *it = istate->cache_tree;
        struct cache_entry **cache = istate->cache;
        int entries = istate->cache_nr;
        int skip, i = verify_cache(cache, entries, flags);

        if (i)
                return i;
        trace_performance_enter();
        i = update_one(it, cache, entries, "", 0, &skip, flags);
        trace_performance_leave("cache_tree_update");
        if (i < 0)
                return i;
        istate->cache_changed |= CACHE_TREE_CHANGED;
        return 0;
}

static void write_one(struct strbuf *buffer, struct cache_tree *it,
                      const char *path, int pathlen)
{
        int i;

        /* One "cache-tree" entry consists of the following:
         * path (NUL terminated)
         * entry_count, subtree_nr ("%d %d\n")
         * tree-sha1 (missing if invalid)
         * subtree_nr "cache-tree" entries for subtrees.
         */
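        /*
         * For example, a valid subtree "foo" with 3 entries and no
         * subtrees of its own is written as "foo" NUL "3 0\n" followed
         * by the_hash_algo->rawsz raw hash bytes; an invalid node is
         * written with entry_count -1 and no hash.
         */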
        strbuf_grow(buffer, pathlen + 100);
        strbuf_add(buffer, path, pathlen);
        strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG_CACHE_TREE
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
                        pathlen, path, it->entry_count, it->subtree_nr,
                        oid_to_hex(&it->oid));
        else
                fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
                        pathlen, path, it->subtree_nr);
#endif

        if (0 <= it->entry_count) {
                strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
                if (i) {
                        struct cache_tree_sub *prev = it->down[i-1];
                        if (subtree_name_cmp(down->name, down->namelen,
                                             prev->name, prev->namelen) <= 0)
                                die("fatal - unsorted cache subtree");
                }
                write_one(buffer, down->cache_tree, down->name, down->namelen);
        }
}

void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
        write_one(sb, root, "", 0);
}

static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
        const char *buf = *buffer;
        unsigned long size = *size_p;
        const char *cp;
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;
        const unsigned rawsz = the_hash_algo->rawsz;

        it = NULL;
        /* skip name, but make sure name exists */
        while (size && *buf) {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        it = cache_tree();

        cp = buf;
        it->entry_count = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        cp = ep;
        subtree_nr = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        while (size && *buf && *buf != '\n') {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
                if (size < rawsz)
                        goto free_return;
                oidread(&it->oid, (const unsigned char *)buf);
                buf += rawsz;
                size -= rawsz;
        }

#if DEBUG_CACHE_TREE
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
                        *buffer, it->entry_count, subtree_nr,
                        oid_to_hex(&it->oid));
        else
                fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
                        *buffer, subtree_nr);
#endif

        /*
         * Just a heuristic -- we do not add directories that often but
         * we do not want to have to extend it immediately when we do,
         * hence +2.
         */
        it->subtree_alloc = subtree_nr + 2;
        it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
        for (i = 0; i < subtree_nr; i++) {
                /* read each subtree */
                struct cache_tree *sub;
                struct cache_tree_sub *subtree;
                const char *name = buf;

                sub = read_one(&buf, &size);
                if (!sub)
                        goto free_return;
                subtree = cache_tree_sub(it, name);
                subtree->cache_tree = sub;
        }
        if (subtree_nr != it->subtree_nr)
                die("cache-tree: internal error");
        *buffer = buf;
        *size_p = size;
        return it;

 free_return:
        cache_tree_free(&it);
        return NULL;
}

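/*
 * The serialized cache-tree starts with the root entry, whose path is
 * the empty string; a buffer that does not begin with a NUL byte
 * therefore cannot be a whole tree.
 */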
struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
        if (buffer[0])
                return NULL; /* not the whole tree */
        return read_one(&buffer, &size);
}

static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
        if (!it)
                return NULL;
        while (*path) {
                const char *slash;
                struct cache_tree_sub *sub;

                slash = strchrnul(path, '/');
                /*
                 * Between path and slash is the name of the subtree
                 * to look for.
                 */
                sub = find_subtree(it, path, slash - path, 0);
                if (!sub)
                        return NULL;
                it = sub->cache_tree;

                path = slash;
                while (*path == '/')
                        path++;
        }
        return it;
}

int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
        int entries, was_valid;
        struct lock_file lock_file = LOCK_INIT;
        int ret = 0;

        hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

        entries = read_index_from(index_state, index_path, get_git_dir());
        if (entries < 0) {
                ret = WRITE_TREE_UNREADABLE_INDEX;
                goto out;
        }
        if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
                cache_tree_free(&index_state->cache_tree);

        if (!index_state->cache_tree)
                index_state->cache_tree = cache_tree();

        was_valid = cache_tree_fully_valid(index_state->cache_tree);
        if (!was_valid) {
                if (cache_tree_update(index_state, flags) < 0) {
                        ret = WRITE_TREE_UNMERGED_INDEX;
                        goto out;
                }
                write_locked_index(index_state, &lock_file, COMMIT_LOCK);
                /* Not being able to write is fine -- we are only interested
                 * in updating the cache-tree part, and if the next caller
                 * ends up using the old index with an unupdated cache-tree
                 * part, it misses the work we did here, but that is just a
                 * performance penalty and not a big deal.
                 */
        }

        if (prefix) {
                struct cache_tree *subtree;
                subtree = cache_tree_find(index_state->cache_tree, prefix);
                if (!subtree) {
                        ret = WRITE_TREE_PREFIX_ERROR;
                        goto out;
                }
                oidcpy(oid, &subtree->oid);
        }
        else
                oidcpy(oid, &index_state->cache_tree->oid);

out:
        rollback_lock_file(&lock_file);
        return ret;
}

static void prime_cache_tree_rec(struct repository *r,
                                 struct cache_tree *it,
                                 struct tree *tree)
{
        struct tree_desc desc;
        struct name_entry entry;
        int cnt;

        oidcpy(&it->oid, &tree->object.oid);
        init_tree_desc(&desc, tree->buffer, tree->size);
        cnt = 0;
        while (tree_entry(&desc, &entry)) {
                if (!S_ISDIR(entry.mode))
                        cnt++;
                else {
                        struct cache_tree_sub *sub;
                        struct tree *subtree = lookup_tree(r, &entry.oid);
                        if (!subtree->object.parsed)
                                parse_tree(subtree);
                        sub = cache_tree_sub(it, entry.path);
                        sub->cache_tree = cache_tree();
                        prime_cache_tree_rec(r, sub->cache_tree, subtree);
                        cnt += sub->cache_tree->entry_count;
                }
        }
        it->entry_count = cnt;
}

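/*
 * Throw away whatever cache-tree the index had and rebuild it from the
 * given tree object, so that every node comes out valid.
 */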
void prime_cache_tree(struct repository *r,
                      struct index_state *istate,
                      struct tree *tree)
{
        cache_tree_free(&istate->cache_tree);
        istate->cache_tree = cache_tree();
        prime_cache_tree_rec(r, istate->cache_tree, tree);
        istate->cache_changed |= CACHE_TREE_CHANGED;
}

/*
 * find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
                                                         struct traverse_info *info)
{
        struct cache_tree *our_parent;

        if (!info->prev)
                return root;
        our_parent = find_cache_tree_from_traversal(root, info->prev);
        return cache_tree_find(our_parent, info->name.path);
}

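/*
 * Returns the number of index entries the matching cache-tree node
 * covers when its recorded tree matches "ent", and 0 when there is no
 * usable match.
 */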
int cache_tree_matches_traversal(struct cache_tree *root,
                                 struct name_entry *ent,
                                 struct traverse_info *info)
{
        struct cache_tree *it;

        it = find_cache_tree_from_traversal(root, info);
        it = cache_tree_find(it, ent->path);
        if (it && it->entry_count > 0 && oideq(&ent->oid, &it->oid))
                return it->entry_count;
        return 0;
}

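/*
 * Recompute the tree object content for each valid cache-tree node
 * from the index entries it claims to cover, and BUG() out if the
 * result does not hash to the recorded object name.
 */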
static void verify_one(struct repository *r,
                       struct index_state *istate,
                       struct cache_tree *it,
                       struct strbuf *path)
{
        int i, pos, len = path->len;
        struct strbuf tree_buf = STRBUF_INIT;
        struct object_id new_oid;

        for (i = 0; i < it->subtree_nr; i++) {
                strbuf_addf(path, "%s/", it->down[i]->name);
                verify_one(r, istate, it->down[i]->cache_tree, path);
                strbuf_setlen(path, len);
        }

        if (it->entry_count < 0 ||
            /* no verification on tests (t7003) that replace trees */
            lookup_replace_object(r, &it->oid) != &it->oid)
                return;

        if (path->len) {
                pos = index_name_pos(istate, path->buf, path->len);
                pos = -pos - 1;
        } else {
                pos = 0;
        }

        i = 0;
        while (i < it->entry_count) {
                struct cache_entry *ce = istate->cache[pos + i];
                const char *slash;
                struct cache_tree_sub *sub = NULL;
                const struct object_id *oid;
                const char *name;
                unsigned mode;
                int entlen;

                if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
                        BUG("%s with flags 0x%x should not be in cache-tree",
                            ce->name, ce->ce_flags);
                name = ce->name + path->len;
                slash = strchr(name, '/');
                if (slash) {
                        entlen = slash - name;
                        sub = find_subtree(it, ce->name + path->len, entlen, 0);
                        if (!sub || sub->cache_tree->entry_count < 0)
                                BUG("bad subtree '%.*s'", entlen, name);
                        oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        i += sub->cache_tree->entry_count;
                } else {
                        oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = ce_namelen(ce) - path->len;
                        i++;
                }
                strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
                strbuf_add(&tree_buf, oid->hash, the_hash_algo->rawsz);
        }
        hash_object_file(tree_buf.buf, tree_buf.len, tree_type, &new_oid);
        if (!oideq(&new_oid, &it->oid))
                BUG("cache-tree for path %.*s does not match. "
                    "Expected %s got %s", len, path->buf,
                    oid_to_hex(&new_oid), oid_to_hex(&it->oid));
        strbuf_setlen(path, len);
        strbuf_release(&tree_buf);
}

void cache_tree_verify(struct repository *r, struct index_state *istate)
{
        struct strbuf path = STRBUF_INIT;

        if (!istate->cache_tree)
                return;
        verify_one(r, istate, istate->cache_tree, &path);
        strbuf_release(&path);
}