cache-tree.c
#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"

#ifndef DEBUG
#define DEBUG 0
#endif

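/*
 * Allocate a new, empty cache_tree node; entry_count of -1 marks it
 * as "invalid", i.e. its tree object has not been computed yet.
 */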
struct cache_tree *cache_tree(void)
{
        struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
        it->entry_count = -1;
        return it;
}

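/*
 * Recursively free a cache_tree and all of its subtrees, then clear
 * the caller's pointer.
 */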
void cache_tree_free(struct cache_tree **it_p)
{
        int i;
        struct cache_tree *it = *it_p;

        if (!it)
                return;
        for (i = 0; i < it->subtree_nr; i++)
                if (it->down[i]) {
                        cache_tree_free(&it->down[i]->cache_tree);
                        free(it->down[i]);
                }
        free(it->down);
        free(it);
        *it_p = NULL;
}

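/*
 * Order subtree names first by length, then byte-wise; this is the
 * order in which the "down" array is kept sorted.
 */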
static int subtree_name_cmp(const char *one, int onelen,
                            const char *two, int twolen)
{
        if (onelen < twolen)
                return -1;
        if (twolen < onelen)
                return 1;
        return memcmp(one, two, onelen);
}

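/*
 * Binary-search it->down[] for "path"; return its index when found,
 * or -insertion_point-1 when not, which is the convention relied on
 * by find_subtree() below.
 */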
static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
        struct cache_tree_sub **down = it->down;
        int lo, hi;
        lo = 0;
        hi = it->subtree_nr;
        while (lo < hi) {
                int mi = (lo + hi) / 2;
                struct cache_tree_sub *mdl = down[mi];
                int cmp = subtree_name_cmp(path, pathlen,
                                           mdl->name, mdl->namelen);
                if (!cmp)
                        return mi;
                if (cmp < 0)
                        hi = mi;
                else
                        lo = mi + 1;
        }
        return -lo-1;
}

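/*
 * Look up the immediate subtree named "path" (of length "pathlen");
 * when "create" is set, insert a new entry at its sorted position
 * instead of returning NULL.
 */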
static struct cache_tree_sub *find_subtree(struct cache_tree *it,
                                           const char *path,
                                           int pathlen,
                                           int create)
{
        struct cache_tree_sub *down;
        int pos = subtree_pos(it, path, pathlen);
        if (0 <= pos)
                return it->down[pos];
        if (!create)
                return NULL;

        pos = -pos-1;
        ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
        it->subtree_nr++;

        down = xmalloc(sizeof(*down) + pathlen + 1);
        down->cache_tree = NULL;
        down->namelen = pathlen;
        memcpy(down->name, path, pathlen);
        down->name[pathlen] = 0;

        if (pos < it->subtree_nr)
                memmove(it->down + pos + 1,
                        it->down + pos,
                        sizeof(down) * (it->subtree_nr - pos - 1));
        it->down[pos] = down;
        return down;
}

struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
        int pathlen = strlen(path);
        return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
        /* a/b/c
         * ==> invalidate self
         * ==> find "a", have it invalidate "b/c"
         * a
         * ==> invalidate self
         * ==> if "a" exists as a subtree, remove it.
         */
        const char *slash;
        int namelen;
        struct cache_tree_sub *down;

#if DEBUG
        fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

        if (!it)
                return 0;
        slash = strchrnul(path, '/');
        namelen = slash - path;
        it->entry_count = -1;
        if (!*slash) {
                int pos;
                pos = subtree_pos(it, path, namelen);
                if (0 <= pos) {
                        cache_tree_free(&it->down[pos]->cache_tree);
                        free(it->down[pos]);
                        /* 0 1 2 3 4 5
                         *       ^     ^subtree_nr = 6
                         *       pos
                         * move 4 and 5 up one place (2 entries)
                         * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
                         */
                        memmove(it->down+pos, it->down+pos+1,
                                sizeof(struct cache_tree_sub *) *
                                (it->subtree_nr - pos - 1));
                        it->subtree_nr--;
                }
                return 1;
        }
        down = find_subtree(it, path, namelen, 0);
        if (down)
                do_invalidate_path(down->cache_tree, slash + 1);
        return 1;
}

void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
        if (do_invalidate_path(istate->cache_tree, path))
                istate->cache_changed |= CACHE_TREE_CHANGED;
}

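/*
 * Sanity-check the index before writing trees from it: refuse
 * unmerged (higher-stage) entries and entries where both "path" and
 * "path/file" are present.  Returns 0 on success, -1 on failure.
 */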
static int verify_cache(struct cache_entry **cache,
                        int entries, int flags)
{
        int i, funny;
        int silent = flags & WRITE_TREE_SILENT;

        /* Verify that the tree is merged */
        funny = 0;
        for (i = 0; i < entries; i++) {
                const struct cache_entry *ce = cache[i];
                if (ce_stage(ce)) {
                        if (silent)
                                return -1;
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "%s: unmerged (%s)\n",
                                ce->name, sha1_to_hex(ce->sha1));
                }
        }
        if (funny)
                return -1;

        /* Also verify that the cache does not have path and path/file
         * at the same time.  At this point we know the cache has only
         * stage 0 entries.
         */
        funny = 0;
        for (i = 0; i < entries - 1; i++) {
                /* path/file always comes after path because of the way
                 * the cache is sorted.  Also, path can appear only once,
                 * which means a conflicting path/file entry would
                 * immediately follow it.
                 */
                const char *this_name = cache[i]->name;
                const char *next_name = cache[i+1]->name;
                int this_len = strlen(this_name);
                if (this_len < strlen(next_name) &&
                    strncmp(this_name, next_name, this_len) == 0 &&
                    next_name[this_len] == '/') {
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "You have both %s and %s\n",
                                this_name, next_name);
                }
        }
        if (funny)
                return -1;
        return 0;
}

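/*
 * Drop the subtrees whose "used" flag was not set by the caller,
 * compacting the "down" array in place.
 */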
static void discard_unused_subtrees(struct cache_tree *it)
{
        struct cache_tree_sub **down = it->down;
        int nr = it->subtree_nr;
        int dst, src;
        for (dst = src = 0; src < nr; src++) {
                struct cache_tree_sub *s = down[src];
                if (s->used)
                        down[dst++] = s;
                else {
                        cache_tree_free(&s->cache_tree);
                        free(s);
                        it->subtree_nr--;
                }
        }
}

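/*
 * A cache_tree is "fully valid" when it and all of its subtrees have
 * a non-negative entry_count and their tree objects exist in the
 * object database.
 */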
int cache_tree_fully_valid(struct cache_tree *it)
{
        int i;
        if (!it)
                return 0;
        if (it->entry_count < 0 || !has_sha1_file(it->sha1))
                return 0;
        for (i = 0; i < it->subtree_nr; i++) {
                if (!cache_tree_fully_valid(it->down[i]->cache_tree))
                        return 0;
        }
        return 1;
}

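/*
 * Recompute the tree object for one level of the cache-tree from the
 * index entries under "base" (of length "baselen"), recursing into
 * subtrees first.  Returns the number of index entries this level
 * covers, or a negative value on error; "*skip_count" reports how
 * many of those entries were skipped (e.g. CE_REMOVE entries) and so
 * do not appear in the written tree.
 */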
static int update_one(struct cache_tree *it,
                      struct cache_entry **cache,
                      int entries,
                      const char *base,
                      int baselen,
                      int *skip_count,
                      int flags)
{
        struct strbuf buffer;
        int missing_ok = flags & WRITE_TREE_MISSING_OK;
        int dryrun = flags & WRITE_TREE_DRY_RUN;
        int repair = flags & WRITE_TREE_REPAIR;
        int to_invalidate = 0;
        int i;

        assert(!(dryrun && repair));

        *skip_count = 0;

        if (0 <= it->entry_count && has_sha1_file(it->sha1))
                return it->entry_count;

        /*
         * We first scan for subtrees and update them; we start by
         * marking existing subtrees -- the ones that are unmarked
         * should not be in the result.
         */
        for (i = 0; i < it->subtree_nr; i++)
                it->down[i]->used = 0;

        /*
         * Find the subtrees and update them.
         */
        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub;
                const char *path, *slash;
                int pathlen, sublen, subcnt, subskip;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (!slash) {
                        i++;
                        continue;
                }
                /*
                 * a/bbb/c (base = a/, slash = /c)
                 * ==>
                 * path+baselen = bbb/c, sublen = 3
                 */
                sublen = slash - (path + baselen);
                sub = find_subtree(it, path + baselen, sublen, 1);
                if (!sub->cache_tree)
                        sub->cache_tree = cache_tree();
                subcnt = update_one(sub->cache_tree,
                                    cache + i, entries - i,
                                    path,
                                    baselen + sublen + 1,
                                    &subskip,
                                    flags);
                if (subcnt < 0)
                        return subcnt;
                i += subcnt;
                sub->count = subcnt; /* to be used in the next loop */
                *skip_count += subskip;
                sub->used = 1;
        }

        discard_unused_subtrees(it);

        /*
         * Then write out the tree object for this level.
         */
        strbuf_init(&buffer, 8192);

        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub;
                const char *path, *slash;
                int pathlen, entlen;
                const unsigned char *sha1;
                unsigned mode;
                int expected_missing = 0;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (slash) {
                        entlen = slash - (path + baselen);
                        sub = find_subtree(it, path + baselen, entlen, 0);
                        if (!sub)
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
                        sha1 = sub->cache_tree->sha1;
                        mode = S_IFDIR;
                        if (sub->cache_tree->entry_count < 0) {
                                to_invalidate = 1;
                                expected_missing = 1;
                        }
                }
                else {
                        sha1 = ce->sha1;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }
                if (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1)) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
                                mode, sha1_to_hex(sha1), entlen+baselen, path);
                }

                /*
                 * CE_REMOVE entries are removed before the index is
                 * written to disk. Skip them to remain consistent
                 * with the future on-disk index.
                 */
                if (ce->ce_flags & CE_REMOVE) {
                        *skip_count = *skip_count + 1;
                        continue;
                }

                /*
                 * CE_INTENT_TO_ADD entries exist in the on-disk index
                 * but are not part of generated trees. Invalidate up
                 * to the root to force cache-tree users to read elsewhere.
                 */
                if (ce->ce_flags & CE_INTENT_TO_ADD) {
                        to_invalidate = 1;
                        continue;
                }

                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
                strbuf_add(&buffer, sha1, 20);

#if DEBUG
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
                        mode, entlen, path + baselen);
#endif
        }

        if (repair) {
                unsigned char sha1[20];
                hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
                if (has_sha1_file(sha1))
                        hashcpy(it->sha1, sha1);
                else
                        to_invalidate = 1;
        } else if (dryrun)
                hash_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
        else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1)) {
                strbuf_release(&buffer);
                return -1;
        }

        strbuf_release(&buffer);
        it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG
        fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
                it->entry_count, it->subtree_nr,
                sha1_to_hex(it->sha1));
#endif
        return i;
}

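/*
 * Validate the index and (re)compute the cache-tree for the whole
 * index, marking the in-core index as changed.  Returns 0 on success
 * or a negative value when the index cannot be written as a tree.
 */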
int cache_tree_update(struct index_state *istate, int flags)
{
        struct cache_tree *it = istate->cache_tree;
        struct cache_entry **cache = istate->cache;
        int entries = istate->cache_nr;
        int skip, i = verify_cache(cache, entries, flags);

        if (i)
                return i;
        i = update_one(it, cache, entries, "", 0, &skip, flags);
        if (i < 0)
                return i;
        istate->cache_changed |= CACHE_TREE_CHANGED;
        return 0;
}

static void write_one(struct strbuf *buffer, struct cache_tree *it,
                      const char *path, int pathlen)
{
        int i;

        /* One "cache-tree" entry consists of the following:
         * path (NUL terminated)
         * entry_count, subtree_nr ("%d %d\n")
         * tree-sha1 (missing if invalid)
         * subtree_nr "cache-tree" entries for subtrees.
         */
        strbuf_grow(buffer, pathlen + 100);
        strbuf_add(buffer, path, pathlen);
        strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
                        pathlen, path, it->entry_count, it->subtree_nr,
                        sha1_to_hex(it->sha1));
        else
                fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
                        pathlen, path, it->subtree_nr);
#endif

        if (0 <= it->entry_count) {
                strbuf_add(buffer, it->sha1, 20);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
                if (i) {
                        struct cache_tree_sub *prev = it->down[i-1];
                        if (subtree_name_cmp(down->name, down->namelen,
                                             prev->name, prev->namelen) <= 0)
                                die("fatal - unsorted cache subtree");
                }
                write_one(buffer, down->cache_tree, down->name, down->namelen);
        }
}

void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
        write_one(sb, root, "", 0);
}

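/*
 * Parse one "cache-tree" entry (in the on-disk format described in
 * write_one() above) from *buffer, recursing into its subtrees, and
 * advance *buffer and *size_p past what was consumed.  Returns NULL
 * on truncated or malformed input.
 */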
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
        const char *buf = *buffer;
        unsigned long size = *size_p;
        const char *cp;
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;

        it = NULL;
        /* skip name, but make sure name exists */
        while (size && *buf) {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        it = cache_tree();

        cp = buf;
        it->entry_count = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        cp = ep;
        subtree_nr = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        while (size && *buf && *buf != '\n') {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
                if (size < 20)
                        goto free_return;
                hashcpy(it->sha1, (const unsigned char*)buf);
                buf += 20;
                size -= 20;
        }

#if DEBUG
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
                        *buffer, it->entry_count, subtree_nr,
                        sha1_to_hex(it->sha1));
        else
                fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
                        *buffer, subtree_nr);
#endif

        /*
         * Just a heuristic -- we do not add directories that often but
         * we do not want to have to extend it immediately when we do,
         * hence +2.
         */
        it->subtree_alloc = subtree_nr + 2;
        it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
        for (i = 0; i < subtree_nr; i++) {
                /* read each subtree */
                struct cache_tree *sub;
                struct cache_tree_sub *subtree;
                const char *name = buf;

                sub = read_one(&buf, &size);
                if (!sub)
                        goto free_return;
                subtree = cache_tree_sub(it, name);
                subtree->cache_tree = sub;
        }
        if (subtree_nr != it->subtree_nr)
                die("cache-tree: internal error");
        *buffer = buf;
        *size_p = size;
        return it;

 free_return:
        cache_tree_free(&it);
        return NULL;
}

struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
        if (buffer[0])
                return NULL; /* not the whole tree */
        return read_one(&buffer, &size);
}

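/*
 * Walk down from "it" following the components of "path" (skipping
 * any repeated slashes) and return the cache_tree for that
 * directory, or NULL if some component has no cached subtree.
 */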
static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
        if (!it)
                return NULL;
        while (*path) {
                const char *slash;
                struct cache_tree_sub *sub;

                slash = strchrnul(path, '/');
                /*
                 * Between path and slash is the name of the subtree
                 * to look for.
                 */
                sub = find_subtree(it, path, slash - path, 0);
                if (!sub)
                        return NULL;
                it = sub->cache_tree;

                path = slash;
                while (*path == '/')
                        path++;
        }
        return it;
}

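/*
 * Read the index, make sure its cache-tree is up to date (writing
 * the refreshed index back when possible), and report the tree
 * object name for the whole index, or for "prefix" when one is
 * given, through "sha1".  Returns 0 on success or one of the
 * WRITE_TREE_* error codes.
 */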
int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
{
        int entries, was_valid, newfd;
        struct lock_file *lock_file;

        /*
         * We can't free this memory: it becomes part of a linked list
         * that is walked by an atexit() handler.
         */
        lock_file = xcalloc(1, sizeof(struct lock_file));

        newfd = hold_locked_index(lock_file, 1);

        entries = read_cache();
        if (entries < 0)
                return WRITE_TREE_UNREADABLE_INDEX;
        if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
                cache_tree_free(&(active_cache_tree));

        if (!active_cache_tree)
                active_cache_tree = cache_tree();

        was_valid = cache_tree_fully_valid(active_cache_tree);
        if (!was_valid) {
                if (cache_tree_update(&the_index, flags) < 0)
                        return WRITE_TREE_UNMERGED_INDEX;
                if (0 <= newfd) {
                        if (!write_locked_index(&the_index, lock_file, COMMIT_LOCK))
                                newfd = -1;
                }
                /* Not being able to write is fine -- we are only interested
                 * in updating the cache-tree part, and if the next caller
                 * ends up using the old index with an unupdated cache-tree
                 * part it misses the work we did here, but that is just a
                 * performance penalty and not a big deal.
                 */
        }

        if (prefix) {
                struct cache_tree *subtree =
                        cache_tree_find(active_cache_tree, prefix);
                if (!subtree)
                        return WRITE_TREE_PREFIX_ERROR;
                hashcpy(sha1, subtree->sha1);
        }
        else
                hashcpy(sha1, active_cache_tree->sha1);

        if (0 <= newfd)
                rollback_lock_file(lock_file);

        return 0;
}

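/*
 * Build a fully-valid cache_tree directly from an existing tree
 * object: record each level's object name and entry count, recursing
 * into subdirectories.  prime_cache_tree() below replaces the
 * index's cache-tree with the result.
 */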
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
        struct tree_desc desc;
        struct name_entry entry;
        int cnt;

        hashcpy(it->sha1, tree->object.sha1);
        init_tree_desc(&desc, tree->buffer, tree->size);
        cnt = 0;
        while (tree_entry(&desc, &entry)) {
                if (!S_ISDIR(entry.mode))
                        cnt++;
                else {
                        struct cache_tree_sub *sub;
                        struct tree *subtree = lookup_tree(entry.sha1);
                        if (!subtree->object.parsed)
                                parse_tree(subtree);
                        sub = cache_tree_sub(it, entry.path);
                        sub->cache_tree = cache_tree();
                        prime_cache_tree_rec(sub->cache_tree, subtree);
                        cnt += sub->cache_tree->entry_count;
                }
        }
        it->entry_count = cnt;
}

void prime_cache_tree(struct index_state *istate, struct tree *tree)
{
        cache_tree_free(&istate->cache_tree);
        istate->cache_tree = cache_tree();
        prime_cache_tree_rec(istate->cache_tree, tree);
        istate->cache_changed |= CACHE_TREE_CHANGED;
}

/*
 * Find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
                                                         struct traverse_info *info)
{
        struct cache_tree *our_parent;

        if (!info->prev)
                return root;
        our_parent = find_cache_tree_from_traversal(root, info->prev);
        return cache_tree_find(our_parent, info->name.path);
}

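/*
 * During a tree walk, check whether the cache-tree already records a
 * subdirectory identical to "ent"; if so, return the number of index
 * entries that subtree covers (so the caller may skip descending
 * into it), and return 0 otherwise.
 */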
int cache_tree_matches_traversal(struct cache_tree *root,
                                 struct name_entry *ent,
                                 struct traverse_info *info)
{
        struct cache_tree *it;

        it = find_cache_tree_from_traversal(root, info);
        it = cache_tree_find(it, ent->path);
        if (it && it->entry_count > 0 && !hashcmp(ent->sha1, it->sha1))
                return it->entry_count;
        return 0;
}

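/*
 * Convenience wrapper: make sure the_index has a cache_tree and
 * bring it up to date.
 */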
int update_main_cache_tree(int flags)
{
        if (!the_index.cache_tree)
                the_index.cache_tree = cache_tree();
        return cache_tree_update(&the_index, flags);
}