#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "object-store.h"

#ifndef DEBUG
#define DEBUG 0
#endif

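/*
 * Allocate an empty cache_tree node; entry_count of -1 marks it as
 * invalid, i.e. not (yet) backed by a known tree object.
 */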
struct cache_tree *cache_tree(void)
{
        struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
        it->entry_count = -1;
        return it;
}

void cache_tree_free(struct cache_tree **it_p)
{
        int i;
        struct cache_tree *it = *it_p;

        if (!it)
                return;
        for (i = 0; i < it->subtree_nr; i++)
                if (it->down[i]) {
                        cache_tree_free(&it->down[i]->cache_tree);
                        free(it->down[i]);
                }
        free(it->down);
        free(it);
        *it_p = NULL;
}

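/*
 * Ordering used for the sorted down[] array: shorter names sort
 * before longer ones, and names of equal length are compared
 * byte-wise with memcmp().
 */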
static int subtree_name_cmp(const char *one, int onelen,
                            const char *two, int twolen)
{
        if (onelen < twolen)
                return -1;
        if (twolen < onelen)
                return 1;
        return memcmp(one, two, onelen);
}

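/*
 * Binary-search it->down[] for "path".  Returns the index when found;
 * otherwise returns -insertion_pos-1 so the caller can tell where a
 * new entry would have to go.
 */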
static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
        struct cache_tree_sub **down = it->down;
        int lo, hi;
        lo = 0;
        hi = it->subtree_nr;
        while (lo < hi) {
                int mi = lo + (hi - lo) / 2;
                struct cache_tree_sub *mdl = down[mi];
                int cmp = subtree_name_cmp(path, pathlen,
                                           mdl->name, mdl->namelen);
                if (!cmp)
                        return mi;
                if (cmp < 0)
                        hi = mi;
                else
                        lo = mi + 1;
        }
        return -lo-1;
}

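/*
 * Look up the immediate subtree named by path[0..pathlen]; when
 * "create" is set, insert a new cache_tree_sub at the sorted position
 * reported by subtree_pos() instead of returning NULL.
 */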
static struct cache_tree_sub *find_subtree(struct cache_tree *it,
                                           const char *path,
                                           int pathlen,
                                           int create)
{
        struct cache_tree_sub *down;
        int pos = subtree_pos(it, path, pathlen);
        if (0 <= pos)
                return it->down[pos];
        if (!create)
                return NULL;

        pos = -pos-1;
        ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
        it->subtree_nr++;

        FLEX_ALLOC_MEM(down, name, path, pathlen);
        down->cache_tree = NULL;
        down->namelen = pathlen;

        if (pos < it->subtree_nr)
                MOVE_ARRAY(it->down + pos + 1, it->down + pos,
                           it->subtree_nr - pos - 1);
        it->down[pos] = down;
        return down;
}

struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
        int pathlen = strlen(path);
        return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
        /* a/b/c
         * ==> invalidate self
         * ==> find "a", have it invalidate "b/c"
         * a
         * ==> invalidate self
         * ==> if "a" exists as a subtree, remove it.
         */
        const char *slash;
        int namelen;
        struct cache_tree_sub *down;

#if DEBUG
        fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

        if (!it)
                return 0;
        slash = strchrnul(path, '/');
        namelen = slash - path;
        it->entry_count = -1;
        if (!*slash) {
                int pos;
                pos = subtree_pos(it, path, namelen);
                if (0 <= pos) {
                        cache_tree_free(&it->down[pos]->cache_tree);
                        free(it->down[pos]);
                        /* 0 1 2 3 4 5
                         *       ^     ^subtree_nr = 6
                         *       pos
                         * move 4 and 5 up one place (2 entries)
                         * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
                         */
                        MOVE_ARRAY(it->down + pos, it->down + pos + 1,
                                   it->subtree_nr - pos - 1);
                        it->subtree_nr--;
                }
                return 1;
        }
        down = find_subtree(it, path, namelen, 0);
        if (down)
                do_invalidate_path(down->cache_tree, slash + 1);
        return 1;
}

void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
        if (do_invalidate_path(istate->cache_tree, path))
                istate->cache_changed |= CACHE_TREE_CHANGED;
}

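/*
 * Trees cannot be written out for an index that still has unmerged
 * (higher-stage) entries or that contains both "path" and "path/file";
 * report such problems and return -1 so the caller can refuse to
 * write trees from it.
 */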
static int verify_cache(struct cache_entry **cache,
                        int entries, int flags)
{
        int i, funny;
        int silent = flags & WRITE_TREE_SILENT;

        /* Verify that the tree is merged */
        funny = 0;
        for (i = 0; i < entries; i++) {
                const struct cache_entry *ce = cache[i];
                if (ce_stage(ce)) {
                        if (silent)
                                return -1;
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "%s: unmerged (%s)\n",
                                ce->name, oid_to_hex(&ce->oid));
                }
        }
        if (funny)
                return -1;

        /* Also verify that the cache does not have path and path/file
         * at the same time.  At this point we know the cache has only
         * stage 0 entries.
         */
        funny = 0;
        for (i = 0; i < entries - 1; i++) {
                /* path/file always comes after path because of the way
                 * the cache is sorted.  Also path can appear only once,
                 * which means conflicting one would immediately follow.
                 */
                const char *this_name = cache[i]->name;
                const char *next_name = cache[i+1]->name;
                int this_len = strlen(this_name);
                if (this_len < strlen(next_name) &&
                    strncmp(this_name, next_name, this_len) == 0 &&
                    next_name[this_len] == '/') {
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
                        fprintf(stderr, "You have both %s and %s\n",
                                this_name, next_name);
                }
        }
        if (funny)
                return -1;
        return 0;
}

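/*
 * Compact it->down[] in place, freeing every subtree whose "used" bit
 * was not set by the caller's scan over the index.
 */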
static void discard_unused_subtrees(struct cache_tree *it)
{
        struct cache_tree_sub **down = it->down;
        int nr = it->subtree_nr;
        int dst, src;
        for (dst = src = 0; src < nr; src++) {
                struct cache_tree_sub *s = down[src];
                if (s->used)
                        down[dst++] = s;
                else {
                        cache_tree_free(&s->cache_tree);
                        free(s);
                        it->subtree_nr--;
                }
        }
}

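/*
 * A cache_tree is "fully valid" when it and all of its subtrees are
 * valid and their tree objects actually exist in the object store.
 */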
int cache_tree_fully_valid(struct cache_tree *it)
{
        int i;
        if (!it)
                return 0;
        if (it->entry_count < 0 || !has_sha1_file(it->oid.hash))
                return 0;
        for (i = 0; i < it->subtree_nr; i++) {
                if (!cache_tree_fully_valid(it->down[i]->cache_tree))
                        return 0;
        }
        return 1;
}

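/*
 * Recursively compute and (unless dry-running) write the tree object
 * for the index entries under "base".  Returns the number of index
 * entries covered by this level, or a negative value on error;
 * *skip_count reports how many of them were CE_REMOVE entries that
 * will not appear in the written trees.
 */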
static int update_one(struct cache_tree *it,
                      struct cache_entry **cache,
                      int entries,
                      const char *base,
                      int baselen,
                      int *skip_count,
                      int flags)
{
        struct strbuf buffer;
        int missing_ok = flags & WRITE_TREE_MISSING_OK;
        int dryrun = flags & WRITE_TREE_DRY_RUN;
        int repair = flags & WRITE_TREE_REPAIR;
        int to_invalidate = 0;
        int i;

        assert(!(dryrun && repair));

        *skip_count = 0;

        if (0 <= it->entry_count && has_sha1_file(it->oid.hash))
                return it->entry_count;

        /*
         * We first scan for subtrees and update them; we start by
         * marking existing subtrees -- the ones that are unmarked
         * should not be in the result.
         */
        for (i = 0; i < it->subtree_nr; i++)
                it->down[i]->used = 0;

        /*
         * Find the subtrees and update them.
         */
        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub;
                const char *path, *slash;
                int pathlen, sublen, subcnt, subskip;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (!slash) {
                        i++;
                        continue;
                }
                /*
                 * a/bbb/c (base = a/, slash = /c)
                 * ==>
                 * path+baselen = bbb/c, sublen = 3
                 */
                sublen = slash - (path + baselen);
                sub = find_subtree(it, path + baselen, sublen, 1);
                if (!sub->cache_tree)
                        sub->cache_tree = cache_tree();
                subcnt = update_one(sub->cache_tree,
                                    cache + i, entries - i,
                                    path,
                                    baselen + sublen + 1,
                                    &subskip,
                                    flags);
                if (subcnt < 0)
                        return subcnt;
                if (!subcnt)
                        die("index cache-tree records empty sub-tree");
                i += subcnt;
                sub->count = subcnt; /* to be used in the next loop */
                *skip_count += subskip;
                sub->used = 1;
        }

        discard_unused_subtrees(it);

        /*
         * Then write out the tree object for this level.
         */
        strbuf_init(&buffer, 8192);

        i = 0;
        while (i < entries) {
                const struct cache_entry *ce = cache[i];
                struct cache_tree_sub *sub = NULL;
                const char *path, *slash;
                int pathlen, entlen;
                const struct object_id *oid;
                unsigned mode;
                int expected_missing = 0;
                int contains_ita = 0;

                path = ce->name;
                pathlen = ce_namelen(ce);
                if (pathlen <= baselen || memcmp(base, path, baselen))
                        break; /* at the end of this level */

                slash = strchr(path + baselen, '/');
                if (slash) {
                        entlen = slash - (path + baselen);
                        sub = find_subtree(it, path + baselen, entlen, 0);
                        if (!sub)
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
                        oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        contains_ita = sub->cache_tree->entry_count < 0;
                        if (contains_ita) {
                                to_invalidate = 1;
                                expected_missing = 1;
                        }
                }
                else {
                        oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }

                if (is_null_oid(oid) ||
                    (mode != S_IFGITLINK && !missing_ok && !has_object_file(oid))) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
                                mode, oid_to_hex(oid), entlen+baselen, path);
                }

                /*
                 * CE_REMOVE entries are removed before the index is
                 * written to disk. Skip them to remain consistent
                 * with the future on-disk index.
                 */
                if (ce->ce_flags & CE_REMOVE) {
                        *skip_count = *skip_count + 1;
                        continue;
                }

                /*
                 * CE_INTENT_TO_ADD entries exist in the on-disk index but
                 * they are not part of generated trees. Invalidate up
                 * to the root to force cache-tree users to read elsewhere.
                 */
                if (!sub && ce_intent_to_add(ce)) {
                        to_invalidate = 1;
                        continue;
                }

                /*
                 * "sub" can be an empty tree if all subentries are i-t-a.
                 */
                if (contains_ita && is_empty_tree_oid(oid))
                        continue;

                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
                strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
                        mode, entlen, path + baselen);
#endif
        }

        if (repair) {
                struct object_id oid;
                hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
                if (has_object_file(&oid))
                        oidcpy(&it->oid, &oid);
                else
                        to_invalidate = 1;
        } else if (dryrun) {
                hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
        } else if (write_object_file(buffer.buf, buffer.len, tree_type,
                                     &it->oid)) {
                strbuf_release(&buffer);
                return -1;
        }

        strbuf_release(&buffer);
        it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG
        fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
                it->entry_count, it->subtree_nr,
                oid_to_hex(&it->oid));
#endif
        return i;
}

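/*
 * Refresh the cache-tree for the whole index: verify that the index
 * is in a state that can be written as trees, recompute any invalid
 * cache_tree nodes, and mark the index as changed.  Returns 0 on
 * success and a negative value on error.
 */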
int cache_tree_update(struct index_state *istate, int flags)
{
        struct cache_tree *it = istate->cache_tree;
        struct cache_entry **cache = istate->cache;
        int entries = istate->cache_nr;
        int skip, i = verify_cache(cache, entries, flags);

        if (i)
                return i;
        i = update_one(it, cache, entries, "", 0, &skip, flags);
        if (i < 0)
                return i;
        istate->cache_changed |= CACHE_TREE_CHANGED;
        return 0;
}

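/*
 * Serialize one cache_tree node, followed recursively by its subtrees,
 * in the index "TREE" extension format described below.  As a rough
 * illustration, a valid root with a single subtree "dir" holding one
 * file could serialize as:
 *
 *   "" NUL "2 1\n" <raw root hash> "dir" NUL "1 0\n" <raw subtree hash>
 */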
static void write_one(struct strbuf *buffer, struct cache_tree *it,
                      const char *path, int pathlen)
{
        int i;

        /* One "cache-tree" entry consists of the following:
         * path (NUL terminated)
         * entry_count, subtree_nr ("%d %d\n")
         * tree-sha1 (missing if invalid)
         * subtree_nr "cache-tree" entries for subtrees.
         */
        strbuf_grow(buffer, pathlen + 100);
        strbuf_add(buffer, path, pathlen);
        strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
                        pathlen, path, it->entry_count, it->subtree_nr,
                        oid_to_hex(&it->oid));
        else
                fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
                        pathlen, path, it->subtree_nr);
#endif

        if (0 <= it->entry_count) {
                strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
                if (i) {
                        struct cache_tree_sub *prev = it->down[i-1];
                        if (subtree_name_cmp(down->name, down->namelen,
                                             prev->name, prev->namelen) <= 0)
                                die("fatal - unsorted cache subtree");
                }
                write_one(buffer, down->cache_tree, down->name, down->namelen);
        }
}

void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
        write_one(sb, root, "", 0);
}

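/*
 * Parse one entry written by write_one(), advancing *buffer and
 * *size_p past it (and past its subtrees).  Returns NULL, freeing
 * whatever was parsed so far, if the data is truncated or malformed.
 */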
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
        const char *buf = *buffer;
        unsigned long size = *size_p;
        const char *cp;
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;
        const unsigned rawsz = the_hash_algo->rawsz;

        it = NULL;
        /* skip name, but make sure name exists */
        while (size && *buf) {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        it = cache_tree();

        cp = buf;
        it->entry_count = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        cp = ep;
        subtree_nr = strtol(cp, &ep, 10);
        if (cp == ep)
                goto free_return;
        while (size && *buf && *buf != '\n') {
                size--;
                buf++;
        }
        if (!size)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
                if (size < rawsz)
                        goto free_return;
                oidread(&it->oid, (const unsigned char *)buf);
                buf += rawsz;
                size -= rawsz;
        }

#if DEBUG
        if (0 <= it->entry_count)
                fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
                        *buffer, it->entry_count, subtree_nr,
                        oid_to_hex(&it->oid));
        else
                fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
                        *buffer, subtree_nr);
#endif

        /*
         * Just a heuristic -- we do not add directories that often but
         * we do not want to have to extend it immediately when we do,
         * hence +2.
         */
        it->subtree_alloc = subtree_nr + 2;
        it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
        for (i = 0; i < subtree_nr; i++) {
                /* read each subtree */
                struct cache_tree *sub;
                struct cache_tree_sub *subtree;
                const char *name = buf;

                sub = read_one(&buf, &size);
                if (!sub)
                        goto free_return;
                subtree = cache_tree_sub(it, name);
                subtree->cache_tree = sub;
        }
        if (subtree_nr != it->subtree_nr)
                die("cache-tree: internal error");
        *buffer = buf;
        *size_p = size;
        return it;

 free_return:
        cache_tree_free(&it);
        return NULL;
}

struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
        if (buffer[0])
                return NULL; /* not the whole tree */
        return read_one(&buffer, &size);
}

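/*
 * Walk "path" one component at a time, descending into the matching
 * subtree at each level; returns NULL as soon as a component has no
 * corresponding subtree.
 */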
static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
        if (!it)
                return NULL;
        while (*path) {
                const char *slash;
                struct cache_tree_sub *sub;

                slash = strchrnul(path, '/');
                /*
                 * Between path and slash is the name of the subtree
                 * to look for.
                 */
                sub = find_subtree(it, path, slash - path, 0);
                if (!sub)
                        return NULL;
                it = sub->cache_tree;

                path = slash;
                while (*path == '/')
                        path++;
        }
        return it;
}

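/*
 * Read the index from index_path, bring its cache-tree up to date
 * (writing the refreshed index back when possible), and report the
 * resulting tree -- or the tree at "prefix" -- through *oid.  Returns
 * 0 on success or one of the WRITE_TREE_* error codes.
 */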
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
        int entries, was_valid;
        struct lock_file lock_file = LOCK_INIT;
        int ret = 0;

        hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

        entries = read_index_from(index_state, index_path, get_git_dir());
        if (entries < 0) {
                ret = WRITE_TREE_UNREADABLE_INDEX;
                goto out;
        }
        if (flags & WRITE_TREE_IGNORE_CACHE_TREE)
                cache_tree_free(&index_state->cache_tree);

        if (!index_state->cache_tree)
                index_state->cache_tree = cache_tree();

        was_valid = cache_tree_fully_valid(index_state->cache_tree);
        if (!was_valid) {
                if (cache_tree_update(index_state, flags) < 0) {
                        ret = WRITE_TREE_UNMERGED_INDEX;
                        goto out;
                }
                write_locked_index(index_state, &lock_file, COMMIT_LOCK);
                /* Not being able to write is fine -- we are only interested
                 * in updating the cache-tree part, and if the next caller
                 * ends up using the old index with unupdated cache-tree part
                 * it misses the work we did here, but that is just a
                 * performance penalty and not a big deal.
                 */
        }

        if (prefix) {
                struct cache_tree *subtree;
                subtree = cache_tree_find(index_state->cache_tree, prefix);
                if (!subtree) {
                        ret = WRITE_TREE_PREFIX_ERROR;
                        goto out;
                }
                oidcpy(oid, &subtree->oid);
        }
        else
                oidcpy(oid, &index_state->cache_tree->oid);

out:
        rollback_lock_file(&lock_file);
        return ret;
}

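/*
 * Populate a cache_tree (and its subtrees) directly from an
 * already-parsed tree object, so that the cache-tree matches the
 * tree that was just read or checked out.
 */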
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
        struct tree_desc desc;
        struct name_entry entry;
        int cnt;

        oidcpy(&it->oid, &tree->object.oid);
        init_tree_desc(&desc, tree->buffer, tree->size);
        cnt = 0;
        while (tree_entry(&desc, &entry)) {
                if (!S_ISDIR(entry.mode))
                        cnt++;
                else {
                        struct cache_tree_sub *sub;
                        struct tree *subtree = lookup_tree(the_repository,
                                                           entry.oid);
                        if (!subtree->object.parsed)
                                parse_tree(subtree);
                        sub = cache_tree_sub(it, entry.path);
                        sub->cache_tree = cache_tree();
                        prime_cache_tree_rec(sub->cache_tree, subtree);
                        cnt += sub->cache_tree->entry_count;
                }
        }
        it->entry_count = cnt;
}

void prime_cache_tree(struct index_state *istate, struct tree *tree)
{
        cache_tree_free(&istate->cache_tree);
        istate->cache_tree = cache_tree();
        prime_cache_tree_rec(istate->cache_tree, tree);
        istate->cache_changed |= CACHE_TREE_CHANGED;
}

/*
 * find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
                                                         struct traverse_info *info)
{
        struct cache_tree *our_parent;

        if (!info->prev)
                return root;
        our_parent = find_cache_tree_from_traversal(root, info->prev);
        return cache_tree_find(our_parent, info->name.path);
}

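/*
 * During a tree traversal, report how many index entries the cached
 * tree for "ent" covers when its object name matches; returns 0 when
 * the cache-tree cannot be used for this subtree.
 */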
int cache_tree_matches_traversal(struct cache_tree *root,
                                 struct name_entry *ent,
                                 struct traverse_info *info)
{
        struct cache_tree *it;

        it = find_cache_tree_from_traversal(root, info);
        it = cache_tree_find(it, ent->path);
        if (it && it->entry_count > 0 && !oidcmp(ent->oid, &it->oid))
                return it->entry_count;
        return 0;
}