#include "../cache.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "../iterator.h"

/* FIXME: This declaration shouldn't be here */
void read_loose_refs(const char *dirname, struct ref_dir *dir);

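/*
 * Append entry to the end of dir, without checking for duplicates and
 * without recursing into subdirectories. dir->sorted is kept up to
 * date if the entries happen to remain in sorted order.
 */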
void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
{
        ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
        dir->entries[dir->nr++] = entry;
        /* optimize for the case that entries are added in order */
        if (dir->nr == 1 ||
            (dir->nr == dir->sorted + 1 &&
             strcmp(dir->entries[dir->nr - 2]->name,
                    dir->entries[dir->nr - 1]->name) < 0))
                dir->sorted = dir->nr;
}

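/*
 * Return the ref_dir stored in a REF_DIR entry, first reading its
 * loose references if the entry is still marked REF_INCOMPLETE.
 */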
struct ref_dir *get_ref_dir(struct ref_entry *entry)
{
        struct ref_dir *dir;
        assert(entry->flag & REF_DIR);
        dir = &entry->u.subdir;
        if (entry->flag & REF_INCOMPLETE) {
                read_loose_refs(entry->name, dir);

                /*
                 * Manually add refs/bisect, which, being
                 * per-worktree, might not appear in the directory
                 * listing for refs/ in the main repo.
                 */
                if (!strcmp(entry->name, "refs/")) {
                        int pos = search_ref_dir(dir, "refs/bisect/", 12);
                        if (pos < 0) {
                                struct ref_entry *child_entry;
                                child_entry = create_dir_entry(dir->ref_store,
                                                               "refs/bisect/",
                                                               12, 1);
                                add_entry_to_dir(dir, child_entry);
                        }
                }
                entry->flag &= ~REF_INCOMPLETE;
        }
        return dir;
}

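/*
 * Create a value entry for refname pointing at sha1, with the given
 * flag bits and an unset peeled value. If check_name is set, die if
 * refname has an invalid format.
 */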
struct ref_entry *create_ref_entry(const char *refname,
                                   const unsigned char *sha1, int flag,
                                   int check_name)
{
        struct ref_entry *ref;

        if (check_name &&
            check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
                die("Reference has invalid format: '%s'", refname);
        FLEX_ALLOC_STR(ref, name, refname);
        hashcpy(ref->u.value.oid.hash, sha1);
        oidclr(&ref->u.value.peeled);
        ref->flag = flag;
        return ref;
}

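/*
 * Create a new, empty ref_cache for refs. The root directory entry is
 * created incomplete, so its loose refs are read on first access.
 */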
struct ref_cache *create_ref_cache(struct files_ref_store *refs)
{
        struct ref_cache *ret = xcalloc(1, sizeof(*ret));

        ret->root = create_dir_entry(refs, "", 0, 1);
        return ret;
}

static void clear_ref_dir(struct ref_dir *dir);

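/*
 * Free entry, including (recursively) any subentries if it is a
 * directory entry.
 */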
static void free_ref_entry(struct ref_entry *entry)
{
        if (entry->flag & REF_DIR) {
                /*
                 * Do not use get_ref_dir() here, as that might
                 * trigger the reading of loose refs.
                 */
                clear_ref_dir(&entry->u.subdir);
        }
        free(entry);
}

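/* Free cache, including all of its cached entries. */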
void free_ref_cache(struct ref_cache *cache)
{
        free_ref_entry(cache->root);
        free(cache);
}

/*
 * Clear and free all entries in dir, recursively.
 */
static void clear_ref_dir(struct ref_dir *dir)
{
        int i;
        for (i = 0; i < dir->nr; i++)
                free_ref_entry(dir->entries[i]);
        free(dir->entries);
        dir->sorted = dir->nr = dir->alloc = 0;
        dir->entries = NULL;
}

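/*
 * Create a REF_DIR entry for dirname (of length len), which must be
 * "" for the top-level directory or end in '/'. If incomplete is set,
 * mark the entry REF_INCOMPLETE so that its loose refs are read
 * lazily by get_ref_dir().
 */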
struct ref_entry *create_dir_entry(struct files_ref_store *ref_store,
                                   const char *dirname, size_t len,
                                   int incomplete)
{
        struct ref_entry *direntry;
        FLEX_ALLOC_MEM(direntry, name, dirname, len);
        direntry->u.subdir.ref_store = ref_store;
        direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
        return direntry;
}

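/* qsort() comparison function for sorting ref_entries by name. */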
static int ref_entry_cmp(const void *a, const void *b)
{
        struct ref_entry *one = *(struct ref_entry **)a;
        struct ref_entry *two = *(struct ref_entry **)b;
        return strcmp(one->name, two->name);
}

static void sort_ref_dir(struct ref_dir *dir);

struct string_slice {
        size_t len;
        const char *str;
};

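/*
 * bsearch() comparison function comparing a string_slice key against
 * a ref_entry. Only the first key->len bytes of the key are used, and
 * the entry's name must match them exactly (not merely as a prefix).
 */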
static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
{
        const struct string_slice *key = key_;
        const struct ref_entry *ent = *(const struct ref_entry * const *)ent_;
        int cmp = strncmp(key->str, ent->name, key->len);
        if (cmp)
                return cmp;
        return '\0' - (unsigned char)ent->name[key->len];
}

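/*
 * Search (non-recursively) within dir for the entry whose name is the
 * first len bytes of refname, sorting dir if necessary. Return the
 * entry's index within dir, or -1 if it is not found.
 */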
int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
{
        struct ref_entry **r;
        struct string_slice key;

        if (refname == NULL || !dir->nr)
                return -1;

        sort_ref_dir(dir);
        key.len = len;
        key.str = refname;
        r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
                    ref_entry_cmp_sslice);

        if (r == NULL)
                return -1;

        return r - dir->entries;
}

/*
 * Search for a directory entry directly within dir (without
 * recursing).  Sort dir if necessary.  subdirname must be a directory
 * name (i.e., end in '/').  If mkdir is set, then create the
 * directory if it is missing; otherwise, return NULL if the desired
 * directory cannot be found.  dir must already be complete.
 */
static struct ref_dir *search_for_subdir(struct ref_dir *dir,
                                         const char *subdirname, size_t len,
                                         int mkdir)
{
        int entry_index = search_ref_dir(dir, subdirname, len);
        struct ref_entry *entry;
        if (entry_index == -1) {
                if (!mkdir)
                        return NULL;
                /*
                 * Since dir is complete, the absence of a subdir
                 * means that the subdir really doesn't exist;
                 * therefore, create an empty record for it but mark
                 * the record complete.
                 */
                entry = create_dir_entry(dir->ref_store, subdirname, len, 0);
                add_entry_to_dir(dir, entry);
        } else {
                entry = dir->entries[entry_index];
        }
        return get_ref_dir(entry);
}

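/*
 * Find the ref_dir within the dir tree that should hold refname,
 * sorting ref_dirs and reading incomplete directories along the way.
 * If refname itself ends in '/', return the ref_dir representing
 * refname. If mkdir is set, create any missing directories; otherwise
 * return NULL if one cannot be found.
 */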
struct ref_dir *find_containing_dir(struct ref_dir *dir,
                                    const char *refname, int mkdir)
{
        const char *slash;
        for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
                size_t dirnamelen = slash - refname + 1;
                struct ref_dir *subdir;
                subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
                if (!subdir) {
                        dir = NULL;
                        break;
                }
                dir = subdir;
        }

        return dir;
}

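/*
 * Find the value entry with the given name in dir, sorting ref_dirs
 * and recursing into subdirectories as necessary. If the name is not
 * found or it corresponds to a directory entry, return NULL.
 */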
struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
{
        int entry_index;
        struct ref_entry *entry;
        dir = find_containing_dir(dir, refname, 0);
        if (!dir)
                return NULL;
        entry_index = search_ref_dir(dir, refname, strlen(refname));
        if (entry_index == -1)
                return NULL;
        entry = dir->entries[entry_index];
        return (entry->flag & REF_DIR) ? NULL : entry;
}

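/*
 * Remove the entry with the given name from dir, recursing into
 * subdirectories as necessary. If refname is the name of a directory
 * (i.e., ends with '/'), remove the directory entry along with its
 * contents. Free the removed entry and return the number of entries
 * remaining in the directory that contained it, or -1 if the entry
 * was not found.
 */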
int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
{
        int refname_len = strlen(refname);
        int entry_index;
        struct ref_entry *entry;
        int is_dir = refname[refname_len - 1] == '/';
        if (is_dir) {
                /*
                 * refname represents a reference directory.  Remove
                 * the trailing slash; otherwise we will get the
                 * directory *representing* refname rather than the
                 * one *containing* it.
                 */
                char *dirname = xmemdupz(refname, refname_len - 1);
                dir = find_containing_dir(dir, dirname, 0);
                free(dirname);
        } else {
                dir = find_containing_dir(dir, refname, 0);
        }
        if (!dir)
                return -1;
        entry_index = search_ref_dir(dir, refname, refname_len);
        if (entry_index == -1)
                return -1;
        entry = dir->entries[entry_index];

        memmove(&dir->entries[entry_index],
                &dir->entries[entry_index + 1],
                (dir->nr - entry_index - 1) * sizeof(*dir->entries)
                );
        dir->nr--;
        if (dir->sorted > entry_index)
                dir->sorted--;
        free_ref_entry(entry);
        return dir->nr;
}

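/*
 * Add ref (unsorted) to the ref_dir, creating any missing containing
 * directories as necessary. Return 0 on success, or -1 if the
 * containing directory could not be found.
 */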
int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
{
        dir = find_containing_dir(dir, ref->name, 1);
        if (!dir)
                return -1;
        add_entry_to_dir(dir, ref);
        return 0;
}

/*
 * Emit a warning and return true iff ref1 and ref2 have the same name
 * and the same sha1.  Die if they have the same name but different
 * sha1s.
 */
static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
{
        if (strcmp(ref1->name, ref2->name))
                return 0;

        /* Duplicate name; make sure that they don't conflict: */

        if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
                /* This is impossible by construction */
                die("Reference directory conflict: %s", ref1->name);

        if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
                die("Duplicated ref, and SHA1s don't match: %s", ref1->name);

        warning("Duplicated ref: %s", ref1->name);
        return 1;
}

/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
        int i, j;
        struct ref_entry *last = NULL;

        /*
         * This check also prevents passing a zero-length array to qsort(),
         * which is a problem on some platforms.
         */
        if (dir->sorted == dir->nr)
                return;

        QSORT(dir->entries, dir->nr, ref_entry_cmp);

        /* Remove any duplicates: */
        for (i = 0, j = 0; j < dir->nr; j++) {
                struct ref_entry *entry = dir->entries[j];
                if (last && is_dup_ref(last, entry))
                        free_ref_entry(entry);
                else
                        last = dir->entries[i++] = entry;
        }
        dir->sorted = dir->nr = i;
}

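/*
 * Call fn for each reference in dir, starting at index offset and
 * recursing into subdirectories (sorting each one before visiting
 * it). dir itself must already be sorted. If fn ever returns a
 * nonzero value, stop the iteration and return that value; otherwise
 * return 0.
 */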
int do_for_each_entry_in_dir(struct ref_dir *dir, int offset,
                             each_ref_entry_fn fn, void *cb_data)
{
        int i;
        assert(dir->sorted == dir->nr);
        for (i = offset; i < dir->nr; i++) {
                struct ref_entry *entry = dir->entries[i];
                int retval;
                if (entry->flag & REF_DIR) {
                        struct ref_dir *subdir = get_ref_dir(entry);
                        sort_ref_dir(subdir);
                        retval = do_for_each_entry_in_dir(subdir, 0, fn, cb_data);
                } else {
                        retval = fn(entry, cb_data);
                }
                if (retval)
                        return retval;
        }
        return 0;
}

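/* Load all of the refs from dir (recursively) into the in-memory cache. */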
void prime_ref_dir(struct ref_dir *dir)
{
        /*
         * The hard work of loading loose refs is done by get_ref_dir(), so we
         * just need to recurse through all of the sub-directories. We do not
         * even need to care about sorting, as traversal order does not matter
         * to us.
         */
        int i;
        for (i = 0; i < dir->nr; i++) {
                struct ref_entry *entry = dir->entries[i];
                if (entry->flag & REF_DIR)
                        prime_ref_dir(get_ref_dir(entry));
        }
}

/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
        /*
         * The ref_dir being iterated over at this level. The ref_dir
         * is sorted before being stored here.
         */
        struct ref_dir *dir;

        /*
         * The index of the current entry within dir (which might
         * itself be a directory). If index == -1, then the iteration
         * hasn't yet begun. If index == dir->nr, then the iteration
         * through this level is over.
         */
        int index;
};

/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
        struct ref_iterator base;

        /*
         * The number of levels currently on the stack. This is always
         * at least 1, because when it becomes zero the iteration is
         * ended and this struct is freed.
         */
        size_t levels_nr;

        /* The number of levels that have been allocated on the stack */
        size_t levels_alloc;

        /*
         * A stack of levels. levels[0] is the uppermost level that is
         * being iterated over in this iteration. (This is not
         * necessarily the top level in the references hierarchy. If
         * we are iterating through a subtree, then levels[0] will
         * hold the ref_dir for that subtree, and subsequent levels
         * will go on from there.)
         */
        struct cache_ref_iterator_level *levels;
};

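/*
 * Advance the iterator to the next reference, descending into
 * subdirectories and popping exhausted levels as needed. Return
 * ITER_OK when a reference is found, or end the iteration (via
 * ref_iterator_abort()) once every level is exhausted.
 */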
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
        struct cache_ref_iterator *iter =
                (struct cache_ref_iterator *)ref_iterator;

        while (1) {
                struct cache_ref_iterator_level *level =
                        &iter->levels[iter->levels_nr - 1];
                struct ref_dir *dir = level->dir;
                struct ref_entry *entry;

                if (level->index == -1)
                        sort_ref_dir(dir);

                if (++level->index == level->dir->nr) {
                        /* This level is exhausted; pop up a level */
                        if (--iter->levels_nr == 0)
                                return ref_iterator_abort(ref_iterator);

                        continue;
                }

                entry = dir->entries[level->index];

                if (entry->flag & REF_DIR) {
                        /* push down a level */
                        ALLOC_GROW(iter->levels, iter->levels_nr + 1,
                                   iter->levels_alloc);

                        level = &iter->levels[iter->levels_nr++];
                        level->dir = get_ref_dir(entry);
                        level->index = -1;
                } else {
                        iter->base.refname = entry->name;
                        iter->base.oid = &entry->u.value.oid;
                        iter->base.flags = entry->flag;
                        return ITER_OK;
                }
        }
}

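/*
 * Peel the entry (if possible) and return its new peel_status. If
 * repeel is true, re-peel the entry even if a peeled value is already
 * cached in it.
 */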
enum peel_status peel_entry(struct ref_entry *entry, int repeel)
{
        enum peel_status status;

        if (entry->flag & REF_KNOWS_PEELED) {
                if (repeel) {
                        entry->flag &= ~REF_KNOWS_PEELED;
                        oidclr(&entry->u.value.peeled);
                } else {
                        return is_null_oid(&entry->u.value.peeled) ?
                                PEEL_NON_TAG : PEEL_PEELED;
                }
        }
        if (entry->flag & REF_ISBROKEN)
                return PEEL_BROKEN;
        if (entry->flag & REF_ISSYMREF)
                return PEEL_IS_SYMREF;

        status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
        if (status == PEEL_PEELED || status == PEEL_NON_TAG)
                entry->flag |= REF_KNOWS_PEELED;
        return status;
}

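/*
 * Peel the reference currently being visited by the iterator, storing
 * the result in *peeled. Return 0 on success or -1 if the entry
 * cannot be peeled.
 */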
static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
                                   struct object_id *peeled)
{
        struct cache_ref_iterator *iter =
                (struct cache_ref_iterator *)ref_iterator;
        struct cache_ref_iterator_level *level;
        struct ref_entry *entry;

        level = &iter->levels[iter->levels_nr - 1];

        if (level->index == -1)
                die("BUG: peel called before advance for cache iterator");

        entry = level->dir->entries[level->index];

        if (peel_entry(entry, 0))
                return -1;
        oidcpy(peeled, &entry->u.value.peeled);
        return 0;
}

static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
        struct cache_ref_iterator *iter =
                (struct cache_ref_iterator *)ref_iterator;

        free(iter->levels);
        base_ref_iterator_free(ref_iterator);
        return ITER_DONE;
}

static struct ref_iterator_vtable cache_ref_iterator_vtable = {
        cache_ref_iterator_advance,
        cache_ref_iterator_peel,
        cache_ref_iterator_abort
};

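/*
 * Return a ref_iterator that iterates (recursively) over the
 * references in dir. The iterator frees itself when the iteration is
 * exhausted or aborted.
 */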
struct ref_iterator *cache_ref_iterator_begin(struct ref_dir *dir)
{
        struct cache_ref_iterator *iter;
        struct ref_iterator *ref_iterator;
        struct cache_ref_iterator_level *level;

        iter = xcalloc(1, sizeof(*iter));
        ref_iterator = &iter->base;
        base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
        ALLOC_GROW(iter->levels, 10, iter->levels_alloc);

        iter->levels_nr = 1;
        level = &iter->levels[0];
        level->index = -1;
        level->dir = dir;

        return ref_iterator;
}