75d44cf0616522f5747591abfe1f86dae534790f
   1#include "../cache.h"
   2#include "../config.h"
   3#include "../refs.h"
   4#include "refs-internal.h"
   5#include "ref-cache.h"
   6#include "packed-backend.h"
   7#include "../iterator.h"
   8#include "../lockfile.h"
   9
/*
 * The possible ways of accessing the `packed-refs` file's contents.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};
  30
/* Pick the mmap strategy at compile time from the platform's limitations: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
  38
struct packed_ref_store;

/*
 * A cache of the parsed contents of the `packed-refs` file. It is
 * reference-counted (see `referrers`), so it can remain alive while
 * iterators are still walking it even after the owning store has
 * dropped its pointer.
 */
struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	/* The parsed references, stored as a `ref_cache` tree. */
	struct ref_cache *cache;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file is
	 * mmapped, this points at the mmapped contents of the file.
	 * If not, this points at heap-allocated memory containing the
	 * contents. If there were no contents (e.g., because the file
	 * didn't exist), `buf` and `eof` are both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};
  82
/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	/*
	 * Embedded generic store; must come first so that the
	 * `packed_downcast()` pointer cast is valid.
	 */
	struct ref_store base;

	/* REF_STORE_* capability flags this store was created with: */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};
 114
 115/*
 116 * Increment the reference count of *packed_refs.
 117 */
 118static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
 119{
 120        packed_refs->referrers++;
 121}
 122
 123/*
 124 * If the buffer in `packed_refs` is active, then either munmap the
 125 * memory and close the file, or free the memory. Then set the buffer
 126 * pointers to NULL.
 127 */
 128static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
 129{
 130        if (packed_refs->mmapped) {
 131                if (munmap(packed_refs->buf,
 132                           packed_refs->eof - packed_refs->buf))
 133                        die_errno("error ummapping packed-refs file %s",
 134                                  packed_refs->refs->path);
 135                packed_refs->mmapped = 0;
 136        } else {
 137                free(packed_refs->buf);
 138        }
 139        packed_refs->buf = packed_refs->eof = NULL;
 140        packed_refs->header_len = 0;
 141}
 142
 143/*
 144 * Decrease the reference count of *packed_refs.  If it goes to zero,
 145 * free *packed_refs and return true; otherwise return false.
 146 */
 147static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
 148{
 149        if (!--packed_refs->referrers) {
 150                free_ref_cache(packed_refs->cache);
 151                stat_validity_clear(&packed_refs->validity);
 152                release_packed_ref_buffer(packed_refs);
 153                free(packed_refs);
 154                return 1;
 155        } else {
 156                return 0;
 157        }
 158}
 159
 160struct ref_store *packed_ref_store_create(const char *path,
 161                                          unsigned int store_flags)
 162{
 163        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
 164        struct ref_store *ref_store = (struct ref_store *)refs;
 165
 166        base_ref_store_init(ref_store, &refs_be_packed);
 167        refs->store_flags = store_flags;
 168
 169        refs->path = xstrdup(path);
 170        return ref_store;
 171}
 172
 173/*
 174 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 175 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 176 * support at least the flags specified in `required_flags`. `caller`
 177 * is used in any necessary error messages.
 178 */
 179static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
 180                                                unsigned int required_flags,
 181                                                const char *caller)
 182{
 183        struct packed_ref_store *refs;
 184
 185        if (ref_store->be != &refs_be_packed)
 186                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
 187                    ref_store->be->name, caller);
 188
 189        refs = (struct packed_ref_store *)ref_store;
 190
 191        if ((refs->store_flags & required_flags) != required_flags)
 192                die("BUG: unallowed operation (%s), requires %x, has %x\n",
 193                    caller, required_flags, refs->store_flags);
 194
 195        return refs;
 196}
 197
 198static void clear_packed_ref_cache(struct packed_ref_store *refs)
 199{
 200        if (refs->cache) {
 201                struct packed_ref_cache *cache = refs->cache;
 202
 203                refs->cache = NULL;
 204                release_packed_ref_cache(cache);
 205        }
 206}
 207
 208static NORETURN void die_unterminated_line(const char *path,
 209                                           const char *p, size_t len)
 210{
 211        if (len < 80)
 212                die("unterminated line in %s: %.*s", path, (int)len, p);
 213        else
 214                die("unterminated line in %s: %.75s...", path, p);
 215}
 216
 217static NORETURN void die_invalid_line(const char *path,
 218                                      const char *p, size_t len)
 219{
 220        const char *eol = memchr(p, '\n', len);
 221
 222        if (!eol)
 223                die_unterminated_line(path, p, len);
 224        else if (eol - p < 80)
 225                die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
 226        else
 227                die("unexpected line in %s: %.75s...", path, p);
 228
 229}
 230
/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	struct ref_iterator base;

	/* The cache being iterated; this iterator holds a reference on it: */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* The current entry's value and (optional) peeled value: */
	struct object_id oid, peeled;

	/* Owned storage backing `base.refname`: */
	struct strbuf refname_buf;
};
 249
/*
 * Parse the next reference record (and any following peeled "^" line)
 * out of the mmapped `packed-refs` data, exposing it via the base
 * iterator fields. Returns ITER_OK on success, the result of aborting
 * the iterator at end of data, and dies on malformed input.
 */
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	/* A record is "<hex-oid> SP <refname> LF"; parse the oid part: */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		/* Tolerate the bad name, but mark the entry broken: */
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<hex-oid> LF" line records the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
 317
 318static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
 319                                    struct object_id *peeled)
 320{
 321        struct mmapped_ref_iterator *iter =
 322                (struct mmapped_ref_iterator *)ref_iterator;
 323
 324        if ((iter->base.flags & REF_KNOWS_PEELED)) {
 325                oidcpy(peeled, &iter->peeled);
 326                return is_null_oid(&iter->peeled) ? -1 : 0;
 327        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
 328                return -1;
 329        } else {
 330                return !!peel_object(iter->oid.hash, peeled->hash);
 331        }
 332}
 333
 334static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
 335{
 336        struct mmapped_ref_iterator *iter =
 337                (struct mmapped_ref_iterator *)ref_iterator;
 338
 339        release_packed_ref_cache(iter->packed_refs);
 340        strbuf_release(&iter->refname_buf);
 341        base_ref_iterator_free(ref_iterator);
 342        return ITER_DONE;
 343}
 344
/* Virtual method table for `mmapped_ref_iterator`: */
static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};
 350
 351struct ref_iterator *mmapped_ref_iterator_begin(
 352                struct packed_ref_cache *packed_refs,
 353                const char *pos, const char *eof)
 354{
 355        struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
 356        struct ref_iterator *ref_iterator = &iter->base;
 357
 358        if (!packed_refs->buf)
 359                return empty_ref_iterator_begin();
 360
 361        base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 0);
 362
 363        iter->packed_refs = packed_refs;
 364        acquire_packed_ref_cache(iter->packed_refs);
 365        iter->pos = pos;
 366        iter->eof = eof;
 367        strbuf_init(&iter->refname_buf, 0);
 368
 369        iter->base.oid = &iter->oid;
 370
 371        return ref_iterator;
 372}
 373
 374/*
 375 * Depending on `mmap_strategy`, either mmap or read the contents of
 376 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 377 * the file existed and was read, or 0 if the file was absent. Die on
 378 * errors.
 379 */
 380static int load_contents(struct packed_ref_cache *packed_refs)
 381{
 382        int fd;
 383        struct stat st;
 384        size_t size;
 385        ssize_t bytes_read;
 386
 387        fd = open(packed_refs->refs->path, O_RDONLY);
 388        if (fd < 0) {
 389                if (errno == ENOENT) {
 390                        /*
 391                         * This is OK; it just means that no
 392                         * "packed-refs" file has been written yet,
 393                         * which is equivalent to it being empty,
 394                         * which is its state when initialized with
 395                         * zeros.
 396                         */
 397                        return 0;
 398                } else {
 399                        die_errno("couldn't read %s", packed_refs->refs->path);
 400                }
 401        }
 402
 403        stat_validity_update(&packed_refs->validity, fd);
 404
 405        if (fstat(fd, &st) < 0)
 406                die_errno("couldn't stat %s", packed_refs->refs->path);
 407        size = xsize_t(st.st_size);
 408
 409        switch (mmap_strategy) {
 410        case MMAP_NONE:
 411        case MMAP_TEMPORARY:
 412                packed_refs->buf = xmalloc(size);
 413                bytes_read = read_in_full(fd, packed_refs->buf, size);
 414                if (bytes_read < 0 || bytes_read != size)
 415                        die_errno("couldn't read %s", packed_refs->refs->path);
 416                packed_refs->eof = packed_refs->buf + size;
 417                packed_refs->mmapped = 0;
 418                break;
 419        case MMAP_OK:
 420                packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 421                packed_refs->eof = packed_refs->buf + size;
 422                packed_refs->mmapped = 1;
 423                break;
 424        }
 425        close(fd);
 426
 427        return 1;
 428}
 429
/*
 * Read from the `packed-refs` file into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded is not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_dir *dir;
	struct ref_iterator *iter;
	int ok;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	/* All entries are loaded eagerly below, so nothing is "incomplete": */
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
	packed_refs->peeled = PEELED_NONE;

	/* If the file is absent, return the (empty) cache as-is: */
	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		/* NODUP list: the trait strings point into `tmp`'s buffer. */
		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;
		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	/* Parse every record after the header into the ref_cache tree: */
	dir = get_ref_dir(packed_refs->cache->root);
	iter = mmapped_ref_iterator_begin(
			packed_refs,
			packed_refs->buf + packed_refs->header_len,
			packed_refs->eof);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct ref_entry *entry =
			create_ref_entry(iter->refname, iter->oid, iter->flags);

		if ((iter->flags & REF_KNOWS_PEELED))
			ref_iterator_peel(iter, &entry->u.value.peeled);
		add_ref_entry(dir, entry);
	}

	if (ok != ITER_DONE)
		die("error reading packed-refs file %s", refs->path);

	return packed_refs;
}
 530
 531/*
 532 * Check that the packed refs cache (if any) still reflects the
 533 * contents of the file. If not, clear the cache.
 534 */
 535static void validate_packed_ref_cache(struct packed_ref_store *refs)
 536{
 537        if (refs->cache &&
 538            !stat_validity_check(&refs->cache->validity, refs->path))
 539                clear_packed_ref_cache(refs);
 540}
 541
 542/*
 543 * Get the packed_ref_cache for the specified packed_ref_store,
 544 * creating and populating it if it hasn't been read before or if the
 545 * file has been changed (according to its `validity` field) since it
 546 * was last read. On the other hand, if we hold the lock, then assume
 547 * that the file hasn't been changed out from under us, so skip the
 548 * extra `stat()` call in `stat_validity_check()`.
 549 */
 550static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
 551{
 552        if (!is_lock_file_locked(&refs->lock))
 553                validate_packed_ref_cache(refs);
 554
 555        if (!refs->cache)
 556                refs->cache = read_packed_refs(refs);
 557
 558        return refs->cache;
 559}
 560
 561static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
 562{
 563        return get_ref_dir(packed_ref_cache->cache->root);
 564}
 565
 566static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
 567{
 568        return get_packed_ref_dir(get_packed_ref_cache(refs));
 569}
 570
 571/*
 572 * Return the ref_entry for the given refname from the packed
 573 * references.  If it does not exist, return NULL.
 574 */
 575static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
 576                                        const char *refname)
 577{
 578        return find_ref_entry(get_packed_refs(refs), refname);
 579}
 580
 581static int packed_read_raw_ref(struct ref_store *ref_store,
 582                               const char *refname, unsigned char *sha1,
 583                               struct strbuf *referent, unsigned int *type)
 584{
 585        struct packed_ref_store *refs =
 586                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
 587
 588        struct ref_entry *entry;
 589
 590        *type = 0;
 591
 592        entry = get_packed_ref(refs, refname);
 593        if (!entry) {
 594                errno = ENOENT;
 595                return -1;
 596        }
 597
 598        hashcpy(sha1, entry->u.value.oid.hash);
 599        *type = REF_ISPACKED;
 600        return 0;
 601}
 602
 603static int packed_peel_ref(struct ref_store *ref_store,
 604                           const char *refname, unsigned char *sha1)
 605{
 606        struct packed_ref_store *refs =
 607                packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
 608                                "peel_ref");
 609        struct ref_entry *r = get_packed_ref(refs, refname);
 610
 611        if (!r || peel_entry(r, 0))
 612                return -1;
 613
 614        hashcpy(sha1, r->u.value.peeled.hash);
 615        return 0;
 616}
 617
/*
 * An iterator over a `packed_ref_cache`, filtering the output of an
 * underlying cache iterator according to `flags`.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The cache being iterated; this iterator holds a reference on it: */
	struct packed_ref_cache *cache;

	/* The underlying cache iterator; set to NULL once exhausted: */
	struct ref_iterator *iter0;

	/* DO_FOR_EACH_* flags controlling which refs are yielded: */
	unsigned int flags;
};
 625
/*
 * Advance to the next reference from the underlying iterator that
 * passes the DO_FOR_EACH_* filters in `iter->flags`. Returns ITER_OK,
 * or ITER_DONE/ITER_ERROR once the underlying iterator is exhausted.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/*
	 * The underlying iterator is finished (and has cleaned itself
	 * up); clear iter0 so our own abort doesn't touch it again.
	 */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}
 655
 656static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
 657                                   struct object_id *peeled)
 658{
 659        struct packed_ref_iterator *iter =
 660                (struct packed_ref_iterator *)ref_iterator;
 661
 662        return ref_iterator_peel(iter->iter0, peeled);
 663}
 664
 665static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
 666{
 667        struct packed_ref_iterator *iter =
 668                (struct packed_ref_iterator *)ref_iterator;
 669        int ok = ITER_DONE;
 670
 671        if (iter->iter0)
 672                ok = ref_iterator_abort(iter->iter0);
 673
 674        release_packed_ref_cache(iter->cache);
 675        base_ref_iterator_free(ref_iterator);
 676        return ok;
 677}
 678
/* Virtual method table for `packed_ref_iterator`: */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
 684
 685static struct ref_iterator *packed_ref_iterator_begin(
 686                struct ref_store *ref_store,
 687                const char *prefix, unsigned int flags)
 688{
 689        struct packed_ref_store *refs;
 690        struct packed_ref_iterator *iter;
 691        struct ref_iterator *ref_iterator;
 692        unsigned int required_flags = REF_STORE_READ;
 693
 694        if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
 695                required_flags |= REF_STORE_ODB;
 696        refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");
 697
 698        iter = xcalloc(1, sizeof(*iter));
 699        ref_iterator = &iter->base;
 700        base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
 701
 702        /*
 703         * Note that get_packed_ref_cache() internally checks whether
 704         * the packed-ref cache is up to date with what is on disk,
 705         * and re-reads it if not.
 706         */
 707
 708        iter->cache = get_packed_ref_cache(refs);
 709        acquire_packed_ref_cache(iter->cache);
 710        iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);
 711
 712        iter->flags = flags;
 713
 714        return ref_iterator;
 715}
 716
 717/*
 718 * Write an entry to the packed-refs file for the specified refname.
 719 * If peeled is non-NULL, write it as the entry's peeled value. On
 720 * error, return a nonzero value and leave errno set at the value left
 721 * by the failing call to `fprintf()`.
 722 */
 723static int write_packed_entry(FILE *fh, const char *refname,
 724                              const unsigned char *sha1,
 725                              const unsigned char *peeled)
 726{
 727        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
 728            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
 729                return -1;
 730
 731        return 0;
 732}
 733
/*
 * Take the lock on the "packed-refs" file (honoring the
 * `core.packedrefstimeout` configuration) and bring the cache into
 * sync with the locked file. On failure, append a message to `err`
 * and return -1.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	/* Read the timeout from the config only once per process: */
	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}
 782
 783void packed_refs_unlock(struct ref_store *ref_store)
 784{
 785        struct packed_ref_store *refs = packed_downcast(
 786                        ref_store,
 787                        REF_STORE_READ | REF_STORE_WRITE,
 788                        "packed_refs_unlock");
 789
 790        if (!is_lock_file_locked(&refs->lock))
 791                die("BUG: packed_refs_unlock() called when not locked");
 792        rollback_lock_file(&refs->lock);
 793}
 794
 795int packed_refs_is_locked(struct ref_store *ref_store)
 796{
 797        struct packed_ref_store *refs = packed_downcast(
 798                        ref_store,
 799                        REF_STORE_READ | REF_STORE_WRITE,
 800                        "packed_refs_is_locked");
 801
 802        return is_lock_file_locked(&refs->lock);
 803}
 804
/*
 * The packed-refs header line that we write out.  Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";
 815
 816static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
 817{
 818        /* Nothing to do. */
 819        return 0;
 820}
 821
 822/*
 823 * Write the packed-refs from the cache to the packed-refs tempfile,
 824 * incorporating any changes from `updates`. `updates` must be a
 825 * sorted string list whose keys are the refnames and whose util
 826 * values are `struct ref_update *`. On error, rollback the tempfile,
 827 * write an error message to `err`, and return a nonzero value.
 828 *
 829 * The packfile must be locked before calling this function and will
 830 * remain locked when it is done.
 831 */
 832static int write_with_updates(struct packed_ref_store *refs,
 833                              struct string_list *updates,
 834                              struct strbuf *err)
 835{
 836        struct ref_iterator *iter = NULL;
 837        size_t i;
 838        int ok;
 839        FILE *out;
 840        struct strbuf sb = STRBUF_INIT;
 841        char *packed_refs_path;
 842
 843        if (!is_lock_file_locked(&refs->lock))
 844                die("BUG: write_with_updates() called while unlocked");
 845
 846        /*
 847         * If packed-refs is a symlink, we want to overwrite the
 848         * symlinked-to file, not the symlink itself. Also, put the
 849         * staging file next to it:
 850         */
 851        packed_refs_path = get_locked_file_path(&refs->lock);
 852        strbuf_addf(&sb, "%s.new", packed_refs_path);
 853        free(packed_refs_path);
 854        if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
 855                strbuf_addf(err, "unable to create file %s: %s",
 856                            sb.buf, strerror(errno));
 857                strbuf_release(&sb);
 858                return -1;
 859        }
 860        strbuf_release(&sb);
 861
 862        out = fdopen_tempfile(&refs->tempfile, "w");
 863        if (!out) {
 864                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
 865                            strerror(errno));
 866                goto error;
 867        }
 868
 869        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
 870                goto write_error;
 871
 872        /*
 873         * We iterate in parallel through the current list of refs and
 874         * the list of updates, processing an entry from at least one
 875         * of the lists each time through the loop. When the current
 876         * list of refs is exhausted, set iter to NULL. When the list
 877         * of updates is exhausted, leave i set to updates->nr.
 878         */
 879        iter = packed_ref_iterator_begin(&refs->base, "",
 880                                         DO_FOR_EACH_INCLUDE_BROKEN);
 881        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
 882                iter = NULL;
 883
 884        i = 0;
 885
 886        while (iter || i < updates->nr) {
 887                struct ref_update *update = NULL;
 888                int cmp;
 889
 890                if (i >= updates->nr) {
 891                        cmp = -1;
 892                } else {
 893                        update = updates->items[i].util;
 894
 895                        if (!iter)
 896                                cmp = +1;
 897                        else
 898                                cmp = strcmp(iter->refname, update->refname);
 899                }
 900
 901                if (!cmp) {
 902                        /*
 903                         * There is both an old value and an update
 904                         * for this reference. Check the old value if
 905                         * necessary:
 906                         */
 907                        if ((update->flags & REF_HAVE_OLD)) {
 908                                if (is_null_oid(&update->old_oid)) {
 909                                        strbuf_addf(err, "cannot update ref '%s': "
 910                                                    "reference already exists",
 911                                                    update->refname);
 912                                        goto error;
 913                                } else if (oidcmp(&update->old_oid, iter->oid)) {
 914                                        strbuf_addf(err, "cannot update ref '%s': "
 915                                                    "is at %s but expected %s",
 916                                                    update->refname,
 917                                                    oid_to_hex(iter->oid),
 918                                                    oid_to_hex(&update->old_oid));
 919                                        goto error;
 920                                }
 921                        }
 922
 923                        /* Now figure out what to use for the new value: */
 924                        if ((update->flags & REF_HAVE_NEW)) {
 925                                /*
 926                                 * The update takes precedence. Skip
 927                                 * the iterator over the unneeded
 928                                 * value.
 929                                 */
 930                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
 931                                        iter = NULL;
 932                                cmp = +1;
 933                        } else {
 934                                /*
 935                                 * The update doesn't actually want to
 936                                 * change anything. We're done with it.
 937                                 */
 938                                i++;
 939                                cmp = -1;
 940                        }
 941                } else if (cmp > 0) {
 942                        /*
 943                         * There is no old value but there is an
 944                         * update for this reference. Make sure that
 945                         * the update didn't expect an existing value:
 946                         */
 947                        if ((update->flags & REF_HAVE_OLD) &&
 948                            !is_null_oid(&update->old_oid)) {
 949                                strbuf_addf(err, "cannot update ref '%s': "
 950                                            "reference is missing but expected %s",
 951                                            update->refname,
 952                                            oid_to_hex(&update->old_oid));
 953                                goto error;
 954                        }
 955                }
 956
 957                if (cmp < 0) {
 958                        /* Pass the old reference through. */
 959
 960                        struct object_id peeled;
 961                        int peel_error = ref_iterator_peel(iter, &peeled);
 962
 963                        if (write_packed_entry(out, iter->refname,
 964                                               iter->oid->hash,
 965                                               peel_error ? NULL : peeled.hash))
 966                                goto write_error;
 967
 968                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
 969                                iter = NULL;
 970                } else if (is_null_oid(&update->new_oid)) {
 971                        /*
 972                         * The update wants to delete the reference,
 973                         * and the reference either didn't exist or we
 974                         * have already skipped it. So we're done with
 975                         * the update (and don't have to write
 976                         * anything).
 977                         */
 978                        i++;
 979                } else {
 980                        struct object_id peeled;
 981                        int peel_error = peel_object(update->new_oid.hash,
 982                                                     peeled.hash);
 983
 984                        if (write_packed_entry(out, update->refname,
 985                                               update->new_oid.hash,
 986                                               peel_error ? NULL : peeled.hash))
 987                                goto write_error;
 988
 989                        i++;
 990                }
 991        }
 992
 993        if (ok != ITER_DONE) {
 994                strbuf_addf(err, "unable to write packed-refs file: "
 995                            "error iterating over old contents");
 996                goto error;
 997        }
 998
 999        if (close_tempfile(&refs->tempfile)) {
1000                strbuf_addf(err, "error closing file %s: %s",
1001                            get_tempfile_path(&refs->tempfile),
1002                            strerror(errno));
1003                strbuf_release(&sb);
1004                return -1;
1005        }
1006
1007        return 0;
1008
1009write_error:
1010        strbuf_addf(err, "error writing to %s: %s",
1011                    get_tempfile_path(&refs->tempfile), strerror(errno));
1012
1013error:
1014        if (iter)
1015                ref_iterator_abort(iter);
1016
1017        delete_tempfile(&refs->tempfile);
1018        return -1;
1019}
1020
/*
 * Backend-private state for a packed-refs transaction, stored in
 * `transaction->backend_data` between prepare and finish/abort.
 */
struct packed_transaction_backend_data {
        /* True iff the transaction owns the packed-refs lock. */
        int own_lock;

        /*
         * The transaction's updates, sorted by refname; each item's
         * `util` points at the corresponding `struct ref_update`
         * (filled in by packed_transaction_prepare()).
         */
        struct string_list updates;
};
1027
1028static void packed_transaction_cleanup(struct packed_ref_store *refs,
1029                                       struct ref_transaction *transaction)
1030{
1031        struct packed_transaction_backend_data *data = transaction->backend_data;
1032
1033        if (data) {
1034                string_list_clear(&data->updates, 0);
1035
1036                if (is_tempfile_active(&refs->tempfile))
1037                        delete_tempfile(&refs->tempfile);
1038
1039                if (data->own_lock && is_lock_file_locked(&refs->lock)) {
1040                        packed_refs_unlock(&refs->base);
1041                        data->own_lock = 0;
1042                }
1043
1044                free(data);
1045                transaction->backend_data = NULL;
1046        }
1047
1048        transaction->state = REF_TRANSACTION_CLOSED;
1049}
1050
1051static int packed_transaction_prepare(struct ref_store *ref_store,
1052                                      struct ref_transaction *transaction,
1053                                      struct strbuf *err)
1054{
1055        struct packed_ref_store *refs = packed_downcast(
1056                        ref_store,
1057                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1058                        "ref_transaction_prepare");
1059        struct packed_transaction_backend_data *data;
1060        size_t i;
1061        int ret = TRANSACTION_GENERIC_ERROR;
1062
1063        /*
1064         * Note that we *don't* skip transactions with zero updates,
1065         * because such a transaction might be executed for the side
1066         * effect of ensuring that all of the references are peeled.
1067         * If the caller wants to optimize away empty transactions, it
1068         * should do so itself.
1069         */
1070
1071        data = xcalloc(1, sizeof(*data));
1072        string_list_init(&data->updates, 0);
1073
1074        transaction->backend_data = data;
1075
1076        /*
1077         * Stick the updates in a string list by refname so that we
1078         * can sort them:
1079         */
1080        for (i = 0; i < transaction->nr; i++) {
1081                struct ref_update *update = transaction->updates[i];
1082                struct string_list_item *item =
1083                        string_list_append(&data->updates, update->refname);
1084
1085                /* Store a pointer to update in item->util: */
1086                item->util = update;
1087        }
1088        string_list_sort(&data->updates);
1089
1090        if (ref_update_reject_duplicates(&data->updates, err))
1091                goto failure;
1092
1093        if (!is_lock_file_locked(&refs->lock)) {
1094                if (packed_refs_lock(ref_store, 0, err))
1095                        goto failure;
1096                data->own_lock = 1;
1097        }
1098
1099        if (write_with_updates(refs, &data->updates, err))
1100                goto failure;
1101
1102        transaction->state = REF_TRANSACTION_PREPARED;
1103        return 0;
1104
1105failure:
1106        packed_transaction_cleanup(refs, transaction);
1107        return ret;
1108}
1109
1110static int packed_transaction_abort(struct ref_store *ref_store,
1111                                    struct ref_transaction *transaction,
1112                                    struct strbuf *err)
1113{
1114        struct packed_ref_store *refs = packed_downcast(
1115                        ref_store,
1116                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1117                        "ref_transaction_abort");
1118
1119        packed_transaction_cleanup(refs, transaction);
1120        return 0;
1121}
1122
/*
 * Finish a prepared transaction by renaming the staging tempfile into
 * place as the `packed-refs` file, then releasing the transaction's
 * resources.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
                                     struct ref_transaction *transaction,
                                     struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_finish");
        int ret = TRANSACTION_GENERIC_ERROR;
        char *packed_refs_path;

        /* The on-disk contents are about to change; drop the stale cache. */
        clear_packed_ref_cache(refs);

        /*
         * Derive the destination from the lockfile rather than using
         * refs->path directly — presumably so that a symlinked
         * packed-refs file is overwritten at its target, matching how
         * the tempfile was placed in write_with_updates().
         */
        packed_refs_path = get_locked_file_path(&refs->lock);
        if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
                strbuf_addf(err, "error replacing %s: %s",
                            refs->path, strerror(errno));
                goto cleanup;
        }

        ret = 0;

cleanup:
        free(packed_refs_path);
        packed_transaction_cleanup(refs, transaction);
        return ret;
}
1150
/*
 * Commit the store's initial transaction. The packed backend needs no
 * special handling for the initial case, so just commit normally.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
                                            struct ref_transaction *transaction,
                                            struct strbuf *err)
{
        return ref_transaction_commit(transaction, err);
}
1157
1158static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
1159                             struct string_list *refnames, unsigned int flags)
1160{
1161        struct packed_ref_store *refs =
1162                packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1163        struct strbuf err = STRBUF_INIT;
1164        struct ref_transaction *transaction;
1165        struct string_list_item *item;
1166        int ret;
1167
1168        (void)refs; /* We need the check above, but don't use the variable */
1169
1170        if (!refnames->nr)
1171                return 0;
1172
1173        /*
1174         * Since we don't check the references' old_oids, the
1175         * individual updates can't fail, so we can pack all of the
1176         * updates into a single transaction.
1177         */
1178
1179        transaction = ref_store_transaction_begin(ref_store, &err);
1180        if (!transaction)
1181                return -1;
1182
1183        for_each_string_list_item(item, refnames) {
1184                if (ref_transaction_delete(transaction, item->string, NULL,
1185                                           flags, msg, &err)) {
1186                        warning(_("could not delete reference %s: %s"),
1187                                item->string, err.buf);
1188                        strbuf_reset(&err);
1189                }
1190        }
1191
1192        ret = ref_transaction_commit(transaction, &err);
1193
1194        if (ret) {
1195                if (refnames->nr == 1)
1196                        error(_("could not delete reference %s: %s"),
1197                              refnames->items[0].string, err.buf);
1198                else
1199                        error(_("could not delete references: %s"), err.buf);
1200        }
1201
1202        ref_transaction_free(transaction);
1203        strbuf_release(&err);
1204        return ret;
1205}
1206
/* Trivially succeed; see the comment in the body for why. */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
        /*
         * Packed refs are already packed. It might be that loose refs
         * are packed *into* a packed refs store, but that is done by
         * updating the packed references via a transaction.
         */
        return 0;
}
1216
/*
 * Symbolic refs are not supported by this backend (see the die()
 * below), so reaching this function indicates a caller bug.
 */
static int packed_create_symref(struct ref_store *ref_store,
                               const char *refname, const char *target,
                               const char *logmsg)
{
        die("BUG: packed reference store does not support symrefs");
}
1223
/*
 * Renaming is not supported by this backend (see the die() below), so
 * reaching this function indicates a caller bug.
 */
static int packed_rename_ref(struct ref_store *ref_store,
                            const char *oldrefname, const char *newrefname,
                            const char *logmsg)
{
        die("BUG: packed reference store does not support renaming references");
}
1230
/*
 * A packed-refs store has no reflogs (packed_create_reflog() below
 * dies with "does not support reflogs"). The read-only reflog
 * operations therefore report "nothing here" and the harmless
 * mutating ones trivially succeed.
 */

/* No reflogs exist, so hand back an iterator over nothing. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
        return empty_ref_iterator_begin();
}

/* No reflog entries to visit; succeed without calling `fn`. */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
                                      const char *refname,
                                      each_reflog_ent_fn fn, void *cb_data)
{
        return 0;
}

/* Reverse iteration likewise has nothing to visit. */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
                                              const char *refname,
                                              each_reflog_ent_fn fn,
                                              void *cb_data)
{
        return 0;
}

/* No ref in this store ever has a reflog. */
static int packed_reflog_exists(struct ref_store *ref_store,
                               const char *refname)
{
        return 0;
}

/* Creating a reflog here is unsupported; reaching this is a bug. */
static int packed_create_reflog(struct ref_store *ref_store,
                               const char *refname, int force_create,
                               struct strbuf *err)
{
        die("BUG: packed reference store does not support reflogs");
}

/* Nothing to delete, so deletion trivially succeeds. */
static int packed_delete_reflog(struct ref_store *ref_store,
                               const char *refname)
{
        return 0;
}

/* Nothing to expire, so expiry trivially succeeds. */
static int packed_reflog_expire(struct ref_store *ref_store,
                                const char *refname, const unsigned char *sha1,
                                unsigned int flags,
                                reflog_expiry_prepare_fn prepare_fn,
                                reflog_expiry_should_prune_fn should_prune_fn,
                                reflog_expiry_cleanup_fn cleanup_fn,
                                void *policy_cb_data)
{
        return 0;
}
1280
/*
 * The virtual method table for the packed-refs backend. The
 * initializers are positional, so they must stay in the exact field
 * order of `struct ref_storage_be` (presumably declared in
 * refs-internal.h — confirm there before reordering or inserting
 * entries).
 */
struct ref_storage_be refs_be_packed = {
        NULL,
        "packed",
        packed_ref_store_create,
        packed_init_db,
        packed_transaction_prepare,
        packed_transaction_finish,
        packed_transaction_abort,
        packed_initial_transaction_commit,

        packed_pack_refs,
        packed_peel_ref,
        packed_create_symref,
        packed_delete_refs,
        packed_rename_ref,

        packed_ref_iterator_begin,
        packed_read_raw_ref,

        packed_reflog_iterator_begin,
        packed_for_each_reflog_ent,
        packed_for_each_reflog_ent_reverse,
        packed_reflog_exists,
        packed_create_reflog,
        packed_delete_reflog,
        packed_reflog_expire
};