refs/packed-backend.c — synced with Git 2.14.4 (9e0f06d)
   1#include "../cache.h"
   2#include "../config.h"
   3#include "../refs.h"
   4#include "refs-internal.h"
   5#include "packed-backend.h"
   6#include "../iterator.h"
   7#include "../lockfile.h"
   8
/*
 * How (and whether) the `packed-refs` file may be accessed via
 * mmap(). The strategy in effect is chosen at compile time; see the
 * definition of the `mmap_strategy` variable below.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};
  29
/* The mmap strategy in effect, selected by platform build flags: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
  37
/* Forward declaration; the full definition appears below. */
struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/*
	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	 */
	struct stat_validity validity;
};
 105
/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction. In that case, it remains locked even after the
 * transaction is done and the new `packed-refs` file is activated.
 */
struct packed_ref_store {
	struct ref_store base;

	/*
	 * The `REF_STORE_*` capability flags this store was created
	 * with; checked against callers' requirements in
	 * `packed_downcast()`.
	 */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A snapshot of the values read from the `packed-refs` file,
	 * if it might still be current; otherwise, NULL.
	 */
	struct snapshot *snapshot;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile *tempfile;
};
 148
 149/*
 150 * Increment the reference count of `*snapshot`.
 151 */
 152static void acquire_snapshot(struct snapshot *snapshot)
 153{
 154        snapshot->referrers++;
 155}
 156
 157/*
 158 * If the buffer in `snapshot` is active, then either munmap the
 159 * memory and close the file, or free the memory. Then set the buffer
 160 * pointers to NULL.
 161 */
 162static void clear_snapshot_buffer(struct snapshot *snapshot)
 163{
 164        if (snapshot->mmapped) {
 165                if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
 166                        die_errno("error ummapping packed-refs file %s",
 167                                  snapshot->refs->path);
 168                snapshot->mmapped = 0;
 169        } else {
 170                free(snapshot->buf);
 171        }
 172        snapshot->buf = snapshot->eof = NULL;
 173        snapshot->header_len = 0;
 174}
 175
 176/*
 177 * Decrease the reference count of `*snapshot`. If it goes to zero,
 178 * free `*snapshot` and return true; otherwise return false.
 179 */
 180static int release_snapshot(struct snapshot *snapshot)
 181{
 182        if (!--snapshot->referrers) {
 183                stat_validity_clear(&snapshot->validity);
 184                clear_snapshot_buffer(snapshot);
 185                free(snapshot);
 186                return 1;
 187        } else {
 188                return 0;
 189        }
 190}
 191
 192struct ref_store *packed_ref_store_create(const char *path,
 193                                          unsigned int store_flags)
 194{
 195        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
 196        struct ref_store *ref_store = (struct ref_store *)refs;
 197
 198        base_ref_store_init(ref_store, &refs_be_packed);
 199        refs->store_flags = store_flags;
 200
 201        refs->path = xstrdup(path);
 202        return ref_store;
 203}
 204
 205/*
 206 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 207 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 208 * support at least the flags specified in `required_flags`. `caller`
 209 * is used in any necessary error messages.
 210 */
 211static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
 212                                                unsigned int required_flags,
 213                                                const char *caller)
 214{
 215        struct packed_ref_store *refs;
 216
 217        if (ref_store->be != &refs_be_packed)
 218                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
 219                    ref_store->be->name, caller);
 220
 221        refs = (struct packed_ref_store *)ref_store;
 222
 223        if ((refs->store_flags & required_flags) != required_flags)
 224                die("BUG: unallowed operation (%s), requires %x, has %x\n",
 225                    caller, required_flags, refs->store_flags);
 226
 227        return refs;
 228}
 229
 230static void clear_snapshot(struct packed_ref_store *refs)
 231{
 232        if (refs->snapshot) {
 233                struct snapshot *snapshot = refs->snapshot;
 234
 235                refs->snapshot = NULL;
 236                release_snapshot(snapshot);
 237        }
 238}
 239
 240static NORETURN void die_unterminated_line(const char *path,
 241                                           const char *p, size_t len)
 242{
 243        if (len < 80)
 244                die("unterminated line in %s: %.*s", path, (int)len, p);
 245        else
 246                die("unterminated line in %s: %.75s...", path, p);
 247}
 248
 249static NORETURN void die_invalid_line(const char *path,
 250                                      const char *p, size_t len)
 251{
 252        const char *eol = memchr(p, '\n', len);
 253
 254        if (!eol)
 255                die_unterminated_line(path, p, len);
 256        else if (eol - p < 80)
 257                die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
 258        else
 259                die("unexpected line in %s: %.75s...", path, p);
 260
 261}
 262
/*
 * One record within a snapshot buffer: the line for a single
 * reference, plus any immediately-following peeled ("^") line (see
 * `sort_snapshot()`, which keeps the two together).
 */
struct snapshot_record {
	/* Start of the record within the snapshot buffer: */
	const char *start;
	/* Length of the record in bytes, including the trailing LF: */
	size_t len;
};
 267
/*
 * Compare the refnames in two `snapshot_record`s, byte by byte. The
 * refname within each record starts (GIT_SHA1_HEXSZ + 1) bytes past
 * the record start and is terminated by LF rather than NUL; a
 * refname that is a proper prefix of the other sorts first. Suitable
 * for QSORT().
 */
static int cmp_packed_ref_records(const void *v1, const void *v2)
{
	const struct snapshot_record *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
	const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		}
		r1++;
		r2++;
	}
}
 287
 288/*
 289 * Compare a snapshot record at `rec` to the specified NUL-terminated
 290 * refname.
 291 */
 292static int cmp_record_to_refname(const char *rec, const char *refname)
 293{
 294        const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
 295        const char *r2 = refname;
 296
 297        while (1) {
 298                if (*r1 == '\n')
 299                        return *r2 ? -1 : 0;
 300                if (!*r2)
 301                        return 1;
 302                if (*r1 != *r2)
 303                        return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 304                r1++;
 305                r2++;
 306        }
 307}
 308
/*
 * `snapshot->buf` is not known to be sorted. Check whether it is, and
 * if not, sort it into new memory and munmap/free the old storage.
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	/* Only the payload after the header line is sorted: */
	pos = snapshot->buf + snapshot->header_len;
	eof = snapshot->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	/*
	 * Split the buffer into records, checking as we go whether
	 * consecutive records are already in sorted order:
	 */
	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	/* If the file was already sorted, there is nothing to do: */
	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = new_buffer;
	snapshot->eof = new_buffer + len;
	snapshot->header_len = 0;

cleanup:
	free(records);
}
 401
 402/*
 403 * Return a pointer to the start of the record that contains the
 404 * character `*p` (which must be within the buffer). If no other
 405 * record start is found, return `buf`.
 406 */
 407static const char *find_start_of_record(const char *buf, const char *p)
 408{
 409        while (p > buf && (p[-1] != '\n' || p[0] == '^'))
 410                p--;
 411        return p;
 412}
 413
 414/*
 415 * Return a pointer to the start of the record following the record
 416 * that contains `*p`. If none is found before `end`, return `end`.
 417 */
 418static const char *find_end_of_record(const char *p, const char *end)
 419{
 420        while (++p < end && (p[-1] != '\n' || p[0] == '^'))
 421                ;
 422        return p;
 423}
 424
 425/*
 426 * We want to be able to compare mmapped reference records quickly,
 427 * without totally parsing them. We can do so because the records are
 428 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 429 * + 1) bytes past the beginning of the record.
 430 *
 431 * But what if the `packed-refs` file contains garbage? We're willing
 432 * to tolerate not detecting the problem, as long as we don't produce
 433 * totally garbled output (we can't afford to check the integrity of
 434 * the whole file during every Git invocation). But we do want to be
 435 * sure that we never read past the end of the buffer in memory and
 436 * perform an illegal memory access.
 437 *
 438 * Guarantee that minimum level of safety by verifying that the last
 439 * record in the file is LF-terminated, and that it has at least
 440 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 441 * these checks fails.
 442 */
 443static void verify_buffer_safe(struct snapshot *snapshot)
 444{
 445        const char *buf = snapshot->buf + snapshot->header_len;
 446        const char *eof = snapshot->eof;
 447        const char *last_line;
 448
 449        if (buf == eof)
 450                return;
 451
 452        last_line = find_start_of_record(buf, eof - 1);
 453        if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
 454                die_invalid_line(snapshot->refs->path,
 455                                 last_line, eof - last_line);
 456}
 457
 458/*
 459 * Depending on `mmap_strategy`, either mmap or read the contents of
 460 * the `packed-refs` file into the snapshot. Return 1 if the file
 461 * existed and was read, or 0 if the file was absent. Die on errors.
 462 */
 463static int load_contents(struct snapshot *snapshot)
 464{
 465        int fd;
 466        struct stat st;
 467        size_t size;
 468        ssize_t bytes_read;
 469
 470        fd = open(snapshot->refs->path, O_RDONLY);
 471        if (fd < 0) {
 472                if (errno == ENOENT) {
 473                        /*
 474                         * This is OK; it just means that no
 475                         * "packed-refs" file has been written yet,
 476                         * which is equivalent to it being empty,
 477                         * which is its state when initialized with
 478                         * zeros.
 479                         */
 480                        return 0;
 481                } else {
 482                        die_errno("couldn't read %s", snapshot->refs->path);
 483                }
 484        }
 485
 486        stat_validity_update(&snapshot->validity, fd);
 487
 488        if (fstat(fd, &st) < 0)
 489                die_errno("couldn't stat %s", snapshot->refs->path);
 490        size = xsize_t(st.st_size);
 491
 492        switch (mmap_strategy) {
 493        case MMAP_NONE:
 494                snapshot->buf = xmalloc(size);
 495                bytes_read = read_in_full(fd, snapshot->buf, size);
 496                if (bytes_read < 0 || bytes_read != size)
 497                        die_errno("couldn't read %s", snapshot->refs->path);
 498                snapshot->eof = snapshot->buf + size;
 499                snapshot->mmapped = 0;
 500                break;
 501        case MMAP_TEMPORARY:
 502        case MMAP_OK:
 503                snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 504                snapshot->eof = snapshot->buf + size;
 505                snapshot->mmapped = 1;
 506                break;
 507        }
 508        close(fd);
 509
 510        return 1;
 511}
 512
/*
 * Find the place in `snapshot->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted. In the latter mode, `refname` doesn't have to be a proper
 * reference name; for example, one could search for "refs/replace/"
 * to find the start of any replace references.
 *
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
 */
static const char *find_reference_location(struct snapshot *snapshot,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = snapshot->buf + snapshot->header_len;

	/*
	 * A pointer to a the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = snapshot->eof;

	while (lo < hi) {
		const char *mid, *rec;
		int cmp;

		/*
		 * Bisect on the midpoint, then back up to the start
		 * of the record containing it so the comparison is
		 * aligned on a record boundary:
		 */
		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_record_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	/* Not found; `lo` is where the record would be inserted. */
	if (mustexist)
		return NULL;
	else
		return lo;
}
 571
/*
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   Neither `peeled` nor `fully-peeled`:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   `peeled`:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   `fully-peeled`:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded is not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 *   `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	/* If the file was absent, the (empty) snapshot is done: */
	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		/*
		 * Copy the header into a NUL-terminated buffer; the
		 * trait parsing below mutates its copy in place, so
		 * it must not operate on the (possibly mmapped)
		 * snapshot buffer directly:
		 */
		strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->header_len = eol + 1 - snapshot->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof -
			(snapshot->buf + snapshot->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}
 686
 687/*
 688 * Check that `refs->snapshot` (if present) still reflects the
 689 * contents of the `packed-refs` file. If not, clear the snapshot.
 690 */
 691static void validate_snapshot(struct packed_ref_store *refs)
 692{
 693        if (refs->snapshot &&
 694            !stat_validity_check(&refs->snapshot->validity, refs->path))
 695                clear_snapshot(refs);
 696}
 697
 698/*
 699 * Get the `snapshot` for the specified packed_ref_store, creating and
 700 * populating it if it hasn't been read before or if the file has been
 701 * changed (according to its `validity` field) since it was last read.
 702 * On the other hand, if we hold the lock, then assume that the file
 703 * hasn't been changed out from under us, so skip the extra `stat()`
 704 * call in `stat_validity_check()`. This function does *not* increase
 705 * the snapshot's reference count on behalf of the caller.
 706 */
 707static struct snapshot *get_snapshot(struct packed_ref_store *refs)
 708{
 709        if (!is_lock_file_locked(&refs->lock))
 710                validate_snapshot(refs);
 711
 712        if (!refs->snapshot)
 713                refs->snapshot = create_snapshot(refs);
 714
 715        return refs->snapshot;
 716}
 717
 718static int packed_read_raw_ref(struct ref_store *ref_store,
 719                               const char *refname, unsigned char *sha1,
 720                               struct strbuf *referent, unsigned int *type)
 721{
 722        struct packed_ref_store *refs =
 723                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
 724        struct snapshot *snapshot = get_snapshot(refs);
 725        const char *rec;
 726
 727        *type = 0;
 728
 729        rec = find_reference_location(snapshot, refname, 1);
 730
 731        if (!rec) {
 732                /* refname is not a packed reference. */
 733                errno = ENOENT;
 734                return -1;
 735        }
 736
 737        if (get_sha1_hex(rec, sha1))
 738                die_invalid_line(refs->path, rec, snapshot->eof - rec);
 739
 740        *type = REF_ISPACKED;
 741        return 0;
 742}
 743
/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_sha1`
 * if the reference is not a tag or if it is broken.
 * (See `packed_ref_iterator_peel()` for how this flag is consumed.)
 */
#define REF_KNOWS_PEELED 0x40
 751
/*
 * An iterator over a snapshot of a `packed-refs` file.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The snapshot being iterated over (refcount held): */
	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */
	const char *pos;

	/* The end of the part of the buffer that will be iterated over: */
	const char *eof;

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

	/*
	 * `DO_FOR_EACH_*` flags controlling which references are
	 * yielded; see `packed_ref_iterator_advance()`.
	 */
	unsigned int flags;
};
 772
/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	/* Snapshot exhausted? */
	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	/* Parse "<hex-oid> " at the start of the record: */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	/* Copy the refname into scratch space and expose it: */
	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* Consume an optional "^<hex-oid>" peeled line: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
 845
 846static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
 847{
 848        struct packed_ref_iterator *iter =
 849                (struct packed_ref_iterator *)ref_iterator;
 850        int ok;
 851
 852        while ((ok = next_record(iter)) == ITER_OK) {
 853                if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
 854                    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
 855                        continue;
 856
 857                if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
 858                    !ref_resolves_to_object(iter->base.refname, &iter->oid,
 859                                            iter->flags))
 860                        continue;
 861
 862                return ITER_OK;
 863        }
 864
 865        if (ref_iterator_abort(ref_iterator) != ITER_DONE)
 866                ok = ITER_ERROR;
 867
 868        return ok;
 869}
 870
 871static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
 872                                   struct object_id *peeled)
 873{
 874        struct packed_ref_iterator *iter =
 875                (struct packed_ref_iterator *)ref_iterator;
 876
 877        if ((iter->base.flags & REF_KNOWS_PEELED)) {
 878                oidcpy(peeled, &iter->peeled);
 879                return is_null_oid(&iter->peeled) ? -1 : 0;
 880        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
 881                return -1;
 882        } else {
 883                return !!peel_object(iter->oid.hash, peeled->hash);
 884        }
 885}
 886
 887static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
 888{
 889        struct packed_ref_iterator *iter =
 890                (struct packed_ref_iterator *)ref_iterator;
 891        int ok = ITER_DONE;
 892
 893        strbuf_release(&iter->refname_buf);
 894        release_snapshot(iter->snapshot);
 895        base_ref_iterator_free(ref_iterator);
 896        return ok;
 897}
 898
/*
 * Virtual method table for packed-ref iterators: advance, peel, and
 * abort implementations, in the order declared by
 * `struct ref_iterator_vtable` (see refs-internal.h).
 */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
 904
/*
 * ref_store "iterator_begin" method: return an iterator over the
 * references in the current `packed-refs` snapshot whose names start
 * with `prefix` (over all references if `prefix` is NULL or empty).
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/*
	 * Resolving refs to objects requires access to the object
	 * database, unless broken refs are to be included anyway:
	 */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	/* An empty (or absent) packed-refs file yields no references: */
	if (!snapshot->buf)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/* Hold a reference so the snapshot outlives this iterator: */
	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	if (prefix && *prefix)
		/* Begin at the first record that could match prefix: */
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->buf + snapshot->header_len;

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
 956
 957/*
 958 * Write an entry to the packed-refs file for the specified refname.
 959 * If peeled is non-NULL, write it as the entry's peeled value. On
 960 * error, return a nonzero value and leave errno set at the value left
 961 * by the failing call to `fprintf()`.
 962 */
 963static int write_packed_entry(FILE *fh, const char *refname,
 964                              const unsigned char *sha1,
 965                              const unsigned char *peeled)
 966{
 967        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
 968            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
 969                return -1;
 970
 971        return 0;
 972}
 973
/*
 * Take the `packed-refs` lock for `ref_store`, retrying for up to
 * `core.packedrefstimeout` milliseconds (default 1000). `flags` is
 * passed through to `hold_lock_file_for_update_timeout()`. On
 * success, refresh the snapshot to match the now-locked file and
 * return 0; on failure, write a message to `err` and return -1.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	/* Read the timeout from config only once per process: */
	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * snapshot matches the current version of the file. Normally
	 * `get_snapshot()` does that for us, but that function
	 * assumes that when the file is locked, any existing snapshot
	 * is still valid. We've just locked the file, but it might
	 * have changed the moment *before* we locked it.
	 */
	validate_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}
1023
1024void packed_refs_unlock(struct ref_store *ref_store)
1025{
1026        struct packed_ref_store *refs = packed_downcast(
1027                        ref_store,
1028                        REF_STORE_READ | REF_STORE_WRITE,
1029                        "packed_refs_unlock");
1030
1031        if (!is_lock_file_locked(&refs->lock))
1032                die("BUG: packed_refs_unlock() called when not locked");
1033        rollback_lock_file(&refs->lock);
1034}
1035
1036int packed_refs_is_locked(struct ref_store *ref_store)
1037{
1038        struct packed_ref_store *refs = packed_downcast(
1039                        ref_store,
1040                        REF_STORE_READ | REF_STORE_WRITE,
1041                        "packed_refs_is_locked");
1042
1043        return is_lock_file_locked(&refs->lock);
1044}
1045
1046/*
1047 * The packed-refs header line that we write out. Perhaps other traits
1048 * will be added later.
1049 *
1050 * Note that earlier versions of Git used to parse these traits by
1051 * looking for " trait " in the line. For this reason, the space after
1052 * the colon and the trailing space are required.
1053 */
1054static const char PACKED_REFS_HEADER[] =
1055        "# pack-refs with: peeled fully-peeled sorted \n";
1056
/*
 * ref_store "init_db" method: a `packed-refs` file springs into
 * existence the first time it is written, so no setup is needed.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
1062
1063/*
1064 * Write the packed refs from the current snapshot to the packed-refs
1065 * tempfile, incorporating any changes from `updates`. `updates` must
1066 * be a sorted string list whose keys are the refnames and whose util
1067 * values are `struct ref_update *`. On error, rollback the tempfile,
1068 * write an error message to `err`, and return a nonzero value.
1069 *
1070 * The packfile must be locked before calling this function and will
1071 * remain locked when it is done.
1072 */
1073static int write_with_updates(struct packed_ref_store *refs,
1074                              struct string_list *updates,
1075                              struct strbuf *err)
1076{
1077        struct ref_iterator *iter = NULL;
1078        size_t i;
1079        int ok;
1080        FILE *out;
1081        struct strbuf sb = STRBUF_INIT;
1082        char *packed_refs_path;
1083
1084        if (!is_lock_file_locked(&refs->lock))
1085                die("BUG: write_with_updates() called while unlocked");
1086
1087        /*
1088         * If packed-refs is a symlink, we want to overwrite the
1089         * symlinked-to file, not the symlink itself. Also, put the
1090         * staging file next to it:
1091         */
1092        packed_refs_path = get_locked_file_path(&refs->lock);
1093        strbuf_addf(&sb, "%s.new", packed_refs_path);
1094        free(packed_refs_path);
1095        refs->tempfile = create_tempfile(sb.buf);
1096        if (!refs->tempfile) {
1097                strbuf_addf(err, "unable to create file %s: %s",
1098                            sb.buf, strerror(errno));
1099                strbuf_release(&sb);
1100                return -1;
1101        }
1102        strbuf_release(&sb);
1103
1104        out = fdopen_tempfile(refs->tempfile, "w");
1105        if (!out) {
1106                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
1107                            strerror(errno));
1108                goto error;
1109        }
1110
1111        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
1112                goto write_error;
1113
1114        /*
1115         * We iterate in parallel through the current list of refs and
1116         * the list of updates, processing an entry from at least one
1117         * of the lists each time through the loop. When the current
1118         * list of refs is exhausted, set iter to NULL. When the list
1119         * of updates is exhausted, leave i set to updates->nr.
1120         */
1121        iter = packed_ref_iterator_begin(&refs->base, "",
1122                                         DO_FOR_EACH_INCLUDE_BROKEN);
1123        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1124                iter = NULL;
1125
1126        i = 0;
1127
1128        while (iter || i < updates->nr) {
1129                struct ref_update *update = NULL;
1130                int cmp;
1131
1132                if (i >= updates->nr) {
1133                        cmp = -1;
1134                } else {
1135                        update = updates->items[i].util;
1136
1137                        if (!iter)
1138                                cmp = +1;
1139                        else
1140                                cmp = strcmp(iter->refname, update->refname);
1141                }
1142
1143                if (!cmp) {
1144                        /*
1145                         * There is both an old value and an update
1146                         * for this reference. Check the old value if
1147                         * necessary:
1148                         */
1149                        if ((update->flags & REF_HAVE_OLD)) {
1150                                if (is_null_oid(&update->old_oid)) {
1151                                        strbuf_addf(err, "cannot update ref '%s': "
1152                                                    "reference already exists",
1153                                                    update->refname);
1154                                        goto error;
1155                                } else if (oidcmp(&update->old_oid, iter->oid)) {
1156                                        strbuf_addf(err, "cannot update ref '%s': "
1157                                                    "is at %s but expected %s",
1158                                                    update->refname,
1159                                                    oid_to_hex(iter->oid),
1160                                                    oid_to_hex(&update->old_oid));
1161                                        goto error;
1162                                }
1163                        }
1164
1165                        /* Now figure out what to use for the new value: */
1166                        if ((update->flags & REF_HAVE_NEW)) {
1167                                /*
1168                                 * The update takes precedence. Skip
1169                                 * the iterator over the unneeded
1170                                 * value.
1171                                 */
1172                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1173                                        iter = NULL;
1174                                cmp = +1;
1175                        } else {
1176                                /*
1177                                 * The update doesn't actually want to
1178                                 * change anything. We're done with it.
1179                                 */
1180                                i++;
1181                                cmp = -1;
1182                        }
1183                } else if (cmp > 0) {
1184                        /*
1185                         * There is no old value but there is an
1186                         * update for this reference. Make sure that
1187                         * the update didn't expect an existing value:
1188                         */
1189                        if ((update->flags & REF_HAVE_OLD) &&
1190                            !is_null_oid(&update->old_oid)) {
1191                                strbuf_addf(err, "cannot update ref '%s': "
1192                                            "reference is missing but expected %s",
1193                                            update->refname,
1194                                            oid_to_hex(&update->old_oid));
1195                                goto error;
1196                        }
1197                }
1198
1199                if (cmp < 0) {
1200                        /* Pass the old reference through. */
1201
1202                        struct object_id peeled;
1203                        int peel_error = ref_iterator_peel(iter, &peeled);
1204
1205                        if (write_packed_entry(out, iter->refname,
1206                                               iter->oid->hash,
1207                                               peel_error ? NULL : peeled.hash))
1208                                goto write_error;
1209
1210                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1211                                iter = NULL;
1212                } else if (is_null_oid(&update->new_oid)) {
1213                        /*
1214                         * The update wants to delete the reference,
1215                         * and the reference either didn't exist or we
1216                         * have already skipped it. So we're done with
1217                         * the update (and don't have to write
1218                         * anything).
1219                         */
1220                        i++;
1221                } else {
1222                        struct object_id peeled;
1223                        int peel_error = peel_object(update->new_oid.hash,
1224                                                     peeled.hash);
1225
1226                        if (write_packed_entry(out, update->refname,
1227                                               update->new_oid.hash,
1228                                               peel_error ? NULL : peeled.hash))
1229                                goto write_error;
1230
1231                        i++;
1232                }
1233        }
1234
1235        if (ok != ITER_DONE) {
1236                strbuf_addstr(err, "unable to write packed-refs file: "
1237                              "error iterating over old contents");
1238                goto error;
1239        }
1240
1241        if (close_tempfile_gently(refs->tempfile)) {
1242                strbuf_addf(err, "error closing file %s: %s",
1243                            get_tempfile_path(refs->tempfile),
1244                            strerror(errno));
1245                strbuf_release(&sb);
1246                delete_tempfile(&refs->tempfile);
1247                return -1;
1248        }
1249
1250        return 0;
1251
1252write_error:
1253        strbuf_addf(err, "error writing to %s: %s",
1254                    get_tempfile_path(refs->tempfile), strerror(errno));
1255
1256error:
1257        if (iter)
1258                ref_iterator_abort(iter);
1259
1260        delete_tempfile(&refs->tempfile);
1261        return -1;
1262}
1263
/*
 * Return nonzero if `transaction` could possibly modify the
 * `packed-refs` file; return zero only when it provably cannot (the
 * common case of unconditional deletions of references that are not
 * present in the packed file), so the caller may skip the rewrite.
 * The packed-refs lock must already be held.
 */
int is_packed_transaction_needed(struct ref_store *ref_store,
				 struct ref_transaction *transaction)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ,
			"is_packed_transaction_needed");
	struct strbuf referent = STRBUF_INIT;
	size_t i;
	int ret;

	if (!is_lock_file_locked(&refs->lock))
		BUG("is_packed_transaction_needed() called while unlocked");

	/*
	 * We're only going to bother returning false for the common,
	 * trivial case that references are only being deleted, their
	 * old values are not being checked, and the old `packed-refs`
	 * file doesn't contain any of those reference(s). This gives
	 * false positives for some other cases that could
	 * theoretically be optimized away:
	 *
	 * 1. It could be that the old value is being verified without
	 *    setting a new value. In this case, we could verify the
	 *    old value here and skip the update if it agrees. If it
	 *    disagrees, we could either let the update go through
	 *    (the actual commit would re-detect and report the
	 *    problem), or come up with a way of reporting such an
	 *    error to *our* caller.
	 *
	 * 2. It could be that a new value is being set, but that it
	 *    is identical to the current packed value of the
	 *    reference.
	 *
	 * Neither of these cases will come up in the current code,
	 * because the only caller of this function passes to it a
	 * transaction that only includes `delete` updates with no
	 * `old_id`. Even if that ever changes, false positives only
	 * cause an optimization to be missed; they do not affect
	 * correctness.
	 */

	/*
	 * Start with the cheap checks that don't require old
	 * reference values to be read:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if (update->flags & REF_HAVE_OLD)
			/* Have to check the old value -> needed. */
			return 1;

		if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
			/* Have to set a new value -> needed. */
			return 1;
	}

	/*
	 * The transaction isn't checking any old values nor is it
	 * setting any nonzero new values, so it still might be able
	 * to be skipped. Now do the more expensive check: the update
	 * is needed if any of the updates is a delete, and the old
	 * `packed-refs` file contains a value for that reference.
	 */
	ret = 0;
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		unsigned int type;
		struct object_id oid;

		if (!(update->flags & REF_HAVE_NEW))
			/*
			 * This reference isn't being deleted -> not
			 * needed.
			 */
			continue;

		/*
		 * A successful read, or a failure other than ENOENT,
		 * means the packed file may hold the reference:
		 */
		if (!refs_read_raw_ref(ref_store, update->refname,
				       oid.hash, &referent, &type) ||
		    errno != ENOENT) {
			/*
			 * We have to actually delete that reference
			 * -> this transaction is needed.
			 */
			ret = 1;
			break;
		}
	}

	strbuf_release(&referent);
	return ret;
}
1357
/*
 * Backend-private data for a packed-refs transaction, stored in
 * `transaction->backend_data` between prepare and finish/abort.
 */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/* Updates sorted by refname; each util is a `struct ref_update *`. */
	struct string_list updates;
};
1364
/*
 * Release everything associated with `transaction`: the sorted update
 * list, any staged tempfile, and — if this transaction took it — the
 * packed-refs lock. Finally mark the transaction closed. Safe to call
 * whether or not preparation succeeded.
 */
static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		/* Discard any staged-but-not-activated new contents: */
		if (is_tempfile_active(refs->tempfile))
			delete_tempfile(&refs->tempfile);

		/* Only drop the lock if this transaction acquired it: */
		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}
1387
/*
 * ref_store "transaction_prepare" method: sort and validate the
 * updates, take the packed-refs lock if not already held, and stage
 * the new file contents in a tempfile ready for "finish". On failure,
 * clean up, write a message to `err`, and return
 * TRANSACTION_GENERIC_ERROR.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled or
	 * ensuring that the `packed-refs` file is sorted. If the
	 * caller wants to optimize away empty transactions, it should
	 * do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock ourselves if the caller hasn't already: */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1447
1448static int packed_transaction_abort(struct ref_store *ref_store,
1449                                    struct ref_transaction *transaction,
1450                                    struct strbuf *err)
1451{
1452        struct packed_ref_store *refs = packed_downcast(
1453                        ref_store,
1454                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1455                        "ref_transaction_abort");
1456
1457        packed_transaction_cleanup(refs, transaction);
1458        return 0;
1459}
1460
/*
 * ref_store "transaction_finish" method: activate the contents staged
 * by "prepare" by renaming the tempfile onto `packed-refs`. The
 * cached snapshot is cleared first, since it is about to become
 * stale. Return 0 on success or TRANSACTION_GENERIC_ERROR (with a
 * message in `err`) on failure.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	clear_snapshot(refs);

	/* Rename onto the symlinked-to path, not the symlink itself: */
	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1488
/*
 * ref_store "initial_transaction_commit" method: simply delegate to
 * the normal transaction-commit path.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
1495
/*
 * ref_store "delete_refs" method: delete all of `refnames` from the
 * packed-refs file in a single transaction, logging `msg`. A failure
 * to queue an individual deletion only produces a warning; return
 * nonzero if the commit as a whole fails.
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}
1544
/*
 * ref_store "pack_refs" method: a no-op for this backend.
 */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1554
/*
 * ref_store "create_symref" method: symbolic references cannot be
 * represented in a packed-refs file, so this is always a bug.
 */
static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
1561
/*
 * ref_store "rename_ref" method: renaming is not supported directly
 * on the packed store, so this is always a bug.
 */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
1568
/*
 * ref_store "copy_ref" method: copying is not supported directly on
 * the packed store, so this is always a bug.
 */
static int packed_copy_ref(struct ref_store *ref_store,
			   const char *oldrefname, const char *newrefname,
			   const char *logmsg)
{
	die("BUG: packed reference store does not support copying references");
}
1575
/*
 * ref_store "reflog_iterator_begin" method: packed refs have no
 * reflogs, so return an iterator over nothing.
 */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
1580
/*
 * ref_store "for_each_reflog_ent" method: no reflog entries exist in
 * a packed store, so trivially succeed without calling `fn`.
 */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}
1587
/*
 * ref_store "for_each_reflog_ent_reverse" method: no reflog entries
 * exist in a packed store, so trivially succeed without calling `fn`.
 */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}
1595
/*
 * ref_store "reflog_exists" method: no packed reference ever has a
 * reflog, so always report false.
 */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1601
/*
 * ref_store "create_reflog" method: reflogs cannot be stored in a
 * packed-refs file, so this is always a bug.
 */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
1608
/*
 * ref_store "delete_reflog" method: there is never a reflog to
 * delete, so trivially succeed.
 */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1614
/*
 * ref_store "reflog_expire" method: there are never any reflog
 * entries to expire, so trivially succeed without invoking any of the
 * policy callbacks.
 */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1625
/*
 * The packed-refs storage backend. Entries appear in the order
 * declared by `struct ref_storage_be` (see refs-internal.h). The
 * reflog methods are stubs, since this backend keeps no reflogs, and
 * symref/rename/copy/reflog-create operations die as unsupported.
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,
	packed_copy_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};