/*
 * refs/packed-backend.c: the "packed-refs" reference storage backend.
 */
   1#include "../cache.h"
   2#include "../config.h"
   3#include "../refs.h"
   4#include "refs-internal.h"
   5#include "packed-backend.h"
   6#include "../iterator.h"
   7#include "../lockfile.h"
   8
/*
 * How the `packed-refs` file may be accessed via mmap(). The choice
 * is made at compile time based on platform capabilities (see the
 * `#if` block below).
 */
enum mmap_strategy {
        /*
         * Don't use mmap() at all for reading `packed-refs`.
         */
        MMAP_NONE,

        /*
         * Can use mmap() for reading `packed-refs`, but the file must
         * not remain mmapped. This is the usual option on Windows,
         * where you cannot rename a new version of a file onto a file
         * that is currently mmapped.
         */
        MMAP_TEMPORARY,

        /*
         * It is OK to leave the `packed-refs` file mmapped while
         * arbitrary other code is running.
         */
        MMAP_OK
};
  29
/* Pick the mmap strategy for this platform at compile time: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
  37
/* Forward declaration; defined below, after `struct snapshot`. */
struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
        /*
         * A back-pointer to the packed_ref_store with which this
         * snapshot is associated:
         */
        struct packed_ref_store *refs;

        /* Is the `packed-refs` file currently mmapped? */
        int mmapped;

        /*
         * The contents of the `packed-refs` file. If the file was
         * already sorted, this points at the mmapped contents of the
         * file. If not, this points at heap-allocated memory
         * containing the contents, sorted. If there were no contents
         * (e.g., because the file didn't exist), `buf` and `eof` are
         * both NULL.
         */
        char *buf, *eof;

        /* The size of the header line, if any; otherwise, 0: */
        size_t header_len;

        /*
         * What is the peeled state of the `packed-refs` file that
         * this snapshot represents? (This is usually determined from
         * the file's header.)
         */
        enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

        /*
         * Count of references to this instance, including the pointer
         * from `packed_ref_store::snapshot`, if any. The instance
         * will not be freed as long as the reference count is
         * nonzero.
         */
        unsigned int referrers;

        /*
         * The metadata of the `packed-refs` file from which this
         * snapshot was created, used to tell if the file has been
         * replaced since we read it.
         */
        struct stat_validity validity;
};
 105
/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction. In that case, it remains locked even after the
 * transaction is done and the new `packed-refs` file is activated.
 */
struct packed_ref_store {
        struct ref_store base;

        /* Bitmask of `REF_STORE_*` capabilities; checked in `packed_downcast()`: */
        unsigned int store_flags;

        /* The path of the "packed-refs" file: */
        char *path;

        /*
         * A snapshot of the values read from the `packed-refs` file,
         * if it might still be current; otherwise, NULL.
         */
        struct snapshot *snapshot;

        /*
         * Lock used for the "packed-refs" file. Note that this (and
         * thus the enclosing `packed_ref_store`) must not be freed.
         */
        struct lock_file lock;

        /*
         * Temporary file used when rewriting new contents to the
         * "packed-refs" file. Note that this (and thus the enclosing
         * `packed_ref_store`) must not be freed.
         */
        struct tempfile *tempfile;
};
 148
 149/*
 150 * Increment the reference count of `*snapshot`.
 151 */
 152static void acquire_snapshot(struct snapshot *snapshot)
 153{
 154        snapshot->referrers++;
 155}
 156
 157/*
 158 * If the buffer in `snapshot` is active, then either munmap the
 159 * memory and close the file, or free the memory. Then set the buffer
 160 * pointers to NULL.
 161 */
 162static void clear_snapshot_buffer(struct snapshot *snapshot)
 163{
 164        if (snapshot->mmapped) {
 165                if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
 166                        die_errno("error ummapping packed-refs file %s",
 167                                  snapshot->refs->path);
 168                snapshot->mmapped = 0;
 169        } else {
 170                free(snapshot->buf);
 171        }
 172        snapshot->buf = snapshot->eof = NULL;
 173        snapshot->header_len = 0;
 174}
 175
 176/*
 177 * Decrease the reference count of `*snapshot`. If it goes to zero,
 178 * free `*snapshot` and return true; otherwise return false.
 179 */
 180static int release_snapshot(struct snapshot *snapshot)
 181{
 182        if (!--snapshot->referrers) {
 183                stat_validity_clear(&snapshot->validity);
 184                clear_snapshot_buffer(snapshot);
 185                free(snapshot);
 186                return 1;
 187        } else {
 188                return 0;
 189        }
 190}
 191
 192struct ref_store *packed_ref_store_create(const char *path,
 193                                          unsigned int store_flags)
 194{
 195        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
 196        struct ref_store *ref_store = (struct ref_store *)refs;
 197
 198        base_ref_store_init(ref_store, &refs_be_packed);
 199        refs->store_flags = store_flags;
 200
 201        refs->path = xstrdup(path);
 202        return ref_store;
 203}
 204
 205/*
 206 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 207 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 208 * support at least the flags specified in `required_flags`. `caller`
 209 * is used in any necessary error messages.
 210 */
 211static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
 212                                                unsigned int required_flags,
 213                                                const char *caller)
 214{
 215        struct packed_ref_store *refs;
 216
 217        if (ref_store->be != &refs_be_packed)
 218                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
 219                    ref_store->be->name, caller);
 220
 221        refs = (struct packed_ref_store *)ref_store;
 222
 223        if ((refs->store_flags & required_flags) != required_flags)
 224                die("BUG: unallowed operation (%s), requires %x, has %x\n",
 225                    caller, required_flags, refs->store_flags);
 226
 227        return refs;
 228}
 229
 230static void clear_snapshot(struct packed_ref_store *refs)
 231{
 232        if (refs->snapshot) {
 233                struct snapshot *snapshot = refs->snapshot;
 234
 235                refs->snapshot = NULL;
 236                release_snapshot(snapshot);
 237        }
 238}
 239
 240static NORETURN void die_unterminated_line(const char *path,
 241                                           const char *p, size_t len)
 242{
 243        if (len < 80)
 244                die("unterminated line in %s: %.*s", path, (int)len, p);
 245        else
 246                die("unterminated line in %s: %.75s...", path, p);
 247}
 248
 249static NORETURN void die_invalid_line(const char *path,
 250                                      const char *p, size_t len)
 251{
 252        const char *eol = memchr(p, '\n', len);
 253
 254        if (!eol)
 255                die_unterminated_line(path, p, len);
 256        else if (eol - p < 80)
 257                die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
 258        else
 259                die("unexpected line in %s: %.75s...", path, p);
 260
 261}
 262
/*
 * One record in a `packed-refs` buffer: a reference line plus its
 * peeled line (if any). Used by `sort_snapshot()` while sorting.
 */
struct snapshot_record {
        /* Pointer to the first character of the record: */
        const char *start;
        /* Length of the record, including the trailing LF: */
        size_t len;
};
 267
 268static int cmp_packed_ref_records(const void *v1, const void *v2)
 269{
 270        const struct snapshot_record *e1 = v1, *e2 = v2;
 271        const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
 272        const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
 273
 274        while (1) {
 275                if (*r1 == '\n')
 276                        return *r2 == '\n' ? 0 : -1;
 277                if (*r1 != *r2) {
 278                        if (*r2 == '\n')
 279                                return 1;
 280                        else
 281                                return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 282                }
 283                r1++;
 284                r2++;
 285        }
 286}
 287
 288/*
 289 * Compare a snapshot record at `rec` to the specified NUL-terminated
 290 * refname.
 291 */
 292static int cmp_record_to_refname(const char *rec, const char *refname)
 293{
 294        const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
 295        const char *r2 = refname;
 296
 297        while (1) {
 298                if (*r1 == '\n')
 299                        return *r2 ? -1 : 0;
 300                if (!*r2)
 301                        return 1;
 302                if (*r1 != *r2)
 303                        return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 304                r1++;
 305                r2++;
 306        }
 307}
 308
/*
 * `snapshot->buf` is not known to be sorted. Check whether it is, and
 * if not, sort it into new memory and munmap/free the old storage.
 *
 * Requires that the buffer already passed `verify_buffer_safe()`
 * (i.e., the last line is LF-terminated and long enough).
 */
static void sort_snapshot(struct snapshot *snapshot)
{
        struct snapshot_record *records = NULL;
        size_t alloc = 0, nr = 0;
        int sorted = 1;
        const char *pos, *eof, *eol;
        size_t len, i;
        char *new_buffer, *dst;

        /* Skip the header line (if any); sorting only covers the records: */
        pos = snapshot->buf + snapshot->header_len;
        eof = snapshot->eof;
        len = eof - pos;

        if (!len)
                return;

        /*
         * Initialize records based on a crude estimate of the number
         * of references in the file (we'll grow it below if needed):
         */
        ALLOC_GROW(records, len / 80 + 20, alloc);

        while (pos < eof) {
                eol = memchr(pos, '\n', eof - pos);
                if (!eol)
                        /* The safety check should prevent this. */
                        BUG("unterminated line found in packed-refs");
                if (eol - pos < GIT_SHA1_HEXSZ + 2)
                        die_invalid_line(snapshot->refs->path,
                                         pos, eof - pos);
                eol++;
                if (eol < eof && *eol == '^') {
                        /*
                         * Keep any peeled line together with its
                         * reference:
                         */
                        const char *peeled_start = eol;

                        eol = memchr(peeled_start, '\n', eof - peeled_start);
                        if (!eol)
                                /* The safety check should prevent this. */
                                BUG("unterminated peeled line found in packed-refs");
                        eol++;
                }

                ALLOC_GROW(records, nr + 1, alloc);
                records[nr].start = pos;
                records[nr].len = eol - pos;
                nr++;

                /* While scanning, note whether the records are already sorted: */
                if (sorted &&
                    nr > 1 &&
                    cmp_packed_ref_records(&records[nr - 2],
                                           &records[nr - 1]) >= 0)
                        sorted = 0;

                pos = eol;
        }

        if (sorted)
                goto cleanup;

        /* We need to sort the memory. First we sort the records array: */
        QSORT(records, nr, cmp_packed_ref_records);

        /*
         * Allocate a new chunk of memory, and copy the old memory to
         * the new in the order indicated by `records` (not bothering
         * with the header line):
         */
        new_buffer = xmalloc(len);
        for (dst = new_buffer, i = 0; i < nr; i++) {
                memcpy(dst, records[i].start, records[i].len);
                dst += records[i].len;
        }

        /*
         * Now munmap the old buffer and use the sorted buffer in its
         * place:
         */
        clear_snapshot_buffer(snapshot);
        snapshot->buf = new_buffer;
        snapshot->eof = new_buffer + len;
        snapshot->header_len = 0;

cleanup:
        free(records);
}
 401
 402/*
 403 * Return a pointer to the start of the record that contains the
 404 * character `*p` (which must be within the buffer). If no other
 405 * record start is found, return `buf`.
 406 */
 407static const char *find_start_of_record(const char *buf, const char *p)
 408{
 409        while (p > buf && (p[-1] != '\n' || p[0] == '^'))
 410                p--;
 411        return p;
 412}
 413
 414/*
 415 * Return a pointer to the start of the record following the record
 416 * that contains `*p`. If none is found before `end`, return `end`.
 417 */
 418static const char *find_end_of_record(const char *p, const char *end)
 419{
 420        while (++p < end && (p[-1] != '\n' || p[0] == '^'))
 421                ;
 422        return p;
 423}
 424
/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct snapshot *snapshot)
{
        const char *buf = snapshot->buf + snapshot->header_len;
        const char *eof = snapshot->eof;
        const char *last_line;

        /* An empty buffer (or header only) is trivially safe: */
        if (buf == eof)
                return;

        last_line = find_start_of_record(buf, eof - 1);
        if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
                die_invalid_line(snapshot->refs->path,
                                 last_line, eof - last_line);
}
 457
 458/*
 459 * Depending on `mmap_strategy`, either mmap or read the contents of
 460 * the `packed-refs` file into the snapshot. Return 1 if the file
 461 * existed and was read, or 0 if the file was absent. Die on errors.
 462 */
 463static int load_contents(struct snapshot *snapshot)
 464{
 465        int fd;
 466        struct stat st;
 467        size_t size;
 468        ssize_t bytes_read;
 469
 470        fd = open(snapshot->refs->path, O_RDONLY);
 471        if (fd < 0) {
 472                if (errno == ENOENT) {
 473                        /*
 474                         * This is OK; it just means that no
 475                         * "packed-refs" file has been written yet,
 476                         * which is equivalent to it being empty,
 477                         * which is its state when initialized with
 478                         * zeros.
 479                         */
 480                        return 0;
 481                } else {
 482                        die_errno("couldn't read %s", snapshot->refs->path);
 483                }
 484        }
 485
 486        stat_validity_update(&snapshot->validity, fd);
 487
 488        if (fstat(fd, &st) < 0)
 489                die_errno("couldn't stat %s", snapshot->refs->path);
 490        size = xsize_t(st.st_size);
 491
 492        switch (mmap_strategy) {
 493        case MMAP_NONE:
 494                snapshot->buf = xmalloc(size);
 495                bytes_read = read_in_full(fd, snapshot->buf, size);
 496                if (bytes_read < 0 || bytes_read != size)
 497                        die_errno("couldn't read %s", snapshot->refs->path);
 498                snapshot->eof = snapshot->buf + size;
 499                snapshot->mmapped = 0;
 500                break;
 501        case MMAP_TEMPORARY:
 502        case MMAP_OK:
 503                snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 504                snapshot->eof = snapshot->buf + size;
 505                snapshot->mmapped = 1;
 506                break;
 507        }
 508        close(fd);
 509
 510        return 1;
 511}
 512
/*
 * Find the place in `snapshot->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted. In the latter mode, `refname` doesn't have to be a proper
 * reference name; for example, one could search for "refs/replace/"
 * to find the start of any replace references.
 *
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
 */
static const char *find_reference_location(struct snapshot *snapshot,
                                           const char *refname, int mustexist)
{
        /*
         * This is not *quite* a garden-variety binary search, because
         * the data we're searching is made up of records, and we
         * always need to find the beginning of a record to do a
         * comparison. A "record" here is one line for the reference
         * itself and zero or one peel lines that start with '^'. Our
         * loop invariant is described in the next two comments.
         */

        /*
         * A pointer to the character at the start of a record whose
         * preceding records all have reference names that come
         * *before* `refname`.
         */
        const char *lo = snapshot->buf + snapshot->header_len;

        /*
         * A pointer to a the first character of a record whose
         * reference name comes *after* `refname`.
         */
        const char *hi = snapshot->eof;

        while (lo < hi) {
                const char *mid, *rec;
                int cmp;

                /* Snap the probe point back to the start of its record: */
                mid = lo + (hi - lo) / 2;
                rec = find_start_of_record(lo, mid);
                cmp = cmp_record_to_refname(rec, refname);
                if (cmp < 0) {
                        /* `rec` sorts before `refname`; search after it: */
                        lo = find_end_of_record(mid, hi);
                } else if (cmp > 0) {
                        /* `rec` sorts after `refname`; search before it: */
                        hi = rec;
                } else {
                        return rec;
                }
        }

        if (mustexist)
                return NULL;
        else
                return lo;
}
 571
/*
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   Neither `peeled` nor `fully-peeled`:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   `peeled`:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   `fully-peeled`:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded is not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 *   `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
        struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
        int sorted = 0;

        snapshot->refs = refs;
        acquire_snapshot(snapshot);
        snapshot->peeled = PEELED_NONE;

        /* Missing/empty file: return the empty snapshot (buf == eof == NULL). */
        if (!load_contents(snapshot))
                return snapshot;

        /* If the file has a header line, process it: */
        if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
                struct strbuf tmp = STRBUF_INIT;
                char *p;
                const char *eol;
                struct string_list traits = STRING_LIST_INIT_NODUP;

                eol = memchr(snapshot->buf, '\n',
                             snapshot->eof - snapshot->buf);
                if (!eol)
                        die_unterminated_line(refs->path,
                                              snapshot->buf,
                                              snapshot->eof - snapshot->buf);

                /* Copy to a NUL-terminated buffer for parsing: */
                strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);

                if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
                        die_invalid_line(refs->path,
                                         snapshot->buf,
                                         snapshot->eof - snapshot->buf);

                string_list_split_in_place(&traits, p, ' ', -1);

                if (unsorted_string_list_has_string(&traits, "fully-peeled"))
                        snapshot->peeled = PEELED_FULLY;
                else if (unsorted_string_list_has_string(&traits, "peeled"))
                        snapshot->peeled = PEELED_TAGS;

                sorted = unsorted_string_list_has_string(&traits, "sorted");

                /* perhaps other traits later as well */

                /* The "+ 1" is for the LF character. */
                snapshot->header_len = eol + 1 - snapshot->buf;

                string_list_clear(&traits, 0);
                strbuf_release(&tmp);
        }

        verify_buffer_safe(snapshot);

        if (!sorted) {
                sort_snapshot(snapshot);

                /*
                 * Reordering the records might have moved a short one
                 * to the end of the buffer, so verify the buffer's
                 * safety again:
                 */
                verify_buffer_safe(snapshot);
        }

        if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
                /*
                 * We don't want to leave the file mmapped, so we are
                 * forced to make a copy now:
                 */
                size_t size = snapshot->eof -
                        (snapshot->buf + snapshot->header_len);
                char *buf_copy = xmalloc(size);

                memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
                clear_snapshot_buffer(snapshot);
                snapshot->buf = buf_copy;
                snapshot->eof = buf_copy + size;
        }

        return snapshot;
}
 686
 687/*
 688 * Check that `refs->snapshot` (if present) still reflects the
 689 * contents of the `packed-refs` file. If not, clear the snapshot.
 690 */
 691static void validate_snapshot(struct packed_ref_store *refs)
 692{
 693        if (refs->snapshot &&
 694            !stat_validity_check(&refs->snapshot->validity, refs->path))
 695                clear_snapshot(refs);
 696}
 697
/*
 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
 */
static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
        /* Unless we hold the lock, the on-disk file may have changed: */
        if (!is_lock_file_locked(&refs->lock))
                validate_snapshot(refs);

        if (!refs->snapshot)
                refs->snapshot = create_snapshot(refs);

        return refs->snapshot;
}
 717
 718static int packed_read_raw_ref(struct ref_store *ref_store,
 719                               const char *refname, unsigned char *sha1,
 720                               struct strbuf *referent, unsigned int *type)
 721{
 722        struct packed_ref_store *refs =
 723                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
 724        struct snapshot *snapshot = get_snapshot(refs);
 725        const char *rec;
 726
 727        *type = 0;
 728
 729        rec = find_reference_location(snapshot, refname, 1);
 730
 731        if (!rec) {
 732                /* refname is not a packed reference. */
 733                errno = ENOENT;
 734                return -1;
 735        }
 736
 737        if (get_sha1_hex(rec, sha1))
 738                die_invalid_line(refs->path, rec, snapshot->eof - rec);
 739
 740        *type = REF_ISPACKED;
 741        return 0;
 742}
 743
/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_sha1`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a snapshot of a `packed-refs` file. Holds a
 * reference on `snapshot` for its lifetime (released in the abort
 * method).
 */
struct packed_ref_iterator {
        struct ref_iterator base;

        struct snapshot *snapshot;

        /* The current position in the snapshot's buffer: */
        const char *pos;

        /* The end of the part of the buffer that will be iterated over: */
        const char *eof;

        /* Scratch space for current values: */
        struct object_id oid, peeled;
        struct strbuf refname_buf;

        /* `DO_FOR_EACH_*` flags controlling which references are yielded: */
        unsigned int flags;
};
 772
/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
        const char *p = iter->pos, *eol;

        strbuf_reset(&iter->refname_buf);

        if (iter->pos == iter->eof)
                return ITER_DONE;

        iter->base.flags = REF_ISPACKED;

        /* Parse "<oid> SP <refname> LF": */
        if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
            parse_oid_hex(p, &iter->oid, &p) ||
            !isspace(*p++))
                die_invalid_line(iter->snapshot->refs->path,
                                 iter->pos, iter->eof - iter->pos);

        eol = memchr(p, '\n', iter->eof - p);
        if (!eol)
                die_unterminated_line(iter->snapshot->refs->path,
                                      iter->pos, iter->eof - iter->pos);

        strbuf_add(&iter->refname_buf, p, eol - p);
        iter->base.refname = iter->refname_buf.buf;

        if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
                if (!refname_is_safe(iter->base.refname))
                        die("packed refname is dangerous: %s",
                            iter->base.refname);
                /* Tolerate the bad name, but mark the ref broken: */
                oidclr(&iter->oid);
                iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
        }
        if (iter->snapshot->peeled == PEELED_FULLY ||
            (iter->snapshot->peeled == PEELED_TAGS &&
             starts_with(iter->base.refname, "refs/tags/")))
                iter->base.flags |= REF_KNOWS_PEELED;

        iter->pos = eol + 1;

        if (iter->pos < iter->eof && *iter->pos == '^') {
                /* Parse the optional peeled line, "^<oid> LF": */
                p = iter->pos + 1;
                if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
                    parse_oid_hex(p, &iter->peeled, &p) ||
                    *p++ != '\n')
                        die_invalid_line(iter->snapshot->refs->path,
                                         iter->pos, iter->eof - iter->pos);
                iter->pos = p;

                /*
                 * Regardless of what the file header said, we
                 * definitely know the value of *this* reference. But
                 * we suppress it if the reference is broken:
                 */
                if ((iter->base.flags & REF_ISBROKEN)) {
                        oidclr(&iter->peeled);
                        iter->base.flags &= ~REF_KNOWS_PEELED;
                } else {
                        iter->base.flags |= REF_KNOWS_PEELED;
                }
        } else {
                oidclr(&iter->peeled);
        }

        return ITER_OK;
}
 845
 846static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
 847{
 848        struct packed_ref_iterator *iter =
 849                (struct packed_ref_iterator *)ref_iterator;
 850        int ok;
 851
 852        while ((ok = next_record(iter)) == ITER_OK) {
 853                if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
 854                    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
 855                        continue;
 856
 857                if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
 858                    !ref_resolves_to_object(iter->base.refname, &iter->oid,
 859                                            iter->flags))
 860                        continue;
 861
 862                return ITER_OK;
 863        }
 864
 865        if (ref_iterator_abort(ref_iterator) != ITER_DONE)
 866                ok = ITER_ERROR;
 867
 868        return ok;
 869}
 870
 871static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
 872                                   struct object_id *peeled)
 873{
 874        struct packed_ref_iterator *iter =
 875                (struct packed_ref_iterator *)ref_iterator;
 876
 877        if ((iter->base.flags & REF_KNOWS_PEELED)) {
 878                oidcpy(peeled, &iter->peeled);
 879                return is_null_oid(&iter->peeled) ? -1 : 0;
 880        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
 881                return -1;
 882        } else {
 883                return !!peel_object(iter->oid.hash, peeled->hash);
 884        }
 885}
 886
 887static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
 888{
 889        struct packed_ref_iterator *iter =
 890                (struct packed_ref_iterator *)ref_iterator;
 891        int ok = ITER_DONE;
 892
 893        strbuf_release(&iter->refname_buf);
 894        release_snapshot(iter->snapshot);
 895        base_ref_iterator_free(ref_iterator);
 896        return ok;
 897}
 898
/* Virtual method table for iterators over the `packed-refs` file. */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
 904
/*
 * Return an iterator over the references in the `packed-refs` file
 * whose names start with `prefix` (all references, if `prefix` is
 * NULL or empty). `flags` is a combination of `DO_FOR_EACH_*` values.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/*
	 * Unless broken references are to be reported too, we will
	 * need the object database to check that references resolve:
	 */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	if (!snapshot->buf)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/* Keep the snapshot alive for as long as the iterator exists: */
	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	if (prefix && *prefix)
		/* Skip straight to the first record that could match: */
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->buf + snapshot->header_len;

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
 956
 957/*
 958 * Write an entry to the packed-refs file for the specified refname.
 959 * If peeled is non-NULL, write it as the entry's peeled value. On
 960 * error, return a nonzero value and leave errno set at the value left
 961 * by the failing call to `fprintf()`.
 962 */
 963static int write_packed_entry(FILE *fh, const char *refname,
 964                              const unsigned char *sha1,
 965                              const unsigned char *peeled)
 966{
 967        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
 968            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
 969                return -1;
 970
 971        return 0;
 972}
 973
/*
 * Acquire the `packed-refs` lock, waiting up to
 * `core.packedrefstimeout` milliseconds (default 1000). `flags` is
 * passed through to `hold_lock_file_for_update_timeout()`. On
 * failure, write a message to `err` and return -1. On success, the
 * in-memory snapshot is guaranteed to reflect the locked state of the
 * file, and 0 is returned.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	/* Read the timeout from the config only once per process: */
	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * snapshot matches the current version of the file. Normally
	 * `get_snapshot()` does that for us, but that function
	 * assumes that when the file is locked, any existing snapshot
	 * is still valid. We've just locked the file, but it might
	 * have changed the moment *before* we locked it.
	 */
	validate_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}
1023
1024void packed_refs_unlock(struct ref_store *ref_store)
1025{
1026        struct packed_ref_store *refs = packed_downcast(
1027                        ref_store,
1028                        REF_STORE_READ | REF_STORE_WRITE,
1029                        "packed_refs_unlock");
1030
1031        if (!is_lock_file_locked(&refs->lock))
1032                die("BUG: packed_refs_unlock() called when not locked");
1033        rollback_lock_file(&refs->lock);
1034}
1035
1036int packed_refs_is_locked(struct ref_store *ref_store)
1037{
1038        struct packed_ref_store *refs = packed_downcast(
1039                        ref_store,
1040                        REF_STORE_READ | REF_STORE_WRITE,
1041                        "packed_refs_is_locked");
1042
1043        return is_lock_file_locked(&refs->lock);
1044}
1045
1046/*
1047 * The packed-refs header line that we write out. Perhaps other traits
1048 * will be added later.
1049 *
1050 * Note that earlier versions of Git used to parse these traits by
1051 * looking for " trait " in the line. For this reason, the space after
1052 * the colon and the trailing space are required.
1053 */
1054static const char PACKED_REFS_HEADER[] =
1055        "# pack-refs with: peeled fully-peeled sorted \n";
1056
/*
 * Initialize the packed ref store on disk. There is nothing to
 * create up front; the `packed-refs` file is only written when
 * references are actually packed. Always returns 0.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
1062
1063/*
1064 * Write the packed refs from the current snapshot to the packed-refs
1065 * tempfile, incorporating any changes from `updates`. `updates` must
1066 * be a sorted string list whose keys are the refnames and whose util
1067 * values are `struct ref_update *`. On error, rollback the tempfile,
1068 * write an error message to `err`, and return a nonzero value.
1069 *
1070 * The packfile must be locked before calling this function and will
1071 * remain locked when it is done.
1072 */
1073static int write_with_updates(struct packed_ref_store *refs,
1074                              struct string_list *updates,
1075                              struct strbuf *err)
1076{
1077        struct ref_iterator *iter = NULL;
1078        size_t i;
1079        int ok;
1080        FILE *out;
1081        struct strbuf sb = STRBUF_INIT;
1082        char *packed_refs_path;
1083
1084        if (!is_lock_file_locked(&refs->lock))
1085                die("BUG: write_with_updates() called while unlocked");
1086
1087        /*
1088         * If packed-refs is a symlink, we want to overwrite the
1089         * symlinked-to file, not the symlink itself. Also, put the
1090         * staging file next to it:
1091         */
1092        packed_refs_path = get_locked_file_path(&refs->lock);
1093        strbuf_addf(&sb, "%s.new", packed_refs_path);
1094        free(packed_refs_path);
1095        refs->tempfile = create_tempfile(sb.buf);
1096        if (!refs->tempfile) {
1097                strbuf_addf(err, "unable to create file %s: %s",
1098                            sb.buf, strerror(errno));
1099                strbuf_release(&sb);
1100                return -1;
1101        }
1102        strbuf_release(&sb);
1103
1104        out = fdopen_tempfile(refs->tempfile, "w");
1105        if (!out) {
1106                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
1107                            strerror(errno));
1108                goto error;
1109        }
1110
1111        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
1112                goto write_error;
1113
1114        /*
1115         * We iterate in parallel through the current list of refs and
1116         * the list of updates, processing an entry from at least one
1117         * of the lists each time through the loop. When the current
1118         * list of refs is exhausted, set iter to NULL. When the list
1119         * of updates is exhausted, leave i set to updates->nr.
1120         */
1121        iter = packed_ref_iterator_begin(&refs->base, "",
1122                                         DO_FOR_EACH_INCLUDE_BROKEN);
1123        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1124                iter = NULL;
1125
1126        i = 0;
1127
1128        while (iter || i < updates->nr) {
1129                struct ref_update *update = NULL;
1130                int cmp;
1131
1132                if (i >= updates->nr) {
1133                        cmp = -1;
1134                } else {
1135                        update = updates->items[i].util;
1136
1137                        if (!iter)
1138                                cmp = +1;
1139                        else
1140                                cmp = strcmp(iter->refname, update->refname);
1141                }
1142
1143                if (!cmp) {
1144                        /*
1145                         * There is both an old value and an update
1146                         * for this reference. Check the old value if
1147                         * necessary:
1148                         */
1149                        if ((update->flags & REF_HAVE_OLD)) {
1150                                if (is_null_oid(&update->old_oid)) {
1151                                        strbuf_addf(err, "cannot update ref '%s': "
1152                                                    "reference already exists",
1153                                                    update->refname);
1154                                        goto error;
1155                                } else if (oidcmp(&update->old_oid, iter->oid)) {
1156                                        strbuf_addf(err, "cannot update ref '%s': "
1157                                                    "is at %s but expected %s",
1158                                                    update->refname,
1159                                                    oid_to_hex(iter->oid),
1160                                                    oid_to_hex(&update->old_oid));
1161                                        goto error;
1162                                }
1163                        }
1164
1165                        /* Now figure out what to use for the new value: */
1166                        if ((update->flags & REF_HAVE_NEW)) {
1167                                /*
1168                                 * The update takes precedence. Skip
1169                                 * the iterator over the unneeded
1170                                 * value.
1171                                 */
1172                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1173                                        iter = NULL;
1174                                cmp = +1;
1175                        } else {
1176                                /*
1177                                 * The update doesn't actually want to
1178                                 * change anything. We're done with it.
1179                                 */
1180                                i++;
1181                                cmp = -1;
1182                        }
1183                } else if (cmp > 0) {
1184                        /*
1185                         * There is no old value but there is an
1186                         * update for this reference. Make sure that
1187                         * the update didn't expect an existing value:
1188                         */
1189                        if ((update->flags & REF_HAVE_OLD) &&
1190                            !is_null_oid(&update->old_oid)) {
1191                                strbuf_addf(err, "cannot update ref '%s': "
1192                                            "reference is missing but expected %s",
1193                                            update->refname,
1194                                            oid_to_hex(&update->old_oid));
1195                                goto error;
1196                        }
1197                }
1198
1199                if (cmp < 0) {
1200                        /* Pass the old reference through. */
1201
1202                        struct object_id peeled;
1203                        int peel_error = ref_iterator_peel(iter, &peeled);
1204
1205                        if (write_packed_entry(out, iter->refname,
1206                                               iter->oid->hash,
1207                                               peel_error ? NULL : peeled.hash))
1208                                goto write_error;
1209
1210                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1211                                iter = NULL;
1212                } else if (is_null_oid(&update->new_oid)) {
1213                        /*
1214                         * The update wants to delete the reference,
1215                         * and the reference either didn't exist or we
1216                         * have already skipped it. So we're done with
1217                         * the update (and don't have to write
1218                         * anything).
1219                         */
1220                        i++;
1221                } else {
1222                        struct object_id peeled;
1223                        int peel_error = peel_object(update->new_oid.hash,
1224                                                     peeled.hash);
1225
1226                        if (write_packed_entry(out, update->refname,
1227                                               update->new_oid.hash,
1228                                               peel_error ? NULL : peeled.hash))
1229                                goto write_error;
1230
1231                        i++;
1232                }
1233        }
1234
1235        if (ok != ITER_DONE) {
1236                strbuf_addstr(err, "unable to write packed-refs file: "
1237                              "error iterating over old contents");
1238                goto error;
1239        }
1240
1241        if (close_tempfile_gently(refs->tempfile)) {
1242                strbuf_addf(err, "error closing file %s: %s",
1243                            get_tempfile_path(refs->tempfile),
1244                            strerror(errno));
1245                strbuf_release(&sb);
1246                delete_tempfile(&refs->tempfile);
1247                return -1;
1248        }
1249
1250        return 0;
1251
1252write_error:
1253        strbuf_addf(err, "error writing to %s: %s",
1254                    get_tempfile_path(refs->tempfile), strerror(errno));
1255
1256error:
1257        if (iter)
1258                ref_iterator_abort(iter);
1259
1260        delete_tempfile(&refs->tempfile);
1261        return -1;
1262}
1263
/*
 * Backend-private state attached to a `ref_transaction` while a
 * packed-refs transaction is open.
 */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/*
	 * The transaction's updates; keys are refnames and each
	 * item's util points at the corresponding `struct ref_update`.
	 */
	struct string_list updates;
};
1270
/*
 * Tear down the backend state of `transaction` and mark the
 * transaction closed. This releases the update list, deletes any
 * still-active tempfile, and drops the packed-refs lock if this
 * transaction was the one that acquired it. Safe to call whether or
 * not backend data was ever attached.
 */
static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		/* Discard the staged new packed-refs contents, if any: */
		if (is_tempfile_active(refs->tempfile))
			delete_tempfile(&refs->tempfile);

		/* Only unlock if *we* took the lock: */
		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}
1293
/*
 * Prepare a packed-refs transaction: sort the updates by refname,
 * reject duplicates, take the packed-refs lock if it is not already
 * held, and write the new contents to a tempfile. On success the
 * transaction moves to the PREPARED state and 0 is returned; on
 * failure the transaction is cleaned up and
 * `TRANSACTION_GENERIC_ERROR` is returned with a message in `err`.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled or
	 * ensuring that the `packed-refs` file is sorted. If the
	 * caller wants to optimize away empty transactions, it should
	 * do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock unless the caller already holds it: */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	/* Stage the new packed-refs contents in a tempfile: */
	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1353
1354static int packed_transaction_abort(struct ref_store *ref_store,
1355                                    struct ref_transaction *transaction,
1356                                    struct strbuf *err)
1357{
1358        struct packed_ref_store *refs = packed_downcast(
1359                        ref_store,
1360                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1361                        "ref_transaction_abort");
1362
1363        packed_transaction_cleanup(refs, transaction);
1364        return 0;
1365}
1366
/*
 * Commit a prepared packed-refs transaction: invalidate the cached
 * snapshot, then rename the tempfile written by `write_with_updates()`
 * onto the `packed-refs` file. Returns 0 on success or
 * `TRANSACTION_GENERIC_ERROR` on failure, with a message in `err`.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	/* The file is about to change, so discard the cached snapshot: */
	clear_snapshot(refs);

	/* Rename onto the path that was actually locked: */
	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1394
/*
 * For the packed backend, an "initial" transaction commit needs no
 * special handling; just run the normal commit machinery.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
1401
/*
 * Delete the references named in `refnames` from the packed ref
 * store, batching all deletions into a single transaction. Failures
 * to queue an individual deletion are reported as warnings; the
 * result of committing the transaction is returned (0 on success).
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	/* Queue one deletion per refname, warning (not failing) on error: */
	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}
1450
/* Packing packed refs is a no-op; always returns 0. */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1460
/*
 * The following operations cannot be performed on a packed ref store;
 * reaching any of them indicates a bug in the caller.
 */

static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

static int packed_copy_ref(struct ref_store *ref_store,
			   const char *oldrefname, const char *newrefname,
			   const char *logmsg)
{
	die("BUG: packed reference store does not support copying references");
}
1481
/*
 * The packed ref store keeps no reflogs. The query functions below
 * therefore report "no reflogs", the deletion/expiry functions
 * trivially succeed, and creating a reflog is a caller bug.
 */

/* Iterate over (zero) reflogs: */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

/* There are no reflog entries to visit: */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

/* No reflog ever exists for a packed ref: */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

/* Deleting a (nonexistent) reflog trivially succeeds: */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}

/* Expiring a (nonexistent) reflog trivially succeeds: */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1531
/*
 * The virtual method table that exposes this file's functions as the
 * "packed" reference storage backend.
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,
	packed_copy_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	/* Reflog operations (the packed store keeps no reflogs): */
	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};