be614e79f530f492d9dc7c6bcf9242ff1a1a712f
   1#include "../cache.h"
   2#include "../config.h"
   3#include "../refs.h"
   4#include "refs-internal.h"
   5#include "ref-cache.h"
   6#include "packed-backend.h"
   7#include "../iterator.h"
   8#include "../lockfile.h"
   9
/* How the `packed-refs` file may be accessed for reading. */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};
  30
/* Strategy selected at compile time from the platform's capabilities: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
  38
  39struct packed_ref_store;
  40
/*
 * A snapshot of the contents of the `packed-refs` file, plus a
 * `ref_cache` built from it. Reference-counted via `referrers`.
 */
struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	/* Parsed view of the file's records (see read_packed_refs()): */
	struct ref_cache *cache;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};
  83
/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	/* Must be the first member, so downcasting works: */
	struct ref_store base;

	/* REF_STORE_* capability flags this store was created with: */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};
 115
 116/*
 117 * Increment the reference count of *packed_refs.
 118 */
 119static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
 120{
 121        packed_refs->referrers++;
 122}
 123
 124/*
 125 * If the buffer in `packed_refs` is active, then either munmap the
 126 * memory and close the file, or free the memory. Then set the buffer
 127 * pointers to NULL.
 128 */
 129static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
 130{
 131        if (packed_refs->mmapped) {
 132                if (munmap(packed_refs->buf,
 133                           packed_refs->eof - packed_refs->buf))
 134                        die_errno("error ummapping packed-refs file %s",
 135                                  packed_refs->refs->path);
 136                packed_refs->mmapped = 0;
 137        } else {
 138                free(packed_refs->buf);
 139        }
 140        packed_refs->buf = packed_refs->eof = NULL;
 141        packed_refs->header_len = 0;
 142}
 143
 144/*
 145 * Decrease the reference count of *packed_refs.  If it goes to zero,
 146 * free *packed_refs and return true; otherwise return false.
 147 */
 148static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
 149{
 150        if (!--packed_refs->referrers) {
 151                free_ref_cache(packed_refs->cache);
 152                stat_validity_clear(&packed_refs->validity);
 153                release_packed_ref_buffer(packed_refs);
 154                free(packed_refs);
 155                return 1;
 156        } else {
 157                return 0;
 158        }
 159}
 160
 161struct ref_store *packed_ref_store_create(const char *path,
 162                                          unsigned int store_flags)
 163{
 164        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
 165        struct ref_store *ref_store = (struct ref_store *)refs;
 166
 167        base_ref_store_init(ref_store, &refs_be_packed);
 168        refs->store_flags = store_flags;
 169
 170        refs->path = xstrdup(path);
 171        return ref_store;
 172}
 173
 174/*
 175 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 176 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 177 * support at least the flags specified in `required_flags`. `caller`
 178 * is used in any necessary error messages.
 179 */
 180static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
 181                                                unsigned int required_flags,
 182                                                const char *caller)
 183{
 184        struct packed_ref_store *refs;
 185
 186        if (ref_store->be != &refs_be_packed)
 187                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
 188                    ref_store->be->name, caller);
 189
 190        refs = (struct packed_ref_store *)ref_store;
 191
 192        if ((refs->store_flags & required_flags) != required_flags)
 193                die("BUG: unallowed operation (%s), requires %x, has %x\n",
 194                    caller, required_flags, refs->store_flags);
 195
 196        return refs;
 197}
 198
 199static void clear_packed_ref_cache(struct packed_ref_store *refs)
 200{
 201        if (refs->cache) {
 202                struct packed_ref_cache *cache = refs->cache;
 203
 204                refs->cache = NULL;
 205                release_packed_ref_cache(cache);
 206        }
 207}
 208
 209static NORETURN void die_unterminated_line(const char *path,
 210                                           const char *p, size_t len)
 211{
 212        if (len < 80)
 213                die("unterminated line in %s: %.*s", path, (int)len, p);
 214        else
 215                die("unterminated line in %s: %.75s...", path, p);
 216}
 217
 218static NORETURN void die_invalid_line(const char *path,
 219                                      const char *p, size_t len)
 220{
 221        const char *eol = memchr(p, '\n', len);
 222
 223        if (!eol)
 224                die_unterminated_line(path, p, len);
 225        else if (eol - p < 80)
 226                die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
 227        else
 228                die("unexpected line in %s: %.75s...", path, p);
 229
 230}
 231
/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	struct ref_iterator base;

	/* Keeps the buffer alive while we iterate (ref-counted): */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* Value and (optional) peeled value of the current record: */
	struct object_id oid, peeled;

	/* NUL-terminated copy of the current refname (buffer isn't). */
	struct strbuf refname_buf;
};
 250
/*
 * Advance the iterator to the next record in the mmapped buffer,
 * filling in oid, refname, flags, and (if present) the peeled value.
 * Return ITER_OK on success, or abort the iteration at end of
 * buffer. Dies on malformed input.
 */
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	/* A record is "<oid> <refname>\n"; check there's room for one. */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	/* Copy the refname out, since the buffer is not NUL-terminated: */
	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		/* Tolerate the bad name, but mark the entry broken: */
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<oid>\n" line carries the record's peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
 318
 319static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
 320                                    struct object_id *peeled)
 321{
 322        struct mmapped_ref_iterator *iter =
 323                (struct mmapped_ref_iterator *)ref_iterator;
 324
 325        if ((iter->base.flags & REF_KNOWS_PEELED)) {
 326                oidcpy(peeled, &iter->peeled);
 327                return is_null_oid(&iter->peeled) ? -1 : 0;
 328        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
 329                return -1;
 330        } else {
 331                return !!peel_object(iter->oid.hash, peeled->hash);
 332        }
 333}
 334
 335static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
 336{
 337        struct mmapped_ref_iterator *iter =
 338                (struct mmapped_ref_iterator *)ref_iterator;
 339
 340        release_packed_ref_cache(iter->packed_refs);
 341        strbuf_release(&iter->refname_buf);
 342        base_ref_iterator_free(ref_iterator);
 343        return ITER_DONE;
 344}
 345
/* Virtual method table for mmapped_ref_iterator: */
static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};
 351
 352struct ref_iterator *mmapped_ref_iterator_begin(
 353                struct packed_ref_cache *packed_refs,
 354                const char *pos, const char *eof)
 355{
 356        struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
 357        struct ref_iterator *ref_iterator = &iter->base;
 358
 359        if (!packed_refs->buf)
 360                return empty_ref_iterator_begin();
 361
 362        base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1);
 363
 364        iter->packed_refs = packed_refs;
 365        acquire_packed_ref_cache(iter->packed_refs);
 366        iter->pos = pos;
 367        iter->eof = eof;
 368        strbuf_init(&iter->refname_buf, 0);
 369
 370        iter->base.oid = &iter->oid;
 371
 372        return ref_iterator;
 373}
 374
/*
 * A slice of one record within a packed-refs buffer, used while
 * sorting. `start` points at the record's first byte; `len` covers
 * the refname line plus any following peeled ('^') line.
 */
struct packed_ref_entry {
	const char *start;
	size_t len;
};
 379
 380static int cmp_packed_ref_entries(const void *v1, const void *v2)
 381{
 382        const struct packed_ref_entry *e1 = v1, *e2 = v2;
 383        const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
 384        const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
 385
 386        while (1) {
 387                if (*r1 == '\n')
 388                        return *r2 == '\n' ? 0 : -1;
 389                if (*r1 != *r2) {
 390                        if (*r2 == '\n')
 391                                return 1;
 392                        else
 393                                return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 394                }
 395                r1++;
 396                r2++;
 397        }
 398}
 399
 400/*
 401 * Compare a packed-refs record pointed to by `rec` to the specified
 402 * NUL-terminated refname.
 403 */
 404static int cmp_entry_to_refname(const char *rec, const char *refname)
 405{
 406        const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
 407        const char *r2 = refname;
 408
 409        while (1) {
 410                if (*r1 == '\n')
 411                        return *r2 ? -1 : 0;
 412                if (!*r2)
 413                        return 1;
 414                if (*r1 != *r2)
 415                        return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 416                r1++;
 417                r2++;
 418        }
 419}
 420
/*
 * `packed_refs->buf` is not known to be sorted. Check whether it is,
 * and if not, sort it into new memory and munmap/free the old
 * storage.
 */
static void sort_packed_refs(struct packed_ref_cache *packed_refs)
{
	struct packed_ref_entry *entries = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	/* Skip the header line; only the records get sorted. */
	pos = packed_refs->buf + packed_refs->header_len;
	eof = packed_refs->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize entries based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(entries, len / 80 + 20, alloc);

	/* First pass: index every record and check existing order. */
	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(packed_refs->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(entries, nr + 1, alloc);
		entries[nr].start = pos;
		entries[nr].len = eol - pos;
		nr++;

		/* Note any out-of-order (or duplicate) adjacent pair: */
		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_entries(&entries[nr - 2],
					   &entries[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the entries array: */
	QSORT(entries, nr, cmp_packed_ref_entries);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `entries` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, entries[i].start, entries[i].len);
		dst += entries[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	release_packed_ref_buffer(packed_refs);
	packed_refs->buf = new_buffer;
	packed_refs->eof = new_buffer + len;
	packed_refs->header_len = 0;

cleanup:
	free(entries);
}
 514
 515/*
 516 * Return a pointer to the start of the record that contains the
 517 * character `*p` (which must be within the buffer). If no other
 518 * record start is found, return `buf`.
 519 */
 520static const char *find_start_of_record(const char *buf, const char *p)
 521{
 522        while (p > buf && (p[-1] != '\n' || p[0] == '^'))
 523                p--;
 524        return p;
 525}
 526
 527/*
 528 * Return a pointer to the start of the record following the record
 529 * that contains `*p`. If none is found before `end`, return `end`.
 530 */
 531static const char *find_end_of_record(const char *p, const char *end)
 532{
 533        while (++p < end && (p[-1] != '\n' || p[0] == '^'))
 534                ;
 535        return p;
 536}
 537
 538/*
 539 * We want to be able to compare mmapped reference records quickly,
 540 * without totally parsing them. We can do so because the records are
 541 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 542 * + 1) bytes past the beginning of the record.
 543 *
 544 * But what if the `packed-refs` file contains garbage? We're willing
 545 * to tolerate not detecting the problem, as long as we don't produce
 546 * totally garbled output (we can't afford to check the integrity of
 547 * the whole file during every Git invocation). But we do want to be
 548 * sure that we never read past the end of the buffer in memory and
 549 * perform an illegal memory access.
 550 *
 551 * Guarantee that minimum level of safety by verifying that the last
 552 * record in the file is LF-terminated, and that it has at least
 553 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 554 * these checks fails.
 555 */
 556static void verify_buffer_safe(struct packed_ref_cache *packed_refs)
 557{
 558        const char *buf = packed_refs->buf + packed_refs->header_len;
 559        const char *eof = packed_refs->eof;
 560        const char *last_line;
 561
 562        if (buf == eof)
 563                return;
 564
 565        last_line = find_start_of_record(buf, eof - 1);
 566        if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
 567                die_invalid_line(packed_refs->refs->path,
 568                                 last_line, eof - last_line);
 569}
 570
 571/*
 572 * Depending on `mmap_strategy`, either mmap or read the contents of
 573 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 574 * the file existed and was read, or 0 if the file was absent. Die on
 575 * errors.
 576 */
 577static int load_contents(struct packed_ref_cache *packed_refs)
 578{
 579        int fd;
 580        struct stat st;
 581        size_t size;
 582        ssize_t bytes_read;
 583
 584        fd = open(packed_refs->refs->path, O_RDONLY);
 585        if (fd < 0) {
 586                if (errno == ENOENT) {
 587                        /*
 588                         * This is OK; it just means that no
 589                         * "packed-refs" file has been written yet,
 590                         * which is equivalent to it being empty,
 591                         * which is its state when initialized with
 592                         * zeros.
 593                         */
 594                        return 0;
 595                } else {
 596                        die_errno("couldn't read %s", packed_refs->refs->path);
 597                }
 598        }
 599
 600        stat_validity_update(&packed_refs->validity, fd);
 601
 602        if (fstat(fd, &st) < 0)
 603                die_errno("couldn't stat %s", packed_refs->refs->path);
 604        size = xsize_t(st.st_size);
 605
 606        switch (mmap_strategy) {
 607        case MMAP_NONE:
 608                packed_refs->buf = xmalloc(size);
 609                bytes_read = read_in_full(fd, packed_refs->buf, size);
 610                if (bytes_read < 0 || bytes_read != size)
 611                        die_errno("couldn't read %s", packed_refs->refs->path);
 612                packed_refs->eof = packed_refs->buf + size;
 613                packed_refs->mmapped = 0;
 614                break;
 615        case MMAP_TEMPORARY:
 616        case MMAP_OK:
 617                packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 618                packed_refs->eof = packed_refs->buf + size;
 619                packed_refs->mmapped = 1;
 620                break;
 621        }
 622        close(fd);
 623
 624        return 1;
 625}
 626
/*
 * Find the place in `cache->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted. In the latter mode, `refname` doesn't have to be a proper
 * reference name; for example, one could search for "refs/replace/"
 * to find the start of any replace references.
 *
 * The record is sought using a binary search, so `cache->buf` must be
 * sorted.
 */
static const char *find_reference_location(struct packed_ref_cache *cache,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = cache->buf + cache->header_len;

	/*
	 * A pointer to a the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = cache->eof;

	while (lo < hi) {
		const char *mid, *rec;
		int cmp;

		/* Snap the byte-level midpoint back to a record boundary: */
		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_entry_to_refname(rec, refname);
		if (cmp < 0) {
			/* rec sorts before refname; skip past its record: */
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	/* Not found; `lo` is where the record would be inserted. */
	if (mustexist)
		return NULL;
	else
		return lo;
}
 685
 686/*
 687 * Read from the `packed-refs` file into a newly-allocated
 688 * `packed_ref_cache` and return it. The return value will already
 689 * have its reference count incremented.
 690 *
 691 * A comment line of the form "# pack-refs with: " may contain zero or
 692 * more traits. We interpret the traits as follows:
 693 *
 694 *   Neither `peeled` nor `fully-peeled`:
 695 *
 696 *      Probably no references are peeled. But if the file contains a
 697 *      peeled value for a reference, we will use it.
 698 *
 699 *   `peeled`:
 700 *
 701 *      References under "refs/tags/", if they *can* be peeled, *are*
 702 *      peeled in this file. References outside of "refs/tags/" are
 703 *      probably not peeled even if they could have been, but if we find
 704 *      a peeled value for such a reference we will use it.
 705 *
 706 *   `fully-peeled`:
 707 *
 708 *      All references in the file that can be peeled are peeled.
 709 *      Inversely (and this is more important), any references in the
 710 *      file for which no peeled value is recorded is not peelable. This
 711 *      trait should typically be written alongside "peeled" for
 712 *      compatibility with older clients, but we do not require it
 713 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 714 *
 715 *   `sorted`:
 716 *
 717 *      The references in this file are known to be sorted by refname.
 718 */
/*
 * Read the `packed-refs` file for `refs` into a new, reference-held
 * `packed_ref_cache` (see the header comment above for the meaning
 * of the file's traits). Dies on read or parse errors.
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_dir *dir;
	struct ref_iterator *iter;
	int sorted = 0;
	int ok;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	/* The whole file is loaded below, so the cache is complete: */
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
	packed_refs->peeled = PEELED_NONE;

	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		/* Copy the header so it can be NUL-terminated and split: */
		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	verify_buffer_safe(packed_refs);

	if (!sorted) {
		sort_packed_refs(packed_refs);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(packed_refs);
	}

	if (mmap_strategy != MMAP_OK && packed_refs->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = packed_refs->eof -
			(packed_refs->buf + packed_refs->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size);
		release_packed_ref_buffer(packed_refs);
		packed_refs->buf = buf_copy;
		packed_refs->eof = buf_copy + size;
	}

	/* Populate the ref_cache by walking the whole buffer: */
	dir = get_ref_dir(packed_refs->cache->root);
	iter = mmapped_ref_iterator_begin(
			packed_refs,
			packed_refs->buf + packed_refs->header_len,
			packed_refs->eof);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct ref_entry *entry =
			create_ref_entry(iter->refname, iter->oid, iter->flags);

		if ((iter->flags & REF_KNOWS_PEELED))
			ref_iterator_peel(iter, &entry->u.value.peeled);
		add_ref_entry(dir, entry);
	}

	if (ok != ITER_DONE)
		die("error reading packed-refs file %s", refs->path);

	return packed_refs;
}
 822
/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}
 833
 834/*
 835 * Get the packed_ref_cache for the specified packed_ref_store,
 836 * creating and populating it if it hasn't been read before or if the
 837 * file has been changed (according to its `validity` field) since it
 838 * was last read. On the other hand, if we hold the lock, then assume
 839 * that the file hasn't been changed out from under us, so skip the
 840 * extra `stat()` call in `stat_validity_check()`.
 841 */
 842static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
 843{
 844        if (!is_lock_file_locked(&refs->lock))
 845                validate_packed_ref_cache(refs);
 846
 847        if (!refs->cache)
 848                refs->cache = read_packed_refs(refs);
 849
 850        return refs->cache;
 851}
 852
 853static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
 854{
 855        return get_ref_dir(packed_ref_cache->cache->root);
 856}
 857
 858static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
 859{
 860        return get_packed_ref_dir(get_packed_ref_cache(refs));
 861}
 862
 863/*
 864 * Return the ref_entry for the given refname from the packed
 865 * references.  If it does not exist, return NULL.
 866 */
 867static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
 868                                        const char *refname)
 869{
 870        return find_ref_entry(get_packed_refs(refs), refname);
 871}
 872
/*
 * Read the reference named `refname` directly from the packed-refs
 * data, storing its value in `sha1` and setting `*type` to
 * REF_ISPACKED. Return 0 on success; if the reference is not packed,
 * set errno to ENOENT and return -1. `referent` is unused because the
 * packed backend does not support symbolic refs (see
 * packed_create_symref()).
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct packed_ref_cache *packed_refs = get_packed_ref_cache(refs);
	const char *rec;

	*type = 0;

	/* Look for an exact-match record for `refname`: */
	rec = find_reference_location(packed_refs, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	/* The record starts with the hex SHA-1 of the reference: */
	if (get_sha1_hex(rec, sha1))
		die_invalid_line(refs->path, rec, packed_refs->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}
 898
 899static int packed_peel_ref(struct ref_store *ref_store,
 900                           const char *refname, unsigned char *sha1)
 901{
 902        struct packed_ref_store *refs =
 903                packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
 904                                "peel_ref");
 905        struct ref_entry *r = get_packed_ref(refs, refname);
 906
 907        if (!r || peel_entry(r, 0))
 908                return -1;
 909
 910        hashcpy(sha1, r->u.value.peeled.hash);
 911        return 0;
 912}
 913
/*
 * An iterator over packed references, filtering and relaying the
 * entries produced by an underlying iterator over the raw
 * `packed-refs` data.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The cache being iterated over; we hold a reference on it. */
	struct packed_ref_cache *cache;

	/* The underlying iterator over the `packed-refs` contents. */
	struct ref_iterator *iter0;

	/* The DO_FOR_EACH_* flags to apply while iterating. */
	unsigned int flags;
};
 921
/*
 * Advance to the next reference that passes this iterator's flags,
 * exposing it via the `base` fields. Return ITER_OK, or ITER_DONE /
 * ITER_ERROR once iteration ends (at which point the iterator frees
 * itself).
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		/*
		 * If only per-worktree refs were requested, skip refs
		 * that are not per-worktree:
		 */
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		/* Unless broken refs were requested, skip unresolvable refs: */
		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		/* Publish the current entry through the public base fields: */
		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/*
	 * NOTE(review): iter0 presumably frees itself when its
	 * advance returns non-OK (the usual ref_iterator convention),
	 * so we only drop our pointer here — confirm against
	 * refs-internal.h.
	 */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}
 951
 952static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
 953                                   struct object_id *peeled)
 954{
 955        struct packed_ref_iterator *iter =
 956                (struct packed_ref_iterator *)ref_iterator;
 957
 958        return ref_iterator_peel(iter->iter0, peeled);
 959}
 960
 961static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
 962{
 963        struct packed_ref_iterator *iter =
 964                (struct packed_ref_iterator *)ref_iterator;
 965        int ok = ITER_DONE;
 966
 967        if (iter->iter0)
 968                ok = ref_iterator_abort(iter->iter0);
 969
 970        release_packed_ref_cache(iter->cache);
 971        base_ref_iterator_free(ref_iterator);
 972        return ok;
 973}
 974
/* The virtual method table for iterating over packed refs. */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
 980
/*
 * Begin an iteration over the packed refs, restricted to references
 * at or under `prefix` (if non-empty) and filtered according to the
 * DO_FOR_EACH_* `flags`.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_cache *packed_refs;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/*
	 * Filtering out broken refs requires resolving them, which
	 * needs access to the object database:
	 */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */
	iter->cache = packed_refs = get_packed_ref_cache(refs);
	/* Pin the cache for the iterator's lifetime (released on abort): */
	acquire_packed_ref_cache(packed_refs);

	/* Start at the first record at/after `prefix`, or at the very first record: */
	if (prefix && *prefix)
		start = find_reference_location(packed_refs, prefix, 0);
	else
		start = packed_refs->buf + packed_refs->header_len;

	iter->iter0 = mmapped_ref_iterator_begin(
			packed_refs, start, packed_refs->eof);

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
1024
1025/*
1026 * Write an entry to the packed-refs file for the specified refname.
1027 * If peeled is non-NULL, write it as the entry's peeled value. On
1028 * error, return a nonzero value and leave errno set at the value left
1029 * by the failing call to `fprintf()`.
1030 */
1031static int write_packed_entry(FILE *fh, const char *refname,
1032                              const unsigned char *sha1,
1033                              const unsigned char *peeled)
1034{
1035        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
1036            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
1037                return -1;
1038
1039        return 0;
1040}
1041
/*
 * Take the lock on the `packed-refs` file (honoring the
 * `core.packedrefstimeout` configuration) and make sure that the
 * in-memory cache reflects the file as it exists in the locked
 * state. On failure, write a message to `err` and return -1.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	/* Read the timeout configuration only once per process: */
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}
1090
1091void packed_refs_unlock(struct ref_store *ref_store)
1092{
1093        struct packed_ref_store *refs = packed_downcast(
1094                        ref_store,
1095                        REF_STORE_READ | REF_STORE_WRITE,
1096                        "packed_refs_unlock");
1097
1098        if (!is_lock_file_locked(&refs->lock))
1099                die("BUG: packed_refs_unlock() called when not locked");
1100        rollback_lock_file(&refs->lock);
1101}
1102
1103int packed_refs_is_locked(struct ref_store *ref_store)
1104{
1105        struct packed_ref_store *refs = packed_downcast(
1106                        ref_store,
1107                        REF_STORE_READ | REF_STORE_WRITE,
1108                        "packed_refs_is_locked");
1109
1110        return is_lock_file_locked(&refs->lock);
1111}
1112
1113/*
1114 * The packed-refs header line that we write out.  Perhaps other
1115 * traits will be added later.
1116 *
1117 * Note that earlier versions of Git used to parse these traits by
1118 * looking for " trait " in the line. For this reason, the space after
1119 * the colon and the trailing space are required.
1120 */
1121static const char PACKED_REFS_HEADER[] =
1122        "# pack-refs with: peeled fully-peeled sorted \n";
1123
/*
 * Initialize the packed-refs database. A missing `packed-refs` file
 * is treated as empty, so there is nothing to create up front.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
1129
1130/*
1131 * Write the packed-refs from the cache to the packed-refs tempfile,
1132 * incorporating any changes from `updates`. `updates` must be a
1133 * sorted string list whose keys are the refnames and whose util
1134 * values are `struct ref_update *`. On error, rollback the tempfile,
1135 * write an error message to `err`, and return a nonzero value.
1136 *
1137 * The packfile must be locked before calling this function and will
1138 * remain locked when it is done.
1139 */
1140static int write_with_updates(struct packed_ref_store *refs,
1141                              struct string_list *updates,
1142                              struct strbuf *err)
1143{
1144        struct ref_iterator *iter = NULL;
1145        size_t i;
1146        int ok;
1147        FILE *out;
1148        struct strbuf sb = STRBUF_INIT;
1149        char *packed_refs_path;
1150
1151        if (!is_lock_file_locked(&refs->lock))
1152                die("BUG: write_with_updates() called while unlocked");
1153
1154        /*
1155         * If packed-refs is a symlink, we want to overwrite the
1156         * symlinked-to file, not the symlink itself. Also, put the
1157         * staging file next to it:
1158         */
1159        packed_refs_path = get_locked_file_path(&refs->lock);
1160        strbuf_addf(&sb, "%s.new", packed_refs_path);
1161        free(packed_refs_path);
1162        if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
1163                strbuf_addf(err, "unable to create file %s: %s",
1164                            sb.buf, strerror(errno));
1165                strbuf_release(&sb);
1166                return -1;
1167        }
1168        strbuf_release(&sb);
1169
1170        out = fdopen_tempfile(&refs->tempfile, "w");
1171        if (!out) {
1172                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
1173                            strerror(errno));
1174                goto error;
1175        }
1176
1177        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
1178                goto write_error;
1179
1180        /*
1181         * We iterate in parallel through the current list of refs and
1182         * the list of updates, processing an entry from at least one
1183         * of the lists each time through the loop. When the current
1184         * list of refs is exhausted, set iter to NULL. When the list
1185         * of updates is exhausted, leave i set to updates->nr.
1186         */
1187        iter = packed_ref_iterator_begin(&refs->base, "",
1188                                         DO_FOR_EACH_INCLUDE_BROKEN);
1189        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1190                iter = NULL;
1191
1192        i = 0;
1193
1194        while (iter || i < updates->nr) {
1195                struct ref_update *update = NULL;
1196                int cmp;
1197
1198                if (i >= updates->nr) {
1199                        cmp = -1;
1200                } else {
1201                        update = updates->items[i].util;
1202
1203                        if (!iter)
1204                                cmp = +1;
1205                        else
1206                                cmp = strcmp(iter->refname, update->refname);
1207                }
1208
1209                if (!cmp) {
1210                        /*
1211                         * There is both an old value and an update
1212                         * for this reference. Check the old value if
1213                         * necessary:
1214                         */
1215                        if ((update->flags & REF_HAVE_OLD)) {
1216                                if (is_null_oid(&update->old_oid)) {
1217                                        strbuf_addf(err, "cannot update ref '%s': "
1218                                                    "reference already exists",
1219                                                    update->refname);
1220                                        goto error;
1221                                } else if (oidcmp(&update->old_oid, iter->oid)) {
1222                                        strbuf_addf(err, "cannot update ref '%s': "
1223                                                    "is at %s but expected %s",
1224                                                    update->refname,
1225                                                    oid_to_hex(iter->oid),
1226                                                    oid_to_hex(&update->old_oid));
1227                                        goto error;
1228                                }
1229                        }
1230
1231                        /* Now figure out what to use for the new value: */
1232                        if ((update->flags & REF_HAVE_NEW)) {
1233                                /*
1234                                 * The update takes precedence. Skip
1235                                 * the iterator over the unneeded
1236                                 * value.
1237                                 */
1238                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1239                                        iter = NULL;
1240                                cmp = +1;
1241                        } else {
1242                                /*
1243                                 * The update doesn't actually want to
1244                                 * change anything. We're done with it.
1245                                 */
1246                                i++;
1247                                cmp = -1;
1248                        }
1249                } else if (cmp > 0) {
1250                        /*
1251                         * There is no old value but there is an
1252                         * update for this reference. Make sure that
1253                         * the update didn't expect an existing value:
1254                         */
1255                        if ((update->flags & REF_HAVE_OLD) &&
1256                            !is_null_oid(&update->old_oid)) {
1257                                strbuf_addf(err, "cannot update ref '%s': "
1258                                            "reference is missing but expected %s",
1259                                            update->refname,
1260                                            oid_to_hex(&update->old_oid));
1261                                goto error;
1262                        }
1263                }
1264
1265                if (cmp < 0) {
1266                        /* Pass the old reference through. */
1267
1268                        struct object_id peeled;
1269                        int peel_error = ref_iterator_peel(iter, &peeled);
1270
1271                        if (write_packed_entry(out, iter->refname,
1272                                               iter->oid->hash,
1273                                               peel_error ? NULL : peeled.hash))
1274                                goto write_error;
1275
1276                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1277                                iter = NULL;
1278                } else if (is_null_oid(&update->new_oid)) {
1279                        /*
1280                         * The update wants to delete the reference,
1281                         * and the reference either didn't exist or we
1282                         * have already skipped it. So we're done with
1283                         * the update (and don't have to write
1284                         * anything).
1285                         */
1286                        i++;
1287                } else {
1288                        struct object_id peeled;
1289                        int peel_error = peel_object(update->new_oid.hash,
1290                                                     peeled.hash);
1291
1292                        if (write_packed_entry(out, update->refname,
1293                                               update->new_oid.hash,
1294                                               peel_error ? NULL : peeled.hash))
1295                                goto write_error;
1296
1297                        i++;
1298                }
1299        }
1300
1301        if (ok != ITER_DONE) {
1302                strbuf_addf(err, "unable to write packed-refs file: "
1303                            "error iterating over old contents");
1304                goto error;
1305        }
1306
1307        if (close_tempfile(&refs->tempfile)) {
1308                strbuf_addf(err, "error closing file %s: %s",
1309                            get_tempfile_path(&refs->tempfile),
1310                            strerror(errno));
1311                strbuf_release(&sb);
1312                return -1;
1313        }
1314
1315        return 0;
1316
1317write_error:
1318        strbuf_addf(err, "error writing to %s: %s",
1319                    get_tempfile_path(&refs->tempfile), strerror(errno));
1320
1321error:
1322        if (iter)
1323                ref_iterator_abort(iter);
1324
1325        delete_tempfile(&refs->tempfile);
1326        return -1;
1327}
1328
/* Backend-private state attached to a packed-refs transaction. */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/*
	 * The queued updates, sorted by refname; each item's util
	 * points at the corresponding `struct ref_update`.
	 */
	struct string_list updates;
};
1335
1336static void packed_transaction_cleanup(struct packed_ref_store *refs,
1337                                       struct ref_transaction *transaction)
1338{
1339        struct packed_transaction_backend_data *data = transaction->backend_data;
1340
1341        if (data) {
1342                string_list_clear(&data->updates, 0);
1343
1344                if (is_tempfile_active(&refs->tempfile))
1345                        delete_tempfile(&refs->tempfile);
1346
1347                if (data->own_lock && is_lock_file_locked(&refs->lock)) {
1348                        packed_refs_unlock(&refs->base);
1349                        data->own_lock = 0;
1350                }
1351
1352                free(data);
1353                transaction->backend_data = NULL;
1354        }
1355
1356        transaction->state = REF_TRANSACTION_CLOSED;
1357}
1358
/*
 * Prepare the transaction: sort the queued updates, reject
 * duplicates, take the packed-refs lock if we don't already hold it,
 * and write the new contents to the tempfile. On failure, clean up
 * and return TRANSACTION_GENERIC_ERROR.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock only if a caller hasn't already done so for us: */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1417
1418static int packed_transaction_abort(struct ref_store *ref_store,
1419                                    struct ref_transaction *transaction,
1420                                    struct strbuf *err)
1421{
1422        struct packed_ref_store *refs = packed_downcast(
1423                        ref_store,
1424                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1425                        "ref_transaction_abort");
1426
1427        packed_transaction_cleanup(refs, transaction);
1428        return 0;
1429}
1430
/*
 * Commit the prepared transaction by renaming the tempfile onto the
 * `packed-refs` file, invalidating the now-stale in-memory cache
 * first. Return 0 on success or TRANSACTION_GENERIC_ERROR on
 * failure.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	/* The file is about to change, so the cached contents are stale: */
	clear_packed_ref_cache(refs);

	/* Rename onto the symlinked-to path, not the symlink (if any): */
	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1458
/*
 * For the packed backend an "initial" commit needs no special
 * handling, so just commit the transaction normally.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
1465
/*
 * Delete the references named in `refnames` from the packed-refs
 * store, batching all of the deletions into a single transaction.
 * A reference that cannot be queued only produces a warning; a
 * failed commit is reported as an error. Return the result of the
 * commit (0 on success).
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}
1514
/* Packing is a no-op for this backend; see the comment below. */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1524
/*
 * The packed-refs format cannot represent symbolic refs; callers
 * must never route symref creation to this backend.
 */
static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
1531
/*
 * Renaming is not supported directly on the packed store; callers
 * must never route renames here.
 */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
1538
/* Packed refs have no reflogs, so iterate over nothing. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
1543
/* No reflogs exist for packed refs, so there is nothing to iterate over. */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}
1550
/* Reverse variant of the above; equally empty for packed refs. */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}
1558
/* Packed refs never have reflogs, so none ever exists. */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1564
/* Creating a reflog in a packed store is a caller error. */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
1571
/* Deleting a (nonexistent) reflog trivially succeeds. */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1577
/* There are no reflog entries to expire, so succeed without calling back. */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1588
/*
 * The method table that wires this backend into the generic
 * ref-store machinery. (NOTE(review): the slot order must match
 * `struct ref_storage_be` in refs-internal.h — the first two members
 * appear to be the backend-list link and the backend name.)
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};