3829e9c29437995652a557c6482dc0aecf8fba95
   1#include "../cache.h"
   2#include "../config.h"
   3#include "../refs.h"
   4#include "refs-internal.h"
   5#include "ref-cache.h"
   6#include "packed-backend.h"
   7#include "../iterator.h"
   8#include "../lockfile.h"
   9
/*
 * How the `packed-refs` file may be accessed via mmap(). The actual
 * strategy is chosen at compile time (see the `mmap_strategy`
 * variable below) based on platform capabilities.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};
  30
/* Select the mmap strategy for this platform at compile time: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif
  38
  39struct packed_ref_store;
  40
/*
 * A cache of the parsed contents of a `packed-refs` file, reference
 * counted via `referrers`/`acquire_packed_ref_cache()`/
 * `release_packed_ref_cache()`.
 */
struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 * PEELED_NONE: no guarantees; PEELED_TAGS: refs under
	 * "refs/tags/" are peeled if peelable; PEELED_FULLY: all
	 * peelable refs are peeled.
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};
  81
/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	/* Must be the first member so that downcasts work: */
	struct ref_store base;

	/* `REF_STORE_*` capability flags supported by this store: */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};
 113
 114/*
 115 * Increment the reference count of *packed_refs.
 116 */
 117static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
 118{
 119        packed_refs->referrers++;
 120}
 121
 122/*
 123 * If the buffer in `packed_refs` is active, then either munmap the
 124 * memory and close the file, or free the memory. Then set the buffer
 125 * pointers to NULL.
 126 */
 127static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
 128{
 129        if (packed_refs->mmapped) {
 130                if (munmap(packed_refs->buf,
 131                           packed_refs->eof - packed_refs->buf))
 132                        die_errno("error ummapping packed-refs file %s",
 133                                  packed_refs->refs->path);
 134                packed_refs->mmapped = 0;
 135        } else {
 136                free(packed_refs->buf);
 137        }
 138        packed_refs->buf = packed_refs->eof = NULL;
 139        packed_refs->header_len = 0;
 140}
 141
 142/*
 143 * Decrease the reference count of *packed_refs.  If it goes to zero,
 144 * free *packed_refs and return true; otherwise return false.
 145 */
 146static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
 147{
 148        if (!--packed_refs->referrers) {
 149                stat_validity_clear(&packed_refs->validity);
 150                release_packed_ref_buffer(packed_refs);
 151                free(packed_refs);
 152                return 1;
 153        } else {
 154                return 0;
 155        }
 156}
 157
 158struct ref_store *packed_ref_store_create(const char *path,
 159                                          unsigned int store_flags)
 160{
 161        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
 162        struct ref_store *ref_store = (struct ref_store *)refs;
 163
 164        base_ref_store_init(ref_store, &refs_be_packed);
 165        refs->store_flags = store_flags;
 166
 167        refs->path = xstrdup(path);
 168        return ref_store;
 169}
 170
 171/*
 172 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 173 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 174 * support at least the flags specified in `required_flags`. `caller`
 175 * is used in any necessary error messages.
 176 */
 177static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
 178                                                unsigned int required_flags,
 179                                                const char *caller)
 180{
 181        struct packed_ref_store *refs;
 182
 183        if (ref_store->be != &refs_be_packed)
 184                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
 185                    ref_store->be->name, caller);
 186
 187        refs = (struct packed_ref_store *)ref_store;
 188
 189        if ((refs->store_flags & required_flags) != required_flags)
 190                die("BUG: unallowed operation (%s), requires %x, has %x\n",
 191                    caller, required_flags, refs->store_flags);
 192
 193        return refs;
 194}
 195
 196static void clear_packed_ref_cache(struct packed_ref_store *refs)
 197{
 198        if (refs->cache) {
 199                struct packed_ref_cache *cache = refs->cache;
 200
 201                refs->cache = NULL;
 202                release_packed_ref_cache(cache);
 203        }
 204}
 205
 206static NORETURN void die_unterminated_line(const char *path,
 207                                           const char *p, size_t len)
 208{
 209        if (len < 80)
 210                die("unterminated line in %s: %.*s", path, (int)len, p);
 211        else
 212                die("unterminated line in %s: %.75s...", path, p);
 213}
 214
 215static NORETURN void die_invalid_line(const char *path,
 216                                      const char *p, size_t len)
 217{
 218        const char *eol = memchr(p, '\n', len);
 219
 220        if (!eol)
 221                die_unterminated_line(path, p, len);
 222        else if (eol - p < 80)
 223                die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
 224        else
 225                die("unexpected line in %s: %.75s...", path, p);
 226
 227}
 228
/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	/* Must be first, so that this can be downcast from a ref_iterator: */
	struct ref_iterator base;

	/* The cache being iterated over; we hold a reference to it: */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* The oid and (if known) peeled oid of the current reference: */
	struct object_id oid, peeled;

	/* Owned storage backing `base.refname` for the current record: */
	struct strbuf refname_buf;
};
 247
/*
 * Parse the record at `iter->pos` (a "<hex-oid> <refname>\n" line,
 * optionally followed by a "^<hex-oid>\n" peeled line), fill in the
 * iterator's output fields, and advance past it. Returns ITER_OK,
 * or the result of aborting when the end of the buffer is reached.
 * Dies on malformed input.
 */
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	/*
	 * The record must be long enough for an oid, a space, and at
	 * least one refname character:
	 */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		/* Tolerate a malformed-but-safe refname, marking it broken: */
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* A following line starting with '^' carries the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
 315
 316static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
 317                                    struct object_id *peeled)
 318{
 319        struct mmapped_ref_iterator *iter =
 320                (struct mmapped_ref_iterator *)ref_iterator;
 321
 322        if ((iter->base.flags & REF_KNOWS_PEELED)) {
 323                oidcpy(peeled, &iter->peeled);
 324                return is_null_oid(&iter->peeled) ? -1 : 0;
 325        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
 326                return -1;
 327        } else {
 328                return !!peel_object(iter->oid.hash, peeled->hash);
 329        }
 330}
 331
 332static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
 333{
 334        struct mmapped_ref_iterator *iter =
 335                (struct mmapped_ref_iterator *)ref_iterator;
 336
 337        release_packed_ref_cache(iter->packed_refs);
 338        strbuf_release(&iter->refname_buf);
 339        base_ref_iterator_free(ref_iterator);
 340        return ITER_DONE;
 341}
 342
/* Virtual method table for mmapped_ref_iterator (advance/peel/abort): */
static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};
 348
 349struct ref_iterator *mmapped_ref_iterator_begin(
 350                struct packed_ref_cache *packed_refs,
 351                const char *pos, const char *eof)
 352{
 353        struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
 354        struct ref_iterator *ref_iterator = &iter->base;
 355
 356        if (!packed_refs->buf)
 357                return empty_ref_iterator_begin();
 358
 359        base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1);
 360
 361        iter->packed_refs = packed_refs;
 362        acquire_packed_ref_cache(iter->packed_refs);
 363        iter->pos = pos;
 364        iter->eof = eof;
 365        strbuf_init(&iter->refname_buf, 0);
 366
 367        iter->base.oid = &iter->oid;
 368
 369        return ref_iterator;
 370}
 371
/*
 * One record (a reference line plus any following '^' peeled line)
 * within the `packed-refs` buffer, used while checking/sorting order.
 */
struct packed_ref_entry {
	/* Pointer into the buffer at the start of the record: */
	const char *start;
	/* Length of the record in bytes, including its trailing LF: */
	size_t len;
};
 376
 377static int cmp_packed_ref_entries(const void *v1, const void *v2)
 378{
 379        const struct packed_ref_entry *e1 = v1, *e2 = v2;
 380        const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
 381        const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
 382
 383        while (1) {
 384                if (*r1 == '\n')
 385                        return *r2 == '\n' ? 0 : -1;
 386                if (*r1 != *r2) {
 387                        if (*r2 == '\n')
 388                                return 1;
 389                        else
 390                                return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 391                }
 392                r1++;
 393                r2++;
 394        }
 395}
 396
 397/*
 398 * Compare a packed-refs record pointed to by `rec` to the specified
 399 * NUL-terminated refname.
 400 */
 401static int cmp_entry_to_refname(const char *rec, const char *refname)
 402{
 403        const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
 404        const char *r2 = refname;
 405
 406        while (1) {
 407                if (*r1 == '\n')
 408                        return *r2 ? -1 : 0;
 409                if (!*r2)
 410                        return 1;
 411                if (*r1 != *r2)
 412                        return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
 413                r1++;
 414                r2++;
 415        }
 416}
 417
 418/*
 419 * `packed_refs->buf` is not known to be sorted. Check whether it is,
 420 * and if not, sort it into new memory and munmap/free the old
 421 * storage.
 422 */
static void sort_packed_refs(struct packed_ref_cache *packed_refs)
{
	struct packed_ref_entry *entries = NULL;
	size_t alloc = 0, nr = 0;
	/* Assume sorted until we see an out-of-order pair: */
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	/* Skip the header line, if any: */
	pos = packed_refs->buf + packed_refs->header_len;
	eof = packed_refs->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize entries based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(entries, len / 80 + 20, alloc);

	/* First pass: record the extent of each record and check order. */
	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(packed_refs->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(entries, nr + 1, alloc);
		entries[nr].start = pos;
		entries[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_entries(&entries[nr - 2],
					   &entries[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the entries array: */
	QSORT(entries, nr, cmp_packed_ref_entries);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `entries` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, entries[i].start, entries[i].len);
		dst += entries[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	release_packed_ref_buffer(packed_refs);
	packed_refs->buf = new_buffer;
	packed_refs->eof = new_buffer + len;
	/* The sorted copy was made without the header line: */
	packed_refs->header_len = 0;

cleanup:
	free(entries);
}
 511
 512/*
 513 * Return a pointer to the start of the record that contains the
 514 * character `*p` (which must be within the buffer). If no other
 515 * record start is found, return `buf`.
 516 */
 517static const char *find_start_of_record(const char *buf, const char *p)
 518{
 519        while (p > buf && (p[-1] != '\n' || p[0] == '^'))
 520                p--;
 521        return p;
 522}
 523
 524/*
 525 * Return a pointer to the start of the record following the record
 526 * that contains `*p`. If none is found before `end`, return `end`.
 527 */
 528static const char *find_end_of_record(const char *p, const char *end)
 529{
 530        while (++p < end && (p[-1] != '\n' || p[0] == '^'))
 531                ;
 532        return p;
 533}
 534
 535/*
 536 * We want to be able to compare mmapped reference records quickly,
 537 * without totally parsing them. We can do so because the records are
 538 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 539 * + 1) bytes past the beginning of the record.
 540 *
 541 * But what if the `packed-refs` file contains garbage? We're willing
 542 * to tolerate not detecting the problem, as long as we don't produce
 543 * totally garbled output (we can't afford to check the integrity of
 544 * the whole file during every Git invocation). But we do want to be
 545 * sure that we never read past the end of the buffer in memory and
 546 * perform an illegal memory access.
 547 *
 548 * Guarantee that minimum level of safety by verifying that the last
 549 * record in the file is LF-terminated, and that it has at least
 550 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 551 * these checks fails.
 552 */
static void verify_buffer_safe(struct packed_ref_cache *packed_refs)
{
	/* Skip the header line, if any: */
	const char *buf = packed_refs->buf + packed_refs->header_len;
	const char *eof = packed_refs->eof;
	const char *last_line;

	/* An empty buffer is trivially safe: */
	if (buf == eof)
		return;

	/*
	 * Require the final record to be LF-terminated and long enough
	 * to hold an oid, a space, and at least one refname character:
	 */
	last_line = find_start_of_record(buf, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(packed_refs->refs->path,
				 last_line, eof - last_line);
}
 567
 568/*
 569 * Depending on `mmap_strategy`, either mmap or read the contents of
 570 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 571 * the file existed and was read, or 0 if the file was absent. Die on
 572 * errors.
 573 */
 574static int load_contents(struct packed_ref_cache *packed_refs)
 575{
 576        int fd;
 577        struct stat st;
 578        size_t size;
 579        ssize_t bytes_read;
 580
 581        fd = open(packed_refs->refs->path, O_RDONLY);
 582        if (fd < 0) {
 583                if (errno == ENOENT) {
 584                        /*
 585                         * This is OK; it just means that no
 586                         * "packed-refs" file has been written yet,
 587                         * which is equivalent to it being empty,
 588                         * which is its state when initialized with
 589                         * zeros.
 590                         */
 591                        return 0;
 592                } else {
 593                        die_errno("couldn't read %s", packed_refs->refs->path);
 594                }
 595        }
 596
 597        stat_validity_update(&packed_refs->validity, fd);
 598
 599        if (fstat(fd, &st) < 0)
 600                die_errno("couldn't stat %s", packed_refs->refs->path);
 601        size = xsize_t(st.st_size);
 602
 603        switch (mmap_strategy) {
 604        case MMAP_NONE:
 605                packed_refs->buf = xmalloc(size);
 606                bytes_read = read_in_full(fd, packed_refs->buf, size);
 607                if (bytes_read < 0 || bytes_read != size)
 608                        die_errno("couldn't read %s", packed_refs->refs->path);
 609                packed_refs->eof = packed_refs->buf + size;
 610                packed_refs->mmapped = 0;
 611                break;
 612        case MMAP_TEMPORARY:
 613        case MMAP_OK:
 614                packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
 615                packed_refs->eof = packed_refs->buf + size;
 616                packed_refs->mmapped = 1;
 617                break;
 618        }
 619        close(fd);
 620
 621        return 1;
 622}
 623
 624/*
 625 * Find the place in `cache->buf` where the start of the record for
 626 * `refname` starts. If `mustexist` is true and the reference doesn't
 627 * exist, then return NULL. If `mustexist` is false and the reference
 628 * doesn't exist, then return the point where that reference would be
 629 * inserted. In the latter mode, `refname` doesn't have to be a proper
 630 * reference name; for example, one could search for "refs/replace/"
 631 * to find the start of any replace references.
 632 *
 633 * The record is sought using a binary search, so `cache->buf` must be
 634 * sorted.
 635 */
static const char *find_reference_location(struct packed_ref_cache *cache,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = cache->buf + cache->header_len;

	/*
	 * A pointer to a the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = cache->eof;

	while (lo < hi) {
		const char *mid, *rec;
		int cmp;

		/* Bisect, then snap back to the start of the enclosing record: */
		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_entry_to_refname(rec, refname);
		if (cmp < 0) {
			/* Advance past the record containing `mid`: */
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	/* Not found; `lo` is where such a record would be inserted. */
	if (mustexist)
		return NULL;
	else
		return lo;
}
 682
 683/*
 684 * Read from the `packed-refs` file into a newly-allocated
 685 * `packed_ref_cache` and return it. The return value will already
 686 * have its reference count incremented.
 687 *
 688 * A comment line of the form "# pack-refs with: " may contain zero or
 689 * more traits. We interpret the traits as follows:
 690 *
 691 *   Neither `peeled` nor `fully-peeled`:
 692 *
 693 *      Probably no references are peeled. But if the file contains a
 694 *      peeled value for a reference, we will use it.
 695 *
 696 *   `peeled`:
 697 *
 698 *      References under "refs/tags/", if they *can* be peeled, *are*
 699 *      peeled in this file. References outside of "refs/tags/" are
 700 *      probably not peeled even if they could have been, but if we find
 701 *      a peeled value for such a reference we will use it.
 702 *
 703 *   `fully-peeled`:
 704 *
 705 *      All references in the file that can be peeled are peeled.
 706 *      Inversely (and this is more important), any references in the
 707 *      file for which no peeled value is recorded is not peelable. This
 708 *      trait should typically be written alongside "peeled" for
 709 *      compatibility with older clients, but we do not require it
 710 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 711 *
 712 *   `sorted`:
 713 *
 714 *      The references in this file are known to be sorted by refname.
 715 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	int sorted = 0;

	packed_refs->refs = refs;
	/* The returned cache carries one reference owned by the caller: */
	acquire_packed_ref_cache(packed_refs);
	packed_refs->peeled = PEELED_NONE;

	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		/* Copy the header so we can split it in place: */
		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		/* NODUP: `traits` points into `tmp.buf`, which must outlive it. */
		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	verify_buffer_safe(packed_refs);

	if (!sorted) {
		sort_packed_refs(packed_refs);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(packed_refs);
	}

	if (mmap_strategy != MMAP_OK && packed_refs->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = packed_refs->eof -
			(packed_refs->buf + packed_refs->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size);
		release_packed_ref_buffer(packed_refs);
		packed_refs->buf = buf_copy;
		packed_refs->eof = buf_copy + size;
	}

	return packed_refs;
}
 797
 798/*
 799 * Check that the packed refs cache (if any) still reflects the
 800 * contents of the file. If not, clear the cache.
 801 */
 802static void validate_packed_ref_cache(struct packed_ref_store *refs)
 803{
 804        if (refs->cache &&
 805            !stat_validity_check(&refs->cache->validity, refs->path))
 806                clear_packed_ref_cache(refs);
 807}
 808
 809/*
 810 * Get the packed_ref_cache for the specified packed_ref_store,
 811 * creating and populating it if it hasn't been read before or if the
 812 * file has been changed (according to its `validity` field) since it
 813 * was last read. On the other hand, if we hold the lock, then assume
 814 * that the file hasn't been changed out from under us, so skip the
 815 * extra `stat()` call in `stat_validity_check()`.
 816 */
 817static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
 818{
 819        if (!is_lock_file_locked(&refs->lock))
 820                validate_packed_ref_cache(refs);
 821
 822        if (!refs->cache)
 823                refs->cache = read_packed_refs(refs);
 824
 825        return refs->cache;
 826}
 827
 828static int packed_read_raw_ref(struct ref_store *ref_store,
 829                               const char *refname, unsigned char *sha1,
 830                               struct strbuf *referent, unsigned int *type)
 831{
 832        struct packed_ref_store *refs =
 833                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
 834        struct packed_ref_cache *packed_refs = get_packed_ref_cache(refs);
 835        const char *rec;
 836
 837        *type = 0;
 838
 839        rec = find_reference_location(packed_refs, refname, 1);
 840
 841        if (!rec) {
 842                /* refname is not a packed reference. */
 843                errno = ENOENT;
 844                return -1;
 845        }
 846
 847        if (get_sha1_hex(rec, sha1))
 848                die_invalid_line(refs->path, rec, packed_refs->eof - rec);
 849
 850        *type = REF_ISPACKED;
 851        return 0;
 852}
 853
/*
 * An iterator over the references in a `packed-refs` file. It wraps
 * an inner iterator (`iter0`) over the raw cache contents and filters
 * the results according to the DO_FOR_EACH_* bits in `flags`.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The cache backing iter0; acquired at begin, released at abort. */
	struct packed_ref_cache *cache;
	/* The inner iterator; set to NULL once it has finished. */
	struct ref_iterator *iter0;
	/* DO_FOR_EACH_* flags controlling which refs are yielded. */
	unsigned int flags;
};
 861
/*
 * Advance to the next packed reference that passes the iterator's
 * flag-based filters. Returns ITER_OK with the ref exposed via
 * `base`, or ITER_DONE/ITER_ERROR once iteration ends (in which case
 * this iterator frees itself via ref_iterator_abort()).
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		/* If only per-worktree refs were requested, skip the rest: */
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		/* Unless broken refs were requested, skip unresolvable ones: */
		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		/* Expose the inner iterator's current ref through our base: */
		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/*
	 * Clear iter0 before aborting ourselves so that
	 * packed_ref_iterator_abort() does not abort it a second time
	 * (presumably iter0 cleaned itself up when its advance
	 * returned non-ITER_OK — the usual ref_iterator protocol).
	 */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}
 891
 892static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
 893                                   struct object_id *peeled)
 894{
 895        struct packed_ref_iterator *iter =
 896                (struct packed_ref_iterator *)ref_iterator;
 897
 898        return ref_iterator_peel(iter->iter0, peeled);
 899}
 900
 901static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
 902{
 903        struct packed_ref_iterator *iter =
 904                (struct packed_ref_iterator *)ref_iterator;
 905        int ok = ITER_DONE;
 906
 907        if (iter->iter0)
 908                ok = ref_iterator_abort(iter->iter0);
 909
 910        release_packed_ref_cache(iter->cache);
 911        base_ref_iterator_free(ref_iterator);
 912        return ok;
 913}
 914
/* Virtual method table wiring packed_ref_iterator into the generic
 * ref_iterator framework (advance, peel, abort). */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
 920
/*
 * Begin iterating over the packed references whose names start with
 * `prefix` (all of them if `prefix` is NULL or empty), filtered
 * according to the DO_FOR_EACH_* bits in `flags`.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_cache *packed_refs;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/* Filtering out broken refs requires access to the object database: */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */
	iter->cache = packed_refs = get_packed_ref_cache(refs);
	/* Pin the cache for the iterator's lifetime (released on abort): */
	acquire_packed_ref_cache(packed_refs);

	/* Start at the first record whose refname could match `prefix`: */
	if (prefix && *prefix)
		start = find_reference_location(packed_refs, prefix, 0);
	else
		start = packed_refs->buf + packed_refs->header_len;

	iter->iter0 = mmapped_ref_iterator_begin(packed_refs,
						 start, packed_refs->eof);

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
 964
 965/*
 966 * Write an entry to the packed-refs file for the specified refname.
 967 * If peeled is non-NULL, write it as the entry's peeled value. On
 968 * error, return a nonzero value and leave errno set at the value left
 969 * by the failing call to `fprintf()`.
 970 */
 971static int write_packed_entry(FILE *fh, const char *refname,
 972                              const unsigned char *sha1,
 973                              const unsigned char *peeled)
 974{
 975        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
 976            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
 977                return -1;
 978
 979        return 0;
 980}
 981
 982int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
 983{
 984        struct packed_ref_store *refs =
 985                packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
 986                                "packed_refs_lock");
 987        static int timeout_configured = 0;
 988        static int timeout_value = 1000;
 989
 990        if (!timeout_configured) {
 991                git_config_get_int("core.packedrefstimeout", &timeout_value);
 992                timeout_configured = 1;
 993        }
 994
 995        /*
 996         * Note that we close the lockfile immediately because we
 997         * don't write new content to it, but rather to a separate
 998         * tempfile.
 999         */
1000        if (hold_lock_file_for_update_timeout(
1001                            &refs->lock,
1002                            refs->path,
1003                            flags, timeout_value) < 0) {
1004                unable_to_lock_message(refs->path, errno, err);
1005                return -1;
1006        }
1007
1008        if (close_lock_file(&refs->lock)) {
1009                strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
1010                return -1;
1011        }
1012
1013        /*
1014         * Now that we hold the `packed-refs` lock, make sure that our
1015         * cache matches the current version of the file. Normally
1016         * `get_packed_ref_cache()` does that for us, but that
1017         * function assumes that when the file is locked, any existing
1018         * cache is still valid. We've just locked the file, but it
1019         * might have changed the moment *before* we locked it.
1020         */
1021        validate_packed_ref_cache(refs);
1022
1023        /*
1024         * Now make sure that the packed-refs file as it exists in the
1025         * locked state is loaded into the cache:
1026         */
1027        get_packed_ref_cache(refs);
1028        return 0;
1029}
1030
1031void packed_refs_unlock(struct ref_store *ref_store)
1032{
1033        struct packed_ref_store *refs = packed_downcast(
1034                        ref_store,
1035                        REF_STORE_READ | REF_STORE_WRITE,
1036                        "packed_refs_unlock");
1037
1038        if (!is_lock_file_locked(&refs->lock))
1039                die("BUG: packed_refs_unlock() called when not locked");
1040        rollback_lock_file(&refs->lock);
1041}
1042
1043int packed_refs_is_locked(struct ref_store *ref_store)
1044{
1045        struct packed_ref_store *refs = packed_downcast(
1046                        ref_store,
1047                        REF_STORE_READ | REF_STORE_WRITE,
1048                        "packed_refs_is_locked");
1049
1050        return is_lock_file_locked(&refs->lock);
1051}
1052
1053/*
1054 * The packed-refs header line that we write out.  Perhaps other
1055 * traits will be added later.
1056 *
1057 * Note that earlier versions of Git used to parse these traits by
1058 * looking for " trait " in the line. For this reason, the space after
1059 * the colon and the trailing space are required.
1060 */
1061static const char PACKED_REFS_HEADER[] =
1062        "# pack-refs with: peeled fully-peeled sorted \n";
1063
/*
 * A packed ref store needs no on-disk initialization: the
 * `packed-refs` file is created lazily when refs are first packed.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
1069
1070/*
1071 * Write the packed-refs from the cache to the packed-refs tempfile,
1072 * incorporating any changes from `updates`. `updates` must be a
1073 * sorted string list whose keys are the refnames and whose util
1074 * values are `struct ref_update *`. On error, rollback the tempfile,
1075 * write an error message to `err`, and return a nonzero value.
1076 *
1077 * The packfile must be locked before calling this function and will
1078 * remain locked when it is done.
1079 */
1080static int write_with_updates(struct packed_ref_store *refs,
1081                              struct string_list *updates,
1082                              struct strbuf *err)
1083{
1084        struct ref_iterator *iter = NULL;
1085        size_t i;
1086        int ok;
1087        FILE *out;
1088        struct strbuf sb = STRBUF_INIT;
1089        char *packed_refs_path;
1090
1091        if (!is_lock_file_locked(&refs->lock))
1092                die("BUG: write_with_updates() called while unlocked");
1093
1094        /*
1095         * If packed-refs is a symlink, we want to overwrite the
1096         * symlinked-to file, not the symlink itself. Also, put the
1097         * staging file next to it:
1098         */
1099        packed_refs_path = get_locked_file_path(&refs->lock);
1100        strbuf_addf(&sb, "%s.new", packed_refs_path);
1101        free(packed_refs_path);
1102        if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
1103                strbuf_addf(err, "unable to create file %s: %s",
1104                            sb.buf, strerror(errno));
1105                strbuf_release(&sb);
1106                return -1;
1107        }
1108        strbuf_release(&sb);
1109
1110        out = fdopen_tempfile(&refs->tempfile, "w");
1111        if (!out) {
1112                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
1113                            strerror(errno));
1114                goto error;
1115        }
1116
1117        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
1118                goto write_error;
1119
1120        /*
1121         * We iterate in parallel through the current list of refs and
1122         * the list of updates, processing an entry from at least one
1123         * of the lists each time through the loop. When the current
1124         * list of refs is exhausted, set iter to NULL. When the list
1125         * of updates is exhausted, leave i set to updates->nr.
1126         */
1127        iter = packed_ref_iterator_begin(&refs->base, "",
1128                                         DO_FOR_EACH_INCLUDE_BROKEN);
1129        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1130                iter = NULL;
1131
1132        i = 0;
1133
1134        while (iter || i < updates->nr) {
1135                struct ref_update *update = NULL;
1136                int cmp;
1137
1138                if (i >= updates->nr) {
1139                        cmp = -1;
1140                } else {
1141                        update = updates->items[i].util;
1142
1143                        if (!iter)
1144                                cmp = +1;
1145                        else
1146                                cmp = strcmp(iter->refname, update->refname);
1147                }
1148
1149                if (!cmp) {
1150                        /*
1151                         * There is both an old value and an update
1152                         * for this reference. Check the old value if
1153                         * necessary:
1154                         */
1155                        if ((update->flags & REF_HAVE_OLD)) {
1156                                if (is_null_oid(&update->old_oid)) {
1157                                        strbuf_addf(err, "cannot update ref '%s': "
1158                                                    "reference already exists",
1159                                                    update->refname);
1160                                        goto error;
1161                                } else if (oidcmp(&update->old_oid, iter->oid)) {
1162                                        strbuf_addf(err, "cannot update ref '%s': "
1163                                                    "is at %s but expected %s",
1164                                                    update->refname,
1165                                                    oid_to_hex(iter->oid),
1166                                                    oid_to_hex(&update->old_oid));
1167                                        goto error;
1168                                }
1169                        }
1170
1171                        /* Now figure out what to use for the new value: */
1172                        if ((update->flags & REF_HAVE_NEW)) {
1173                                /*
1174                                 * The update takes precedence. Skip
1175                                 * the iterator over the unneeded
1176                                 * value.
1177                                 */
1178                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1179                                        iter = NULL;
1180                                cmp = +1;
1181                        } else {
1182                                /*
1183                                 * The update doesn't actually want to
1184                                 * change anything. We're done with it.
1185                                 */
1186                                i++;
1187                                cmp = -1;
1188                        }
1189                } else if (cmp > 0) {
1190                        /*
1191                         * There is no old value but there is an
1192                         * update for this reference. Make sure that
1193                         * the update didn't expect an existing value:
1194                         */
1195                        if ((update->flags & REF_HAVE_OLD) &&
1196                            !is_null_oid(&update->old_oid)) {
1197                                strbuf_addf(err, "cannot update ref '%s': "
1198                                            "reference is missing but expected %s",
1199                                            update->refname,
1200                                            oid_to_hex(&update->old_oid));
1201                                goto error;
1202                        }
1203                }
1204
1205                if (cmp < 0) {
1206                        /* Pass the old reference through. */
1207
1208                        struct object_id peeled;
1209                        int peel_error = ref_iterator_peel(iter, &peeled);
1210
1211                        if (write_packed_entry(out, iter->refname,
1212                                               iter->oid->hash,
1213                                               peel_error ? NULL : peeled.hash))
1214                                goto write_error;
1215
1216                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
1217                                iter = NULL;
1218                } else if (is_null_oid(&update->new_oid)) {
1219                        /*
1220                         * The update wants to delete the reference,
1221                         * and the reference either didn't exist or we
1222                         * have already skipped it. So we're done with
1223                         * the update (and don't have to write
1224                         * anything).
1225                         */
1226                        i++;
1227                } else {
1228                        struct object_id peeled;
1229                        int peel_error = peel_object(update->new_oid.hash,
1230                                                     peeled.hash);
1231
1232                        if (write_packed_entry(out, update->refname,
1233                                               update->new_oid.hash,
1234                                               peel_error ? NULL : peeled.hash))
1235                                goto write_error;
1236
1237                        i++;
1238                }
1239        }
1240
1241        if (ok != ITER_DONE) {
1242                strbuf_addf(err, "unable to write packed-refs file: "
1243                            "error iterating over old contents");
1244                goto error;
1245        }
1246
1247        if (close_tempfile(&refs->tempfile)) {
1248                strbuf_addf(err, "error closing file %s: %s",
1249                            get_tempfile_path(&refs->tempfile),
1250                            strerror(errno));
1251                strbuf_release(&sb);
1252                return -1;
1253        }
1254
1255        return 0;
1256
1257write_error:
1258        strbuf_addf(err, "error writing to %s: %s",
1259                    get_tempfile_path(&refs->tempfile), strerror(errno));
1260
1261error:
1262        if (iter)
1263                ref_iterator_abort(iter);
1264
1265        delete_tempfile(&refs->tempfile);
1266        return -1;
1267}
1268
/*
 * Per-transaction state for this backend, stored in
 * `transaction->backend_data` between prepare and finish/abort.
 */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/* The updates, sorted by refname; each item's util points at
	 * the corresponding `struct ref_update`. */
	struct string_list updates;
};
1275
1276static void packed_transaction_cleanup(struct packed_ref_store *refs,
1277                                       struct ref_transaction *transaction)
1278{
1279        struct packed_transaction_backend_data *data = transaction->backend_data;
1280
1281        if (data) {
1282                string_list_clear(&data->updates, 0);
1283
1284                if (is_tempfile_active(&refs->tempfile))
1285                        delete_tempfile(&refs->tempfile);
1286
1287                if (data->own_lock && is_lock_file_locked(&refs->lock)) {
1288                        packed_refs_unlock(&refs->base);
1289                        data->own_lock = 0;
1290                }
1291
1292                free(data);
1293                transaction->backend_data = NULL;
1294        }
1295
1296        transaction->state = REF_TRANSACTION_CLOSED;
1297}
1298
/*
 * Prepare the transaction: sort its updates, reject duplicates, take
 * the packed-refs lock if not already held, and stage the new file
 * contents in the tempfile. On failure, clean up and return
 * TRANSACTION_GENERIC_ERROR with a message in `err`.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	/*
	 * Install backend_data before anything can fail, so that
	 * packed_transaction_cleanup() can free it on any path:
	 */
	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock unless the caller already holds it: */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1357
1358static int packed_transaction_abort(struct ref_store *ref_store,
1359                                    struct ref_transaction *transaction,
1360                                    struct strbuf *err)
1361{
1362        struct packed_ref_store *refs = packed_downcast(
1363                        ref_store,
1364                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1365                        "ref_transaction_abort");
1366
1367        packed_transaction_cleanup(refs, transaction);
1368        return 0;
1369}
1370
1371static int packed_transaction_finish(struct ref_store *ref_store,
1372                                     struct ref_transaction *transaction,
1373                                     struct strbuf *err)
1374{
1375        struct packed_ref_store *refs = packed_downcast(
1376                        ref_store,
1377                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
1378                        "ref_transaction_finish");
1379        int ret = TRANSACTION_GENERIC_ERROR;
1380        char *packed_refs_path;
1381
1382        clear_packed_ref_cache(refs);
1383
1384        packed_refs_path = get_locked_file_path(&refs->lock);
1385        if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
1386                strbuf_addf(err, "error replacing %s: %s",
1387                            refs->path, strerror(errno));
1388                goto cleanup;
1389        }
1390
1391        ret = 0;
1392
1393cleanup:
1394        free(packed_refs_path);
1395        packed_transaction_cleanup(refs, transaction);
1396        return ret;
1397}
1398
/*
 * The initial transaction needs no special handling for a packed ref
 * store; just commit it through the normal path.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
1405
1406static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
1407                             struct string_list *refnames, unsigned int flags)
1408{
1409        struct packed_ref_store *refs =
1410                packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
1411        struct strbuf err = STRBUF_INIT;
1412        struct ref_transaction *transaction;
1413        struct string_list_item *item;
1414        int ret;
1415
1416        (void)refs; /* We need the check above, but don't use the variable */
1417
1418        if (!refnames->nr)
1419                return 0;
1420
1421        /*
1422         * Since we don't check the references' old_oids, the
1423         * individual updates can't fail, so we can pack all of the
1424         * updates into a single transaction.
1425         */
1426
1427        transaction = ref_store_transaction_begin(ref_store, &err);
1428        if (!transaction)
1429                return -1;
1430
1431        for_each_string_list_item(item, refnames) {
1432                if (ref_transaction_delete(transaction, item->string, NULL,
1433                                           flags, msg, &err)) {
1434                        warning(_("could not delete reference %s: %s"),
1435                                item->string, err.buf);
1436                        strbuf_reset(&err);
1437                }
1438        }
1439
1440        ret = ref_transaction_commit(transaction, &err);
1441
1442        if (ret) {
1443                if (refnames->nr == 1)
1444                        error(_("could not delete reference %s: %s"),
1445                              refnames->items[0].string, err.buf);
1446                else
1447                        error(_("could not delete references: %s"), err.buf);
1448        }
1449
1450        ref_transaction_free(transaction);
1451        strbuf_release(&err);
1452        return ret;
1453}
1454
/* pack_refs is a no-op for this backend (always succeeds). */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1464
/*
 * Symbolic refs cannot be stored in a packed-refs file; reaching
 * this function is a programming error.
 */
static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
1471
/*
 * Renaming is not supported directly on a packed ref store; reaching
 * this function is a programming error.
 */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
1478
/* Packed refs have no reflogs, so iterate over the empty set. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
1483
/* No reflogs exist for packed refs; report success without calling fn. */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}
1490
/* No reflogs exist for packed refs; report success without calling fn. */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}
1498
/* Packed refs never have reflogs, so this always reports "no". */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1504
/*
 * Reflogs cannot be created in a packed ref store; reaching this
 * function is a programming error.
 */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
1511
/* Nothing to delete (no reflogs exist), so trivially succeed. */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1517
/* Nothing to expire (no reflogs exist), so trivially succeed. */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1528
/*
 * The ref-storage backend vtable for the "packed" backend. The
 * reflog entries point at the stub implementations above, since a
 * packed-refs file stores no reflogs.
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	/* Reflog methods (stubs; see above): */
	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};