/*
 * refs/packed-backend.c, as of commit da5267f
 * ("files_transaction_prepare(): fix handling of ref lock failure")
 */
#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

struct packed_ref_cache {
        struct ref_cache *cache;

        /*
         * Count of references to the data structure in this instance,
         * including the pointer from packed_ref_store::cache if any.
         * The data will not be freed as long as the reference count
         * is nonzero.
         */
        unsigned int referrers;

        /* The metadata from when this packed-refs cache was read */
        struct stat_validity validity;
};

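/*
 * Besides the `packed_ref_store::cache` pointer mentioned above, a
 * reference is held by each live `packed_ref_iterator` (acquired in
 * packed_ref_iterator_begin() and released in
 * packed_ref_iterator_abort()).
 */
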
/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
        packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs.  If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
        if (!--packed_refs->referrers) {
                free_ref_cache(packed_refs->cache);
                stat_validity_clear(&packed_refs->validity);
                free(packed_refs);
                return 1;
        } else {
                return 0;
        }
}

/*
 * A `ref_store` for `packed-refs`-related data.
 */
struct packed_ref_store {
        struct ref_store base;

        unsigned int store_flags;

        /* The path of the "packed-refs" file: */
        char *path;

        /*
         * A cache of the values read from the `packed-refs` file, if
         * it might still be current; otherwise, NULL.
         */
        struct packed_ref_cache *cache;

        /*
         * Lock used for the "packed-refs" file. Note that this (and
         * thus the enclosing `packed_ref_store`) must not be freed.
         */
        struct lock_file lock;

        /*
         * Temporary file used when rewriting new contents to the
         * "packed-refs" file. Note that this (and thus the enclosing
         * `packed_ref_store`) must not be freed.
         */
        struct tempfile tempfile;
};

struct ref_store *packed_ref_store_create(const char *path,
                                          unsigned int store_flags)
{
        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
        struct ref_store *ref_store = (struct ref_store *)refs;

        base_ref_store_init(ref_store, &refs_be_packed);
        refs->store_flags = store_flags;

        refs->path = xstrdup(path);
        return ref_store;
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
                                                unsigned int required_flags,
                                                const char *caller)
{
        struct packed_ref_store *refs;

        if (ref_store->be != &refs_be_packed)
                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
                    ref_store->be->name, caller);

        refs = (struct packed_ref_store *)ref_store;

        if ((refs->store_flags & required_flags) != required_flags)
                die("BUG: unallowed operation (%s), requires %x, has %x\n",
                    caller, required_flags, refs->store_flags);

        return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
        if (refs->cache) {
                struct packed_ref_cache *cache = refs->cache;

                refs->cache = NULL;
                release_packed_ref_cache(cache);
        }
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42
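/* That is: a "^", 40 hex digits of SHA-1, and the terminating "\n". */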

/*
 * Parse one line from a packed-refs file.  Write the object ID to oid.
 * Return a pointer to the refname within the line (null-terminated),
 * or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
        const char *ref;

        if (parse_oid_hex(line->buf, oid, &ref) < 0)
                return NULL;
        if (!isspace(*ref++))
                return NULL;

        if (isspace(*ref))
                return NULL;

        if (line->buf[line->len - 1] != '\n')
                return NULL;
        line->buf[--line->len] = 0;

        return ref;
}

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any reference in the
 *      file for which no peeled value is recorded is not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
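 *
 * For illustration only (object names made up), a packed-refs file
 * might look like:
 *
 *      # pack-refs with: peeled fully-peeled
 *      0123456789abcdef0123456789abcdef01234567 refs/heads/master
 *      89abcdef0123456789abcdef0123456789abcdef refs/tags/v1.0
 *      ^fedcba9876543210fedcba9876543210fedcba98
 *
 * where a "^" line records the peeled value of the reference on the
 * line above it.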
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
        FILE *f;
        struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
        struct ref_entry *last = NULL;
        struct strbuf line = STRBUF_INIT;
        enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
        struct ref_dir *dir;

        acquire_packed_ref_cache(packed_refs);
        packed_refs->cache = create_ref_cache(NULL, NULL);
        packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

        f = fopen(packed_refs_file, "r");
        if (!f) {
                if (errno == ENOENT) {
                        /*
                         * This is OK; it just means that no
                         * "packed-refs" file has been written yet,
                         * which is equivalent to it being empty.
                         */
                        return packed_refs;
                } else {
                        die_errno("couldn't read %s", packed_refs_file);
                }
        }

        stat_validity_update(&packed_refs->validity, fileno(f));

        dir = get_ref_dir(packed_refs->cache->root);
        while (strbuf_getwholeline(&line, f, '\n') != EOF) {
                struct object_id oid;
                const char *refname;
                const char *traits;

                if (!line.len || line.buf[line.len - 1] != '\n')
                        die("unterminated line in %s: %s", packed_refs_file, line.buf);

                if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
                        if (strstr(traits, " fully-peeled "))
                                peeled = PEELED_FULLY;
                        else if (strstr(traits, " peeled "))
                                peeled = PEELED_TAGS;
                        /* perhaps other traits later as well */
                        continue;
                }

                refname = parse_ref_line(&line, &oid);
                if (refname) {
                        int flag = REF_ISPACKED;

                        if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
                                if (!refname_is_safe(refname))
                                        die("packed refname is dangerous: %s", refname);
                                oidclr(&oid);
                                flag |= REF_BAD_NAME | REF_ISBROKEN;
                        }
                        last = create_ref_entry(refname, &oid, flag);
                        if (peeled == PEELED_FULLY ||
                            (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
                                last->flag |= REF_KNOWS_PEELED;
                        add_ref_entry(dir, last);
                } else if (last &&
                    line.buf[0] == '^' &&
                    line.len == PEELED_LINE_LENGTH &&
                    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
                    !get_oid_hex(line.buf + 1, &oid)) {
                        oidcpy(&last->u.value.peeled, &oid);
                        /*
                         * Regardless of what the file header said,
                         * we definitely know the value of *this*
                         * reference:
                         */
                        last->flag |= REF_KNOWS_PEELED;
                } else {
                        strbuf_setlen(&line, line.len - 1);
                        die("unexpected line in %s: %s", packed_refs_file, line.buf);
                }
        }

        fclose(f);
        strbuf_release(&line);

        return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
        if (refs->cache &&
            !stat_validity_check(&refs->cache->validity, refs->path))
                clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
        if (!is_lock_file_locked(&refs->lock))
                validate_packed_ref_cache(refs);

        if (!refs->cache)
                refs->cache = read_packed_refs(refs->path);

        return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
        return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
        return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Return the ref_entry for the given refname from the packed
 * references.  If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
                                        const char *refname)
{
        return find_ref_entry(get_packed_refs(refs), refname);
}

static int packed_read_raw_ref(struct ref_store *ref_store,
                               const char *refname, unsigned char *sha1,
                               struct strbuf *referent, unsigned int *type)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

        struct ref_entry *entry;

        *type = 0;

        entry = get_packed_ref(refs, refname);
        if (!entry) {
                errno = ENOENT;
                return -1;
        }

        hashcpy(sha1, entry->u.value.oid.hash);
        *type = REF_ISPACKED;
        return 0;
}

static int packed_peel_ref(struct ref_store *ref_store,
                           const char *refname, unsigned char *sha1)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
                                "peel_ref");
        struct ref_entry *r = get_packed_ref(refs, refname);

        if (!r || peel_entry(r, 0))
                return -1;

        hashcpy(sha1, r->u.value.peeled.hash);
        return 0;
}

struct packed_ref_iterator {
        struct ref_iterator base;

        struct packed_ref_cache *cache;
        struct ref_iterator *iter0;
        unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
        struct packed_ref_iterator *iter =
                (struct packed_ref_iterator *)ref_iterator;
        int ok;

        while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
                if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
                    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
                        continue;

                if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
                    !ref_resolves_to_object(iter->iter0->refname,
                                            iter->iter0->oid,
                                            iter->iter0->flags))
                        continue;

                iter->base.refname = iter->iter0->refname;
                iter->base.oid = iter->iter0->oid;
                iter->base.flags = iter->iter0->flags;
                return ITER_OK;
        }

        iter->iter0 = NULL;
        if (ref_iterator_abort(ref_iterator) != ITER_DONE)
                ok = ITER_ERROR;

        return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
                                    struct object_id *peeled)
{
        struct packed_ref_iterator *iter =
                (struct packed_ref_iterator *)ref_iterator;

        return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
        struct packed_ref_iterator *iter =
                (struct packed_ref_iterator *)ref_iterator;
        int ok = ITER_DONE;

        if (iter->iter0)
                ok = ref_iterator_abort(iter->iter0);

        release_packed_ref_cache(iter->cache);
        base_ref_iterator_free(ref_iterator);
        return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
        packed_ref_iterator_advance,
        packed_ref_iterator_peel,
        packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
                struct ref_store *ref_store,
                const char *prefix, unsigned int flags)
{
        struct packed_ref_store *refs;
        struct packed_ref_iterator *iter;
        struct ref_iterator *ref_iterator;
        unsigned int required_flags = REF_STORE_READ;

        if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
                required_flags |= REF_STORE_ODB;
        refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

        iter = xcalloc(1, sizeof(*iter));
        ref_iterator = &iter->base;
        base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

        /*
         * Note that get_packed_ref_cache() internally checks whether
         * the packed-ref cache is up to date with what is on disk,
         * and re-reads it if not.
         */

        iter->cache = get_packed_ref_cache(refs);
        acquire_packed_ref_cache(iter->cache);
        iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

        iter->flags = flags;

        return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
                              const unsigned char *sha1,
                              const unsigned char *peeled)
{
        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
                return -1;

        return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
                                "packed_refs_lock");
        static int timeout_configured = 0;
        static int timeout_value = 1000;

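        /*
         * core.packedrefstimeout is given in milliseconds; 0 means do
         * not retry at all, and -1 means retry indefinitely (see
         * git-config(1)).
         */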
        if (!timeout_configured) {
                git_config_get_int("core.packedrefstimeout", &timeout_value);
                timeout_configured = 1;
        }

        /*
         * Note that we close the lockfile immediately because we
         * don't write new content to it, but rather to a separate
         * tempfile.
         */
        if (hold_lock_file_for_update_timeout(
                            &refs->lock,
                            refs->path,
                            flags, timeout_value) < 0) {
                unable_to_lock_message(refs->path, errno, err);
                return -1;
        }

        if (close_lock_file(&refs->lock)) {
                strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
                return -1;
        }

        /*
         * Now that we hold the `packed-refs` lock, make sure that our
         * cache matches the current version of the file. Normally
         * `get_packed_ref_cache()` does that for us, but that
         * function assumes that when the file is locked, any existing
         * cache is still valid. We've just locked the file, but it
         * might have changed the moment *before* we locked it.
         */
        validate_packed_ref_cache(refs);

        /*
         * Now make sure that the packed-refs file as it exists in the
         * locked state is loaded into the cache:
         */
        get_packed_ref_cache(refs);
        return 0;
}
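
/*
 * The lock taken by packed_refs_lock() is released either by an
 * explicit call to packed_refs_unlock(), below, or, for transactions
 * that acquired the lock themselves, by packed_transaction_cleanup()
 * (see the `own_lock` flag).
 */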

void packed_refs_unlock(struct ref_store *ref_store)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE,
                        "packed_refs_unlock");

        if (!is_lock_file_locked(&refs->lock))
                die("BUG: packed_refs_unlock() called when not locked");
        rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE,
                        "packed_refs_is_locked");

        return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out.  Perhaps other
 * traits will be added later.  The trailing space is required.
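 * (read_packed_refs() matches each trait by searching for it with
 * surrounding spaces, e.g. " peeled ", which is why the header line
 * ends in a space.)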
 */
static const char PACKED_REFS_HEADER[] =
        "# pack-refs with: peeled fully-peeled \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
        /* Nothing to do. */
        return 0;
}

/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, roll back the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
                              struct string_list *updates,
                              struct strbuf *err)
{
        struct ref_iterator *iter = NULL;
        size_t i;
        int ok;
        FILE *out;
        struct strbuf sb = STRBUF_INIT;
        char *packed_refs_path;

        if (!is_lock_file_locked(&refs->lock))
                die("BUG: write_with_updates() called while unlocked");

        /*
         * If packed-refs is a symlink, we want to overwrite the
         * symlinked-to file, not the symlink itself. Also, put the
         * staging file next to it:
         */
        packed_refs_path = get_locked_file_path(&refs->lock);
        strbuf_addf(&sb, "%s.new", packed_refs_path);
        free(packed_refs_path);
        if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
                strbuf_addf(err, "unable to create file %s: %s",
                            sb.buf, strerror(errno));
                strbuf_release(&sb);
                return -1;
        }
        strbuf_release(&sb);

        out = fdopen_tempfile(&refs->tempfile, "w");
        if (!out) {
                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
                            strerror(errno));
                goto error;
        }

        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
                goto write_error;

        /*
         * We iterate in parallel through the current list of refs and
         * the list of updates, processing an entry from at least one
         * of the lists each time through the loop. When the current
         * list of refs is exhausted, set iter to NULL. When the list
         * of updates is exhausted, leave i set to updates->nr.
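         *
         * (In the loop below, cmp < 0 means the existing entry sorts
         * first and is passed through unchanged; cmp > 0 means the
         * update sorts first, i.e. there is no existing entry for it;
         * cmp == 0 means the same refname appears in both lists.)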
         */
        iter = packed_ref_iterator_begin(&refs->base, "",
                                         DO_FOR_EACH_INCLUDE_BROKEN);
        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
                iter = NULL;

        i = 0;

        while (iter || i < updates->nr) {
                struct ref_update *update = NULL;
                int cmp;

                if (i >= updates->nr) {
                        cmp = -1;
                } else {
                        update = updates->items[i].util;

                        if (!iter)
                                cmp = +1;
                        else
                                cmp = strcmp(iter->refname, update->refname);
                }

                if (!cmp) {
                        /*
                         * There is both an old value and an update
                         * for this reference. Check the old value if
                         * necessary:
                         */
                        if ((update->flags & REF_HAVE_OLD)) {
                                if (is_null_oid(&update->old_oid)) {
                                        strbuf_addf(err, "cannot update ref '%s': "
                                                    "reference already exists",
                                                    update->refname);
                                        goto error;
                                } else if (oidcmp(&update->old_oid, iter->oid)) {
                                        strbuf_addf(err, "cannot update ref '%s': "
                                                    "is at %s but expected %s",
                                                    update->refname,
                                                    oid_to_hex(iter->oid),
                                                    oid_to_hex(&update->old_oid));
                                        goto error;
                                }
                        }

                        /* Now figure out what to use for the new value: */
                        if ((update->flags & REF_HAVE_NEW)) {
                                /*
                                 * The update takes precedence. Skip
                                 * the iterator over the unneeded
                                 * value.
                                 */
                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
                                        iter = NULL;
                                cmp = +1;
                        } else {
                                /*
                                 * The update doesn't actually want to
                                 * change anything. We're done with it.
                                 */
                                i++;
                                cmp = -1;
                        }
                } else if (cmp > 0) {
                        /*
                         * There is no old value but there is an
                         * update for this reference. Make sure that
                         * the update didn't expect an existing value:
                         */
                        if ((update->flags & REF_HAVE_OLD) &&
                            !is_null_oid(&update->old_oid)) {
                                strbuf_addf(err, "cannot update ref '%s': "
                                            "reference is missing but expected %s",
                                            update->refname,
                                            oid_to_hex(&update->old_oid));
                                goto error;
                        }
                }

                if (cmp < 0) {
                        /* Pass the old reference through. */

                        struct object_id peeled;
                        int peel_error = ref_iterator_peel(iter, &peeled);

                        if (write_packed_entry(out, iter->refname,
                                               iter->oid->hash,
                                               peel_error ? NULL : peeled.hash))
                                goto write_error;

                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
                                iter = NULL;
                } else if (is_null_oid(&update->new_oid)) {
                        /*
                         * The update wants to delete the reference,
                         * and the reference either didn't exist or we
                         * have already skipped it. So we're done with
                         * the update (and don't have to write
                         * anything).
                         */
                        i++;
                } else {
                        struct object_id peeled;
                        int peel_error = peel_object(update->new_oid.hash,
                                                     peeled.hash);

                        if (write_packed_entry(out, update->refname,
                                               update->new_oid.hash,
                                               peel_error ? NULL : peeled.hash))
                                goto write_error;

                        i++;
                }
        }

        if (ok != ITER_DONE) {
                strbuf_addf(err, "unable to write packed-refs file: "
                            "error iterating over old contents");
                goto error;
        }

        if (close_tempfile(&refs->tempfile)) {
                strbuf_addf(err, "error closing file %s: %s",
                            get_tempfile_path(&refs->tempfile),
                            strerror(errno));
                strbuf_release(&sb);
                return -1;
        }

        return 0;

write_error:
        strbuf_addf(err, "error writing to %s: %s",
                    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
        if (iter)
                ref_iterator_abort(iter);

        delete_tempfile(&refs->tempfile);
        return -1;
}

struct packed_transaction_backend_data {
        /* True iff the transaction owns the packed-refs lock. */
        int own_lock;

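        /*
         * The updates in this transaction, sorted by refname; each
         * item's `util` field points at the corresponding
         * `struct ref_update` (see packed_transaction_prepare()).
         */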
        struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
                                       struct ref_transaction *transaction)
{
        struct packed_transaction_backend_data *data = transaction->backend_data;

        if (data) {
                string_list_clear(&data->updates, 0);

                if (is_tempfile_active(&refs->tempfile))
                        delete_tempfile(&refs->tempfile);

                if (data->own_lock && is_lock_file_locked(&refs->lock)) {
                        packed_refs_unlock(&refs->base);
                        data->own_lock = 0;
                }

                free(data);
                transaction->backend_data = NULL;
        }

        transaction->state = REF_TRANSACTION_CLOSED;
}

static int packed_transaction_prepare(struct ref_store *ref_store,
                                      struct ref_transaction *transaction,
                                      struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_prepare");
        struct packed_transaction_backend_data *data;
        size_t i;
        int ret = TRANSACTION_GENERIC_ERROR;

        /*
         * Note that we *don't* skip transactions with zero updates,
         * because such a transaction might be executed for the side
         * effect of ensuring that all of the references are peeled.
         * If the caller wants to optimize away empty transactions, it
         * should do so itself.
         */

        data = xcalloc(1, sizeof(*data));
        string_list_init(&data->updates, 0);

        transaction->backend_data = data;

        /*
         * Stick the updates in a string list by refname so that we
         * can sort them:
         */
        for (i = 0; i < transaction->nr; i++) {
                struct ref_update *update = transaction->updates[i];
                struct string_list_item *item =
                        string_list_append(&data->updates, update->refname);

                /* Store a pointer to update in item->util: */
                item->util = update;
        }
        string_list_sort(&data->updates);

        if (ref_update_reject_duplicates(&data->updates, err))
                goto failure;

        if (!is_lock_file_locked(&refs->lock)) {
                if (packed_refs_lock(ref_store, 0, err))
                        goto failure;
                data->own_lock = 1;
        }

        if (write_with_updates(refs, &data->updates, err))
                goto failure;

        transaction->state = REF_TRANSACTION_PREPARED;
        return 0;

failure:
        packed_transaction_cleanup(refs, transaction);
        return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
                                    struct ref_transaction *transaction,
                                    struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_abort");

        packed_transaction_cleanup(refs, transaction);
        return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
                                     struct ref_transaction *transaction,
                                     struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_finish");
        int ret = TRANSACTION_GENERIC_ERROR;
        char *packed_refs_path;

        packed_refs_path = get_locked_file_path(&refs->lock);
        if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
                strbuf_addf(err, "error replacing %s: %s",
                            refs->path, strerror(errno));
                goto cleanup;
        }

        clear_packed_ref_cache(refs);
        ret = 0;

cleanup:
        free(packed_refs_path);
        packed_transaction_cleanup(refs, transaction);
        return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
                                             struct ref_transaction *transaction,
                                             struct strbuf *err)
{
        return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
                              struct string_list *refnames, unsigned int flags)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
        struct strbuf err = STRBUF_INIT;
        struct ref_transaction *transaction;
        struct string_list_item *item;
        int ret;

        (void)refs; /* We need the check above, but don't use the variable */

        if (!refnames->nr)
                return 0;

        /*
         * Since we don't check the references' old_oids, the
         * individual updates can't fail, so we can pack all of the
         * updates into a single transaction.
         */

        transaction = ref_store_transaction_begin(ref_store, &err);
        if (!transaction)
                return -1;

        for_each_string_list_item(item, refnames) {
                if (ref_transaction_delete(transaction, item->string, NULL,
                                           flags, msg, &err)) {
                        warning(_("could not delete reference %s: %s"),
                                item->string, err.buf);
                        strbuf_reset(&err);
                }
        }

        ret = ref_transaction_commit(transaction, &err);

        if (ret) {
                if (refnames->nr == 1)
                        error(_("could not delete reference %s: %s"),
                              refnames->items[0].string, err.buf);
                else
                        error(_("could not delete references: %s"), err.buf);
        }

        ref_transaction_free(transaction);
        strbuf_release(&err);
        return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
        /*
         * Packed refs are already packed. It might be that loose refs
         * are packed *into* a packed refs store, but that is done by
         * updating the packed references via a transaction.
         */
        return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
                                const char *refname, const char *target,
                                const char *logmsg)
{
        die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
                             const char *oldrefname, const char *newrefname,
                             const char *logmsg)
{
        die("BUG: packed reference store does not support renaming references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
        return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
                                      const char *refname,
                                      each_reflog_ent_fn fn, void *cb_data)
{
        return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
                                              const char *refname,
                                              each_reflog_ent_fn fn,
                                              void *cb_data)
{
        return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
                                const char *refname)
{
        return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
                                const char *refname, int force_create,
                                struct strbuf *err)
{
        die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
                                const char *refname)
{
        return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
                                const char *refname, const unsigned char *sha1,
                                unsigned int flags,
                                reflog_expiry_prepare_fn prepare_fn,
                                reflog_expiry_should_prune_fn should_prune_fn,
                                reflog_expiry_cleanup_fn cleanup_fn,
                                void *policy_cb_data)
{
        return 0;
}

struct ref_storage_be refs_be_packed = {
        NULL,
        "packed",
        packed_ref_store_create,
        packed_init_db,
        packed_transaction_prepare,
        packed_transaction_finish,
        packed_transaction_abort,
        packed_initial_transaction_commit,

        packed_pack_refs,
        packed_peel_ref,
        packed_create_symref,
        packed_delete_refs,
        packed_rename_ref,

        packed_ref_iterator_begin,
        packed_read_raw_ref,

        packed_reflog_iterator_begin,
        packed_for_each_reflog_ent,
        packed_for_each_reflog_ent_reverse,
        packed_reflog_exists,
        packed_create_reflog,
        packed_delete_reflog,
        packed_reflog_expire
};