/*
 * refs/packed-backend.c, as of commit 48f1e49 ("Merge branch
 * 'mh/for-each-string-list-item-empty-fix'").
 */
#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

struct packed_ref_cache {
        struct ref_cache *cache;

        /*
         * Count of references to the data structure in this instance,
         * including the pointer from files_ref_store::packed if any.
         * The data will not be freed as long as the reference count
         * is nonzero.
         */
        unsigned int referrers;

        /* The metadata from when this packed-refs cache was read */
        struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
        packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs.  If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
        if (!--packed_refs->referrers) {
                free_ref_cache(packed_refs->cache);
                stat_validity_clear(&packed_refs->validity);
                free(packed_refs);
                return 1;
        } else {
                return 0;
        }
}
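
/*
 * Illustrative sketch (not part of the original code): a long-lived
 * user of the cache pins it with acquire_packed_ref_cache() and drops
 * the pin with release_packed_ref_cache() when done, e.g.
 *
 *     struct packed_ref_cache *cache = get_packed_ref_cache(refs);
 *
 *     acquire_packed_ref_cache(cache);
 *     ... use cache->cache ...
 *     release_packed_ref_cache(cache);
 *
 * This is the same pattern that packed_ref_iterator_begin() and
 * packed_ref_iterator_abort() follow further down in this file.
 */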

/*
 * A container for `packed-refs`-related data. It embeds a
 * `struct ref_store` (`base`, below) and is used as one; see
 * `refs_be_packed` at the end of this file.
 */
struct packed_ref_store {
        struct ref_store base;

        unsigned int store_flags;

        /* The path of the "packed-refs" file: */
        char *path;

        /*
         * A cache of the values read from the `packed-refs` file, if
         * it might still be current; otherwise, NULL.
         */
        struct packed_ref_cache *cache;

        /*
         * Lock used for the "packed-refs" file. Note that this (and
         * thus the enclosing `packed_ref_store`) must not be freed.
         */
        struct lock_file lock;

        /*
         * Temporary file used when rewriting new contents to the
         * "packed-refs" file. Note that this (and thus the enclosing
         * `packed_ref_store`) must not be freed.
         */
        struct tempfile *tempfile;
};

struct ref_store *packed_ref_store_create(const char *path,
                                          unsigned int store_flags)
{
        struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
        struct ref_store *ref_store = (struct ref_store *)refs;

        base_ref_store_init(ref_store, &refs_be_packed);
        refs->store_flags = store_flags;

        refs->path = xstrdup(path);
        return ref_store;
}
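
/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * caller that wants a standalone packed ref store might do
 *
 *     struct ref_store *packed =
 *             packed_ref_store_create("some/dir/packed-refs",
 *                                     REF_STORE_READ | REF_STORE_ODB);
 *
 * where "some/dir/packed-refs" is a placeholder path, and then use it
 * only through the generic `ref_store` API. The REF_STORE_* flags
 * passed here are what packed_downcast() below checks against its
 * `required_flags` argument.
 */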

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
                                                unsigned int required_flags,
                                                const char *caller)
{
        struct packed_ref_store *refs;

        if (ref_store->be != &refs_be_packed)
                die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
                    ref_store->be->name, caller);

        refs = (struct packed_ref_store *)ref_store;

        if ((refs->store_flags & required_flags) != required_flags)
                die("BUG: disallowed operation (%s), requires %x, has %x",
                    caller, required_flags, refs->store_flags);

        return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
        if (refs->cache) {
                struct packed_ref_cache *cache = refs->cache;

                refs->cache = NULL;
                release_packed_ref_cache(cache);
        }
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * Parse one line from a packed-refs file.  Write the object ID to
 * `*oid`.  Return a pointer to the refname within the line
 * (null-terminated), or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
        const char *ref;

        if (parse_oid_hex(line->buf, oid, &ref) < 0)
                return NULL;
        if (!isspace(*ref++))
                return NULL;

        if (isspace(*ref))
                return NULL;

        if (line->buf[line->len - 1] != '\n')
                return NULL;
        line->buf[--line->len] = 0;

        return ref;
}
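
/*
 * For illustration (not part of the original file): assuming SHA-1,
 * a line that parse_ref_line() accepts looks like
 *
 *     3f786850e387550fdab836ed7e6dc881de23001b refs/heads/master\n
 *
 * i.e. a full hex object name, a single space, the refname, and a
 * terminating newline; the newline is stripped before the refname is
 * returned.
 */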

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
        FILE *f;
        struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
        struct ref_entry *last = NULL;
        struct strbuf line = STRBUF_INIT;
        enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
        struct ref_dir *dir;

        acquire_packed_ref_cache(packed_refs);
        packed_refs->cache = create_ref_cache(NULL, NULL);
        packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

        f = fopen(packed_refs_file, "r");
        if (!f) {
                if (errno == ENOENT) {
                        /*
                         * This is OK; it just means that no
                         * "packed-refs" file has been written yet,
                         * which is equivalent to it being empty.
                         */
                        return packed_refs;
                } else {
                        die_errno("couldn't read %s", packed_refs_file);
                }
        }

        stat_validity_update(&packed_refs->validity, fileno(f));

        dir = get_ref_dir(packed_refs->cache->root);
        while (strbuf_getwholeline(&line, f, '\n') != EOF) {
                struct object_id oid;
                const char *refname;
                const char *traits;

                if (!line.len || line.buf[line.len - 1] != '\n')
                        die("unterminated line in %s: %s", packed_refs_file, line.buf);

                if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
                        if (strstr(traits, " fully-peeled "))
                                peeled = PEELED_FULLY;
                        else if (strstr(traits, " peeled "))
                                peeled = PEELED_TAGS;
                        /* perhaps other traits later as well */
                        continue;
                }

                refname = parse_ref_line(&line, &oid);
                if (refname) {
                        int flag = REF_ISPACKED;

                        if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
                                if (!refname_is_safe(refname))
                                        die("packed refname is dangerous: %s", refname);
                                oidclr(&oid);
                                flag |= REF_BAD_NAME | REF_ISBROKEN;
                        }
                        last = create_ref_entry(refname, &oid, flag);
                        if (peeled == PEELED_FULLY ||
                            (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
                                last->flag |= REF_KNOWS_PEELED;
                        add_ref_entry(dir, last);
                } else if (last &&
                    line.buf[0] == '^' &&
                    line.len == PEELED_LINE_LENGTH &&
                    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
                    !get_oid_hex(line.buf + 1, &oid)) {
                        oidcpy(&last->u.value.peeled, &oid);
                        /*
                         * Regardless of what the file header said,
                         * we definitely know the value of *this*
                         * reference:
                         */
                        last->flag |= REF_KNOWS_PEELED;
                } else {
                        strbuf_setlen(&line, line.len - 1);
                        die("unexpected line in %s: %s", packed_refs_file, line.buf);
                }
        }

        fclose(f);
        strbuf_release(&line);

        return packed_refs;
}
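
/*
 * For illustration (not part of the original file), a packed-refs
 * file that the parser above accepts could look like this, with the
 * hex object names shortened here for readability (real entries use
 * full 40-hex names, and the header line ends with a trailing space):
 *
 *     # pack-refs with: peeled fully-peeled
 *     1111... refs/heads/master
 *     2222... refs/tags/v1.0
 *     ^3333...
 *
 * A "^" line records the peeled value of the reference on the
 * preceding line.
 */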

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
        if (refs->cache &&
            !stat_validity_check(&refs->cache->validity, refs->path))
                clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
        if (!is_lock_file_locked(&refs->lock))
                validate_packed_ref_cache(refs);

        if (!refs->cache)
                refs->cache = read_packed_refs(refs->path);

        return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
        return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
        return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Return the ref_entry for the given refname from the packed
 * references.  If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
                                        const char *refname)
{
        return find_ref_entry(get_packed_refs(refs), refname);
}

static int packed_read_raw_ref(struct ref_store *ref_store,
                               const char *refname, unsigned char *sha1,
                               struct strbuf *referent, unsigned int *type)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

        struct ref_entry *entry;

        *type = 0;

        entry = get_packed_ref(refs, refname);
        if (!entry) {
                errno = ENOENT;
                return -1;
        }

        hashcpy(sha1, entry->u.value.oid.hash);
        *type = REF_ISPACKED;
        return 0;
}
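
/*
 * Sketch of the read contract from a caller's point of view
 * (illustrative only; real callers go through the generic refs API
 * rather than calling the backend method directly):
 *
 *     unsigned char sha1[20];
 *     struct strbuf referent = STRBUF_INIT;
 *     unsigned int type;
 *
 *     if (!packed_read_raw_ref(ref_store, "refs/heads/master",
 *                              sha1, &referent, &type))
 *             ... sha1 holds the value, type == REF_ISPACKED ...
 *     else if (errno == ENOENT)
 *             ... "refs/heads/master" is not in the packed-refs file ...
 */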

static int packed_peel_ref(struct ref_store *ref_store,
                           const char *refname, unsigned char *sha1)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
                                "peel_ref");
        struct ref_entry *r = get_packed_ref(refs, refname);

        if (!r || peel_entry(r, 0))
                return -1;

        hashcpy(sha1, r->u.value.peeled.hash);
        return 0;
}

struct packed_ref_iterator {
        struct ref_iterator base;

        struct packed_ref_cache *cache;
        struct ref_iterator *iter0;
        unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
        struct packed_ref_iterator *iter =
                (struct packed_ref_iterator *)ref_iterator;
        int ok;

        while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
                if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
                    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
                        continue;

                if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
                    !ref_resolves_to_object(iter->iter0->refname,
                                            iter->iter0->oid,
                                            iter->iter0->flags))
                        continue;

                iter->base.refname = iter->iter0->refname;
                iter->base.oid = iter->iter0->oid;
                iter->base.flags = iter->iter0->flags;
                return ITER_OK;
        }

        iter->iter0 = NULL;
        if (ref_iterator_abort(ref_iterator) != ITER_DONE)
                ok = ITER_ERROR;

        return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
                                   struct object_id *peeled)
{
        struct packed_ref_iterator *iter =
                (struct packed_ref_iterator *)ref_iterator;

        return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
        struct packed_ref_iterator *iter =
                (struct packed_ref_iterator *)ref_iterator;
        int ok = ITER_DONE;

        if (iter->iter0)
                ok = ref_iterator_abort(iter->iter0);

        release_packed_ref_cache(iter->cache);
        base_ref_iterator_free(ref_iterator);
        return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
        packed_ref_iterator_advance,
        packed_ref_iterator_peel,
        packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
                struct ref_store *ref_store,
                const char *prefix, unsigned int flags)
{
        struct packed_ref_store *refs;
        struct packed_ref_iterator *iter;
        struct ref_iterator *ref_iterator;
        unsigned int required_flags = REF_STORE_READ;

        if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
                required_flags |= REF_STORE_ODB;
        refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

        iter = xcalloc(1, sizeof(*iter));
        ref_iterator = &iter->base;
        base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

        /*
         * Note that get_packed_ref_cache() internally checks whether
         * the packed-ref cache is up to date with what is on disk,
         * and re-reads it if not.
         */

        iter->cache = get_packed_ref_cache(refs);
        acquire_packed_ref_cache(iter->cache);
        iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

        iter->flags = flags;

        return ref_iterator;
}
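
/*
 * Illustrative sketch of how the iterator is consumed (this mirrors
 * what write_with_updates() does below):
 *
 *     struct ref_iterator *iter =
 *             packed_ref_iterator_begin(ref_store, "refs/tags/",
 *                                       DO_FOR_EACH_INCLUDE_BROKEN);
 *     int ok;
 *
 *     while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *             ... use iter->refname, iter->oid, iter->flags ...
 *     if (ok != ITER_DONE)
 *             ... error; the iterator has already cleaned itself up ...
 *
 * "refs/tags/" is just an example prefix; pass "" to iterate over
 * everything.
 */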

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set to the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
                              const unsigned char *sha1,
                              const unsigned char *peeled)
{
        if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
            (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
                return -1;

        return 0;
}
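
/*
 * For illustration: for a reference with a known peeled value, the
 * two fprintf() calls above produce
 *
 *     <40-hex object name> refs/tags/v1.0
 *     ^<40-hex peeled object name>
 *
 * and only the first line when `peeled` is NULL.
 */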

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
                                "packed_refs_lock");
        static int timeout_configured = 0;
        static int timeout_value = 1000;

        if (!timeout_configured) {
                git_config_get_int("core.packedrefstimeout", &timeout_value);
                timeout_configured = 1;
        }

        /*
         * Note that we close the lockfile immediately because we
         * don't write new content to it, but rather to a separate
         * tempfile.
         */
        if (hold_lock_file_for_update_timeout(
                            &refs->lock,
                            refs->path,
                            flags, timeout_value) < 0) {
                unable_to_lock_message(refs->path, errno, err);
                return -1;
        }

        if (close_lock_file_gently(&refs->lock)) {
                strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
                rollback_lock_file(&refs->lock);
                return -1;
        }

        /*
         * Now that we hold the `packed-refs` lock, make sure that our
         * cache matches the current version of the file. Normally
         * `get_packed_ref_cache()` does that for us, but that
         * function assumes that when the file is locked, any existing
         * cache is still valid. We've just locked the file, but it
         * might have changed the moment *before* we locked it.
         */
        validate_packed_ref_cache(refs);

        /*
         * Now make sure that the packed-refs file as it exists in the
         * locked state is loaded into the cache:
         */
        get_packed_ref_cache(refs);
        return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE,
                        "packed_refs_unlock");

        if (!is_lock_file_locked(&refs->lock))
                die("BUG: packed_refs_unlock() called when not locked");
        rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE,
                        "packed_refs_is_locked");

        return is_lock_file_locked(&refs->lock);
}
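
/*
 * Sketch of the locking protocol above (illustrative only; the
 * transaction code below follows this pattern, with packed_refs_lock()
 * being called either by the caller or by packed_transaction_prepare()):
 *
 *     struct strbuf err = STRBUF_INIT;
 *
 *     if (packed_refs_lock(ref_store, 0, &err))
 *             die("%s", err.buf);
 *     ... queue updates in a transaction and commit it ...
 *     packed_refs_unlock(ref_store);
 *
 * Note that the lockfile itself is closed right away; new content is
 * written to a separate tempfile and activated when the transaction
 * is finished.
 */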

/*
 * The packed-refs header line that we write out.  Perhaps other
 * traits will be added later.  The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
        "# pack-refs with: peeled fully-peeled \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
        /* Nothing to do. */
        return 0;
}

/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, roll back the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
                              struct string_list *updates,
                              struct strbuf *err)
{
        struct ref_iterator *iter = NULL;
        size_t i;
        int ok;
        FILE *out;
        struct strbuf sb = STRBUF_INIT;
        char *packed_refs_path;

        if (!is_lock_file_locked(&refs->lock))
                die("BUG: write_with_updates() called while unlocked");

        /*
         * If packed-refs is a symlink, we want to overwrite the
         * symlinked-to file, not the symlink itself. Also, put the
         * staging file next to it:
         */
        packed_refs_path = get_locked_file_path(&refs->lock);
        strbuf_addf(&sb, "%s.new", packed_refs_path);
        free(packed_refs_path);
        refs->tempfile = create_tempfile(sb.buf);
        if (!refs->tempfile) {
                strbuf_addf(err, "unable to create file %s: %s",
                            sb.buf, strerror(errno));
                strbuf_release(&sb);
                return -1;
        }
        strbuf_release(&sb);

        out = fdopen_tempfile(refs->tempfile, "w");
        if (!out) {
                strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
                            strerror(errno));
                goto error;
        }

        if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
                goto write_error;

        /*
         * We iterate in parallel through the current list of refs and
         * the list of updates, processing an entry from at least one
         * of the lists each time through the loop. When the current
         * list of refs is exhausted, set iter to NULL. When the list
         * of updates is exhausted, leave i set to updates->nr.
         */
        iter = packed_ref_iterator_begin(&refs->base, "",
                                         DO_FOR_EACH_INCLUDE_BROKEN);
        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
                iter = NULL;

        i = 0;

        while (iter || i < updates->nr) {
                struct ref_update *update = NULL;
                int cmp;

                if (i >= updates->nr) {
                        cmp = -1;
                } else {
                        update = updates->items[i].util;

                        if (!iter)
                                cmp = +1;
                        else
                                cmp = strcmp(iter->refname, update->refname);
                }

                if (!cmp) {
                        /*
                         * There is both an old value and an update
                         * for this reference. Check the old value if
                         * necessary:
                         */
                        if ((update->flags & REF_HAVE_OLD)) {
                                if (is_null_oid(&update->old_oid)) {
                                        strbuf_addf(err, "cannot update ref '%s': "
                                                    "reference already exists",
                                                    update->refname);
                                        goto error;
                                } else if (oidcmp(&update->old_oid, iter->oid)) {
                                        strbuf_addf(err, "cannot update ref '%s': "
                                                    "is at %s but expected %s",
                                                    update->refname,
                                                    oid_to_hex(iter->oid),
                                                    oid_to_hex(&update->old_oid));
                                        goto error;
                                }
                        }

                        /* Now figure out what to use for the new value: */
                        if ((update->flags & REF_HAVE_NEW)) {
                                /*
                                 * The update takes precedence. Skip
                                 * the iterator over the unneeded
                                 * value.
                                 */
                                if ((ok = ref_iterator_advance(iter)) != ITER_OK)
                                        iter = NULL;
                                cmp = +1;
                        } else {
                                /*
                                 * The update doesn't actually want to
                                 * change anything. We're done with it.
                                 */
                                i++;
                                cmp = -1;
                        }
                } else if (cmp > 0) {
                        /*
                         * There is no old value but there is an
                         * update for this reference. Make sure that
                         * the update didn't expect an existing value:
                         */
                        if ((update->flags & REF_HAVE_OLD) &&
                            !is_null_oid(&update->old_oid)) {
                                strbuf_addf(err, "cannot update ref '%s': "
                                            "reference is missing but expected %s",
                                            update->refname,
                                            oid_to_hex(&update->old_oid));
                                goto error;
                        }
                }

                if (cmp < 0) {
                        /* Pass the old reference through. */

                        struct object_id peeled;
                        int peel_error = ref_iterator_peel(iter, &peeled);

                        if (write_packed_entry(out, iter->refname,
                                               iter->oid->hash,
                                               peel_error ? NULL : peeled.hash))
                                goto write_error;

                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
                                iter = NULL;
                } else if (is_null_oid(&update->new_oid)) {
                        /*
                         * The update wants to delete the reference,
                         * and the reference either didn't exist or we
                         * have already skipped it. So we're done with
                         * the update (and don't have to write
                         * anything).
                         */
                        i++;
                } else {
                        struct object_id peeled;
                        int peel_error = peel_object(update->new_oid.hash,
                                                     peeled.hash);

                        if (write_packed_entry(out, update->refname,
                                               update->new_oid.hash,
                                               peel_error ? NULL : peeled.hash))
                                goto write_error;

                        i++;
                }
        }

        if (ok != ITER_DONE) {
                strbuf_addf(err, "unable to write packed-refs file: "
                            "error iterating over old contents");
                goto error;
        }

        if (close_tempfile_gently(refs->tempfile)) {
                strbuf_addf(err, "error closing file %s: %s",
                            get_tempfile_path(refs->tempfile),
                            strerror(errno));
                strbuf_release(&sb);
                delete_tempfile(&refs->tempfile);
                return -1;
        }

        return 0;

write_error:
        strbuf_addf(err, "error writing to %s: %s",
                    get_tempfile_path(refs->tempfile), strerror(errno));

error:
        if (iter)
                ref_iterator_abort(iter);

        delete_tempfile(&refs->tempfile);
        return -1;
}
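
/*
 * To summarize the merge loop above: `cmp` orders the next old entry
 * (from `iter`) against the next update:
 *
 *     cmp < 0  -> only the old file has this ref; copy it through.
 *     cmp == 0 -> both sides have it; check old_oid if requested,
 *                 then let the update win (or keep the old value if
 *                 the update carries no REF_HAVE_NEW).
 *     cmp > 0  -> only the updates have it; check that no existing
 *                 value was expected, then write new_oid, unless it
 *                 is null, which means deletion and writes nothing.
 */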

struct packed_transaction_backend_data {
        /* True iff the transaction owns the packed-refs lock. */
        int own_lock;

        struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
                                       struct ref_transaction *transaction)
{
        struct packed_transaction_backend_data *data = transaction->backend_data;

        if (data) {
                string_list_clear(&data->updates, 0);

                if (is_tempfile_active(refs->tempfile))
                        delete_tempfile(&refs->tempfile);

                if (data->own_lock && is_lock_file_locked(&refs->lock)) {
                        packed_refs_unlock(&refs->base);
                        data->own_lock = 0;
                }

                free(data);
                transaction->backend_data = NULL;
        }

        transaction->state = REF_TRANSACTION_CLOSED;
}

static int packed_transaction_prepare(struct ref_store *ref_store,
                                      struct ref_transaction *transaction,
                                      struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_prepare");
        struct packed_transaction_backend_data *data;
        size_t i;
        int ret = TRANSACTION_GENERIC_ERROR;

        /*
         * Note that we *don't* skip transactions with zero updates,
         * because such a transaction might be executed for the side
         * effect of ensuring that all of the references are peeled.
         * If the caller wants to optimize away empty transactions, it
         * should do so itself.
         */

        data = xcalloc(1, sizeof(*data));
        string_list_init(&data->updates, 0);

        transaction->backend_data = data;

        /*
         * Stick the updates in a string list by refname so that we
         * can sort them:
         */
        for (i = 0; i < transaction->nr; i++) {
                struct ref_update *update = transaction->updates[i];
                struct string_list_item *item =
                        string_list_append(&data->updates, update->refname);

                /* Store a pointer to update in item->util: */
                item->util = update;
        }
        string_list_sort(&data->updates);

        if (ref_update_reject_duplicates(&data->updates, err))
                goto failure;

        if (!is_lock_file_locked(&refs->lock)) {
                if (packed_refs_lock(ref_store, 0, err))
                        goto failure;
                data->own_lock = 1;
        }

        if (write_with_updates(refs, &data->updates, err))
                goto failure;

        transaction->state = REF_TRANSACTION_PREPARED;
        return 0;

failure:
        packed_transaction_cleanup(refs, transaction);
        return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
                                    struct ref_transaction *transaction,
                                    struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_abort");

        packed_transaction_cleanup(refs, transaction);
        return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
                                     struct ref_transaction *transaction,
                                     struct strbuf *err)
{
        struct packed_ref_store *refs = packed_downcast(
                        ref_store,
                        REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
                        "ref_transaction_finish");
        int ret = TRANSACTION_GENERIC_ERROR;
        char *packed_refs_path;

        packed_refs_path = get_locked_file_path(&refs->lock);
        if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
                strbuf_addf(err, "error replacing %s: %s",
                            refs->path, strerror(errno));
                goto cleanup;
        }

        clear_packed_ref_cache(refs);
        ret = 0;

cleanup:
        free(packed_refs_path);
        packed_transaction_cleanup(refs, transaction);
        return ret;
}
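
/*
 * Illustrative sketch of the full transaction flow against this
 * backend (packed_delete_refs() below is a real in-file example);
 * "refs/heads/topic" and "msg" are placeholders:
 *
 *     struct strbuf err = STRBUF_INIT;
 *     struct ref_transaction *t =
 *             ref_store_transaction_begin(ref_store, &err);
 *
 *     if (!t ||
 *         ref_transaction_delete(t, "refs/heads/topic", NULL, 0,
 *                                "msg", &err) ||
 *         ref_transaction_commit(t, &err))
 *             error("%s", err.buf);
 *     ref_transaction_free(t);
 *     strbuf_release(&err);
 *
 * ref_transaction_commit() eventually invokes the prepare/finish/abort
 * methods defined above.
 */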

static int packed_initial_transaction_commit(struct ref_store *ref_store,
                                            struct ref_transaction *transaction,
                                            struct strbuf *err)
{
        return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
                             struct string_list *refnames, unsigned int flags)
{
        struct packed_ref_store *refs =
                packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
        struct strbuf err = STRBUF_INIT;
        struct ref_transaction *transaction;
        struct string_list_item *item;
        int ret;

        (void)refs; /* We need the check above, but don't use the variable */

        if (!refnames->nr)
                return 0;

        /*
         * Since we don't check the references' old_oids, the
         * individual updates can't fail, so we can pack all of the
         * updates into a single transaction.
         */

        transaction = ref_store_transaction_begin(ref_store, &err);
        if (!transaction)
                return -1;

        for_each_string_list_item(item, refnames) {
                if (ref_transaction_delete(transaction, item->string, NULL,
                                           flags, msg, &err)) {
                        warning(_("could not delete reference %s: %s"),
                                item->string, err.buf);
                        strbuf_reset(&err);
                }
        }

        ret = ref_transaction_commit(transaction, &err);

        if (ret) {
                if (refnames->nr == 1)
                        error(_("could not delete reference %s: %s"),
                              refnames->items[0].string, err.buf);
                else
                        error(_("could not delete references: %s"), err.buf);
        }

        ref_transaction_free(transaction);
        strbuf_release(&err);
        return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
        /*
         * Packed refs are already packed. It might be that loose refs
         * are packed *into* a packed refs store, but that is done by
         * updating the packed references via a transaction.
         */
        return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
                               const char *refname, const char *target,
                               const char *logmsg)
{
        die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
                            const char *oldrefname, const char *newrefname,
                            const char *logmsg)
{
        die("BUG: packed reference store does not support renaming references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
        return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
                                      const char *refname,
                                      each_reflog_ent_fn fn, void *cb_data)
{
        return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
                                              const char *refname,
                                              each_reflog_ent_fn fn,
                                              void *cb_data)
{
        return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
                               const char *refname)
{
        return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
                               const char *refname, int force_create,
                               struct strbuf *err)
{
        die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
                               const char *refname)
{
        return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
                                const char *refname, const unsigned char *sha1,
                                unsigned int flags,
                                reflog_expiry_prepare_fn prepare_fn,
                                reflog_expiry_should_prune_fn should_prune_fn,
                                reflog_expiry_cleanup_fn cleanup_fn,
                                void *policy_cb_data)
{
        return 0;
}

struct ref_storage_be refs_be_packed = {
        NULL,
        "packed",
        packed_ref_store_create,
        packed_init_db,
        packed_transaction_prepare,
        packed_transaction_finish,
        packed_transaction_abort,
        packed_initial_transaction_commit,

        packed_pack_refs,
        packed_peel_ref,
        packed_create_symref,
        packed_delete_refs,
        packed_rename_ref,

        packed_ref_iterator_begin,
        packed_read_raw_ref,

        packed_reflog_iterator_begin,
        packed_for_each_reflog_ent,
        packed_for_each_reflog_ent_reverse,
        packed_reflog_exists,
        packed_create_reflog,
        packed_delete_reflog,
        packed_reflog_expire
};