#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

/*
 * An in-memory, reference-counted snapshot of the contents of one
 * "packed-refs" file. Iterators hold their own reference so the data
 * stays alive even if the owning store drops or replaces its cache.
 */
struct packed_ref_cache {
	struct ref_cache *cache;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs.  If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

/*
 * Create a `packed_ref_store` for the "packed-refs" file at `path`
 * (which is copied) and return it upcast to a `ref_store`.
 * `store_flags` records which REF_STORE_* capabilities this store
 * supports; it is checked later by `packed_downcast()`.
 */
struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Die if refs is not the main ref store. caller is used in any
 * necessary error messages.
 */
static void packed_assert_main_repository(struct packed_ref_store *refs,
					  const char *caller)
{
	if (refs->store_flags & REF_STORE_MAIN)
		return;

	die("BUG: operation %s only allowed for main ref store", caller);
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

/*
 * Drop the store's pointer to its cache (if any) and release the
 * corresponding reference. The cache itself is freed only once all
 * other referrers (e.g. live iterators) have released it too.
 */
static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * Parse one line from a packed-refs file. Write the SHA1 to sha1.
 * Return a pointer to the refname within the line (null-terminated),
 * or NULL if there was a problem.
 *
 * The expected format is "<hex-oid> SP <refname> LF"; the trailing
 * newline is stripped from `line` in place on success.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
	const char *ref;

	if (parse_oid_hex(line->buf, oid, &ref) < 0)
		return NULL;
	/* Exactly one space must separate the oid from the refname: */
	if (!isspace(*ref++))
		return NULL;

	if (isspace(*ref))
		return NULL;

	if (line->buf[line->len - 1] != '\n')
		return NULL;
	line->buf[--line->len] = 0;

	return ref;
}

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 *   No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 *   peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 *   fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded is not peelable. This
 *      trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
	FILE *f;
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
	struct ref_dir *dir;

	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	/* The whole file is read eagerly, so the root is complete: */
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

	f = fopen(packed_refs_file, "r");
	if (!f) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", packed_refs_file);
		}
	}

	stat_validity_update(&packed_refs->validity, fileno(f));

	dir = get_ref_dir(packed_refs->cache->root);
	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		struct object_id oid;
		const char *refname;
		const char *traits;

		if (!line.len || line.buf[line.len - 1] != '\n')
			die("unterminated line in %s: %s", packed_refs_file, line.buf);

		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, &oid);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous: %s", refname);
				oidclr(&oid);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, &oid, flag);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
		} else if (last &&
		    line.buf[0] == '^' &&
		    line.len == PEELED_LINE_LENGTH &&
		    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
		    !get_oid_hex(line.buf + 1, &oid)) {
			/* A "^<hex-oid>" peeled line for the previous ref: */
			oidcpy(&last->u.value.peeled, &oid);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		} else {
			/* Strip the newline for the error message: */
			strbuf_setlen(&line, line.len - 1);
			die("unexpected line in %s: %s", packed_refs_file, line.buf);
		}
	}

	fclose(f);
	strbuf_release(&line);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs->path);

	return refs->cache;
}

/* Return the ref_dir at the root of the given packed-refs cache. */
static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

/* Return the (possibly freshly loaded) root ref_dir for `refs`. */
static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Add or overwrite a reference in the in-memory packed reference
 * cache. This may only be called while the packed-refs file is locked
 * (see packed_refs_lock()). To actually write the packed-refs file,
 * call commit_packed_refs().
 */
void add_packed_ref(struct ref_store *ref_store,
		    const char *refname, const struct object_id *oid)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE,
				"add_packed_ref");
	struct ref_dir *packed_refs;
	struct ref_entry *packed_entry;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed refs not locked");

	if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
		die("Reference has invalid format: '%s'", refname);

	packed_refs = get_packed_refs(refs);
	packed_entry = find_ref_entry(packed_refs, refname);
	if (packed_entry) {
		/* Overwrite the existing entry: */
		oidcpy(&packed_entry->u.value.oid, oid);
		packed_entry->flag = REF_ISPACKED;
		/* Any previously-recorded peeled value is now stale: */
		oidclr(&packed_entry->u.value.peeled);
	} else {
		packed_entry = create_ref_entry(refname, oid, REF_ISPACKED);
		add_ref_entry(packed_refs, packed_entry);
	}
}

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

/*
 * Look up `refname` in the packed refs. On success, copy its value to
 * `sha1`, set `*type` to REF_ISPACKED, and return 0. If the reference
 * is not present, set errno to ENOENT and return -1. Packed refs are
 * never symbolic, so `referent` is not written to.
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

/*
 * Peel `refname` (using any peeled value cached in the entry), storing
 * the result in `sha1`. Return -1 if the reference doesn't exist or
 * cannot be peeled.
 */
static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

/*
 * An iterator over packed refs. It wraps a cache iterator (`iter0`)
 * and holds a reference on the underlying cache so that the entries
 * remain valid for the iterator's lifetime.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

/*
 * Advance to the next entry from iter0 that passes the DO_FOR_EACH_*
 * filters in `flags`, exposing it via the base iterator fields.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/* iter0 has freed itself by returning non-ITER_OK: */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

/* Delegate peeling of the current entry to the wrapped cache iterator. */
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

/*
 * Abort iteration: abort the wrapped iterator (if still live), release
 * our reference on the cache, and free the iterator itself.
 */
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

/*
 * Begin iterating over the packed refs whose names start with
 * `prefix`, applying the DO_FOR_EACH_* filters in `flags`.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	/* Filtering out broken refs requires access to the object DB: */
	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

/*
 * Take the packed-refs lock, honoring `core.packedRefsTimeout`. On
 * failure, write a message to `err` and return -1. The lockfile is
 * closed immediately (see the comment below), but remains held until
 * packed_refs_unlock() is called.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

/* Release the packed-refs lock. Die if it is not currently held. */
void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

/* Return true iff the packed-refs lock is currently held. */
int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out.  Perhaps other
 * traits will be added later.  The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";

/*
 * Write the current version of the packed refs cache from memory to
 * disk. The packed-refs file must already be locked for writing (see
 * packed_refs_lock()). Return zero on success. On errors, rollback
 * the lockfile, write an error message to `err`, and return a nonzero
 * value.
 */
int commit_packed_refs(struct ref_store *ref_store, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"commit_packed_refs");
	struct packed_ref_cache *packed_ref_cache =
		get_packed_ref_cache(refs);
	int ok;
	int ret = -1;
	struct strbuf sb = STRBUF_INIT;
	FILE *out;
	struct ref_iterator *iter;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: commit_packed_refs() called when unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		goto out;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0) {
		strbuf_addf(err, "error writing to %s: %s",
			    get_tempfile_path(&refs->tempfile), strerror(errno));
		goto error;
	}

	iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct object_id peeled;
		int peel_error = ref_iterator_peel(iter, &peeled);

		if (write_packed_entry(out, iter->refname, iter->oid->hash,
				       peel_error ? NULL : peeled.hash)) {
			strbuf_addf(err, "error writing to %s: %s",
				    get_tempfile_path(&refs->tempfile),
				    strerror(errno));
			ref_iterator_abort(iter);
			goto error;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to rewrite packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	/* Atomically replace the packed-refs file with the new contents: */
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto out;
	}

	ret = 0;
	goto out;

error:
	delete_tempfile(&refs->tempfile);

out:
	free(packed_refs_path);
	return ret;
}

/*
 * Rewrite the packed-refs file, omitting any refs listed in
 * 'refnames'. On error, leave packed-refs unchanged, write an error
 * message to 'err', and return a nonzero value. The packed refs lock
 * must be held when calling this function; it will still be held when
 * the function returns.
 *
 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
 */
int repack_without_refs(struct ref_store *ref_store,
			struct string_list *refnames, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"repack_without_refs");
	struct ref_dir *packed;
	struct string_list_item *refname;
	int needs_repacking = 0, removed = 0;

	packed_assert_main_repository(refs, "repack_without_refs");
	assert(err);

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: repack_without_refs called without holding lock");

	/* Look for a packed ref */
	for_each_string_list_item(refname, refnames) {
		if (get_packed_ref(refs, refname->string)) {
			needs_repacking = 1;
			break;
		}
	}

	/* Avoid locking if we have nothing to do */
	if (!needs_repacking)
		return 0; /* no refname exists in packed refs */

	packed = get_packed_refs(refs);

	/* Remove refnames from the cache */
	for_each_string_list_item(refname, refnames)
		if (remove_entry_from_dir(packed, refname->string) != -1)
			removed = 1;
	if (!removed) {
		/*
		 * All packed entries disappeared while we were
		 * acquiring the lock.
		 */
		clear_packed_ref_cache(refs);
		return 0;
	}

	/* Write what remains */
	return commit_packed_refs(&refs->base, err);
}

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packfile must be locked before calling this function and will
 * remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}

/* Per-transaction state for the packed-refs backend. */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/* The updates in this transaction, sorted by refname. */
	struct string_list updates;
};

/*
 * Free the transaction's backend data, delete any pending tempfile,
 * and release the packed-refs lock if this transaction acquired it.
 * Leaves the transaction in the CLOSED state.
 */
static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

/*
 * Prepare the transaction: sort and verify the updates, take the
 * packed-refs lock if not already held, and stage the new file
 * contents in the tempfile. On failure, clean up and return a
 * nonzero value.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

/* Abort the transaction, discarding any staged contents. */
static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

/*
 * Commit the prepared transaction by renaming the staged tempfile
 * into place, then invalidate the in-memory cache.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	clear_packed_ref_cache(refs);
	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	die("BUG: not implemented yet");
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

/* Packed refs have no reflogs; iterate over nothing. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};