1#include "../cache.h" 2#include "../config.h" 3#include "../refs.h" 4#include "refs-internal.h" 5#include "packed-backend.h" 6#include "../iterator.h" 7#include "../lockfile.h" 8#include "../chdir-notify.h" 9 10enum mmap_strategy { 11 /* 12 * Don't use mmap() at all for reading `packed-refs`. 13 */ 14 MMAP_NONE, 15 16 /* 17 * Can use mmap() for reading `packed-refs`, but the file must 18 * not remain mmapped. This is the usual option on Windows, 19 * where you cannot rename a new version of a file onto a file 20 * that is currently mmapped. 21 */ 22 MMAP_TEMPORARY, 23 24 /* 25 * It is OK to leave the `packed-refs` file mmapped while 26 * arbitrary other code is running. 27 */ 28 MMAP_OK 29}; 30 31#if defined(NO_MMAP) 32static enum mmap_strategy mmap_strategy = MMAP_NONE; 33#elif defined(MMAP_PREVENTS_DELETE) 34static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY; 35#else 36static enum mmap_strategy mmap_strategy = MMAP_OK; 37#endif 38 39struct packed_ref_store; 40 41/* 42 * A `snapshot` represents one snapshot of a `packed-refs` file. 43 * 44 * Normally, this will be a mmapped view of the contents of the 45 * `packed-refs` file at the time the snapshot was created. However, 46 * if the `packed-refs` file was not sorted, this might point at heap 47 * memory holding the contents of the `packed-refs` file with its 48 * records sorted by refname. 49 * 50 * `snapshot` instances are reference counted (via 51 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent 52 * an instance from disappearing while an iterator is still iterating 53 * over it. Instances are garbage collected when their `referrers` 54 * count goes to zero. 55 * 56 * The most recent `snapshot`, if available, is referenced by the 57 * `packed_ref_store`. Its freshness is checked whenever 58 * `get_snapshot()` is called; if the existing snapshot is obsolete, a 59 * new snapshot is taken. 60 */ 61struct snapshot { 62 /* 63 * A back-pointer to the packed_ref_store with which this 64 * snapshot is associated: 65 */ 66 struct packed_ref_store *refs; 67 68 /* Is the `packed-refs` file currently mmapped? */ 69 int mmapped; 70 71 /* 72 * The contents of the `packed-refs` file: 73 * 74 * - buf -- a pointer to the start of the memory 75 * - start -- a pointer to the first byte of actual references 76 * (i.e., after the header line, if one is present) 77 * - eof -- a pointer just past the end of the reference 78 * contents 79 * 80 * If the `packed-refs` file was already sorted, `buf` points 81 * at the mmapped contents of the file. If not, it points at 82 * heap-allocated memory containing the contents, sorted. If 83 * there were no contents (e.g., because the file didn't 84 * exist), `buf`, `start`, and `eof` are all NULL. 85 */ 86 char *buf, *start, *eof; 87 88 /* 89 * What is the peeled state of the `packed-refs` file that 90 * this snapshot represents? (This is usually determined from 91 * the file's header.) 92 */ 93 enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled; 94 95 /* 96 * Count of references to this instance, including the pointer 97 * from `packed_ref_store::snapshot`, if any. The instance 98 * will not be freed as long as the reference count is 99 * nonzero. 100 */ 101 unsigned int referrers; 102 103 /* 104 * The metadata of the `packed-refs` file from which this 105 * snapshot was created, used to tell if the file has been 106 * replaced since we read it. 107 */ 108 struct stat_validity validity; 109}; 110 111/* 112 * A `ref_store` representing references stored in a `packed-refs` 113 * file. 
/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction. In that case, it remains locked even after the
 * transaction is done and the new `packed-refs` file is activated.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A snapshot of the values read from the `packed-refs` file,
	 * if it might still be current; otherwise, NULL.
	 */
	struct snapshot *snapshot;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile *tempfile;
};

/*
 * Increment the reference count of `*snapshot`.
 */
static void acquire_snapshot(struct snapshot *snapshot)
{
	snapshot->referrers++;
}

/*
 * If the buffer in `snapshot` is active, then either munmap the
 * memory or free it. Then set the buffer pointers to NULL.
 */
static void clear_snapshot_buffer(struct snapshot *snapshot)
{
	if (snapshot->mmapped) {
		if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
			die_errno("error unmapping packed-refs file %s",
				  snapshot->refs->path);
		snapshot->mmapped = 0;
	} else {
		free(snapshot->buf);
	}
	snapshot->buf = snapshot->start = snapshot->eof = NULL;
}

/*
 * Decrease the reference count of `*snapshot`. If it goes to zero,
 * free `*snapshot` and return true; otherwise return false.
 */
static int release_snapshot(struct snapshot *snapshot)
{
	if (!--snapshot->referrers) {
		stat_validity_clear(&snapshot->validity);
		clear_snapshot_buffer(snapshot);
		free(snapshot);
		return 1;
	} else {
		return 0;
	}
}

struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	chdir_notify_reparent("packed-refs", &refs->path);

	return ref_store;
}
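
/*
 * Illustrative sketch of a hypothetical caller (the path shown is
 * made up): a read-only view of a `packed-refs` file could be created
 * like this:
 *
 *     struct ref_store *store =
 *             packed_ref_store_create("/repo/.git/packed-refs",
 *                                     REF_STORE_READ);
 *
 * In Git itself, the files backend creates this store and forwards
 * packed-ref operations to it.
 */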
/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		BUG("ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		BUG("disallowed operation (%s), requires %x, has %x",
		    caller, required_flags, refs->store_flags);

	return refs;
}

static void clear_snapshot(struct packed_ref_store *refs)
{
	if (refs->snapshot) {
		struct snapshot *snapshot = refs->snapshot;

		refs->snapshot = NULL;
		release_snapshot(snapshot);
	}
}

static NORETURN void die_unterminated_line(const char *path,
					   const char *p, size_t len)
{
	if (len < 80)
		die("unterminated line in %s: %.*s", path, (int)len, p);
	else
		die("unterminated line in %s: %.75s...", path, p);
}

static NORETURN void die_invalid_line(const char *path,
				      const char *p, size_t len)
{
	const char *eol = memchr(p, '\n', len);

	if (!eol)
		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
	else
		die("unexpected line in %s: %.75s...", path, p);
}

struct snapshot_record {
	const char *start;
	size_t len;
};

static int cmp_packed_ref_records(const void *v1, const void *v2)
{
	const struct snapshot_record *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
	const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		}
		r1++;
		r2++;
	}
}

/*
 * Compare a snapshot record at `rec` to the specified NUL-terminated
 * refname.
 */
static int cmp_record_to_refname(const char *rec, const char *refname)
{
	const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
	const char *r2 = refname;

	while (1) {
		if (*r1 == '\n')
			return *r2 ? -1 : 0;
		if (!*r2)
			return 1;
		if (*r1 != *r2)
			return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		r1++;
		r2++;
	}
}
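
/*
 * Worked example (illustrative): because a record's refname begins
 * GIT_SHA1_HEXSZ + 1 bytes into the record and ends at the LF, the
 * comparators above order records bytewise by refname, with a shorter
 * refname sorting before any longer refname that it prefixes. For
 * instance ("<oid>" stands for any 40-character hex object name):
 *
 *     <oid> refs/heads/ma\n      sorts before "refs/heads/master"
 *     <oid> refs/heads/master\n  compares equal to "refs/heads/master"
 *     <oid> refs/tags/v1.0\n     sorts after "refs/heads/master"
 */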
/*
 * `snapshot->buf` is not known to be sorted. Check whether it is, and
 * if not, sort it into new memory and munmap/free the old storage.
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = snapshot->start;
	eof = snapshot->eof;

	if (pos == eof)
		return;

	len = eof - pos;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;

cleanup:
	free(records);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}
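
/*
 * Illustrative example of record boundaries: given the buffer
 *
 *     <oid> refs/heads/master\n
 *     <oid> refs/tags/v1.0\n
 *     ^<oid>\n
 *     <oid> refs/tags/v2.0\n
 *
 * the record for "refs/tags/v1.0" runs from the byte after the first
 * LF up to and including the LF that ends the '^' (peeled) line.
 * find_start_of_record() and find_end_of_record() never split a
 * peeled line from the reference line that it annotates.
 */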
/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct snapshot *snapshot)
{
	const char *start = snapshot->start;
	const char *eof = snapshot->eof;
	const char *last_line;

	if (start == eof)
		return;

	last_line = find_start_of_record(start, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(snapshot->refs->path,
				 last_line, eof - last_line);
}

#define SMALL_FILE_SIZE (32*1024)

/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the snapshot. Return 1 if the file
 * existed and was read, or 0 if the file was absent or empty. Die on
 * errors.
 */
static int load_contents(struct snapshot *snapshot)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(snapshot->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", snapshot->refs->path);
		}
	}

	stat_validity_update(&snapshot->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", snapshot->refs->path);
	size = xsize_t(st.st_size);

	if (!size) {
		close(fd);
		return 0;
	} else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
		snapshot->buf = xmalloc(size);
		bytes_read = read_in_full(fd, snapshot->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", snapshot->refs->path);
		snapshot->mmapped = 0;
	} else {
		snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		snapshot->mmapped = 1;
	}
	close(fd);

	snapshot->start = snapshot->buf;
	snapshot->eof = snapshot->buf + size;

	return 1;
}
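
/*
 * To summarize the loading policy implemented above (illustrative;
 * the size threshold is SMALL_FILE_SIZE):
 *
 *     mmap_strategy == MMAP_NONE          -> read() into heap memory
 *     file size <= 32 kB                  -> read() into heap memory
 *     otherwise (MMAP_TEMPORARY, MMAP_OK) -> mmap() the file
 *
 * For MMAP_TEMPORARY, create_snapshot() below copies the mmapped
 * contents to the heap and munmaps them before returning.
 */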
/*
 * Find the place in `snapshot->buf` where the record for `refname`
 * starts. If `mustexist` is true and the reference doesn't exist,
 * then return NULL. If `mustexist` is false and the reference doesn't
 * exist, then return the point where that reference would be
 * inserted, or `snapshot->eof` (which might be NULL) if it would be
 * inserted at the end of the file. In the latter mode, `refname`
 * doesn't have to be a proper reference name; for example, one could
 * search for "refs/replace/" to find the start of any replace
 * references.
 *
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
 */
static const char *find_reference_location(struct snapshot *snapshot,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = snapshot->start;

	/*
	 * A pointer to the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = snapshot->eof;

	while (lo != hi) {
		const char *mid, *rec;
		int cmp;

		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_record_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	if (mustexist)
		return NULL;
	else
		return lo;
}
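
/*
 * Worked example (illustrative): searching for "refs/tags/v1.0" in a
 * sorted buffer containing
 *
 *     <oid> refs/heads/master\n    <- lo starts here
 *     <oid> refs/tags/v1.0\n
 *     ^<oid>\n
 *     <oid> refs/tags/v2.0\n       <- hi starts one past the last LF
 *
 * each probe puts `mid` somewhere inside the buffer;
 * find_start_of_record() backs up over any '^' line to the enclosing
 * record's start, cmp_record_to_refname() compares, and lo/hi narrow
 * until the "refs/tags/v1.0" record is returned. With mustexist == 0,
 * searching for the prefix "refs/tags/" instead would return the
 * insertion point for that prefix, i.e. the "refs/tags/v1.0" record.
 */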
/*
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 *   Probably no references are peeled. But if the file contains a
 *   peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 *   References under "refs/tags/", if they *can* be peeled, *are*
 *   peeled in this file. References outside of "refs/tags/" are
 *   probably not peeled even if they could have been, but if we find
 *   a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 *   All references in the file that can be peeled are peeled.
 *   Inversely (and this is more important), any reference in the
 *   file for which no peeled value is recorded is not peelable. This
 *   trait should typically be written alongside "peeled" for
 *   compatibility with older clients, but we do not require it
 *   (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 *   The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		char *tmp, *p, *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);
		free(tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}

/*
 * Check that `refs->snapshot` (if present) still reflects the
 * contents of the `packed-refs` file. If not, clear the snapshot.
 */
static void validate_snapshot(struct packed_ref_store *refs)
{
	if (refs->snapshot &&
	    !stat_validity_check(&refs->snapshot->validity, refs->path))
		clear_snapshot(refs);
}

/*
 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
 */
static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_snapshot(refs);

	if (!refs->snapshot)
		refs->snapshot = create_snapshot(refs);

	return refs->snapshot;
}

static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, struct object_id *oid,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct snapshot *snapshot = get_snapshot(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(snapshot, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	if (get_oid_hex(rec, oid))
		die_invalid_line(refs->path, rec, snapshot->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}
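
/*
 * Illustrative (hypothetical) caller of packed_read_raw_ref(),
 * assuming `store` was created with REF_STORE_READ:
 *
 *     struct object_id oid;
 *     struct strbuf referent = STRBUF_INIT;
 *     unsigned int type;
 *
 *     if (!packed_read_raw_ref(store, "refs/heads/master",
 *                              &oid, &referent, &type))
 *             printf("master is at %s\n", oid_to_hex(&oid));
 *     else if (errno == ENOENT)
 *             printf("master is not a packed ref\n");
 *
 * `referent` is unused by this backend (packed refs cannot be
 * symbolic), but the `read_raw_ref` interface requires it.
 */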
/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_oid`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a snapshot of a `packed-refs` file.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */
	const char *pos;

	/* The end of the part of the buffer that will be iterated over: */
	const char *eof;

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

	unsigned int flags;
};
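
/*
 * For reference (illustrative), next_record() below consumes one of
 * two record shapes per call:
 *
 *     <oid> SP <refname> LF
 *     <oid> SP <refname> LF '^' <peeled-oid> LF
 *
 * filling in `oid`, `refname_buf`, and (for the second shape)
 * `peeled` plus the REF_KNOWS_PEELED flag.
 */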
/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = next_record(iter)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, &iter->oid,
					    iter->flags))
			continue;

		return ITER_OK;
	}

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(&iter->oid, peeled);
	}
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	strbuf_release(&iter->refname_buf);
	release_snapshot(iter->snapshot);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	if (prefix && *prefix)
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->start;

	if (start == snapshot->eof)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}
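
/*
 * Illustrative iteration loop (hypothetical caller; `store` is a
 * packed ref store created with the necessary flags):
 *
 *     struct ref_iterator *iter =
 *             packed_ref_iterator_begin(store, "refs/tags/",
 *                                       DO_FOR_EACH_INCLUDE_BROKEN);
 *     int ok;
 *
 *     while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *             printf("%s %s\n", oid_to_hex(iter->oid), iter->refname);
 *     if (ok != ITER_DONE)
 *             die("error iterating over packed-refs");
 *
 * Once ref_iterator_advance() returns ITER_DONE or ITER_ERROR, the
 * iterator has already been freed; ref_iterator_abort() is only
 * needed to stop early.
 */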
/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const struct object_id *oid,
			      const struct object_id *peeled)
{
	if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * snapshot matches the current version of the file. Normally
	 * `get_snapshot()` does that for us, but that function
	 * assumes that when the file is locked, any existing snapshot
	 * is still valid. We've just locked the file, but it might
	 * have changed the moment *before* we locked it.
	 */
	validate_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		BUG("packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other traits
 * will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
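
/*
 * Typical locking pattern around a rewrite (illustrative; error
 * handling abbreviated):
 *
 *     struct strbuf err = STRBUF_INIT;
 *
 *     if (packed_refs_lock(store, 0, &err))
 *             die("%s", err.buf);
 *     ... prepare and commit a transaction against `store` ...
 *     packed_refs_unlock(store);
 *
 * Note that packed_refs_lock() takes the lock on `packed-refs`
 * itself; the new contents are written to a separate
 * "packed-refs.new" tempfile and renamed into place while the lock is
 * held.
 */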
/*
 * Write the packed refs from the current snapshot to the packed-refs
 * tempfile, incorporating any changes from `updates`. `updates` must
 * be a sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		BUG("write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	refs->tempfile = create_tempfile(sb.buf);
	if (!refs->tempfile) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (!oideq(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid,
					       peel_error ? NULL : &peeled))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(&update->new_oid,
						     &peeled);

			if (write_packed_entry(out, update->refname,
					       &update->new_oid,
					       peel_error ? NULL : &peeled))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addstr(err, "unable to write packed-refs file: "
			      "error iterating over old contents");
		goto error;
	}

	if (close_tempfile_gently(refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		delete_tempfile(&refs->tempfile);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}
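
/*
 * Worked example of the merge loop above (illustrative): if the old
 * file contains "refs/heads/a" and "refs/heads/c", and the sorted
 * `updates` list deletes "refs/heads/a" and creates "refs/heads/b",
 * the loop proceeds as follows:
 *
 *     1. "a" == "a": the update wins; the deletion writes nothing
 *        and the iterator skips the old record.
 *     2. "c" > "b": the new value for "b" is written out.
 *     3. updates exhausted: the old record for "c" is passed through
 *        unchanged.
 *
 * The result is a tempfile containing the header, "b", and "c".
 */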
int is_packed_transaction_needed(struct ref_store *ref_store,
				 struct ref_transaction *transaction)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ,
			"is_packed_transaction_needed");
	struct strbuf referent = STRBUF_INIT;
	size_t i;
	int ret;

	if (!is_lock_file_locked(&refs->lock))
		BUG("is_packed_transaction_needed() called while unlocked");

	/*
	 * We're only going to bother returning false for the common,
	 * trivial case that references are only being deleted, their
	 * old values are not being checked, and the old `packed-refs`
	 * file doesn't contain any of those reference(s). This gives
	 * false positives for some other cases that could
	 * theoretically be optimized away:
	 *
	 * 1. It could be that the old value is being verified without
	 *    setting a new value. In this case, we could verify the
	 *    old value here and skip the update if it agrees. If it
	 *    disagrees, we could either let the update go through
	 *    (the actual commit would re-detect and report the
	 *    problem), or come up with a way of reporting such an
	 *    error to *our* caller.
	 *
	 * 2. It could be that a new value is being set, but that it
	 *    is identical to the current packed value of the
	 *    reference.
	 *
	 * Neither of these cases will come up in the current code,
	 * because the only caller of this function passes to it a
	 * transaction that only includes `delete` updates with no
	 * `old_id`. Even if that ever changes, false positives only
	 * cause an optimization to be missed; they do not affect
	 * correctness.
	 */

	/*
	 * Start with the cheap checks that don't require old
	 * reference values to be read:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if (update->flags & REF_HAVE_OLD)
			/* Have to check the old value -> needed. */
			return 1;

		if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
			/* Have to set a new value -> needed. */
			return 1;
	}

	/*
	 * The transaction isn't checking any old values nor is it
	 * setting any nonzero new values, so it still might be able
	 * to be skipped. Now do the more expensive check: the update
	 * is needed if any of the updates is a delete, and the old
	 * `packed-refs` file contains a value for that reference.
	 */
	ret = 0;
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		unsigned int type;
		struct object_id oid;

		if (!(update->flags & REF_HAVE_NEW))
			/*
			 * This reference isn't being deleted -> not
			 * needed.
			 */
			continue;

		if (!refs_read_raw_ref(ref_store, update->refname,
				       &oid, &referent, &type) ||
		    errno != ENOENT) {
			/*
			 * We have to actually delete that reference
			 * -> this transaction is needed.
			 */
			ret = 1;
			break;
		}
	}

	strbuf_release(&referent);
	return ret;
}
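
/*
 * Concrete example (illustrative): while `packed-refs` is locked, a
 * transaction that only deletes "refs/heads/topic" with no expected
 * old value is skippable if "refs/heads/topic" does not appear in the
 * packed-refs file; is_packed_transaction_needed() returns 0 and the
 * caller can avoid rewriting the file. If the same transaction also
 * set a nonzero new value anywhere, the first loop above would
 * immediately report it as needed.
 */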
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}
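
/*
 * Illustrative end-to-end use of this backend's transaction
 * functions via the generic ref-transaction API (hypothetical
 * caller; error handling abbreviated):
 *
 *     struct strbuf err = STRBUF_INIT;
 *     struct ref_transaction *t =
 *             ref_store_transaction_begin(store, &err);
 *
 *     if (!t ||
 *         ref_transaction_delete(t, "refs/heads/topic", NULL,
 *                                0, "msg", &err) ||
 *         ref_transaction_commit(t, &err))
 *             die("%s", err.buf);
 *     ref_transaction_free(t);
 *
 * Committing drives packed_transaction_prepare() and
 * packed_transaction_finish() below.
 */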
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled or
	 * ensuring that the `packed-refs` file is sorted. If the
	 * caller wants to optimize away empty transactions, it should
	 * do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	clear_snapshot(refs);

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
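
/*
 * To recap the two-phase scheme implemented above (illustrative):
 *
 *     prepare: sort the updates, take the packed-refs lock if not
 *              already held, and write the merged result to the
 *              "packed-refs.new" tempfile (write_with_updates());
 *     finish:  invalidate the snapshot and rename the tempfile onto
 *              "packed-refs";
 *     abort:   delete the tempfile and release the lock if this
 *              transaction took it.
 */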
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	BUG("packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	BUG("packed reference store does not support renaming references");
}

static int packed_copy_ref(struct ref_store *ref_store,
			   const char *oldrefname, const char *newrefname,
			   const char *logmsg)
{
	BUG("packed reference store does not support copying references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	BUG("packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const struct object_id *oid,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,
	packed_copy_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};