1#include "../cache.h" 2#include "../config.h" 3#include "../refs.h" 4#include "refs-internal.h" 5#include "packed-backend.h" 6#include "../iterator.h" 7#include "../lockfile.h" 8 9enum mmap_strategy { 10 /* 11 * Don't use mmap() at all for reading `packed-refs`. 12 */ 13 MMAP_NONE, 14 15 /* 16 * Can use mmap() for reading `packed-refs`, but the file must 17 * not remain mmapped. This is the usual option on Windows, 18 * where you cannot rename a new version of a file onto a file 19 * that is currently mmapped. 20 */ 21 MMAP_TEMPORARY, 22 23 /* 24 * It is OK to leave the `packed-refs` file mmapped while 25 * arbitrary other code is running. 26 */ 27 MMAP_OK 28}; 29 30#if defined(NO_MMAP) 31static enum mmap_strategy mmap_strategy = MMAP_NONE; 32#elif defined(MMAP_PREVENTS_DELETE) 33static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY; 34#else 35static enum mmap_strategy mmap_strategy = MMAP_OK; 36#endif 37 38struct packed_ref_store; 39 40struct packed_ref_cache { 41 /* 42 * A back-pointer to the packed_ref_store with which this 43 * cache is associated: 44 */ 45 struct packed_ref_store *refs; 46 47 /* Is the `packed-refs` file currently mmapped? */ 48 int mmapped; 49 50 /* 51 * The contents of the `packed-refs` file. If the file was 52 * already sorted, this points at the mmapped contents of the 53 * file. If not, this points at heap-allocated memory 54 * containing the contents, sorted. If there were no contents 55 * (e.g., because the file didn't exist), `buf` and `eof` are 56 * both NULL. 57 */ 58 char *buf, *eof; 59 60 /* The size of the header line, if any; otherwise, 0: */ 61 size_t header_len; 62 63 /* 64 * What is the peeled state of this cache? (This is usually 65 * determined from the header of the "packed-refs" file.) 66 */ 67 enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled; 68 69 /* 70 * Count of references to the data structure in this instance, 71 * including the pointer from files_ref_store::packed if any. 
72 * The data will not be freed as long as the reference count 73 * is nonzero. 74 */ 75 unsigned int referrers; 76 77 /* The metadata from when this packed-refs cache was read */ 78 struct stat_validity validity; 79}; 80 81/* 82 * A container for `packed-refs`-related data. It is not (yet) a 83 * `ref_store`. 84 */ 85struct packed_ref_store { 86 struct ref_store base; 87 88 unsigned int store_flags; 89 90 /* The path of the "packed-refs" file: */ 91 char *path; 92 93 /* 94 * A cache of the values read from the `packed-refs` file, if 95 * it might still be current; otherwise, NULL. 96 */ 97 struct packed_ref_cache *cache; 98 99 /* 100 * Lock used for the "packed-refs" file. Note that this (and 101 * thus the enclosing `packed_ref_store`) must not be freed. 102 */ 103 struct lock_file lock; 104 105 /* 106 * Temporary file used when rewriting new contents to the 107 * "packed-refs" file. Note that this (and thus the enclosing 108 * `packed_ref_store`) must not be freed. 109 */ 110 struct tempfile tempfile; 111}; 112 113/* 114 * Increment the reference count of *packed_refs. 115 */ 116static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs) 117{ 118 packed_refs->referrers++; 119} 120 121/* 122 * If the buffer in `packed_refs` is active, then either munmap the 123 * memory and close the file, or free the memory. Then set the buffer 124 * pointers to NULL. 125 */ 126static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs) 127{ 128 if (packed_refs->mmapped) { 129 if (munmap(packed_refs->buf, 130 packed_refs->eof - packed_refs->buf)) 131 die_errno("error ummapping packed-refs file %s", 132 packed_refs->refs->path); 133 packed_refs->mmapped = 0; 134 } else { 135 free(packed_refs->buf); 136 } 137 packed_refs->buf = packed_refs->eof = NULL; 138 packed_refs->header_len = 0; 139} 140 141/* 142 * Decrease the reference count of *packed_refs. If it goes to zero, 143 * free *packed_refs and return true; otherwise return false. 
144 */ 145static int release_packed_ref_cache(struct packed_ref_cache *packed_refs) 146{ 147 if (!--packed_refs->referrers) { 148 stat_validity_clear(&packed_refs->validity); 149 release_packed_ref_buffer(packed_refs); 150 free(packed_refs); 151 return 1; 152 } else { 153 return 0; 154 } 155} 156 157struct ref_store *packed_ref_store_create(const char *path, 158 unsigned int store_flags) 159{ 160 struct packed_ref_store *refs = xcalloc(1, sizeof(*refs)); 161 struct ref_store *ref_store = (struct ref_store *)refs; 162 163 base_ref_store_init(ref_store, &refs_be_packed); 164 refs->store_flags = store_flags; 165 166 refs->path = xstrdup(path); 167 return ref_store; 168} 169 170/* 171 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is 172 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't 173 * support at least the flags specified in `required_flags`. `caller` 174 * is used in any necessary error messages. 175 */ 176static struct packed_ref_store *packed_downcast(struct ref_store *ref_store, 177 unsigned int required_flags, 178 const char *caller) 179{ 180 struct packed_ref_store *refs; 181 182 if (ref_store->be != &refs_be_packed) 183 die("BUG: ref_store is type \"%s\" not \"packed\" in %s", 184 ref_store->be->name, caller); 185 186 refs = (struct packed_ref_store *)ref_store; 187 188 if ((refs->store_flags & required_flags) != required_flags) 189 die("BUG: unallowed operation (%s), requires %x, has %x\n", 190 caller, required_flags, refs->store_flags); 191 192 return refs; 193} 194 195static void clear_packed_ref_cache(struct packed_ref_store *refs) 196{ 197 if (refs->cache) { 198 struct packed_ref_cache *cache = refs->cache; 199 200 refs->cache = NULL; 201 release_packed_ref_cache(cache); 202 } 203} 204 205static NORETURN void die_unterminated_line(const char *path, 206 const char *p, size_t len) 207{ 208 if (len < 80) 209 die("unterminated line in %s: %.*s", path, (int)len, p); 210 else 211 die("unterminated line in %s: %.75s...", 
path, p); 212} 213 214static NORETURN void die_invalid_line(const char *path, 215 const char *p, size_t len) 216{ 217 const char *eol = memchr(p, '\n', len); 218 219 if (!eol) 220 die_unterminated_line(path, p, len); 221 else if (eol - p < 80) 222 die("unexpected line in %s: %.*s", path, (int)(eol - p), p); 223 else 224 die("unexpected line in %s: %.75s...", path, p); 225 226} 227 228/* 229 * This value is set in `base.flags` if the peeled value of the 230 * current reference is known. In that case, `peeled` contains the 231 * correct peeled value for the reference, which might be `null_sha1` 232 * if the reference is not a tag or if it is broken. 233 */ 234#define REF_KNOWS_PEELED 0x40 235 236/* 237 * An iterator over a packed-refs file that is currently mmapped. 238 */ 239struct mmapped_ref_iterator { 240 struct ref_iterator base; 241 242 struct packed_ref_cache *packed_refs; 243 244 /* The current position in the mmapped file: */ 245 const char *pos; 246 247 /* The end of the mmapped file: */ 248 const char *eof; 249 250 struct object_id oid, peeled; 251 252 struct strbuf refname_buf; 253}; 254 255static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator) 256{ 257 struct mmapped_ref_iterator *iter = 258 (struct mmapped_ref_iterator *)ref_iterator; 259 const char *p = iter->pos, *eol; 260 261 strbuf_reset(&iter->refname_buf); 262 263 if (iter->pos == iter->eof) 264 return ref_iterator_abort(ref_iterator); 265 266 iter->base.flags = REF_ISPACKED; 267 268 if (iter->eof - p < GIT_SHA1_HEXSZ + 2 || 269 parse_oid_hex(p, &iter->oid, &p) || 270 !isspace(*p++)) 271 die_invalid_line(iter->packed_refs->refs->path, 272 iter->pos, iter->eof - iter->pos); 273 274 eol = memchr(p, '\n', iter->eof - p); 275 if (!eol) 276 die_unterminated_line(iter->packed_refs->refs->path, 277 iter->pos, iter->eof - iter->pos); 278 279 strbuf_add(&iter->refname_buf, p, eol - p); 280 iter->base.refname = iter->refname_buf.buf; 281 282 if (check_refname_format(iter->base.refname, 
REFNAME_ALLOW_ONELEVEL)) { 283 if (!refname_is_safe(iter->base.refname)) 284 die("packed refname is dangerous: %s", 285 iter->base.refname); 286 oidclr(&iter->oid); 287 iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN; 288 } 289 if (iter->packed_refs->peeled == PEELED_FULLY || 290 (iter->packed_refs->peeled == PEELED_TAGS && 291 starts_with(iter->base.refname, "refs/tags/"))) 292 iter->base.flags |= REF_KNOWS_PEELED; 293 294 iter->pos = eol + 1; 295 296 if (iter->pos < iter->eof && *iter->pos == '^') { 297 p = iter->pos + 1; 298 if (iter->eof - p < GIT_SHA1_HEXSZ + 1 || 299 parse_oid_hex(p, &iter->peeled, &p) || 300 *p++ != '\n') 301 die_invalid_line(iter->packed_refs->refs->path, 302 iter->pos, iter->eof - iter->pos); 303 iter->pos = p; 304 305 /* 306 * Regardless of what the file header said, we 307 * definitely know the value of *this* reference. But 308 * we suppress it if the reference is broken: 309 */ 310 if ((iter->base.flags & REF_ISBROKEN)) { 311 oidclr(&iter->peeled); 312 iter->base.flags &= ~REF_KNOWS_PEELED; 313 } else { 314 iter->base.flags |= REF_KNOWS_PEELED; 315 } 316 } else { 317 oidclr(&iter->peeled); 318 } 319 320 return ITER_OK; 321} 322 323static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator, 324 struct object_id *peeled) 325{ 326 struct mmapped_ref_iterator *iter = 327 (struct mmapped_ref_iterator *)ref_iterator; 328 329 if ((iter->base.flags & REF_KNOWS_PEELED)) { 330 oidcpy(peeled, &iter->peeled); 331 return is_null_oid(&iter->peeled) ? 
-1 : 0; 332 } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) { 333 return -1; 334 } else { 335 return !!peel_object(iter->oid.hash, peeled->hash); 336 } 337} 338 339static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator) 340{ 341 struct mmapped_ref_iterator *iter = 342 (struct mmapped_ref_iterator *)ref_iterator; 343 344 release_packed_ref_cache(iter->packed_refs); 345 strbuf_release(&iter->refname_buf); 346 base_ref_iterator_free(ref_iterator); 347 return ITER_DONE; 348} 349 350static struct ref_iterator_vtable mmapped_ref_iterator_vtable = { 351 mmapped_ref_iterator_advance, 352 mmapped_ref_iterator_peel, 353 mmapped_ref_iterator_abort 354}; 355 356struct ref_iterator *mmapped_ref_iterator_begin( 357 struct packed_ref_cache *packed_refs, 358 const char *pos, const char *eof) 359{ 360 struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter)); 361 struct ref_iterator *ref_iterator = &iter->base; 362 363 if (!packed_refs->buf) 364 return empty_ref_iterator_begin(); 365 366 base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1); 367 368 iter->packed_refs = packed_refs; 369 acquire_packed_ref_cache(iter->packed_refs); 370 iter->pos = pos; 371 iter->eof = eof; 372 strbuf_init(&iter->refname_buf, 0); 373 374 iter->base.oid = &iter->oid; 375 376 return ref_iterator; 377} 378 379struct packed_ref_entry { 380 const char *start; 381 size_t len; 382}; 383 384static int cmp_packed_ref_entries(const void *v1, const void *v2) 385{ 386 const struct packed_ref_entry *e1 = v1, *e2 = v2; 387 const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1; 388 const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1; 389 390 while (1) { 391 if (*r1 == '\n') 392 return *r2 == '\n' ? 0 : -1; 393 if (*r1 != *r2) { 394 if (*r2 == '\n') 395 return 1; 396 else 397 return (unsigned char)*r1 < (unsigned char)*r2 ? 
-1 : +1; 398 } 399 r1++; 400 r2++; 401 } 402} 403 404/* 405 * Compare a packed-refs record pointed to by `rec` to the specified 406 * NUL-terminated refname. 407 */ 408static int cmp_entry_to_refname(const char *rec, const char *refname) 409{ 410 const char *r1 = rec + GIT_SHA1_HEXSZ + 1; 411 const char *r2 = refname; 412 413 while (1) { 414 if (*r1 == '\n') 415 return *r2 ? -1 : 0; 416 if (!*r2) 417 return 1; 418 if (*r1 != *r2) 419 return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1; 420 r1++; 421 r2++; 422 } 423} 424 425/* 426 * `packed_refs->buf` is not known to be sorted. Check whether it is, 427 * and if not, sort it into new memory and munmap/free the old 428 * storage. 429 */ 430static void sort_packed_refs(struct packed_ref_cache *packed_refs) 431{ 432 struct packed_ref_entry *entries = NULL; 433 size_t alloc = 0, nr = 0; 434 int sorted = 1; 435 const char *pos, *eof, *eol; 436 size_t len, i; 437 char *new_buffer, *dst; 438 439 pos = packed_refs->buf + packed_refs->header_len; 440 eof = packed_refs->eof; 441 len = eof - pos; 442 443 if (!len) 444 return; 445 446 /* 447 * Initialize entries based on a crude estimate of the number 448 * of references in the file (we'll grow it below if needed): 449 */ 450 ALLOC_GROW(entries, len / 80 + 20, alloc); 451 452 while (pos < eof) { 453 eol = memchr(pos, '\n', eof - pos); 454 if (!eol) 455 /* The safety check should prevent this. */ 456 BUG("unterminated line found in packed-refs"); 457 if (eol - pos < GIT_SHA1_HEXSZ + 2) 458 die_invalid_line(packed_refs->refs->path, 459 pos, eof - pos); 460 eol++; 461 if (eol < eof && *eol == '^') { 462 /* 463 * Keep any peeled line together with its 464 * reference: 465 */ 466 const char *peeled_start = eol; 467 468 eol = memchr(peeled_start, '\n', eof - peeled_start); 469 if (!eol) 470 /* The safety check should prevent this. 
*/ 471 BUG("unterminated peeled line found in packed-refs"); 472 eol++; 473 } 474 475 ALLOC_GROW(entries, nr + 1, alloc); 476 entries[nr].start = pos; 477 entries[nr].len = eol - pos; 478 nr++; 479 480 if (sorted && 481 nr > 1 && 482 cmp_packed_ref_entries(&entries[nr - 2], 483 &entries[nr - 1]) >= 0) 484 sorted = 0; 485 486 pos = eol; 487 } 488 489 if (sorted) 490 goto cleanup; 491 492 /* We need to sort the memory. First we sort the entries array: */ 493 QSORT(entries, nr, cmp_packed_ref_entries); 494 495 /* 496 * Allocate a new chunk of memory, and copy the old memory to 497 * the new in the order indicated by `entries` (not bothering 498 * with the header line): 499 */ 500 new_buffer = xmalloc(len); 501 for (dst = new_buffer, i = 0; i < nr; i++) { 502 memcpy(dst, entries[i].start, entries[i].len); 503 dst += entries[i].len; 504 } 505 506 /* 507 * Now munmap the old buffer and use the sorted buffer in its 508 * place: 509 */ 510 release_packed_ref_buffer(packed_refs); 511 packed_refs->buf = new_buffer; 512 packed_refs->eof = new_buffer + len; 513 packed_refs->header_len = 0; 514 515cleanup: 516 free(entries); 517} 518 519/* 520 * Return a pointer to the start of the record that contains the 521 * character `*p` (which must be within the buffer). If no other 522 * record start is found, return `buf`. 523 */ 524static const char *find_start_of_record(const char *buf, const char *p) 525{ 526 while (p > buf && (p[-1] != '\n' || p[0] == '^')) 527 p--; 528 return p; 529} 530 531/* 532 * Return a pointer to the start of the record following the record 533 * that contains `*p`. If none is found before `end`, return `end`. 534 */ 535static const char *find_end_of_record(const char *p, const char *end) 536{ 537 while (++p < end && (p[-1] != '\n' || p[0] == '^')) 538 ; 539 return p; 540} 541 542/* 543 * We want to be able to compare mmapped reference records quickly, 544 * without totally parsing them. 
We can do so because the records are 545 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ 546 * + 1) bytes past the beginning of the record. 547 * 548 * But what if the `packed-refs` file contains garbage? We're willing 549 * to tolerate not detecting the problem, as long as we don't produce 550 * totally garbled output (we can't afford to check the integrity of 551 * the whole file during every Git invocation). But we do want to be 552 * sure that we never read past the end of the buffer in memory and 553 * perform an illegal memory access. 554 * 555 * Guarantee that minimum level of safety by verifying that the last 556 * record in the file is LF-terminated, and that it has at least 557 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of 558 * these checks fails. 559 */ 560static void verify_buffer_safe(struct packed_ref_cache *packed_refs) 561{ 562 const char *buf = packed_refs->buf + packed_refs->header_len; 563 const char *eof = packed_refs->eof; 564 const char *last_line; 565 566 if (buf == eof) 567 return; 568 569 last_line = find_start_of_record(buf, eof - 1); 570 if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2) 571 die_invalid_line(packed_refs->refs->path, 572 last_line, eof - last_line); 573} 574 575/* 576 * Depending on `mmap_strategy`, either mmap or read the contents of 577 * the `packed-refs` file into the `packed_refs` instance. Return 1 if 578 * the file existed and was read, or 0 if the file was absent. Die on 579 * errors. 580 */ 581static int load_contents(struct packed_ref_cache *packed_refs) 582{ 583 int fd; 584 struct stat st; 585 size_t size; 586 ssize_t bytes_read; 587 588 fd = open(packed_refs->refs->path, O_RDONLY); 589 if (fd < 0) { 590 if (errno == ENOENT) { 591 /* 592 * This is OK; it just means that no 593 * "packed-refs" file has been written yet, 594 * which is equivalent to it being empty, 595 * which is its state when initialized with 596 * zeros. 
597 */ 598 return 0; 599 } else { 600 die_errno("couldn't read %s", packed_refs->refs->path); 601 } 602 } 603 604 stat_validity_update(&packed_refs->validity, fd); 605 606 if (fstat(fd, &st) < 0) 607 die_errno("couldn't stat %s", packed_refs->refs->path); 608 size = xsize_t(st.st_size); 609 610 switch (mmap_strategy) { 611 case MMAP_NONE: 612 packed_refs->buf = xmalloc(size); 613 bytes_read = read_in_full(fd, packed_refs->buf, size); 614 if (bytes_read < 0 || bytes_read != size) 615 die_errno("couldn't read %s", packed_refs->refs->path); 616 packed_refs->eof = packed_refs->buf + size; 617 packed_refs->mmapped = 0; 618 break; 619 case MMAP_TEMPORARY: 620 case MMAP_OK: 621 packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); 622 packed_refs->eof = packed_refs->buf + size; 623 packed_refs->mmapped = 1; 624 break; 625 } 626 close(fd); 627 628 return 1; 629} 630 631/* 632 * Find the place in `cache->buf` where the start of the record for 633 * `refname` starts. If `mustexist` is true and the reference doesn't 634 * exist, then return NULL. If `mustexist` is false and the reference 635 * doesn't exist, then return the point where that reference would be 636 * inserted. In the latter mode, `refname` doesn't have to be a proper 637 * reference name; for example, one could search for "refs/replace/" 638 * to find the start of any replace references. 639 * 640 * The record is sought using a binary search, so `cache->buf` must be 641 * sorted. 642 */ 643static const char *find_reference_location(struct packed_ref_cache *cache, 644 const char *refname, int mustexist) 645{ 646 /* 647 * This is not *quite* a garden-variety binary search, because 648 * the data we're searching is made up of records, and we 649 * always need to find the beginning of a record to do a 650 * comparison. A "record" here is one line for the reference 651 * itself and zero or one peel lines that start with '^'. Our 652 * loop invariant is described in the next two comments. 
653 */ 654 655 /* 656 * A pointer to the character at the start of a record whose 657 * preceding records all have reference names that come 658 * *before* `refname`. 659 */ 660 const char *lo = cache->buf + cache->header_len; 661 662 /* 663 * A pointer to a the first character of a record whose 664 * reference name comes *after* `refname`. 665 */ 666 const char *hi = cache->eof; 667 668 while (lo < hi) { 669 const char *mid, *rec; 670 int cmp; 671 672 mid = lo + (hi - lo) / 2; 673 rec = find_start_of_record(lo, mid); 674 cmp = cmp_entry_to_refname(rec, refname); 675 if (cmp < 0) { 676 lo = find_end_of_record(mid, hi); 677 } else if (cmp > 0) { 678 hi = rec; 679 } else { 680 return rec; 681 } 682 } 683 684 if (mustexist) 685 return NULL; 686 else 687 return lo; 688} 689 690/* 691 * Read from the `packed-refs` file into a newly-allocated 692 * `packed_ref_cache` and return it. The return value will already 693 * have its reference count incremented. 694 * 695 * A comment line of the form "# pack-refs with: " may contain zero or 696 * more traits. We interpret the traits as follows: 697 * 698 * Neither `peeled` nor `fully-peeled`: 699 * 700 * Probably no references are peeled. But if the file contains a 701 * peeled value for a reference, we will use it. 702 * 703 * `peeled`: 704 * 705 * References under "refs/tags/", if they *can* be peeled, *are* 706 * peeled in this file. References outside of "refs/tags/" are 707 * probably not peeled even if they could have been, but if we find 708 * a peeled value for such a reference we will use it. 709 * 710 * `fully-peeled`: 711 * 712 * All references in the file that can be peeled are peeled. 713 * Inversely (and this is more important), any references in the 714 * file for which no peeled value is recorded is not peelable. This 715 * trait should typically be written alongside "peeled" for 716 * compatibility with older clients, but we do not require it 717 * (i.e., "peeled" is a no-op if "fully-peeled" is set). 
718 * 719 * `sorted`: 720 * 721 * The references in this file are known to be sorted by refname. 722 */ 723static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs) 724{ 725 struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs)); 726 int sorted = 0; 727 728 packed_refs->refs = refs; 729 acquire_packed_ref_cache(packed_refs); 730 packed_refs->peeled = PEELED_NONE; 731 732 if (!load_contents(packed_refs)) 733 return packed_refs; 734 735 /* If the file has a header line, process it: */ 736 if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') { 737 struct strbuf tmp = STRBUF_INIT; 738 char *p; 739 const char *eol; 740 struct string_list traits = STRING_LIST_INIT_NODUP; 741 742 eol = memchr(packed_refs->buf, '\n', 743 packed_refs->eof - packed_refs->buf); 744 if (!eol) 745 die_unterminated_line(refs->path, 746 packed_refs->buf, 747 packed_refs->eof - packed_refs->buf); 748 749 strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf); 750 751 if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p)) 752 die_invalid_line(refs->path, 753 packed_refs->buf, 754 packed_refs->eof - packed_refs->buf); 755 756 string_list_split_in_place(&traits, p, ' ', -1); 757 758 if (unsorted_string_list_has_string(&traits, "fully-peeled")) 759 packed_refs->peeled = PEELED_FULLY; 760 else if (unsorted_string_list_has_string(&traits, "peeled")) 761 packed_refs->peeled = PEELED_TAGS; 762 763 sorted = unsorted_string_list_has_string(&traits, "sorted"); 764 765 /* perhaps other traits later as well */ 766 767 /* The "+ 1" is for the LF character. 
*/ 768 packed_refs->header_len = eol + 1 - packed_refs->buf; 769 770 string_list_clear(&traits, 0); 771 strbuf_release(&tmp); 772 } 773 774 verify_buffer_safe(packed_refs); 775 776 if (!sorted) { 777 sort_packed_refs(packed_refs); 778 779 /* 780 * Reordering the records might have moved a short one 781 * to the end of the buffer, so verify the buffer's 782 * safety again: 783 */ 784 verify_buffer_safe(packed_refs); 785 } 786 787 if (mmap_strategy != MMAP_OK && packed_refs->mmapped) { 788 /* 789 * We don't want to leave the file mmapped, so we are 790 * forced to make a copy now: 791 */ 792 size_t size = packed_refs->eof - 793 (packed_refs->buf + packed_refs->header_len); 794 char *buf_copy = xmalloc(size); 795 796 memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size); 797 release_packed_ref_buffer(packed_refs); 798 packed_refs->buf = buf_copy; 799 packed_refs->eof = buf_copy + size; 800 } 801 802 return packed_refs; 803} 804 805/* 806 * Check that the packed refs cache (if any) still reflects the 807 * contents of the file. If not, clear the cache. 808 */ 809static void validate_packed_ref_cache(struct packed_ref_store *refs) 810{ 811 if (refs->cache && 812 !stat_validity_check(&refs->cache->validity, refs->path)) 813 clear_packed_ref_cache(refs); 814} 815 816/* 817 * Get the packed_ref_cache for the specified packed_ref_store, 818 * creating and populating it if it hasn't been read before or if the 819 * file has been changed (according to its `validity` field) since it 820 * was last read. On the other hand, if we hold the lock, then assume 821 * that the file hasn't been changed out from under us, so skip the 822 * extra `stat()` call in `stat_validity_check()`. 
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	/* Only re-validate against the file when we don't hold the lock: */
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs);

	return refs->cache;
}

/*
 * `read_raw_ref` method for the packed ref store: look `refname` up
 * in the (sorted) cache. On success, fill `sha1`, set `*type` to
 * REF_ISPACKED, and return 0. If the reference is not present, set
 * errno to ENOENT and return -1. Die if the record is malformed.
 * (`referent` is unused: packed refs are never symbolic.)
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct packed_ref_cache *packed_refs = get_packed_ref_cache(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(packed_refs, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	if (get_sha1_hex(rec, sha1))
		die_invalid_line(refs->path, rec, packed_refs->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}

/*
 * An iterator over the packed ref store: wraps a lower-level
 * `mmapped_ref_iterator` (`iter0`) and filters its output according
 * to `flags` (DO_FOR_EACH_* bits).
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* Reference held on the cache for the iterator's lifetime: */
	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

/*
 * Advance `iter0` until it yields a reference that passes the
 * `flags` filters (per-worktree-only, include-broken), then expose
 * that entry via `iter->base`. On exhaustion or error, abort and
 * propagate the result.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/* iter0 has aborted itself on non-ITER_OK; don't abort it again. */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}
/* Delegate peeling to the underlying mmapped iterator. */
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

/*
 * Abort the wrapped iterator (if still live), drop our reference on
 * the cache, and free the iterator.
 */
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

/*
 * Begin iterating over the packed refs whose names start with
 * `prefix` (all refs if `prefix` is NULL or empty), honoring the
 * DO_FOR_EACH_* bits in `flags`. REF_STORE_ODB is additionally
 * required unless broken refs are included, because filtering broken
 * refs needs object lookups.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_cache *packed_refs;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */
	iter->cache = packed_refs = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(packed_refs);

	/* Binary-search to the first record at or after `prefix`: */
	if (prefix && *prefix)
		start = find_reference_location(packed_refs, prefix, 0);
	else
		start = packed_refs->buf + packed_refs->header_len;

	iter->iter0 = mmapped_ref_iterator_begin(packed_refs,
						 start, packed_refs->eof);

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	/* The timeout is read from the config once and then reused: */
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`.
 `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packfile must be locked before calling this function and will
 * remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	/* Status of the most recent ref_iterator_advance() call: */
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		/*
		 * cmp < 0: the iterator's ref comes first (emit it);
		 * cmp > 0: the update comes first (emit it);
		 * cmp == 0: both refer to the same refname.
		 */
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		/*
		 * NOTE(review): `sb` was already released right after
		 * the tempfile was created, so this second release
		 * looks redundant — confirm and consider dropping it.
		 */
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}

struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock.
*/
	int own_lock;

	/*
	 * The transaction's updates as a string list keyed by
	 * refname; each item's util points at the corresponding
	 * `struct ref_update` (see packed_transaction_prepare()).
	 */
	struct string_list updates;
};

/*
 * Tear down the backend-specific state of `transaction`: free the
 * update list, delete any active tempfile, release the packed-refs
 * lock if this transaction acquired it, and mark the transaction
 * CLOSED. Safe to call whether or not backend_data was set.
 */
static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

/*
 * Prepare the transaction: sort its updates by refname, reject
 * duplicates, take the packed-refs lock if it is not already held
 * (remembering ownership so cleanup releases it), and write the new
 * contents to the tempfile via write_with_updates(). On success the
 * transaction is left in the PREPARED state; on failure it is
 * cleaned up and TRANSACTION_GENERIC_ERROR is returned.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
		ref_store,
		REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
		"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto
failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		/* Remember that it is our job to release the lock: */
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

/*
 * Abort the transaction: discard the tempfile and release the lock
 * (if owned) without modifying the `packed-refs` file.
 */
static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
		ref_store,
		REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
		"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

/*
 * Finish a prepared transaction by renaming the tempfile written by
 * write_with_updates() onto the `packed-refs` file, then cleaning up
 * the transaction state. Returns 0 on success, otherwise
 * TRANSACTION_GENERIC_ERROR with a message in `err`.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
		ref_store,
		REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
		"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	/* The on-disk file is about to change; drop the cache first. */
	clear_packed_ref_cache(refs);

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

/*
 * For the packed backend the initial transaction needs no special
 * handling; commit it like any other.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

/*
 * Delete all of `refnames` from the packed store in a single
 * transaction, logging `msg`. Individual failures are reported but
 * do not abort the rest of the batch.
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err =
STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			/* Best effort: warn, reset the error, keep going. */
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. 
It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

/* The packed backend does not support symbolic refs at all. */
static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

/* Renaming is likewise unsupported in this backend. */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

/* No reflogs in this backend, so iteration yields nothing. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

/* No reflog entries to visit; trivially succeed. */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

/* No reflog entries to visit; trivially succeed. */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

/* A reflog never exists for a packed ref. */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}

/* Creating reflogs is unsupported in this backend. */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

/* Nothing to delete; trivially succeed. */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}

/* Nothing to expire; trivially succeed. */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

/*
 * The method table for the "packed" ref storage backend. This is a
 * positional initializer, so the entry order must match the member
 * order of `struct ref_storage_be`.
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	/* ref-modifying operations */
	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	/* ref reading/iteration */
	packed_ref_iterator_begin,
	packed_read_raw_ref,

	/* reflog operations (all no-ops or BUGs for this backend) */
	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};