#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

/*
 * How we are allowed to use mmap() for reading the `packed-refs`
 * file; selected at compile time from platform capabilities.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file:
	 *
	 * - buf -- a pointer to the start of the memory
	 * - start -- a pointer to the first byte of actual references
	 *   (i.e., after the header line, if one is present)
	 * - eof -- a pointer just past the end of the reference
	 *   contents
	 *
	 * If the `packed-refs` file was already sorted, `buf` points
	 * at the mmapped contents of the file. If not, it points at
	 * heap-allocated memory containing the contents, sorted. If
	 * there were no contents (e.g., because the file didn't
	 * exist), `buf`, `start`, and `eof` are all NULL.
	 */
	char *buf, *start, *eof;

	/*
	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/*
	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	 */
	struct stat_validity validity;
};

/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction.
In that case, it remains locked even after the 123 * transaction is done and the new `packed-refs` file is activated. 124 */ 125struct packed_ref_store { 126struct ref_store base; 127 128unsigned int store_flags; 129 130/* The path of the "packed-refs" file: */ 131char*path; 132 133/* 134 * A snapshot of the values read from the `packed-refs` file, 135 * if it might still be current; otherwise, NULL. 136 */ 137struct snapshot *snapshot; 138 139/* 140 * Lock used for the "packed-refs" file. Note that this (and 141 * thus the enclosing `packed_ref_store`) must not be freed. 142 */ 143struct lock_file lock; 144 145/* 146 * Temporary file used when rewriting new contents to the 147 * "packed-refs" file. Note that this (and thus the enclosing 148 * `packed_ref_store`) must not be freed. 149 */ 150struct tempfile *tempfile; 151}; 152 153/* 154 * Increment the reference count of `*snapshot`. 155 */ 156static voidacquire_snapshot(struct snapshot *snapshot) 157{ 158 snapshot->referrers++; 159} 160 161/* 162 * If the buffer in `snapshot` is active, then either munmap the 163 * memory and close the file, or free the memory. Then set the buffer 164 * pointers to NULL. 165 */ 166static voidclear_snapshot_buffer(struct snapshot *snapshot) 167{ 168if(snapshot->mmapped) { 169if(munmap(snapshot->buf, snapshot->eof - snapshot->buf)) 170die_errno("error ummapping packed-refs file%s", 171 snapshot->refs->path); 172 snapshot->mmapped =0; 173}else{ 174free(snapshot->buf); 175} 176 snapshot->buf = snapshot->start = snapshot->eof = NULL; 177} 178 179/* 180 * Decrease the reference count of `*snapshot`. If it goes to zero, 181 * free `*snapshot` and return true; otherwise return false. 
182 */ 183static intrelease_snapshot(struct snapshot *snapshot) 184{ 185if(!--snapshot->referrers) { 186stat_validity_clear(&snapshot->validity); 187clear_snapshot_buffer(snapshot); 188free(snapshot); 189return1; 190}else{ 191return0; 192} 193} 194 195struct ref_store *packed_ref_store_create(const char*path, 196unsigned int store_flags) 197{ 198struct packed_ref_store *refs =xcalloc(1,sizeof(*refs)); 199struct ref_store *ref_store = (struct ref_store *)refs; 200 201base_ref_store_init(ref_store, &refs_be_packed); 202 refs->store_flags = store_flags; 203 204 refs->path =xstrdup(path); 205return ref_store; 206} 207 208/* 209 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is 210 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't 211 * support at least the flags specified in `required_flags`. `caller` 212 * is used in any necessary error messages. 213 */ 214static struct packed_ref_store *packed_downcast(struct ref_store *ref_store, 215unsigned int required_flags, 216const char*caller) 217{ 218struct packed_ref_store *refs; 219 220if(ref_store->be != &refs_be_packed) 221die("BUG: ref_store is type\"%s\"not\"packed\"in%s", 222 ref_store->be->name, caller); 223 224 refs = (struct packed_ref_store *)ref_store; 225 226if((refs->store_flags & required_flags) != required_flags) 227die("BUG: unallowed operation (%s), requires%x, has%x\n", 228 caller, required_flags, refs->store_flags); 229 230return refs; 231} 232 233static voidclear_snapshot(struct packed_ref_store *refs) 234{ 235if(refs->snapshot) { 236struct snapshot *snapshot = refs->snapshot; 237 238 refs->snapshot = NULL; 239release_snapshot(snapshot); 240} 241} 242 243static NORETURN voiddie_unterminated_line(const char*path, 244const char*p,size_t len) 245{ 246if(len <80) 247die("unterminated line in%s: %.*s", path, (int)len, p); 248else 249die("unterminated line in%s: %.75s...", path, p); 250} 251 252static NORETURN voiddie_invalid_line(const char*path, 253const char*p,size_t len) 
254{ 255const char*eol =memchr(p,'\n', len); 256 257if(!eol) 258die_unterminated_line(path, p, len); 259else if(eol - p <80) 260die("unexpected line in%s: %.*s", path, (int)(eol - p), p); 261else 262die("unexpected line in%s: %.75s...", path, p); 263 264} 265 266struct snapshot_record { 267const char*start; 268size_t len; 269}; 270 271static intcmp_packed_ref_records(const void*v1,const void*v2) 272{ 273const struct snapshot_record *e1 = v1, *e2 = v2; 274const char*r1 = e1->start + GIT_SHA1_HEXSZ +1; 275const char*r2 = e2->start + GIT_SHA1_HEXSZ +1; 276 277while(1) { 278if(*r1 =='\n') 279return*r2 =='\n'?0: -1; 280if(*r1 != *r2) { 281if(*r2 =='\n') 282return1; 283else 284return(unsigned char)*r1 < (unsigned char)*r2 ? -1: +1; 285} 286 r1++; 287 r2++; 288} 289} 290 291/* 292 * Compare a snapshot record at `rec` to the specified NUL-terminated 293 * refname. 294 */ 295static intcmp_record_to_refname(const char*rec,const char*refname) 296{ 297const char*r1 = rec + GIT_SHA1_HEXSZ +1; 298const char*r2 = refname; 299 300while(1) { 301if(*r1 =='\n') 302return*r2 ? -1:0; 303if(!*r2) 304return1; 305if(*r1 != *r2) 306return(unsigned char)*r1 < (unsigned char)*r2 ? -1: +1; 307 r1++; 308 r2++; 309} 310} 311 312/* 313 * `snapshot->buf` is not known to be sorted. Check whether it is, and 314 * if not, sort it into new memory and munmap/free the old storage. 
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = snapshot->start;
	eof = snapshot->eof;

	if (pos == eof)
		return;

	len = eof - pos;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		/* Detect on the fly whether the records are already sorted. */
		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	/* Already sorted: nothing to rewrite. */
	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;

cleanup:
	free(records);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation).
But we do want to be 438 * sure that we never read past the end of the buffer in memory and 439 * perform an illegal memory access. 440 * 441 * Guarantee that minimum level of safety by verifying that the last 442 * record in the file is LF-terminated, and that it has at least 443 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of 444 * these checks fails. 445 */ 446static voidverify_buffer_safe(struct snapshot *snapshot) 447{ 448const char*start = snapshot->start; 449const char*eof = snapshot->eof; 450const char*last_line; 451 452if(start == eof) 453return; 454 455 last_line =find_start_of_record(start, eof -1); 456if(*(eof -1) !='\n'|| eof - last_line < GIT_SHA1_HEXSZ +2) 457die_invalid_line(snapshot->refs->path, 458 last_line, eof - last_line); 459} 460 461/* 462 * Depending on `mmap_strategy`, either mmap or read the contents of 463 * the `packed-refs` file into the snapshot. Return 1 if the file 464 * existed and was read, or 0 if the file was absent or empty. Die on 465 * errors. 466 */ 467static intload_contents(struct snapshot *snapshot) 468{ 469int fd; 470struct stat st; 471size_t size; 472 ssize_t bytes_read; 473 474 fd =open(snapshot->refs->path, O_RDONLY); 475if(fd <0) { 476if(errno == ENOENT) { 477/* 478 * This is OK; it just means that no 479 * "packed-refs" file has been written yet, 480 * which is equivalent to it being empty, 481 * which is its state when initialized with 482 * zeros. 
483 */ 484return0; 485}else{ 486die_errno("couldn't read%s", snapshot->refs->path); 487} 488} 489 490stat_validity_update(&snapshot->validity, fd); 491 492if(fstat(fd, &st) <0) 493die_errno("couldn't stat%s", snapshot->refs->path); 494 size =xsize_t(st.st_size); 495 496if(!size) { 497return0; 498}else if(mmap_strategy == MMAP_NONE) { 499 snapshot->buf =xmalloc(size); 500 bytes_read =read_in_full(fd, snapshot->buf, size); 501if(bytes_read <0|| bytes_read != size) 502die_errno("couldn't read%s", snapshot->refs->path); 503 snapshot->mmapped =0; 504}else{ 505 snapshot->buf =xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd,0); 506 snapshot->mmapped =1; 507} 508close(fd); 509 510 snapshot->start = snapshot->buf; 511 snapshot->eof = snapshot->buf + size; 512 513return1; 514} 515 516/* 517 * Find the place in `snapshot->buf` where the start of the record for 518 * `refname` starts. If `mustexist` is true and the reference doesn't 519 * exist, then return NULL. If `mustexist` is false and the reference 520 * doesn't exist, then return the point where that reference would be 521 * inserted, or `snapshot->eof` (which might be NULL) if it would be 522 * inserted at the end of the file. In the latter mode, `refname` 523 * doesn't have to be a proper reference name; for example, one could 524 * search for "refs/replace/" to find the start of any replace 525 * references. 526 * 527 * The record is sought using a binary search, so `snapshot->buf` must 528 * be sorted. 529 */ 530static const char*find_reference_location(struct snapshot *snapshot, 531const char*refname,int mustexist) 532{ 533/* 534 * This is not *quite* a garden-variety binary search, because 535 * the data we're searching is made up of records, and we 536 * always need to find the beginning of a record to do a 537 * comparison. A "record" here is one line for the reference 538 * itself and zero or one peel lines that start with '^'. Our 539 * loop invariant is described in the next two comments. 
540 */ 541 542/* 543 * A pointer to the character at the start of a record whose 544 * preceding records all have reference names that come 545 * *before* `refname`. 546 */ 547const char*lo = snapshot->start; 548 549/* 550 * A pointer to a the first character of a record whose 551 * reference name comes *after* `refname`. 552 */ 553const char*hi = snapshot->eof; 554 555while(lo != hi) { 556const char*mid, *rec; 557int cmp; 558 559 mid = lo + (hi - lo) /2; 560 rec =find_start_of_record(lo, mid); 561 cmp =cmp_record_to_refname(rec, refname); 562if(cmp <0) { 563 lo =find_end_of_record(mid, hi); 564}else if(cmp >0) { 565 hi = rec; 566}else{ 567return rec; 568} 569} 570 571if(mustexist) 572return NULL; 573else 574return lo; 575} 576 577/* 578 * Create a newly-allocated `snapshot` of the `packed-refs` file in 579 * its current state and return it. The return value will already have 580 * its reference count incremented. 581 * 582 * A comment line of the form "# pack-refs with: " may contain zero or 583 * more traits. We interpret the traits as follows: 584 * 585 * Neither `peeled` nor `fully-peeled`: 586 * 587 * Probably no references are peeled. But if the file contains a 588 * peeled value for a reference, we will use it. 589 * 590 * `peeled`: 591 * 592 * References under "refs/tags/", if they *can* be peeled, *are* 593 * peeled in this file. References outside of "refs/tags/" are 594 * probably not peeled even if they could have been, but if we find 595 * a peeled value for such a reference we will use it. 596 * 597 * `fully-peeled`: 598 * 599 * All references in the file that can be peeled are peeled. 600 * Inversely (and this is more important), any references in the 601 * file for which no peeled value is recorded is not peelable. This 602 * trait should typically be written alongside "peeled" for 603 * compatibility with older clients, but we do not require it 604 * (i.e., "peeled" is a no-op if "fully-peeled" is set). 
 *
 * `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		char *tmp, *p, *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		/*
		 * `tmp` is a NUL-terminated copy of the header line;
		 * `traits` (NODUP) points into it, so `tmp` must stay
		 * alive until the traits have been consumed.
		 */
		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);
		free(tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}

/*
 * Check that `refs->snapshot` (if present) still reflects the
 * contents of the `packed-refs` file. If not, clear the snapshot.
 */
static void validate_snapshot(struct packed_ref_store *refs)
{
	if (refs->snapshot &&
	    !stat_validity_check(&refs->snapshot->validity, refs->path))
		clear_snapshot(refs);
}

/*
 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
708 */ 709static struct snapshot *get_snapshot(struct packed_ref_store *refs) 710{ 711if(!is_lock_file_locked(&refs->lock)) 712validate_snapshot(refs); 713 714if(!refs->snapshot) 715 refs->snapshot =create_snapshot(refs); 716 717return refs->snapshot; 718} 719 720static intpacked_read_raw_ref(struct ref_store *ref_store, 721const char*refname,unsigned char*sha1, 722struct strbuf *referent,unsigned int*type) 723{ 724struct packed_ref_store *refs = 725packed_downcast(ref_store, REF_STORE_READ,"read_raw_ref"); 726struct snapshot *snapshot =get_snapshot(refs); 727const char*rec; 728 729*type =0; 730 731 rec =find_reference_location(snapshot, refname,1); 732 733if(!rec) { 734/* refname is not a packed reference. */ 735 errno = ENOENT; 736return-1; 737} 738 739if(get_sha1_hex(rec, sha1)) 740die_invalid_line(refs->path, rec, snapshot->eof - rec); 741 742*type = REF_ISPACKED; 743return0; 744} 745 746/* 747 * This value is set in `base.flags` if the peeled value of the 748 * current reference is known. In that case, `peeled` contains the 749 * correct peeled value for the reference, which might be `null_sha1` 750 * if the reference is not a tag or if it is broken. 751 */ 752#define REF_KNOWS_PEELED 0x40 753 754/* 755 * An iterator over a snapshot of a `packed-refs` file. 756 */ 757struct packed_ref_iterator { 758struct ref_iterator base; 759 760struct snapshot *snapshot; 761 762/* The current position in the snapshot's buffer: */ 763const char*pos; 764 765/* The end of the part of the buffer that will be iterated over: */ 766const char*eof; 767 768/* Scratch space for current values: */ 769struct object_id oid, peeled; 770struct strbuf refname_buf; 771 772unsigned int flags; 773}; 774 775/* 776 * Move the iterator to the next record in the snapshot, without 777 * respect for whether the record is actually required by the current 778 * iteration. Adjust the fields in `iter` and return `ITER_OK` or 779 * `ITER_DONE`. 
This function does not free the iterator in the case 780 * of `ITER_DONE`. 781 */ 782static intnext_record(struct packed_ref_iterator *iter) 783{ 784const char*p = iter->pos, *eol; 785 786strbuf_reset(&iter->refname_buf); 787 788if(iter->pos == iter->eof) 789return ITER_DONE; 790 791 iter->base.flags = REF_ISPACKED; 792 793if(iter->eof - p < GIT_SHA1_HEXSZ +2|| 794parse_oid_hex(p, &iter->oid, &p) || 795!isspace(*p++)) 796die_invalid_line(iter->snapshot->refs->path, 797 iter->pos, iter->eof - iter->pos); 798 799 eol =memchr(p,'\n', iter->eof - p); 800if(!eol) 801die_unterminated_line(iter->snapshot->refs->path, 802 iter->pos, iter->eof - iter->pos); 803 804strbuf_add(&iter->refname_buf, p, eol - p); 805 iter->base.refname = iter->refname_buf.buf; 806 807if(check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) { 808if(!refname_is_safe(iter->base.refname)) 809die("packed refname is dangerous:%s", 810 iter->base.refname); 811oidclr(&iter->oid); 812 iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN; 813} 814if(iter->snapshot->peeled == PEELED_FULLY || 815(iter->snapshot->peeled == PEELED_TAGS && 816starts_with(iter->base.refname,"refs/tags/"))) 817 iter->base.flags |= REF_KNOWS_PEELED; 818 819 iter->pos = eol +1; 820 821if(iter->pos < iter->eof && *iter->pos =='^') { 822 p = iter->pos +1; 823if(iter->eof - p < GIT_SHA1_HEXSZ +1|| 824parse_oid_hex(p, &iter->peeled, &p) || 825*p++ !='\n') 826die_invalid_line(iter->snapshot->refs->path, 827 iter->pos, iter->eof - iter->pos); 828 iter->pos = p; 829 830/* 831 * Regardless of what the file header said, we 832 * definitely know the value of *this* reference. 
But 833 * we suppress it if the reference is broken: 834 */ 835if((iter->base.flags & REF_ISBROKEN)) { 836oidclr(&iter->peeled); 837 iter->base.flags &= ~REF_KNOWS_PEELED; 838}else{ 839 iter->base.flags |= REF_KNOWS_PEELED; 840} 841}else{ 842oidclr(&iter->peeled); 843} 844 845return ITER_OK; 846} 847 848static intpacked_ref_iterator_advance(struct ref_iterator *ref_iterator) 849{ 850struct packed_ref_iterator *iter = 851(struct packed_ref_iterator *)ref_iterator; 852int ok; 853 854while((ok =next_record(iter)) == ITER_OK) { 855if(iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY && 856ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE) 857continue; 858 859if(!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) && 860!ref_resolves_to_object(iter->base.refname, &iter->oid, 861 iter->flags)) 862continue; 863 864return ITER_OK; 865} 866 867if(ref_iterator_abort(ref_iterator) != ITER_DONE) 868 ok = ITER_ERROR; 869 870return ok; 871} 872 873static intpacked_ref_iterator_peel(struct ref_iterator *ref_iterator, 874struct object_id *peeled) 875{ 876struct packed_ref_iterator *iter = 877(struct packed_ref_iterator *)ref_iterator; 878 879if((iter->base.flags & REF_KNOWS_PEELED)) { 880oidcpy(peeled, &iter->peeled); 881returnis_null_oid(&iter->peeled) ? 
-1:0; 882}else if((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) { 883return-1; 884}else{ 885return!!peel_object(iter->oid.hash, peeled->hash); 886} 887} 888 889static intpacked_ref_iterator_abort(struct ref_iterator *ref_iterator) 890{ 891struct packed_ref_iterator *iter = 892(struct packed_ref_iterator *)ref_iterator; 893int ok = ITER_DONE; 894 895strbuf_release(&iter->refname_buf); 896release_snapshot(iter->snapshot); 897base_ref_iterator_free(ref_iterator); 898return ok; 899} 900 901static struct ref_iterator_vtable packed_ref_iterator_vtable = { 902 packed_ref_iterator_advance, 903 packed_ref_iterator_peel, 904 packed_ref_iterator_abort 905}; 906 907static struct ref_iterator *packed_ref_iterator_begin( 908struct ref_store *ref_store, 909const char*prefix,unsigned int flags) 910{ 911struct packed_ref_store *refs; 912struct snapshot *snapshot; 913const char*start; 914struct packed_ref_iterator *iter; 915struct ref_iterator *ref_iterator; 916unsigned int required_flags = REF_STORE_READ; 917 918if(!(flags & DO_FOR_EACH_INCLUDE_BROKEN)) 919 required_flags |= REF_STORE_ODB; 920 refs =packed_downcast(ref_store, required_flags,"ref_iterator_begin"); 921 922/* 923 * Note that `get_snapshot()` internally checks whether the 924 * snapshot is up to date with what is on disk, and re-reads 925 * it if not. 
926 */ 927 snapshot =get_snapshot(refs); 928 929if(prefix && *prefix) 930 start =find_reference_location(snapshot, prefix,0); 931else 932 start = snapshot->start; 933 934if(start == snapshot->eof) 935returnempty_ref_iterator_begin(); 936 937 iter =xcalloc(1,sizeof(*iter)); 938 ref_iterator = &iter->base; 939base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable,1); 940 941 iter->snapshot = snapshot; 942acquire_snapshot(snapshot); 943 944 iter->pos = start; 945 iter->eof = snapshot->eof; 946strbuf_init(&iter->refname_buf,0); 947 948 iter->base.oid = &iter->oid; 949 950 iter->flags = flags; 951 952if(prefix && *prefix) 953/* Stop iteration after we've gone *past* prefix: */ 954 ref_iterator =prefix_ref_iterator_begin(ref_iterator, prefix,0); 955 956return ref_iterator; 957} 958 959/* 960 * Write an entry to the packed-refs file for the specified refname. 961 * If peeled is non-NULL, write it as the entry's peeled value. On 962 * error, return a nonzero value and leave errno set at the value left 963 * by the failing call to `fprintf()`. 964 */ 965static intwrite_packed_entry(FILE*fh,const char*refname, 966const unsigned char*sha1, 967const unsigned char*peeled) 968{ 969if(fprintf(fh,"%s %s\n",sha1_to_hex(sha1), refname) <0|| 970(peeled &&fprintf(fh,"^%s\n",sha1_to_hex(peeled)) <0)) 971return-1; 972 973return0; 974} 975 976intpacked_refs_lock(struct ref_store *ref_store,int flags,struct strbuf *err) 977{ 978struct packed_ref_store *refs = 979packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN, 980"packed_refs_lock"); 981static int timeout_configured =0; 982static int timeout_value =1000; 983 984if(!timeout_configured) { 985git_config_get_int("core.packedrefstimeout", &timeout_value); 986 timeout_configured =1; 987} 988 989/* 990 * Note that we close the lockfile immediately because we 991 * don't write new content to it, but rather to a separate 992 * tempfile. 
993 */ 994if(hold_lock_file_for_update_timeout( 995&refs->lock, 996 refs->path, 997 flags, timeout_value) <0) { 998unable_to_lock_message(refs->path, errno, err); 999return-1;1000}10011002if(close_lock_file_gently(&refs->lock)) {1003strbuf_addf(err,"unable to close%s:%s", refs->path,strerror(errno));1004rollback_lock_file(&refs->lock);1005return-1;1006}10071008/*1009 * Now that we hold the `packed-refs` lock, make sure that our1010 * snapshot matches the current version of the file. Normally1011 * `get_snapshot()` does that for us, but that function1012 * assumes that when the file is locked, any existing snapshot1013 * is still valid. We've just locked the file, but it might1014 * have changed the moment *before* we locked it.1015 */1016validate_snapshot(refs);10171018/*1019 * Now make sure that the packed-refs file as it exists in the1020 * locked state is loaded into the snapshot:1021 */1022get_snapshot(refs);1023return0;1024}10251026voidpacked_refs_unlock(struct ref_store *ref_store)1027{1028struct packed_ref_store *refs =packed_downcast(1029 ref_store,1030 REF_STORE_READ | REF_STORE_WRITE,1031"packed_refs_unlock");10321033if(!is_lock_file_locked(&refs->lock))1034die("BUG: packed_refs_unlock() called when not locked");1035rollback_lock_file(&refs->lock);1036}10371038intpacked_refs_is_locked(struct ref_store *ref_store)1039{1040struct packed_ref_store *refs =packed_downcast(1041 ref_store,1042 REF_STORE_READ | REF_STORE_WRITE,1043"packed_refs_is_locked");10441045returnis_lock_file_locked(&refs->lock);1046}10471048/*1049 * The packed-refs header line that we write out. Perhaps other traits1050 * will be added later.1051 *1052 * Note that earlier versions of Git used to parse these traits by1053 * looking for " trait " in the line. 
For this reason, the space after1054 * the colon and the trailing space are required.1055 */1056static const char PACKED_REFS_HEADER[] =1057"# pack-refs with: peeled fully-peeled sorted\n";10581059static intpacked_init_db(struct ref_store *ref_store,struct strbuf *err)1060{1061/* Nothing to do. */1062return0;1063}10641065/*1066 * Write the packed refs from the current snapshot to the packed-refs1067 * tempfile, incorporating any changes from `updates`. `updates` must1068 * be a sorted string list whose keys are the refnames and whose util1069 * values are `struct ref_update *`. On error, rollback the tempfile,1070 * write an error message to `err`, and return a nonzero value.1071 *1072 * The packfile must be locked before calling this function and will1073 * remain locked when it is done.1074 */1075static intwrite_with_updates(struct packed_ref_store *refs,1076struct string_list *updates,1077struct strbuf *err)1078{1079struct ref_iterator *iter = NULL;1080size_t i;1081int ok;1082FILE*out;1083struct strbuf sb = STRBUF_INIT;1084char*packed_refs_path;10851086if(!is_lock_file_locked(&refs->lock))1087die("BUG: write_with_updates() called while unlocked");10881089/*1090 * If packed-refs is a symlink, we want to overwrite the1091 * symlinked-to file, not the symlink itself. 
Also, put the1092 * staging file next to it:1093 */1094 packed_refs_path =get_locked_file_path(&refs->lock);1095strbuf_addf(&sb,"%s.new", packed_refs_path);1096free(packed_refs_path);1097 refs->tempfile =create_tempfile(sb.buf);1098if(!refs->tempfile) {1099strbuf_addf(err,"unable to create file%s:%s",1100 sb.buf,strerror(errno));1101strbuf_release(&sb);1102return-1;1103}1104strbuf_release(&sb);11051106 out =fdopen_tempfile(refs->tempfile,"w");1107if(!out) {1108strbuf_addf(err,"unable to fdopen packed-refs tempfile:%s",1109strerror(errno));1110goto error;1111}11121113if(fprintf(out,"%s", PACKED_REFS_HEADER) <0)1114goto write_error;11151116/*1117 * We iterate in parallel through the current list of refs and1118 * the list of updates, processing an entry from at least one1119 * of the lists each time through the loop. When the current1120 * list of refs is exhausted, set iter to NULL. When the list1121 * of updates is exhausted, leave i set to updates->nr.1122 */1123 iter =packed_ref_iterator_begin(&refs->base,"",1124 DO_FOR_EACH_INCLUDE_BROKEN);1125if((ok =ref_iterator_advance(iter)) != ITER_OK)1126 iter = NULL;11271128 i =0;11291130while(iter || i < updates->nr) {1131struct ref_update *update = NULL;1132int cmp;11331134if(i >= updates->nr) {1135 cmp = -1;1136}else{1137 update = updates->items[i].util;11381139if(!iter)1140 cmp = +1;1141else1142 cmp =strcmp(iter->refname, update->refname);1143}11441145if(!cmp) {1146/*1147 * There is both an old value and an update1148 * for this reference. 
Check the old value if1149 * necessary:1150 */1151if((update->flags & REF_HAVE_OLD)) {1152if(is_null_oid(&update->old_oid)) {1153strbuf_addf(err,"cannot update ref '%s': "1154"reference already exists",1155 update->refname);1156goto error;1157}else if(oidcmp(&update->old_oid, iter->oid)) {1158strbuf_addf(err,"cannot update ref '%s': "1159"is at%sbut expected%s",1160 update->refname,1161oid_to_hex(iter->oid),1162oid_to_hex(&update->old_oid));1163goto error;1164}1165}11661167/* Now figure out what to use for the new value: */1168if((update->flags & REF_HAVE_NEW)) {1169/*1170 * The update takes precedence. Skip1171 * the iterator over the unneeded1172 * value.1173 */1174if((ok =ref_iterator_advance(iter)) != ITER_OK)1175 iter = NULL;1176 cmp = +1;1177}else{1178/*1179 * The update doesn't actually want to1180 * change anything. We're done with it.1181 */1182 i++;1183 cmp = -1;1184}1185}else if(cmp >0) {1186/*1187 * There is no old value but there is an1188 * update for this reference. Make sure that1189 * the update didn't expect an existing value:1190 */1191if((update->flags & REF_HAVE_OLD) &&1192!is_null_oid(&update->old_oid)) {1193strbuf_addf(err,"cannot update ref '%s': "1194"reference is missing but expected%s",1195 update->refname,1196oid_to_hex(&update->old_oid));1197goto error;1198}1199}12001201if(cmp <0) {1202/* Pass the old reference through. */12031204struct object_id peeled;1205int peel_error =ref_iterator_peel(iter, &peeled);12061207if(write_packed_entry(out, iter->refname,1208 iter->oid->hash,1209 peel_error ? NULL : peeled.hash))1210goto write_error;12111212if((ok =ref_iterator_advance(iter)) != ITER_OK)1213 iter = NULL;1214}else if(is_null_oid(&update->new_oid)) {1215/*1216 * The update wants to delete the reference,1217 * and the reference either didn't exist or we1218 * have already skipped it. 
So we're done with1219 * the update (and don't have to write1220 * anything).1221 */1222 i++;1223}else{1224struct object_id peeled;1225int peel_error =peel_object(update->new_oid.hash,1226 peeled.hash);12271228if(write_packed_entry(out, update->refname,1229 update->new_oid.hash,1230 peel_error ? NULL : peeled.hash))1231goto write_error;12321233 i++;1234}1235}12361237if(ok != ITER_DONE) {1238strbuf_addstr(err,"unable to write packed-refs file: "1239"error iterating over old contents");1240goto error;1241}12421243if(close_tempfile_gently(refs->tempfile)) {1244strbuf_addf(err,"error closing file%s:%s",1245get_tempfile_path(refs->tempfile),1246strerror(errno));1247strbuf_release(&sb);1248delete_tempfile(&refs->tempfile);1249return-1;1250}12511252return0;12531254write_error:1255strbuf_addf(err,"error writing to%s:%s",1256get_tempfile_path(refs->tempfile),strerror(errno));12571258error:1259if(iter)1260ref_iterator_abort(iter);12611262delete_tempfile(&refs->tempfile);1263return-1;1264}12651266intis_packed_transaction_needed(struct ref_store *ref_store,1267struct ref_transaction *transaction)1268{1269struct packed_ref_store *refs =packed_downcast(1270 ref_store,1271 REF_STORE_READ,1272"is_packed_transaction_needed");1273struct strbuf referent = STRBUF_INIT;1274size_t i;1275int ret;12761277if(!is_lock_file_locked(&refs->lock))1278BUG("is_packed_transaction_needed() called while unlocked");12791280/*1281 * We're only going to bother returning false for the common,1282 * trivial case that references are only being deleted, their1283 * old values are not being checked, and the old `packed-refs`1284 * file doesn't contain any of those reference(s). This gives1285 * false positives for some other cases that could1286 * theoretically be optimized away:1287 *1288 * 1. It could be that the old value is being verified without1289 * setting a new value. In this case, we could verify the1290 * old value here and skip the update if it agrees. 
If it1291 * disagrees, we could either let the update go through1292 * (the actual commit would re-detect and report the1293 * problem), or come up with a way of reporting such an1294 * error to *our* caller.1295 *1296 * 2. It could be that a new value is being set, but that it1297 * is identical to the current packed value of the1298 * reference.1299 *1300 * Neither of these cases will come up in the current code,1301 * because the only caller of this function passes to it a1302 * transaction that only includes `delete` updates with no1303 * `old_id`. Even if that ever changes, false positives only1304 * cause an optimization to be missed; they do not affect1305 * correctness.1306 */13071308/*1309 * Start with the cheap checks that don't require old1310 * reference values to be read:1311 */1312for(i =0; i < transaction->nr; i++) {1313struct ref_update *update = transaction->updates[i];13141315if(update->flags & REF_HAVE_OLD)1316/* Have to check the old value -> needed. */1317return1;13181319if((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))1320/* Have to set a new value -> needed. */1321return1;1322}13231324/*1325 * The transaction isn't checking any old values nor is it1326 * setting any nonzero new values, so it still might be able1327 * to be skipped. 
Now do the more expensive check: the update1328 * is needed if any of the updates is a delete, and the old1329 * `packed-refs` file contains a value for that reference.1330 */1331 ret =0;1332for(i =0; i < transaction->nr; i++) {1333struct ref_update *update = transaction->updates[i];1334unsigned int type;1335struct object_id oid;13361337if(!(update->flags & REF_HAVE_NEW))1338/*1339 * This reference isn't being deleted -> not1340 * needed.1341 */1342continue;13431344if(!refs_read_raw_ref(ref_store, update->refname,1345 oid.hash, &referent, &type) ||1346 errno != ENOENT) {1347/*1348 * We have to actually delete that reference1349 * -> this transaction is needed.1350 */1351 ret =1;1352break;1353}1354}13551356strbuf_release(&referent);1357return ret;1358}13591360struct packed_transaction_backend_data {1361/* True iff the transaction owns the packed-refs lock. */1362int own_lock;13631364struct string_list updates;1365};13661367static voidpacked_transaction_cleanup(struct packed_ref_store *refs,1368struct ref_transaction *transaction)1369{1370struct packed_transaction_backend_data *data = transaction->backend_data;13711372if(data) {1373string_list_clear(&data->updates,0);13741375if(is_tempfile_active(refs->tempfile))1376delete_tempfile(&refs->tempfile);13771378if(data->own_lock &&is_lock_file_locked(&refs->lock)) {1379packed_refs_unlock(&refs->base);1380 data->own_lock =0;1381}13821383free(data);1384 transaction->backend_data = NULL;1385}13861387 transaction->state = REF_TRANSACTION_CLOSED;1388}13891390static intpacked_transaction_prepare(struct ref_store *ref_store,1391struct ref_transaction *transaction,1392struct strbuf *err)1393{1394struct packed_ref_store *refs =packed_downcast(1395 ref_store,1396 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1397"ref_transaction_prepare");1398struct packed_transaction_backend_data *data;1399size_t i;1400int ret = TRANSACTION_GENERIC_ERROR;14011402/*1403 * Note that we *don't* skip transactions with zero updates,1404 * because 
such a transaction might be executed for the side1405 * effect of ensuring that all of the references are peeled or1406 * ensuring that the `packed-refs` file is sorted. If the1407 * caller wants to optimize away empty transactions, it should1408 * do so itself.1409 */14101411 data =xcalloc(1,sizeof(*data));1412string_list_init(&data->updates,0);14131414 transaction->backend_data = data;14151416/*1417 * Stick the updates in a string list by refname so that we1418 * can sort them:1419 */1420for(i =0; i < transaction->nr; i++) {1421struct ref_update *update = transaction->updates[i];1422struct string_list_item *item =1423string_list_append(&data->updates, update->refname);14241425/* Store a pointer to update in item->util: */1426 item->util = update;1427}1428string_list_sort(&data->updates);14291430if(ref_update_reject_duplicates(&data->updates, err))1431goto failure;14321433if(!is_lock_file_locked(&refs->lock)) {1434if(packed_refs_lock(ref_store,0, err))1435goto failure;1436 data->own_lock =1;1437}14381439if(write_with_updates(refs, &data->updates, err))1440goto failure;14411442 transaction->state = REF_TRANSACTION_PREPARED;1443return0;14441445failure:1446packed_transaction_cleanup(refs, transaction);1447return ret;1448}14491450static intpacked_transaction_abort(struct ref_store *ref_store,1451struct ref_transaction *transaction,1452struct strbuf *err)1453{1454struct packed_ref_store *refs =packed_downcast(1455 ref_store,1456 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1457"ref_transaction_abort");14581459packed_transaction_cleanup(refs, transaction);1460return0;1461}14621463static intpacked_transaction_finish(struct ref_store *ref_store,1464struct ref_transaction *transaction,1465struct strbuf *err)1466{1467struct packed_ref_store *refs =packed_downcast(1468 ref_store,1469 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1470"ref_transaction_finish");1471int ret = TRANSACTION_GENERIC_ERROR;1472char*packed_refs_path;14731474clear_snapshot(refs);14751476 
packed_refs_path =get_locked_file_path(&refs->lock);1477if(rename_tempfile(&refs->tempfile, packed_refs_path)) {1478strbuf_addf(err,"error replacing%s:%s",1479 refs->path,strerror(errno));1480goto cleanup;1481}14821483 ret =0;14841485cleanup:1486free(packed_refs_path);1487packed_transaction_cleanup(refs, transaction);1488return ret;1489}14901491static intpacked_initial_transaction_commit(struct ref_store *ref_store,1492struct ref_transaction *transaction,1493struct strbuf *err)1494{1495returnref_transaction_commit(transaction, err);1496}14971498static intpacked_delete_refs(struct ref_store *ref_store,const char*msg,1499struct string_list *refnames,unsigned int flags)1500{1501struct packed_ref_store *refs =1502packed_downcast(ref_store, REF_STORE_WRITE,"delete_refs");1503struct strbuf err = STRBUF_INIT;1504struct ref_transaction *transaction;1505struct string_list_item *item;1506int ret;15071508(void)refs;/* We need the check above, but don't use the variable */15091510if(!refnames->nr)1511return0;15121513/*1514 * Since we don't check the references' old_oids, the1515 * individual updates can't fail, so we can pack all of the1516 * updates into a single transaction.1517 */15181519 transaction =ref_store_transaction_begin(ref_store, &err);1520if(!transaction)1521return-1;15221523for_each_string_list_item(item, refnames) {1524if(ref_transaction_delete(transaction, item->string, NULL,1525 flags, msg, &err)) {1526warning(_("could not delete reference%s:%s"),1527 item->string, err.buf);1528strbuf_reset(&err);1529}1530}15311532 ret =ref_transaction_commit(transaction, &err);15331534if(ret) {1535if(refnames->nr ==1)1536error(_("could not delete reference%s:%s"),1537 refnames->items[0].string, err.buf);1538else1539error(_("could not delete references:%s"), err.buf);1540}15411542ref_transaction_free(transaction);1543strbuf_release(&err);1544return ret;1545}15461547static intpacked_pack_refs(struct ref_store *ref_store,unsigned int flags)1548{1549/*1550 * Packed refs are 
already packed. It might be that loose refs1551 * are packed *into* a packed refs store, but that is done by1552 * updating the packed references via a transaction.1553 */1554return0;1555}15561557static intpacked_create_symref(struct ref_store *ref_store,1558const char*refname,const char*target,1559const char*logmsg)1560{1561die("BUG: packed reference store does not support symrefs");1562}15631564static intpacked_rename_ref(struct ref_store *ref_store,1565const char*oldrefname,const char*newrefname,1566const char*logmsg)1567{1568die("BUG: packed reference store does not support renaming references");1569}15701571static intpacked_copy_ref(struct ref_store *ref_store,1572const char*oldrefname,const char*newrefname,1573const char*logmsg)1574{1575die("BUG: packed reference store does not support copying references");1576}15771578static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)1579{1580returnempty_ref_iterator_begin();1581}15821583static intpacked_for_each_reflog_ent(struct ref_store *ref_store,1584const char*refname,1585 each_reflog_ent_fn fn,void*cb_data)1586{1587return0;1588}15891590static intpacked_for_each_reflog_ent_reverse(struct ref_store *ref_store,1591const char*refname,1592 each_reflog_ent_fn fn,1593void*cb_data)1594{1595return0;1596}15971598static intpacked_reflog_exists(struct ref_store *ref_store,1599const char*refname)1600{1601return0;1602}16031604static intpacked_create_reflog(struct ref_store *ref_store,1605const char*refname,int force_create,1606struct strbuf *err)1607{1608die("BUG: packed reference store does not support reflogs");1609}16101611static intpacked_delete_reflog(struct ref_store *ref_store,1612const char*refname)1613{1614return0;1615}16161617static intpacked_reflog_expire(struct ref_store *ref_store,1618const char*refname,const unsigned char*sha1,1619unsigned int flags,1620 reflog_expiry_prepare_fn prepare_fn,1621 reflog_expiry_should_prune_fn should_prune_fn,1622 reflog_expiry_cleanup_fn 
cleanup_fn,1623void*policy_cb_data)1624{1625return0;1626}16271628struct ref_storage_be refs_be_packed = {1629 NULL,1630"packed",1631 packed_ref_store_create,1632 packed_init_db,1633 packed_transaction_prepare,1634 packed_transaction_finish,1635 packed_transaction_abort,1636 packed_initial_transaction_commit,16371638 packed_pack_refs,1639 packed_create_symref,1640 packed_delete_refs,1641 packed_rename_ref,1642 packed_copy_ref,16431644 packed_ref_iterator_begin,1645 packed_read_raw_ref,16461647 packed_reflog_iterator_begin,1648 packed_for_each_reflog_ent,1649 packed_for_each_reflog_ent_reverse,1650 packed_reflog_exists,1651 packed_create_reflog,1652 packed_delete_reflog,1653 packed_reflog_expire1654};