#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
#include "../chdir-notify.h"

/*
 * How (if at all) mmap() may be used when reading the `packed-refs`
 * file. The strategy in effect is selected at compile time from the
 * platform macros below.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

/* Forward declaration; the full definition appears below. */
struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file:
	 *
	 * - buf -- a pointer to the start of the memory
	 * - start -- a pointer to the first byte of actual references
	 *   (i.e., after the header line, if one is present)
	 * - eof -- a pointer just past the end of the reference
	 *   contents
	 *
	 * If the `packed-refs` file was already sorted, `buf` points
	 * at the mmapped contents of the file. If not, it points at
	 * heap-allocated memory containing the contents, sorted. If
	 * there were no contents (e.g., because the file didn't
	 * exist), `buf`, `start`, and `eof` are all NULL.
	 */
	char *buf, *start, *eof;

	/*
	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/*
	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	 */
	struct stat_validity validity;
};

/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction.
In that case, it remains locked even after the 124 * transaction is done and the new `packed-refs` file is activated. 125 */ 126struct packed_ref_store { 127struct ref_store base; 128 129unsigned int store_flags; 130 131/* The path of the "packed-refs" file: */ 132char*path; 133 134/* 135 * A snapshot of the values read from the `packed-refs` file, 136 * if it might still be current; otherwise, NULL. 137 */ 138struct snapshot *snapshot; 139 140/* 141 * Lock used for the "packed-refs" file. Note that this (and 142 * thus the enclosing `packed_ref_store`) must not be freed. 143 */ 144struct lock_file lock; 145 146/* 147 * Temporary file used when rewriting new contents to the 148 * "packed-refs" file. Note that this (and thus the enclosing 149 * `packed_ref_store`) must not be freed. 150 */ 151struct tempfile *tempfile; 152}; 153 154/* 155 * Increment the reference count of `*snapshot`. 156 */ 157static voidacquire_snapshot(struct snapshot *snapshot) 158{ 159 snapshot->referrers++; 160} 161 162/* 163 * If the buffer in `snapshot` is active, then either munmap the 164 * memory and close the file, or free the memory. Then set the buffer 165 * pointers to NULL. 166 */ 167static voidclear_snapshot_buffer(struct snapshot *snapshot) 168{ 169if(snapshot->mmapped) { 170if(munmap(snapshot->buf, snapshot->eof - snapshot->buf)) 171die_errno("error ummapping packed-refs file%s", 172 snapshot->refs->path); 173 snapshot->mmapped =0; 174}else{ 175free(snapshot->buf); 176} 177 snapshot->buf = snapshot->start = snapshot->eof = NULL; 178} 179 180/* 181 * Decrease the reference count of `*snapshot`. If it goes to zero, 182 * free `*snapshot` and return true; otherwise return false. 
183 */ 184static intrelease_snapshot(struct snapshot *snapshot) 185{ 186if(!--snapshot->referrers) { 187stat_validity_clear(&snapshot->validity); 188clear_snapshot_buffer(snapshot); 189free(snapshot); 190return1; 191}else{ 192return0; 193} 194} 195 196struct ref_store *packed_ref_store_create(const char*path, 197unsigned int store_flags) 198{ 199struct packed_ref_store *refs =xcalloc(1,sizeof(*refs)); 200struct ref_store *ref_store = (struct ref_store *)refs; 201 202base_ref_store_init(ref_store, &refs_be_packed); 203 refs->store_flags = store_flags; 204 205 refs->path =xstrdup(path); 206chdir_notify_reparent("packed-refs", &refs->path); 207 208return ref_store; 209} 210 211/* 212 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is 213 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't 214 * support at least the flags specified in `required_flags`. `caller` 215 * is used in any necessary error messages. 216 */ 217static struct packed_ref_store *packed_downcast(struct ref_store *ref_store, 218unsigned int required_flags, 219const char*caller) 220{ 221struct packed_ref_store *refs; 222 223if(ref_store->be != &refs_be_packed) 224BUG("ref_store is type\"%s\"not\"packed\"in%s", 225 ref_store->be->name, caller); 226 227 refs = (struct packed_ref_store *)ref_store; 228 229if((refs->store_flags & required_flags) != required_flags) 230BUG("unallowed operation (%s), requires%x, has%x\n", 231 caller, required_flags, refs->store_flags); 232 233return refs; 234} 235 236static voidclear_snapshot(struct packed_ref_store *refs) 237{ 238if(refs->snapshot) { 239struct snapshot *snapshot = refs->snapshot; 240 241 refs->snapshot = NULL; 242release_snapshot(snapshot); 243} 244} 245 246static NORETURN voiddie_unterminated_line(const char*path, 247const char*p,size_t len) 248{ 249if(len <80) 250die("unterminated line in%s: %.*s", path, (int)len, p); 251else 252die("unterminated line in%s: %.75s...", path, p); 253} 254 255static NORETURN 
voiddie_invalid_line(const char*path, 256const char*p,size_t len) 257{ 258const char*eol =memchr(p,'\n', len); 259 260if(!eol) 261die_unterminated_line(path, p, len); 262else if(eol - p <80) 263die("unexpected line in%s: %.*s", path, (int)(eol - p), p); 264else 265die("unexpected line in%s: %.75s...", path, p); 266 267} 268 269struct snapshot_record { 270const char*start; 271size_t len; 272}; 273 274static intcmp_packed_ref_records(const void*v1,const void*v2) 275{ 276const struct snapshot_record *e1 = v1, *e2 = v2; 277const char*r1 = e1->start + GIT_SHA1_HEXSZ +1; 278const char*r2 = e2->start + GIT_SHA1_HEXSZ +1; 279 280while(1) { 281if(*r1 =='\n') 282return*r2 =='\n'?0: -1; 283if(*r1 != *r2) { 284if(*r2 =='\n') 285return1; 286else 287return(unsigned char)*r1 < (unsigned char)*r2 ? -1: +1; 288} 289 r1++; 290 r2++; 291} 292} 293 294/* 295 * Compare a snapshot record at `rec` to the specified NUL-terminated 296 * refname. 297 */ 298static intcmp_record_to_refname(const char*rec,const char*refname) 299{ 300const char*r1 = rec + GIT_SHA1_HEXSZ +1; 301const char*r2 = refname; 302 303while(1) { 304if(*r1 =='\n') 305return*r2 ? -1:0; 306if(!*r2) 307return1; 308if(*r1 != *r2) 309return(unsigned char)*r1 < (unsigned char)*r2 ? -1: +1; 310 r1++; 311 r2++; 312} 313} 314 315/* 316 * `snapshot->buf` is not known to be sorted. Check whether it is, and 317 * if not, sort it into new memory and munmap/free the old storage. 
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = snapshot->start;
	eof = snapshot->eof;

	/* An empty snapshot is trivially sorted. */
	if (pos == eof)
		return;

	len = eof - pos;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		/* Detect out-of-order records as we scan: */
		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	/* Already sorted; keep the existing (possibly mmapped) buffer. */
	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;

cleanup:
	free(records);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	/* A line starting with '^' is a peel line, part of the preceding
	 * record, so keep backing up past those. */
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation).
But we do want to be 441 * sure that we never read past the end of the buffer in memory and 442 * perform an illegal memory access. 443 * 444 * Guarantee that minimum level of safety by verifying that the last 445 * record in the file is LF-terminated, and that it has at least 446 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of 447 * these checks fails. 448 */ 449static voidverify_buffer_safe(struct snapshot *snapshot) 450{ 451const char*start = snapshot->start; 452const char*eof = snapshot->eof; 453const char*last_line; 454 455if(start == eof) 456return; 457 458 last_line =find_start_of_record(start, eof -1); 459if(*(eof -1) !='\n'|| eof - last_line < GIT_SHA1_HEXSZ +2) 460die_invalid_line(snapshot->refs->path, 461 last_line, eof - last_line); 462} 463 464#define SMALL_FILE_SIZE (32*1024) 465 466/* 467 * Depending on `mmap_strategy`, either mmap or read the contents of 468 * the `packed-refs` file into the snapshot. Return 1 if the file 469 * existed and was read, or 0 if the file was absent or empty. Die on 470 * errors. 471 */ 472static intload_contents(struct snapshot *snapshot) 473{ 474int fd; 475struct stat st; 476size_t size; 477 ssize_t bytes_read; 478 479 fd =open(snapshot->refs->path, O_RDONLY); 480if(fd <0) { 481if(errno == ENOENT) { 482/* 483 * This is OK; it just means that no 484 * "packed-refs" file has been written yet, 485 * which is equivalent to it being empty, 486 * which is its state when initialized with 487 * zeros. 
488 */ 489return0; 490}else{ 491die_errno("couldn't read%s", snapshot->refs->path); 492} 493} 494 495stat_validity_update(&snapshot->validity, fd); 496 497if(fstat(fd, &st) <0) 498die_errno("couldn't stat%s", snapshot->refs->path); 499 size =xsize_t(st.st_size); 500 501if(!size) { 502return0; 503}else if(mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) { 504 snapshot->buf =xmalloc(size); 505 bytes_read =read_in_full(fd, snapshot->buf, size); 506if(bytes_read <0|| bytes_read != size) 507die_errno("couldn't read%s", snapshot->refs->path); 508 snapshot->mmapped =0; 509}else{ 510 snapshot->buf =xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd,0); 511 snapshot->mmapped =1; 512} 513close(fd); 514 515 snapshot->start = snapshot->buf; 516 snapshot->eof = snapshot->buf + size; 517 518return1; 519} 520 521/* 522 * Find the place in `snapshot->buf` where the start of the record for 523 * `refname` starts. If `mustexist` is true and the reference doesn't 524 * exist, then return NULL. If `mustexist` is false and the reference 525 * doesn't exist, then return the point where that reference would be 526 * inserted, or `snapshot->eof` (which might be NULL) if it would be 527 * inserted at the end of the file. In the latter mode, `refname` 528 * doesn't have to be a proper reference name; for example, one could 529 * search for "refs/replace/" to find the start of any replace 530 * references. 531 * 532 * The record is sought using a binary search, so `snapshot->buf` must 533 * be sorted. 534 */ 535static const char*find_reference_location(struct snapshot *snapshot, 536const char*refname,int mustexist) 537{ 538/* 539 * This is not *quite* a garden-variety binary search, because 540 * the data we're searching is made up of records, and we 541 * always need to find the beginning of a record to do a 542 * comparison. A "record" here is one line for the reference 543 * itself and zero or one peel lines that start with '^'. 
Our 544 * loop invariant is described in the next two comments. 545 */ 546 547/* 548 * A pointer to the character at the start of a record whose 549 * preceding records all have reference names that come 550 * *before* `refname`. 551 */ 552const char*lo = snapshot->start; 553 554/* 555 * A pointer to a the first character of a record whose 556 * reference name comes *after* `refname`. 557 */ 558const char*hi = snapshot->eof; 559 560while(lo != hi) { 561const char*mid, *rec; 562int cmp; 563 564 mid = lo + (hi - lo) /2; 565 rec =find_start_of_record(lo, mid); 566 cmp =cmp_record_to_refname(rec, refname); 567if(cmp <0) { 568 lo =find_end_of_record(mid, hi); 569}else if(cmp >0) { 570 hi = rec; 571}else{ 572return rec; 573} 574} 575 576if(mustexist) 577return NULL; 578else 579return lo; 580} 581 582/* 583 * Create a newly-allocated `snapshot` of the `packed-refs` file in 584 * its current state and return it. The return value will already have 585 * its reference count incremented. 586 * 587 * A comment line of the form "# pack-refs with: " may contain zero or 588 * more traits. We interpret the traits as follows: 589 * 590 * Neither `peeled` nor `fully-peeled`: 591 * 592 * Probably no references are peeled. But if the file contains a 593 * peeled value for a reference, we will use it. 594 * 595 * `peeled`: 596 * 597 * References under "refs/tags/", if they *can* be peeled, *are* 598 * peeled in this file. References outside of "refs/tags/" are 599 * probably not peeled even if they could have been, but if we find 600 * a peeled value for such a reference we will use it. 601 * 602 * `fully-peeled`: 603 * 604 * All references in the file that can be peeled are peeled. 605 * Inversely (and this is more important), any references in the 606 * file for which no peeled value is recorded is not peelable. 
This
 * trait should typically be written alongside "peeled" for
 * compatibility with older clients, but we do not require it
 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	/* Absent or empty file: return the all-NULL, empty snapshot. */
	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		char *tmp, *p, *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		/* NUL-terminated copy of the header, for string parsing: */
		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);
		free(tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}

/*
 * Check that `refs->snapshot` (if present) still reflects the
 * contents of the `packed-refs` file. If not, clear the snapshot.
 */
static void validate_snapshot(struct packed_ref_store *refs)
{
	if (refs->snapshot &&
	    !stat_validity_check(&refs->snapshot->validity, refs->path))
		clear_snapshot(refs);
}

/*
 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
 */
static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_snapshot(refs);

	if (!refs->snapshot)
		refs->snapshot = create_snapshot(refs);

	return refs->snapshot;
}

/*
 * `read_raw_ref` implementation for the packed backend: look up
 * `refname` in the current snapshot and fill in `oid` and `type`.
 * Returns 0 on success, or -1 with errno set to ENOENT if the
 * reference is not packed.
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, struct object_id *oid,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct snapshot *snapshot = get_snapshot(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(snapshot, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	if (get_oid_hex(rec, oid))
		die_invalid_line(refs->path, rec, snapshot->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}

/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_oid`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a snapshot of a `packed-refs` file.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The snapshot being iterated; the iterator holds a reference. */
	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */
	const char *pos;

	/* The end of the part of the buffer that will be iterated over: */
	const char *eof;

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

	/* `DO_FOR_EACH_*` flags controlling which refs are reported. */
	unsigned int flags;
};

/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	/* Parse "<oid> <refname>\n"; die on any malformed record. */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* A following "^<oid>\n" line holds the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}

/*
 * Advance to the next reference that passes the iterator's
 * `DO_FOR_EACH_*` filters; aborts the iterator on ITER_DONE from
 * `next_record()`.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = next_record(iter)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, &iter->oid,
					    iter->flags))
			continue;

		return ITER_OK;
	}

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

/*
 * Report the peeled value of the current reference: use the recorded
 * value if known, fail for broken/symbolic refs, otherwise peel on
 * demand.
 */
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(&iter->oid, peeled);
	}
}

/* Release the iterator's scratch buffer, snapshot reference, and itself. */
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	strbuf_release(&iter->refname_buf);
	release_snapshot(iter->snapshot);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

/*
 * Begin iterating over the packed refs whose names start with
 * `prefix` (or all refs if `prefix` is empty/NULL), honoring the
 * `DO_FOR_EACH_*` bits in `flags`.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	if (prefix && *prefix)
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->start;

	/* Nothing at or after `start`: nothing to iterate over. */
	if (start == snapshot->eof)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/* The iterator keeps the snapshot alive until it is aborted. */
	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const struct object_id *oid,
			      const struct object_id *peeled)
{
	if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

/*
 * Take the lock on the "packed-refs" file (honoring the
 * `core.packedrefstimeout` config, default 1000 ms), then refresh the
 * snapshot under the lock. Returns 0 on success, or -1 with a message
 * in `err`.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	/* Read the timeout config only once per process. */
	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * snapshot matches the current version of the file. Normally
	 * `get_snapshot()` does that for us, but that function
	 * assumes that when the file is locked, any existing snapshot
	 * is still valid. We've just locked the file, but it might
	 * have changed the moment *before* we locked it.
	 */
	validate_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}

/*
 * Release the lock on the "packed-refs" file. It is a bug to call
 * this when the lock is not held.
 */
void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		BUG("packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

/* Return true iff this process holds the "packed-refs" lock. */
int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other traits
 * will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line.
For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed refs from the current snapshot to the packed-refs
 * tempfile, incorporating any changes from `updates`. `updates` must
 * be a sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packfile must be locked before calling this function and will
 * remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		BUG("write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	refs->tempfile = create_tempfile(sb.buf);
	if (!refs->tempfile) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid,
					       peel_error ? NULL : &peeled))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(&update->new_oid,
						     &peeled);

			if (write_packed_entry(out, update->refname,
					       &update->new_oid,
					       peel_error ? NULL : &peeled))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addstr(err, "unable to write packed-refs file: "
			      "error iterating over old contents");
		goto error;
	}

	if (close_tempfile_gently(refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(refs->tempfile),
			    strerror(errno));
		/*
		 * NOTE(review): `sb` was already released above and is
		 * empty here, so this release is redundant (but
		 * harmless) — consider dropping it.
		 */
		strbuf_release(&sb);
		delete_tempfile(&refs->tempfile);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}

int is_packed_transaction_needed(struct ref_store *ref_store,
				 struct ref_transaction *transaction)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ,
			"is_packed_transaction_needed");
	struct strbuf referent = STRBUF_INIT;
	size_t i;
	int ret;

	if (!is_lock_file_locked(&refs->lock))
		BUG("is_packed_transaction_needed() called while unlocked");

	/*
	 * We're only going to bother returning false for the common,
	 * trivial case that references are only being deleted, their
	 * old values are not being checked, and the old `packed-refs`
	 * file doesn't contain any of those reference(s). This gives
	 * false positives for some other cases that could
	 * theoretically be optimized away:
	 *
	 * 1. It could be that the old value is being verified without
	 *    setting a new value. In this case, we could verify the
If it1296 * disagrees, we could either let the update go through1297 * (the actual commit would re-detect and report the1298 * problem), or come up with a way of reporting such an1299 * error to *our* caller.1300 *1301 * 2. It could be that a new value is being set, but that it1302 * is identical to the current packed value of the1303 * reference.1304 *1305 * Neither of these cases will come up in the current code,1306 * because the only caller of this function passes to it a1307 * transaction that only includes `delete` updates with no1308 * `old_id`. Even if that ever changes, false positives only1309 * cause an optimization to be missed; they do not affect1310 * correctness.1311 */13121313/*1314 * Start with the cheap checks that don't require old1315 * reference values to be read:1316 */1317for(i =0; i < transaction->nr; i++) {1318struct ref_update *update = transaction->updates[i];13191320if(update->flags & REF_HAVE_OLD)1321/* Have to check the old value -> needed. */1322return1;13231324if((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))1325/* Have to set a new value -> needed. */1326return1;1327}13281329/*1330 * The transaction isn't checking any old values nor is it1331 * setting any nonzero new values, so it still might be able1332 * to be skipped. 
Now do the more expensive check: the update1333 * is needed if any of the updates is a delete, and the old1334 * `packed-refs` file contains a value for that reference.1335 */1336 ret =0;1337for(i =0; i < transaction->nr; i++) {1338struct ref_update *update = transaction->updates[i];1339unsigned int type;1340struct object_id oid;13411342if(!(update->flags & REF_HAVE_NEW))1343/*1344 * This reference isn't being deleted -> not1345 * needed.1346 */1347continue;13481349if(!refs_read_raw_ref(ref_store, update->refname,1350&oid, &referent, &type) ||1351 errno != ENOENT) {1352/*1353 * We have to actually delete that reference1354 * -> this transaction is needed.1355 */1356 ret =1;1357break;1358}1359}13601361strbuf_release(&referent);1362return ret;1363}13641365struct packed_transaction_backend_data {1366/* True iff the transaction owns the packed-refs lock. */1367int own_lock;13681369struct string_list updates;1370};13711372static voidpacked_transaction_cleanup(struct packed_ref_store *refs,1373struct ref_transaction *transaction)1374{1375struct packed_transaction_backend_data *data = transaction->backend_data;13761377if(data) {1378string_list_clear(&data->updates,0);13791380if(is_tempfile_active(refs->tempfile))1381delete_tempfile(&refs->tempfile);13821383if(data->own_lock &&is_lock_file_locked(&refs->lock)) {1384packed_refs_unlock(&refs->base);1385 data->own_lock =0;1386}13871388free(data);1389 transaction->backend_data = NULL;1390}13911392 transaction->state = REF_TRANSACTION_CLOSED;1393}13941395static intpacked_transaction_prepare(struct ref_store *ref_store,1396struct ref_transaction *transaction,1397struct strbuf *err)1398{1399struct packed_ref_store *refs =packed_downcast(1400 ref_store,1401 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1402"ref_transaction_prepare");1403struct packed_transaction_backend_data *data;1404size_t i;1405int ret = TRANSACTION_GENERIC_ERROR;14061407/*1408 * Note that we *don't* skip transactions with zero updates,1409 * because such a 
transaction might be executed for the side1410 * effect of ensuring that all of the references are peeled or1411 * ensuring that the `packed-refs` file is sorted. If the1412 * caller wants to optimize away empty transactions, it should1413 * do so itself.1414 */14151416 data =xcalloc(1,sizeof(*data));1417string_list_init(&data->updates,0);14181419 transaction->backend_data = data;14201421/*1422 * Stick the updates in a string list by refname so that we1423 * can sort them:1424 */1425for(i =0; i < transaction->nr; i++) {1426struct ref_update *update = transaction->updates[i];1427struct string_list_item *item =1428string_list_append(&data->updates, update->refname);14291430/* Store a pointer to update in item->util: */1431 item->util = update;1432}1433string_list_sort(&data->updates);14341435if(ref_update_reject_duplicates(&data->updates, err))1436goto failure;14371438if(!is_lock_file_locked(&refs->lock)) {1439if(packed_refs_lock(ref_store,0, err))1440goto failure;1441 data->own_lock =1;1442}14431444if(write_with_updates(refs, &data->updates, err))1445goto failure;14461447 transaction->state = REF_TRANSACTION_PREPARED;1448return0;14491450failure:1451packed_transaction_cleanup(refs, transaction);1452return ret;1453}14541455static intpacked_transaction_abort(struct ref_store *ref_store,1456struct ref_transaction *transaction,1457struct strbuf *err)1458{1459struct packed_ref_store *refs =packed_downcast(1460 ref_store,1461 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1462"ref_transaction_abort");14631464packed_transaction_cleanup(refs, transaction);1465return0;1466}14671468static intpacked_transaction_finish(struct ref_store *ref_store,1469struct ref_transaction *transaction,1470struct strbuf *err)1471{1472struct packed_ref_store *refs =packed_downcast(1473 ref_store,1474 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1475"ref_transaction_finish");1476int ret = TRANSACTION_GENERIC_ERROR;1477char*packed_refs_path;14781479clear_snapshot(refs);14801481 
packed_refs_path =get_locked_file_path(&refs->lock);1482if(rename_tempfile(&refs->tempfile, packed_refs_path)) {1483strbuf_addf(err,"error replacing%s:%s",1484 refs->path,strerror(errno));1485goto cleanup;1486}14871488 ret =0;14891490cleanup:1491free(packed_refs_path);1492packed_transaction_cleanup(refs, transaction);1493return ret;1494}14951496static intpacked_initial_transaction_commit(struct ref_store *ref_store,1497struct ref_transaction *transaction,1498struct strbuf *err)1499{1500returnref_transaction_commit(transaction, err);1501}15021503static intpacked_delete_refs(struct ref_store *ref_store,const char*msg,1504struct string_list *refnames,unsigned int flags)1505{1506struct packed_ref_store *refs =1507packed_downcast(ref_store, REF_STORE_WRITE,"delete_refs");1508struct strbuf err = STRBUF_INIT;1509struct ref_transaction *transaction;1510struct string_list_item *item;1511int ret;15121513(void)refs;/* We need the check above, but don't use the variable */15141515if(!refnames->nr)1516return0;15171518/*1519 * Since we don't check the references' old_oids, the1520 * individual updates can't fail, so we can pack all of the1521 * updates into a single transaction.1522 */15231524 transaction =ref_store_transaction_begin(ref_store, &err);1525if(!transaction)1526return-1;15271528for_each_string_list_item(item, refnames) {1529if(ref_transaction_delete(transaction, item->string, NULL,1530 flags, msg, &err)) {1531warning(_("could not delete reference%s:%s"),1532 item->string, err.buf);1533strbuf_reset(&err);1534}1535}15361537 ret =ref_transaction_commit(transaction, &err);15381539if(ret) {1540if(refnames->nr ==1)1541error(_("could not delete reference%s:%s"),1542 refnames->items[0].string, err.buf);1543else1544error(_("could not delete references:%s"), err.buf);1545}15461547ref_transaction_free(transaction);1548strbuf_release(&err);1549return ret;1550}15511552static intpacked_pack_refs(struct ref_store *ref_store,unsigned int flags)1553{1554/*1555 * Packed refs are 
already packed. It might be that loose refs1556 * are packed *into* a packed refs store, but that is done by1557 * updating the packed references via a transaction.1558 */1559return0;1560}15611562static intpacked_create_symref(struct ref_store *ref_store,1563const char*refname,const char*target,1564const char*logmsg)1565{1566BUG("packed reference store does not support symrefs");1567}15681569static intpacked_rename_ref(struct ref_store *ref_store,1570const char*oldrefname,const char*newrefname,1571const char*logmsg)1572{1573BUG("packed reference store does not support renaming references");1574}15751576static intpacked_copy_ref(struct ref_store *ref_store,1577const char*oldrefname,const char*newrefname,1578const char*logmsg)1579{1580BUG("packed reference store does not support copying references");1581}15821583static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)1584{1585returnempty_ref_iterator_begin();1586}15871588static intpacked_for_each_reflog_ent(struct ref_store *ref_store,1589const char*refname,1590 each_reflog_ent_fn fn,void*cb_data)1591{1592return0;1593}15941595static intpacked_for_each_reflog_ent_reverse(struct ref_store *ref_store,1596const char*refname,1597 each_reflog_ent_fn fn,1598void*cb_data)1599{1600return0;1601}16021603static intpacked_reflog_exists(struct ref_store *ref_store,1604const char*refname)1605{1606return0;1607}16081609static intpacked_create_reflog(struct ref_store *ref_store,1610const char*refname,int force_create,1611struct strbuf *err)1612{1613BUG("packed reference store does not support reflogs");1614}16151616static intpacked_delete_reflog(struct ref_store *ref_store,1617const char*refname)1618{1619return0;1620}16211622static intpacked_reflog_expire(struct ref_store *ref_store,1623const char*refname,const struct object_id *oid,1624unsigned int flags,1625 reflog_expiry_prepare_fn prepare_fn,1626 reflog_expiry_should_prune_fn should_prune_fn,1627 reflog_expiry_cleanup_fn 
cleanup_fn,1628void*policy_cb_data)1629{1630return0;1631}16321633struct ref_storage_be refs_be_packed = {1634 NULL,1635"packed",1636 packed_ref_store_create,1637 packed_init_db,1638 packed_transaction_prepare,1639 packed_transaction_finish,1640 packed_transaction_abort,1641 packed_initial_transaction_commit,16421643 packed_pack_refs,1644 packed_create_symref,1645 packed_delete_refs,1646 packed_rename_ref,1647 packed_copy_ref,16481649 packed_ref_iterator_begin,1650 packed_read_raw_ref,16511652 packed_reflog_iterator_begin,1653 packed_for_each_reflog_ent,1654 packed_for_each_reflog_ent_reverse,1655 packed_reflog_exists,1656 packed_create_reflog,1657 packed_delete_reflog,1658 packed_reflog_expire1659};