#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
#include "../chdir-notify.h"

/*
 * How (and whether) we may use mmap() to read the `packed-refs`
 * file, as opposed to reading it into heap-allocated memory.
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

/* The strategy is chosen at compile time from platform capabilities: */
#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file:
	 *
	 * - buf -- a pointer to the start of the memory
	 * - start -- a pointer to the first byte of actual references
	 *   (i.e., after the header line, if one is present)
	 * - eof -- a pointer just past the end of the reference
	 *   contents
	 *
	 * If the `packed-refs` file was already sorted, `buf` points
	 * at the mmapped contents of the file. If not, it points at
	 * heap-allocated memory containing the contents, sorted. If
	 * there were no contents (e.g., because the file didn't
	 * exist), `buf`, `start`, and `eof` are all NULL.
	 */
	char *buf, *start, *eof;

	/*
	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/*
	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	 */
	struct stat_validity validity;
};

/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction.
In that case, it remains locked even after the 124 * transaction is done and the new `packed-refs` file is activated. 125 */ 126struct packed_ref_store { 127struct ref_store base; 128 129unsigned int store_flags; 130 131/* The path of the "packed-refs" file: */ 132char*path; 133 134/* 135 * A snapshot of the values read from the `packed-refs` file, 136 * if it might still be current; otherwise, NULL. 137 */ 138struct snapshot *snapshot; 139 140/* 141 * Lock used for the "packed-refs" file. Note that this (and 142 * thus the enclosing `packed_ref_store`) must not be freed. 143 */ 144struct lock_file lock; 145 146/* 147 * Temporary file used when rewriting new contents to the 148 * "packed-refs" file. Note that this (and thus the enclosing 149 * `packed_ref_store`) must not be freed. 150 */ 151struct tempfile *tempfile; 152}; 153 154/* 155 * Increment the reference count of `*snapshot`. 156 */ 157static voidacquire_snapshot(struct snapshot *snapshot) 158{ 159 snapshot->referrers++; 160} 161 162/* 163 * If the buffer in `snapshot` is active, then either munmap the 164 * memory and close the file, or free the memory. Then set the buffer 165 * pointers to NULL. 166 */ 167static voidclear_snapshot_buffer(struct snapshot *snapshot) 168{ 169if(snapshot->mmapped) { 170if(munmap(snapshot->buf, snapshot->eof - snapshot->buf)) 171die_errno("error ummapping packed-refs file%s", 172 snapshot->refs->path); 173 snapshot->mmapped =0; 174}else{ 175free(snapshot->buf); 176} 177 snapshot->buf = snapshot->start = snapshot->eof = NULL; 178} 179 180/* 181 * Decrease the reference count of `*snapshot`. If it goes to zero, 182 * free `*snapshot` and return true; otherwise return false. 
183 */ 184static intrelease_snapshot(struct snapshot *snapshot) 185{ 186if(!--snapshot->referrers) { 187stat_validity_clear(&snapshot->validity); 188clear_snapshot_buffer(snapshot); 189free(snapshot); 190return1; 191}else{ 192return0; 193} 194} 195 196struct ref_store *packed_ref_store_create(const char*path, 197unsigned int store_flags) 198{ 199struct packed_ref_store *refs =xcalloc(1,sizeof(*refs)); 200struct ref_store *ref_store = (struct ref_store *)refs; 201 202base_ref_store_init(ref_store, &refs_be_packed); 203 refs->store_flags = store_flags; 204 205 refs->path =xstrdup(path); 206chdir_notify_reparent("packed-refs", &refs->path); 207 208return ref_store; 209} 210 211/* 212 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is 213 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't 214 * support at least the flags specified in `required_flags`. `caller` 215 * is used in any necessary error messages. 216 */ 217static struct packed_ref_store *packed_downcast(struct ref_store *ref_store, 218unsigned int required_flags, 219const char*caller) 220{ 221struct packed_ref_store *refs; 222 223if(ref_store->be != &refs_be_packed) 224BUG("ref_store is type\"%s\"not\"packed\"in%s", 225 ref_store->be->name, caller); 226 227 refs = (struct packed_ref_store *)ref_store; 228 229if((refs->store_flags & required_flags) != required_flags) 230BUG("unallowed operation (%s), requires%x, has%x\n", 231 caller, required_flags, refs->store_flags); 232 233return refs; 234} 235 236static voidclear_snapshot(struct packed_ref_store *refs) 237{ 238if(refs->snapshot) { 239struct snapshot *snapshot = refs->snapshot; 240 241 refs->snapshot = NULL; 242release_snapshot(snapshot); 243} 244} 245 246static NORETURN voiddie_unterminated_line(const char*path, 247const char*p,size_t len) 248{ 249if(len <80) 250die("unterminated line in%s: %.*s", path, (int)len, p); 251else 252die("unterminated line in%s: %.75s...", path, p); 253} 254 255static NORETURN 
voiddie_invalid_line(const char*path, 256const char*p,size_t len) 257{ 258const char*eol =memchr(p,'\n', len); 259 260if(!eol) 261die_unterminated_line(path, p, len); 262else if(eol - p <80) 263die("unexpected line in%s: %.*s", path, (int)(eol - p), p); 264else 265die("unexpected line in%s: %.75s...", path, p); 266 267} 268 269struct snapshot_record { 270const char*start; 271size_t len; 272}; 273 274static intcmp_packed_ref_records(const void*v1,const void*v2) 275{ 276const struct snapshot_record *e1 = v1, *e2 = v2; 277const char*r1 = e1->start + the_hash_algo->hexsz +1; 278const char*r2 = e2->start + the_hash_algo->hexsz +1; 279 280while(1) { 281if(*r1 =='\n') 282return*r2 =='\n'?0: -1; 283if(*r1 != *r2) { 284if(*r2 =='\n') 285return1; 286else 287return(unsigned char)*r1 < (unsigned char)*r2 ? -1: +1; 288} 289 r1++; 290 r2++; 291} 292} 293 294/* 295 * Compare a snapshot record at `rec` to the specified NUL-terminated 296 * refname. 297 */ 298static intcmp_record_to_refname(const char*rec,const char*refname) 299{ 300const char*r1 = rec + the_hash_algo->hexsz +1; 301const char*r2 = refname; 302 303while(1) { 304if(*r1 =='\n') 305return*r2 ? -1:0; 306if(!*r2) 307return1; 308if(*r1 != *r2) 309return(unsigned char)*r1 < (unsigned char)*r2 ? -1: +1; 310 r1++; 311 r2++; 312} 313} 314 315/* 316 * `snapshot->buf` is not known to be sorted. Check whether it is, and 317 * if not, sort it into new memory and munmap/free the old storage. 
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = snapshot->start;
	eof = snapshot->eof;

	/* An empty snapshot is trivially sorted. */
	if (pos == eof)
		return;

	len = eof - pos;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	/* Walk the buffer record by record, checking sortedness as we go. */
	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < the_hash_algo->hexsz + 2)
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		/* One out-of-order adjacent pair means we must sort. */
		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;

cleanup:
	free(records);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	/* A '^' line belongs to the reference line preceding it. */
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation).
 * But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct snapshot *snapshot)
{
	const char *start = snapshot->start;
	const char *eof = snapshot->eof;
	const char *last_line;

	if (start == eof)
		return;

	last_line = find_start_of_record(start, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
		die_invalid_line(snapshot->refs->path,
				 last_line, eof - last_line);
}

/* Files at or below this size are read into memory rather than mmapped. */
#define SMALL_FILE_SIZE (32*1024)

/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the snapshot. Return 1 if the file
 * existed and was read, or 0 if the file was absent or empty. Die on
 * errors.
 */
static int load_contents(struct snapshot *snapshot)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(snapshot->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", snapshot->refs->path);
		}
	}

	/* Remember the file's identity so we can detect later rewrites. */
	stat_validity_update(&snapshot->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", snapshot->refs->path);
	size = xsize_t(st.st_size);

	if (!size) {
		close(fd);
		return 0;
	} else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
		snapshot->buf = xmalloc(size);
		bytes_read = read_in_full(fd, snapshot->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", snapshot->refs->path);
		snapshot->mmapped = 0;
	} else {
		snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		snapshot->mmapped = 1;
	}
	close(fd);

	snapshot->start = snapshot->buf;
	snapshot->eof = snapshot->buf + size;

	return 1;
}

/*
 * Find the place in `snapshot->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted, or `snapshot->eof` (which might be NULL) if it would be
 * inserted at the end of the file. In the latter mode, `refname`
 * doesn't have to be a proper reference name; for example, one could
 * search for "refs/replace/" to find the start of any replace
 * references.
 *
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
 */
static const char *find_reference_location(struct snapshot *snapshot,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = snapshot->start;

	/*
	 * A pointer to the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = snapshot->eof;

	while (lo != hi) {
		const char *mid, *rec;
		int cmp;

		/* Snap the probe point back to the start of its record: */
		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_record_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	if (mustexist)
		return NULL;
	else
		return lo;
}

/*
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 * Probably no references are peeled. But if the file contains a
 * peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 * References under "refs/tags/", if they *can* be peeled, *are*
 * peeled in this file. References outside of "refs/tags/" are
 * probably not peeled even if they could have been, but if we find
 * a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 * All references in the file that can be peeled are peeled.
 * Inversely (and this is more important), any references in the
 * file for which no peeled value is recorded is not peelable.
 * This
 * trait should typically be written alongside "peeled" for
 * compatibility with older clients, but we do not require it
 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 * The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	/* Absent or empty file: the zeroed snapshot already says it all. */
	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		char *tmp, *p, *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		/* Copy the header so we can NUL-split it in place: */
		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);
		free(tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}

/*
 * Check that `refs->snapshot` (if present) still reflects the
 * contents of the `packed-refs` file. If not, clear the snapshot.
 */
static void validate_snapshot(struct packed_ref_store *refs)
{
	if (refs->snapshot &&
	    !stat_validity_check(&refs->snapshot->validity, refs->path))
		clear_snapshot(refs);
}

/*
 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
 */
static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_snapshot(refs);

	if (!refs->snapshot)
		refs->snapshot = create_snapshot(refs);

	return refs->snapshot;
}

/*
 * `read_raw_ref` implementation: look `refname` up in the current
 * snapshot. On success fill in `oid` and set `*type` to
 * REF_ISPACKED; if the reference is not packed, return -1 with
 * errno set to ENOENT.
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, struct object_id *oid,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct snapshot *snapshot = get_snapshot(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(snapshot, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	if (get_oid_hex(rec, oid))
		die_invalid_line(refs->path, rec, snapshot->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}

/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_oid`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a snapshot of a `packed-refs` file.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The snapshot being iterated over (we hold a reference to it): */
	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */
	const char *pos;

	/* The end of the part of the buffer that will be iterated over: */
	const char *eof;

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

	/* The DO_FOR_EACH_* flags this iteration was started with: */
	unsigned int flags;
};

/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`.
 * This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	/* Parse "<oid> <refname>\n"; the hex must fit before eof. */
	if (iter->eof - p < the_hash_algo->hexsz + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<oid>\n" line carries the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < the_hash_algo->hexsz + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}

/*
 * Advance to the next record that the current iteration actually
 * wants, skipping per-worktree and (unless requested) broken refs.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = next_record(iter)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, &iter->oid,
					    iter->flags))
			continue;

		return ITER_OK;
	}

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

/*
 * Peel the current reference, using the cached peeled value if the
 * snapshot provided one; fall back to peeling the object itself.
 */
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(&iter->oid, peeled);
	}
}

/* Release the iterator's resources, including its snapshot reference. */
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	strbuf_release(&iter->refname_buf);
	release_snapshot(iter->snapshot);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

/*
 * Begin iterating over the packed refs whose names start with
 * `prefix` (or all of them if `prefix` is empty/NULL).
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	if (prefix && *prefix)
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->start;

	if (start == snapshot->eof)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const struct object_id *oid,
			      const struct object_id *peeled)
{
	if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * snapshot matches the current version of the file. Normally
	 * `get_snapshot()` does that for us, but that function
	 * assumes that when the file is locked, any existing snapshot
	 * is still valid. We've just locked the file, but it might
	 * have changed the moment *before* we locked it.
	 */
	validate_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		BUG("packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other traits
 * will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line.
For this reason, the space after1060 * the colon and the trailing space are required.1061 */1062static const char PACKED_REFS_HEADER[] =1063"# pack-refs with: peeled fully-peeled sorted\n";10641065static intpacked_init_db(struct ref_store *ref_store,struct strbuf *err)1066{1067/* Nothing to do. */1068return0;1069}10701071/*1072 * Write the packed refs from the current snapshot to the packed-refs1073 * tempfile, incorporating any changes from `updates`. `updates` must1074 * be a sorted string list whose keys are the refnames and whose util1075 * values are `struct ref_update *`. On error, rollback the tempfile,1076 * write an error message to `err`, and return a nonzero value.1077 *1078 * The packfile must be locked before calling this function and will1079 * remain locked when it is done.1080 */1081static intwrite_with_updates(struct packed_ref_store *refs,1082struct string_list *updates,1083struct strbuf *err)1084{1085struct ref_iterator *iter = NULL;1086size_t i;1087int ok;1088FILE*out;1089struct strbuf sb = STRBUF_INIT;1090char*packed_refs_path;10911092if(!is_lock_file_locked(&refs->lock))1093BUG("write_with_updates() called while unlocked");10941095/*1096 * If packed-refs is a symlink, we want to overwrite the1097 * symlinked-to file, not the symlink itself. 
Also, put the1098 * staging file next to it:1099 */1100 packed_refs_path =get_locked_file_path(&refs->lock);1101strbuf_addf(&sb,"%s.new", packed_refs_path);1102free(packed_refs_path);1103 refs->tempfile =create_tempfile(sb.buf);1104if(!refs->tempfile) {1105strbuf_addf(err,"unable to create file%s:%s",1106 sb.buf,strerror(errno));1107strbuf_release(&sb);1108return-1;1109}1110strbuf_release(&sb);11111112 out =fdopen_tempfile(refs->tempfile,"w");1113if(!out) {1114strbuf_addf(err,"unable to fdopen packed-refs tempfile:%s",1115strerror(errno));1116goto error;1117}11181119if(fprintf(out,"%s", PACKED_REFS_HEADER) <0)1120goto write_error;11211122/*1123 * We iterate in parallel through the current list of refs and1124 * the list of updates, processing an entry from at least one1125 * of the lists each time through the loop. When the current1126 * list of refs is exhausted, set iter to NULL. When the list1127 * of updates is exhausted, leave i set to updates->nr.1128 */1129 iter =packed_ref_iterator_begin(&refs->base,"",1130 DO_FOR_EACH_INCLUDE_BROKEN);1131if((ok =ref_iterator_advance(iter)) != ITER_OK)1132 iter = NULL;11331134 i =0;11351136while(iter || i < updates->nr) {1137struct ref_update *update = NULL;1138int cmp;11391140if(i >= updates->nr) {1141 cmp = -1;1142}else{1143 update = updates->items[i].util;11441145if(!iter)1146 cmp = +1;1147else1148 cmp =strcmp(iter->refname, update->refname);1149}11501151if(!cmp) {1152/*1153 * There is both an old value and an update1154 * for this reference. 
Check the old value if1155 * necessary:1156 */1157if((update->flags & REF_HAVE_OLD)) {1158if(is_null_oid(&update->old_oid)) {1159strbuf_addf(err,"cannot update ref '%s': "1160"reference already exists",1161 update->refname);1162goto error;1163}else if(!oideq(&update->old_oid, iter->oid)) {1164strbuf_addf(err,"cannot update ref '%s': "1165"is at%sbut expected%s",1166 update->refname,1167oid_to_hex(iter->oid),1168oid_to_hex(&update->old_oid));1169goto error;1170}1171}11721173/* Now figure out what to use for the new value: */1174if((update->flags & REF_HAVE_NEW)) {1175/*1176 * The update takes precedence. Skip1177 * the iterator over the unneeded1178 * value.1179 */1180if((ok =ref_iterator_advance(iter)) != ITER_OK)1181 iter = NULL;1182 cmp = +1;1183}else{1184/*1185 * The update doesn't actually want to1186 * change anything. We're done with it.1187 */1188 i++;1189 cmp = -1;1190}1191}else if(cmp >0) {1192/*1193 * There is no old value but there is an1194 * update for this reference. Make sure that1195 * the update didn't expect an existing value:1196 */1197if((update->flags & REF_HAVE_OLD) &&1198!is_null_oid(&update->old_oid)) {1199strbuf_addf(err,"cannot update ref '%s': "1200"reference is missing but expected%s",1201 update->refname,1202oid_to_hex(&update->old_oid));1203goto error;1204}1205}12061207if(cmp <0) {1208/* Pass the old reference through. */12091210struct object_id peeled;1211int peel_error =ref_iterator_peel(iter, &peeled);12121213if(write_packed_entry(out, iter->refname,1214 iter->oid,1215 peel_error ? NULL : &peeled))1216goto write_error;12171218if((ok =ref_iterator_advance(iter)) != ITER_OK)1219 iter = NULL;1220}else if(is_null_oid(&update->new_oid)) {1221/*1222 * The update wants to delete the reference,1223 * and the reference either didn't exist or we1224 * have already skipped it. 
So we're done with1225 * the update (and don't have to write1226 * anything).1227 */1228 i++;1229}else{1230struct object_id peeled;1231int peel_error =peel_object(&update->new_oid,1232&peeled);12331234if(write_packed_entry(out, update->refname,1235&update->new_oid,1236 peel_error ? NULL : &peeled))1237goto write_error;12381239 i++;1240}1241}12421243if(ok != ITER_DONE) {1244strbuf_addstr(err,"unable to write packed-refs file: "1245"error iterating over old contents");1246goto error;1247}12481249if(close_tempfile_gently(refs->tempfile)) {1250strbuf_addf(err,"error closing file%s:%s",1251get_tempfile_path(refs->tempfile),1252strerror(errno));1253strbuf_release(&sb);1254delete_tempfile(&refs->tempfile);1255return-1;1256}12571258return0;12591260write_error:1261strbuf_addf(err,"error writing to%s:%s",1262get_tempfile_path(refs->tempfile),strerror(errno));12631264error:1265if(iter)1266ref_iterator_abort(iter);12671268delete_tempfile(&refs->tempfile);1269return-1;1270}12711272intis_packed_transaction_needed(struct ref_store *ref_store,1273struct ref_transaction *transaction)1274{1275struct packed_ref_store *refs =packed_downcast(1276 ref_store,1277 REF_STORE_READ,1278"is_packed_transaction_needed");1279struct strbuf referent = STRBUF_INIT;1280size_t i;1281int ret;12821283if(!is_lock_file_locked(&refs->lock))1284BUG("is_packed_transaction_needed() called while unlocked");12851286/*1287 * We're only going to bother returning false for the common,1288 * trivial case that references are only being deleted, their1289 * old values are not being checked, and the old `packed-refs`1290 * file doesn't contain any of those reference(s). This gives1291 * false positives for some other cases that could1292 * theoretically be optimized away:1293 *1294 * 1. It could be that the old value is being verified without1295 * setting a new value. In this case, we could verify the1296 * old value here and skip the update if it agrees. 
	 *    If it disagrees, we could either let the update go
	 *    through (the actual commit would re-detect and report
	 *    the problem), or come up with a way of reporting such
	 *    an error to *our* caller.
	 *
	 * 2. It could be that a new value is being set, but that it
	 *    is identical to the current packed value of the
	 *    reference.
	 *
	 * Neither of these cases will come up in the current code,
	 * because the only caller of this function passes to it a
	 * transaction that only includes `delete` updates with no
	 * `old_id`. Even if that ever changes, false positives only
	 * cause an optimization to be missed; they do not affect
	 * correctness.
	 */

	/*
	 * Start with the cheap checks that don't require old
	 * reference values to be read:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];

		if (update->flags & REF_HAVE_OLD)
			/* Have to check the old value -> needed. */
			return 1;

		if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
			/* Have to set a new value -> needed. */
			return 1;
	}

	/*
	 * The transaction isn't checking any old values nor is it
	 * setting any nonzero new values, so it still might be able
	 * to be skipped. Now do the more expensive check: the update
	 * is needed if any of the updates is a delete, and the old
	 * `packed-refs` file contains a value for that reference.
	 */
	ret = 0;
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		unsigned int type;
		struct object_id oid;

		if (!(update->flags & REF_HAVE_NEW))
			/*
			 * This reference isn't being deleted -> not
			 * needed.
			 */
			continue;

		/*
		 * NOTE(review): this relies on refs_read_raw_ref()
		 * setting errno, and assumes ENOENT means "absent
		 * from this store" -- confirm against that function's
		 * contract if it ever changes.
		 */
		if (!refs_read_raw_ref(ref_store, update->refname,
				       &oid, &referent, &type) ||
		    errno != ENOENT) {
			/*
			 * We have to actually delete that reference
			 * -> this transaction is needed.
			 */
			ret = 1;
			break;
		}
	}

	strbuf_release(&referent);
	return ret;
}

/*
 * Per-transaction state for the packed backend, hung off of
 * `ref_transaction::backend_data` between prepare and cleanup.
 */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/* The transaction's updates, sorted by refname (util = ref_update *). */
	struct string_list updates;
};

/*
 * Release everything associated with `transaction`: the sorted update
 * list, any active tempfile, and -- if this transaction took it --
 * the packed-refs lock. Leaves the transaction in the CLOSED state.
 */
static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(refs->tempfile))
			delete_tempfile(&refs->tempfile);

		/* Only release the lock if *we* acquired it in prepare. */
		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

/*
 * Prepare `transaction` for commit: sort its updates, reject
 * duplicates, take the packed-refs lock if nobody holds it yet, and
 * stage the new file contents in a tempfile via write_with_updates().
 * On failure, roll everything back and return TRANSACTION_GENERIC_ERROR.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled or
	 * ensuring that the `packed-refs` file is sorted. If the
	 * caller wants to optimize away empty transactions, it should
	 * do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		/* Remember that cleanup must release the lock for us. */
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

/*
 * Abort a (possibly prepared) transaction, discarding the staged
 * tempfile and releasing any resources. Always succeeds.
 */
static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

/*
 * Commit a prepared transaction by renaming the staged tempfile onto
 * the `packed-refs` file, then clean up.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	/* The file on disk is about to change; drop the cached snapshot. */
	clear_snapshot(refs);
packed_refs_path =get_locked_file_path(&refs->lock);1483if(rename_tempfile(&refs->tempfile, packed_refs_path)) {1484strbuf_addf(err,"error replacing%s:%s",1485 refs->path,strerror(errno));1486goto cleanup;1487}14881489 ret =0;14901491cleanup:1492free(packed_refs_path);1493packed_transaction_cleanup(refs, transaction);1494return ret;1495}14961497static intpacked_initial_transaction_commit(struct ref_store *ref_store,1498struct ref_transaction *transaction,1499struct strbuf *err)1500{1501returnref_transaction_commit(transaction, err);1502}15031504static intpacked_delete_refs(struct ref_store *ref_store,const char*msg,1505struct string_list *refnames,unsigned int flags)1506{1507struct packed_ref_store *refs =1508packed_downcast(ref_store, REF_STORE_WRITE,"delete_refs");1509struct strbuf err = STRBUF_INIT;1510struct ref_transaction *transaction;1511struct string_list_item *item;1512int ret;15131514(void)refs;/* We need the check above, but don't use the variable */15151516if(!refnames->nr)1517return0;15181519/*1520 * Since we don't check the references' old_oids, the1521 * individual updates can't fail, so we can pack all of the1522 * updates into a single transaction.1523 */15241525 transaction =ref_store_transaction_begin(ref_store, &err);1526if(!transaction)1527return-1;15281529for_each_string_list_item(item, refnames) {1530if(ref_transaction_delete(transaction, item->string, NULL,1531 flags, msg, &err)) {1532warning(_("could not delete reference%s:%s"),1533 item->string, err.buf);1534strbuf_reset(&err);1535}1536}15371538 ret =ref_transaction_commit(transaction, &err);15391540if(ret) {1541if(refnames->nr ==1)1542error(_("could not delete reference%s:%s"),1543 refnames->items[0].string, err.buf);1544else1545error(_("could not delete references:%s"), err.buf);1546}15471548ref_transaction_free(transaction);1549strbuf_release(&err);1550return ret;1551}15521553static intpacked_pack_refs(struct ref_store *ref_store,unsigned int flags)1554{1555/*1556 * Packed refs are 
already packed. It might be that loose refs1557 * are packed *into* a packed refs store, but that is done by1558 * updating the packed references via a transaction.1559 */1560return0;1561}15621563static intpacked_create_symref(struct ref_store *ref_store,1564const char*refname,const char*target,1565const char*logmsg)1566{1567BUG("packed reference store does not support symrefs");1568}15691570static intpacked_rename_ref(struct ref_store *ref_store,1571const char*oldrefname,const char*newrefname,1572const char*logmsg)1573{1574BUG("packed reference store does not support renaming references");1575}15761577static intpacked_copy_ref(struct ref_store *ref_store,1578const char*oldrefname,const char*newrefname,1579const char*logmsg)1580{1581BUG("packed reference store does not support copying references");1582}15831584static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)1585{1586returnempty_ref_iterator_begin();1587}15881589static intpacked_for_each_reflog_ent(struct ref_store *ref_store,1590const char*refname,1591 each_reflog_ent_fn fn,void*cb_data)1592{1593return0;1594}15951596static intpacked_for_each_reflog_ent_reverse(struct ref_store *ref_store,1597const char*refname,1598 each_reflog_ent_fn fn,1599void*cb_data)1600{1601return0;1602}16031604static intpacked_reflog_exists(struct ref_store *ref_store,1605const char*refname)1606{1607return0;1608}16091610static intpacked_create_reflog(struct ref_store *ref_store,1611const char*refname,int force_create,1612struct strbuf *err)1613{1614BUG("packed reference store does not support reflogs");1615}16161617static intpacked_delete_reflog(struct ref_store *ref_store,1618const char*refname)1619{1620return0;1621}16221623static intpacked_reflog_expire(struct ref_store *ref_store,1624const char*refname,const struct object_id *oid,1625unsigned int flags,1626 reflog_expiry_prepare_fn prepare_fn,1627 reflog_expiry_should_prune_fn should_prune_fn,1628 reflog_expiry_cleanup_fn 
				cleanup_fn,
				void *policy_cb_data)
{
	/* Packed refs have no reflogs, so expiry trivially succeeds. */
	return 0;
}

/*
 * The backend vtable that plugs the packed-refs store into the
 * generic ref_store machinery. Entries are positional; the order must
 * match the `ref_storage_be` declaration in refs-internal.h.
 */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	/* transaction operations */
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	/* bulk/rename/copy operations */
	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,
	packed_copy_ref,

	/* iteration and raw reads */
	packed_ref_iterator_begin,
	packed_read_raw_ref,

	/* reflog operations (no-ops or BUGs for this backend) */
	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};