#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
#include "../chdir-notify.h"

/*
 * How may mmap() be used to read the `packed-refs` file? The choice
 * is made once, at compile time, based on the platform (see the
 * `#if` chain below).
 */
enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

/* Forward declaration; the full definition appears further down. */
struct packed_ref_store;

/*
 * A `snapshot` represents one snapshot of a `packed-refs` file.
 *
 * Normally, this will be a mmapped view of the contents of the
 * `packed-refs` file at the time the snapshot was created. However,
 * if the `packed-refs` file was not sorted, this might point at heap
 * memory holding the contents of the `packed-refs` file with its
 * records sorted by refname.
 *
 * `snapshot` instances are reference counted (via
 * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
 * an instance from disappearing while an iterator is still iterating
 * over it. Instances are garbage collected when their `referrers`
 * count goes to zero.
 *
 * The most recent `snapshot`, if available, is referenced by the
 * `packed_ref_store`. Its freshness is checked whenever
 * `get_snapshot()` is called; if the existing snapshot is obsolete, a
 * new snapshot is taken.
 */
struct snapshot {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * snapshot is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file:
	 *
	 * - buf -- a pointer to the start of the memory
	 * - start -- a pointer to the first byte of actual references
	 *   (i.e., after the header line, if one is present)
	 * - eof -- a pointer just past the end of the reference
	 *   contents
	 *
	 * If the `packed-refs` file was already sorted, `buf` points
	 * at the mmapped contents of the file. If not, it points at
	 * heap-allocated memory containing the contents, sorted. If
	 * there were no contents (e.g., because the file didn't
	 * exist), `buf`, `start`, and `eof` are all NULL.
	 */
	char *buf, *start, *eof;

	/*
	 * What is the peeled state of the `packed-refs` file that
	 * this snapshot represents? (This is usually determined from
	 * the file's header.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this instance, including the pointer
	 * from `packed_ref_store::snapshot`, if any. The instance
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/*
	 * The metadata of the `packed-refs` file from which this
	 * snapshot was created, used to tell if the file has been
	 * replaced since we read it.
	 */
	struct stat_validity validity;
};

/*
 * A `ref_store` representing references stored in a `packed-refs`
 * file. It implements the `ref_store` interface, though it has some
 * limitations:
 *
 * - It cannot store symbolic references.
 *
 * - It cannot store reflogs.
 *
 * - It does not support reference renaming (though it could).
 *
 * On the other hand, it can be locked outside of a reference
 * transaction.
In that case, it remains locked even after the 124 * transaction is done and the new `packed-refs` file is activated. 125 */ 126struct packed_ref_store { 127struct ref_store base; 128 129unsigned int store_flags; 130 131/* The path of the "packed-refs" file: */ 132char*path; 133 134/* 135 * A snapshot of the values read from the `packed-refs` file, 136 * if it might still be current; otherwise, NULL. 137 */ 138struct snapshot *snapshot; 139 140/* 141 * Lock used for the "packed-refs" file. Note that this (and 142 * thus the enclosing `packed_ref_store`) must not be freed. 143 */ 144struct lock_file lock; 145 146/* 147 * Temporary file used when rewriting new contents to the 148 * "packed-refs" file. Note that this (and thus the enclosing 149 * `packed_ref_store`) must not be freed. 150 */ 151struct tempfile *tempfile; 152}; 153 154/* 155 * Increment the reference count of `*snapshot`. 156 */ 157static voidacquire_snapshot(struct snapshot *snapshot) 158{ 159 snapshot->referrers++; 160} 161 162/* 163 * If the buffer in `snapshot` is active, then either munmap the 164 * memory and close the file, or free the memory. Then set the buffer 165 * pointers to NULL. 166 */ 167static voidclear_snapshot_buffer(struct snapshot *snapshot) 168{ 169if(snapshot->mmapped) { 170if(munmap(snapshot->buf, snapshot->eof - snapshot->buf)) 171die_errno("error ummapping packed-refs file%s", 172 snapshot->refs->path); 173 snapshot->mmapped =0; 174}else{ 175free(snapshot->buf); 176} 177 snapshot->buf = snapshot->start = snapshot->eof = NULL; 178} 179 180/* 181 * Decrease the reference count of `*snapshot`. If it goes to zero, 182 * free `*snapshot` and return true; otherwise return false. 
183 */ 184static intrelease_snapshot(struct snapshot *snapshot) 185{ 186if(!--snapshot->referrers) { 187stat_validity_clear(&snapshot->validity); 188clear_snapshot_buffer(snapshot); 189free(snapshot); 190return1; 191}else{ 192return0; 193} 194} 195 196struct ref_store *packed_ref_store_create(const char*path, 197unsigned int store_flags) 198{ 199struct packed_ref_store *refs =xcalloc(1,sizeof(*refs)); 200struct ref_store *ref_store = (struct ref_store *)refs; 201 202base_ref_store_init(ref_store, &refs_be_packed); 203 refs->store_flags = store_flags; 204 205 refs->path =xstrdup(path); 206chdir_notify_reparent("packed-refs", &refs->path); 207 208return ref_store; 209} 210 211/* 212 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is 213 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't 214 * support at least the flags specified in `required_flags`. `caller` 215 * is used in any necessary error messages. 216 */ 217static struct packed_ref_store *packed_downcast(struct ref_store *ref_store, 218unsigned int required_flags, 219const char*caller) 220{ 221struct packed_ref_store *refs; 222 223if(ref_store->be != &refs_be_packed) 224BUG("ref_store is type\"%s\"not\"packed\"in%s", 225 ref_store->be->name, caller); 226 227 refs = (struct packed_ref_store *)ref_store; 228 229if((refs->store_flags & required_flags) != required_flags) 230BUG("unallowed operation (%s), requires%x, has%x\n", 231 caller, required_flags, refs->store_flags); 232 233return refs; 234} 235 236static voidclear_snapshot(struct packed_ref_store *refs) 237{ 238if(refs->snapshot) { 239struct snapshot *snapshot = refs->snapshot; 240 241 refs->snapshot = NULL; 242release_snapshot(snapshot); 243} 244} 245 246static NORETURN voiddie_unterminated_line(const char*path, 247const char*p,size_t len) 248{ 249if(len <80) 250die("unterminated line in%s: %.*s", path, (int)len, p); 251else 252die("unterminated line in%s: %.75s...", path, p); 253} 254 255static NORETURN 
void die_invalid_line(const char *path,
		      const char *p, size_t len)
{
	const char *eol = memchr(p, '\n', len);

	if (!eol)
		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
	else
		die("unexpected line in %s: %.75s...", path, p);

}

/*
 * One record in a `packed-refs` buffer: the reference line itself
 * plus any immediately-following peeled ('^') line.
 */
struct snapshot_record {
	/* Pointer to the first byte of the record within the buffer. */
	const char *start;
	/* Length of the record in bytes, including the trailing LF. */
	size_t len;
};

/*
 * qsort()-style comparison of two `snapshot_record`s by refname.
 *
 * Each record is "<oid> SP <refname> LF", so the refname starts
 * exactly (hexsz + 1) bytes into the record. The refnames are
 * LF-terminated rather than NUL-terminated, so '\n' plays the role
 * of the terminator: a refname that is a prefix of the other sorts
 * first.
 */
static int cmp_packed_ref_records(const void *v1, const void *v2)
{
	const struct snapshot_record *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + the_hash_algo->hexsz + 1;
	const char *r2 = e2->start + the_hash_algo->hexsz + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		}
		r1++;
		r2++;
	}
}

/*
 * Compare a snapshot record at `rec` to the specified NUL-terminated
 * refname.
 */
static int cmp_record_to_refname(const char *rec, const char *refname)
{
	/* Skip past the OID and the separating space: */
	const char *r1 = rec + the_hash_algo->hexsz + 1;
	const char *r2 = refname;

	while (1) {
		if (*r1 == '\n')
			return *r2 ? -1 : 0;
		if (!*r2)
			return 1;
		if (*r1 != *r2)
			return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		r1++;
		r2++;
	}
}

/*
 * `snapshot->buf` is not known to be sorted. Check whether it is, and
 * if not, sort it into new memory and munmap/free the old storage.
 */
static void sort_snapshot(struct snapshot *snapshot)
{
	struct snapshot_record *records = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = snapshot->start;
	eof = snapshot->eof;

	if (pos == eof)
		return;

	len = eof - pos;

	/*
	 * Initialize records based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(records, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < the_hash_algo->hexsz + 2)
			/* Too short to hold "<oid> SP <refname>". */
			die_invalid_line(snapshot->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(records, nr + 1, alloc);
		records[nr].start = pos;
		records[nr].len = eol - pos;
		nr++;

		/* Detect any out-of-order neighbors as we go: */
		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_records(&records[nr - 2],
					   &records[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the records array: */
	QSORT(records, nr, cmp_packed_ref_records);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `records` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, records[i].start, records[i].len);
		dst += records[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	clear_snapshot_buffer(snapshot);
	snapshot->buf = snapshot->start = new_buffer;
	snapshot->eof = new_buffer + len;

cleanup:
	free(records);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 *
 * A line starting with '^' is a peeled line, which belongs to the
 * reference line preceding it, so we keep scanning backwards past it.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}

/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation).
But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct snapshot *snapshot)
{
	const char *start = snapshot->start;
	const char *eof = snapshot->eof;
	const char *last_line;

	if (start == eof)
		return;

	last_line = find_start_of_record(start, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < the_hash_algo->hexsz + 2)
		die_invalid_line(snapshot->refs->path,
				 last_line, eof - last_line);
}

/* Files at or below this size are read into the heap instead of mmapped. */
#define SMALL_FILE_SIZE (32*1024)

/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the snapshot. Return 1 if the file
 * existed and was read, or 0 if the file was absent or empty. Die on
 * errors.
 */
static int load_contents(struct snapshot *snapshot)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(snapshot->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", snapshot->refs->path);
		}
	}

	/*
	 * Record the file's metadata so that later validity checks
	 * can tell whether it has been replaced on disk:
	 */
	stat_validity_update(&snapshot->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", snapshot->refs->path);
	size = xsize_t(st.st_size);

	if (!size) {
		close(fd);
		return 0;
	} else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
		/* Small files (or no-mmap platforms): read into the heap. */
		snapshot->buf = xmalloc(size);
		bytes_read = read_in_full(fd, snapshot->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", snapshot->refs->path);
		snapshot->mmapped = 0;
	} else {
		snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		snapshot->mmapped = 1;
	}
	close(fd);

	snapshot->start = snapshot->buf;
	snapshot->eof = snapshot->buf + size;

	return 1;
}

/*
 * Find the place in `snapshot->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted, or `snapshot->eof` (which might be NULL) if it would be
 * inserted at the end of the file. In the latter mode, `refname`
 * doesn't have to be a proper reference name; for example, one could
 * search for "refs/replace/" to find the start of any replace
 * references.
 *
 * The record is sought using a binary search, so `snapshot->buf` must
 * be sorted.
 */
static const char *find_reference_location(struct snapshot *snapshot,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = snapshot->start;

	/*
	 * A pointer to a the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = snapshot->eof;

	while (lo != hi) {
		const char *mid, *rec;
		int cmp;

		/* Bisect, then back up to the start of the record at mid: */
		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_record_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	if (mustexist)
		return NULL;
	else
		return lo;
}

/*
 * Create a newly-allocated `snapshot` of the `packed-refs` file in
 * its current state and return it. The return value will already have
 * its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 *   Probably no references are peeled. But if the file contains a
 *   peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 *   References under "refs/tags/", if they *can* be peeled, *are*
 *   peeled in this file. References outside of "refs/tags/" are
 *   probably not peeled even if they could have been, but if we find
 *   a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 *   All references in the file that can be peeled are peeled.
 *   Inversely (and this is more important), any references in the
 *   file for which no peeled value is recorded is not peelable.
This
 * trait should typically be written alongside "peeled" for
 * compatibility with older clients, but we do not require it
 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 *   The references in this file are known to be sorted by refname.
 */
static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
	int sorted = 0;

	snapshot->refs = refs;
	acquire_snapshot(snapshot);
	snapshot->peeled = PEELED_NONE;

	/* Absent or empty file: return the all-zeros (empty) snapshot. */
	if (!load_contents(snapshot))
		return snapshot;

	/* If the file has a header line, process it: */
	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		char *tmp, *p, *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(snapshot->buf, '\n',
			     snapshot->eof - snapshot->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      snapshot->buf,
					      snapshot->eof - snapshot->buf);

		/* NUL-terminated copy of the header for string parsing: */
		tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);

		if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 snapshot->buf,
					 snapshot->eof - snapshot->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			snapshot->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			snapshot->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		snapshot->start = eol + 1;

		string_list_clear(&traits, 0);
		free(tmp);
	}

	verify_buffer_safe(snapshot);

	if (!sorted) {
		sort_snapshot(snapshot);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(snapshot);
	}

	if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = snapshot->eof - snapshot->start;
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, snapshot->start, size);
		clear_snapshot_buffer(snapshot);
		snapshot->buf = snapshot->start = buf_copy;
		snapshot->eof = buf_copy + size;
	}

	return snapshot;
}

/*
 * Check that `refs->snapshot` (if present) still reflects the
 * contents of the `packed-refs` file. If not, clear the snapshot.
 */
static void validate_snapshot(struct packed_ref_store *refs)
{
	if (refs->snapshot &&
	    !stat_validity_check(&refs->snapshot->validity, refs->path))
		clear_snapshot(refs);
}

/*
 * Get the `snapshot` for the specified packed_ref_store, creating and
 * populating it if it hasn't been read before or if the file has been
 * changed (according to its `validity` field) since it was last read.
 * On the other hand, if we hold the lock, then assume that the file
 * hasn't been changed out from under us, so skip the extra `stat()`
 * call in `stat_validity_check()`. This function does *not* increase
 * the snapshot's reference count on behalf of the caller.
 */
static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_snapshot(refs);

	if (!refs->snapshot)
		refs->snapshot = create_snapshot(refs);

	return refs->snapshot;
}

/*
 * `ref_store` method: look up `refname` in the current snapshot. On
 * success, fill in `*oid` and set `*type` to REF_ISPACKED. If the
 * reference is not present, return -1 with errno set to ENOENT.
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, struct object_id *oid,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct snapshot *snapshot = get_snapshot(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(snapshot, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	if (get_oid_hex(rec, oid))
		die_invalid_line(refs->path, rec, snapshot->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}

/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_oid`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a snapshot of a `packed-refs` file.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	/* The snapshot being iterated (its refcount is held by us). */
	struct snapshot *snapshot;

	/* The current position in the snapshot's buffer: */
	const char *pos;

	/* The end of the part of the buffer that will be iterated over: */
	const char *eof;

	/* Scratch space for current values: */
	struct object_id oid, peeled;
	struct strbuf refname_buf;

	/* `DO_FOR_EACH_*` flags passed to `packed_ref_iterator_begin()`. */
	unsigned int flags;
};

/*
 * Move the iterator to the next record in the snapshot, without
 * respect for whether the record is actually required by the current
 * iteration. Adjust the fields in `iter` and return `ITER_OK` or
 * `ITER_DONE`. This function does not free the iterator in the case
 * of `ITER_DONE`.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	/* Parse "<oid> SP <refname> LF": */
	if (iter->eof - p < the_hash_algo->hexsz + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->snapshot->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->snapshot->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->snapshot->peeled == PEELED_FULLY ||
	    (iter->snapshot->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional following '^' line carries the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < the_hash_algo->hexsz + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->snapshot->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}

/*
 * `ref_iterator` method: advance to the next reference that satisfies
 * the iteration flags, skipping non-worktree refs and (unless
 * requested) broken refs. Aborts the iterator on exhaustion.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = next_record(iter)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, &iter->oid,
					    iter->flags))
			continue;

		return ITER_OK;
	}

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

/*
 * `ref_iterator` method: report the peeled value of the current
 * reference, using the cached value when known, otherwise peeling
 * the object on demand.
 */
static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ?
-1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(&iter->oid, peeled);
	}
}

/*
 * `ref_iterator` method: release the iterator's scratch buffer and
 * its reference to the snapshot, then free the iterator itself.
 */
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	strbuf_release(&iter->refname_buf);
	release_snapshot(iter->snapshot);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

/*
 * Begin iterating over the references in the current snapshot whose
 * names start with `prefix` (all references if `prefix` is empty).
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct snapshot *snapshot;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	/*
	 * Note that `get_snapshot()` internally checks whether the
	 * snapshot is up to date with what is on disk, and re-reads
	 * it if not.
	 */
	snapshot = get_snapshot(refs);

	/* Binary-search for the first record at or after `prefix`: */
	if (prefix && *prefix)
		start = find_reference_location(snapshot, prefix, 0);
	else
		start = snapshot->start;

	if (start == snapshot->eof)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	iter->snapshot = snapshot;
	acquire_snapshot(snapshot);

	iter->pos = start;
	iter->eof = snapshot->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const struct object_id *oid,
			      const struct object_id *peeled)
{
	if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * There is a stat-validity problem that could cause
	 * `update-ref -d` to lose a newly-committed ref value: a new
	 * `packed-refs` file might have the same on-disk file
	 * attributes (timestamp, file size and inode value) as the
	 * old one, but different ref contents.
	 *
	 * This could happen, with a very small chance, when
	 * `update-ref -d` is called while another
	 * `pack-refs --all` process is running at the same time.
	 *
	 * Now that we hold the `packed-refs` lock, it is important
	 * that we read the latest version of the `packed-refs` file,
	 * whether we previously mmapped it or not. So we clear the
	 * snapshot if we hold one already.
	 */
	clear_snapshot(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the snapshot:
	 */
	get_snapshot(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		BUG("packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out.
Perhaps other traits
 * will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted\n";

/*
 * `ref_store` method: the packed-refs backend needs no on-disk
 * initialization, so this is a no-op.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed refs from the current snapshot to the packed-refs
 * tempfile, incorporating any changes from `updates`. `updates` must
 * be a sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packfile must be locked before calling this function and will
 * remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		BUG("write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	refs->tempfile = create_tempfile(sb.buf);
	if (!refs->tempfile) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (!oideq(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid,
					       peel_error ? NULL : &peeled))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it.
So we're done with1234 * the update (and don't have to write1235 * anything).1236 */1237 i++;1238}else{1239struct object_id peeled;1240int peel_error =peel_object(&update->new_oid,1241&peeled);12421243if(write_packed_entry(out, update->refname,1244&update->new_oid,1245 peel_error ? NULL : &peeled))1246goto write_error;12471248 i++;1249}1250}12511252if(ok != ITER_DONE) {1253strbuf_addstr(err,"unable to write packed-refs file: "1254"error iterating over old contents");1255goto error;1256}12571258if(close_tempfile_gently(refs->tempfile)) {1259strbuf_addf(err,"error closing file%s:%s",1260get_tempfile_path(refs->tempfile),1261strerror(errno));1262strbuf_release(&sb);1263delete_tempfile(&refs->tempfile);1264return-1;1265}12661267return0;12681269write_error:1270strbuf_addf(err,"error writing to%s:%s",1271get_tempfile_path(refs->tempfile),strerror(errno));12721273error:1274if(iter)1275ref_iterator_abort(iter);12761277delete_tempfile(&refs->tempfile);1278return-1;1279}12801281intis_packed_transaction_needed(struct ref_store *ref_store,1282struct ref_transaction *transaction)1283{1284struct packed_ref_store *refs =packed_downcast(1285 ref_store,1286 REF_STORE_READ,1287"is_packed_transaction_needed");1288struct strbuf referent = STRBUF_INIT;1289size_t i;1290int ret;12911292if(!is_lock_file_locked(&refs->lock))1293BUG("is_packed_transaction_needed() called while unlocked");12941295/*1296 * We're only going to bother returning false for the common,1297 * trivial case that references are only being deleted, their1298 * old values are not being checked, and the old `packed-refs`1299 * file doesn't contain any of those reference(s). This gives1300 * false positives for some other cases that could1301 * theoretically be optimized away:1302 *1303 * 1. It could be that the old value is being verified without1304 * setting a new value. In this case, we could verify the1305 * old value here and skip the update if it agrees. 
If it1306 * disagrees, we could either let the update go through1307 * (the actual commit would re-detect and report the1308 * problem), or come up with a way of reporting such an1309 * error to *our* caller.1310 *1311 * 2. It could be that a new value is being set, but that it1312 * is identical to the current packed value of the1313 * reference.1314 *1315 * Neither of these cases will come up in the current code,1316 * because the only caller of this function passes to it a1317 * transaction that only includes `delete` updates with no1318 * `old_id`. Even if that ever changes, false positives only1319 * cause an optimization to be missed; they do not affect1320 * correctness.1321 */13221323/*1324 * Start with the cheap checks that don't require old1325 * reference values to be read:1326 */1327for(i =0; i < transaction->nr; i++) {1328struct ref_update *update = transaction->updates[i];13291330if(update->flags & REF_HAVE_OLD)1331/* Have to check the old value -> needed. */1332return1;13331334if((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))1335/* Have to set a new value -> needed. */1336return1;1337}13381339/*1340 * The transaction isn't checking any old values nor is it1341 * setting any nonzero new values, so it still might be able1342 * to be skipped. 
Now do the more expensive check: the update1343 * is needed if any of the updates is a delete, and the old1344 * `packed-refs` file contains a value for that reference.1345 */1346 ret =0;1347for(i =0; i < transaction->nr; i++) {1348struct ref_update *update = transaction->updates[i];1349unsigned int type;1350struct object_id oid;13511352if(!(update->flags & REF_HAVE_NEW))1353/*1354 * This reference isn't being deleted -> not1355 * needed.1356 */1357continue;13581359if(!refs_read_raw_ref(ref_store, update->refname,1360&oid, &referent, &type) ||1361 errno != ENOENT) {1362/*1363 * We have to actually delete that reference1364 * -> this transaction is needed.1365 */1366 ret =1;1367break;1368}1369}13701371strbuf_release(&referent);1372return ret;1373}13741375struct packed_transaction_backend_data {1376/* True iff the transaction owns the packed-refs lock. */1377int own_lock;13781379struct string_list updates;1380};13811382static voidpacked_transaction_cleanup(struct packed_ref_store *refs,1383struct ref_transaction *transaction)1384{1385struct packed_transaction_backend_data *data = transaction->backend_data;13861387if(data) {1388string_list_clear(&data->updates,0);13891390if(is_tempfile_active(refs->tempfile))1391delete_tempfile(&refs->tempfile);13921393if(data->own_lock &&is_lock_file_locked(&refs->lock)) {1394packed_refs_unlock(&refs->base);1395 data->own_lock =0;1396}13971398free(data);1399 transaction->backend_data = NULL;1400}14011402 transaction->state = REF_TRANSACTION_CLOSED;1403}14041405static intpacked_transaction_prepare(struct ref_store *ref_store,1406struct ref_transaction *transaction,1407struct strbuf *err)1408{1409struct packed_ref_store *refs =packed_downcast(1410 ref_store,1411 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1412"ref_transaction_prepare");1413struct packed_transaction_backend_data *data;1414size_t i;1415int ret = TRANSACTION_GENERIC_ERROR;14161417/*1418 * Note that we *don't* skip transactions with zero updates,1419 * because such a 
transaction might be executed for the side1420 * effect of ensuring that all of the references are peeled or1421 * ensuring that the `packed-refs` file is sorted. If the1422 * caller wants to optimize away empty transactions, it should1423 * do so itself.1424 */14251426 data =xcalloc(1,sizeof(*data));1427string_list_init(&data->updates,0);14281429 transaction->backend_data = data;14301431/*1432 * Stick the updates in a string list by refname so that we1433 * can sort them:1434 */1435for(i =0; i < transaction->nr; i++) {1436struct ref_update *update = transaction->updates[i];1437struct string_list_item *item =1438string_list_append(&data->updates, update->refname);14391440/* Store a pointer to update in item->util: */1441 item->util = update;1442}1443string_list_sort(&data->updates);14441445if(ref_update_reject_duplicates(&data->updates, err))1446goto failure;14471448if(!is_lock_file_locked(&refs->lock)) {1449if(packed_refs_lock(ref_store,0, err))1450goto failure;1451 data->own_lock =1;1452}14531454if(write_with_updates(refs, &data->updates, err))1455goto failure;14561457 transaction->state = REF_TRANSACTION_PREPARED;1458return0;14591460failure:1461packed_transaction_cleanup(refs, transaction);1462return ret;1463}14641465static intpacked_transaction_abort(struct ref_store *ref_store,1466struct ref_transaction *transaction,1467struct strbuf *err)1468{1469struct packed_ref_store *refs =packed_downcast(1470 ref_store,1471 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1472"ref_transaction_abort");14731474packed_transaction_cleanup(refs, transaction);1475return0;1476}14771478static intpacked_transaction_finish(struct ref_store *ref_store,1479struct ref_transaction *transaction,1480struct strbuf *err)1481{1482struct packed_ref_store *refs =packed_downcast(1483 ref_store,1484 REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,1485"ref_transaction_finish");1486int ret = TRANSACTION_GENERIC_ERROR;1487char*packed_refs_path;14881489clear_snapshot(refs);14901491 
packed_refs_path =get_locked_file_path(&refs->lock);1492if(rename_tempfile(&refs->tempfile, packed_refs_path)) {1493strbuf_addf(err,"error replacing%s:%s",1494 refs->path,strerror(errno));1495goto cleanup;1496}14971498 ret =0;14991500cleanup:1501free(packed_refs_path);1502packed_transaction_cleanup(refs, transaction);1503return ret;1504}15051506static intpacked_initial_transaction_commit(struct ref_store *ref_store,1507struct ref_transaction *transaction,1508struct strbuf *err)1509{1510returnref_transaction_commit(transaction, err);1511}15121513static intpacked_delete_refs(struct ref_store *ref_store,const char*msg,1514struct string_list *refnames,unsigned int flags)1515{1516struct packed_ref_store *refs =1517packed_downcast(ref_store, REF_STORE_WRITE,"delete_refs");1518struct strbuf err = STRBUF_INIT;1519struct ref_transaction *transaction;1520struct string_list_item *item;1521int ret;15221523(void)refs;/* We need the check above, but don't use the variable */15241525if(!refnames->nr)1526return0;15271528/*1529 * Since we don't check the references' old_oids, the1530 * individual updates can't fail, so we can pack all of the1531 * updates into a single transaction.1532 */15331534 transaction =ref_store_transaction_begin(ref_store, &err);1535if(!transaction)1536return-1;15371538for_each_string_list_item(item, refnames) {1539if(ref_transaction_delete(transaction, item->string, NULL,1540 flags, msg, &err)) {1541warning(_("could not delete reference%s:%s"),1542 item->string, err.buf);1543strbuf_reset(&err);1544}1545}15461547 ret =ref_transaction_commit(transaction, &err);15481549if(ret) {1550if(refnames->nr ==1)1551error(_("could not delete reference%s:%s"),1552 refnames->items[0].string, err.buf);1553else1554error(_("could not delete references:%s"), err.buf);1555}15561557ref_transaction_free(transaction);1558strbuf_release(&err);1559return ret;1560}15611562static intpacked_pack_refs(struct ref_store *ref_store,unsigned int flags)1563{1564/*1565 * Packed refs are 
already packed. It might be that loose refs1566 * are packed *into* a packed refs store, but that is done by1567 * updating the packed references via a transaction.1568 */1569return0;1570}15711572static intpacked_create_symref(struct ref_store *ref_store,1573const char*refname,const char*target,1574const char*logmsg)1575{1576BUG("packed reference store does not support symrefs");1577}15781579static intpacked_rename_ref(struct ref_store *ref_store,1580const char*oldrefname,const char*newrefname,1581const char*logmsg)1582{1583BUG("packed reference store does not support renaming references");1584}15851586static intpacked_copy_ref(struct ref_store *ref_store,1587const char*oldrefname,const char*newrefname,1588const char*logmsg)1589{1590BUG("packed reference store does not support copying references");1591}15921593static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)1594{1595returnempty_ref_iterator_begin();1596}15971598static intpacked_for_each_reflog_ent(struct ref_store *ref_store,1599const char*refname,1600 each_reflog_ent_fn fn,void*cb_data)1601{1602return0;1603}16041605static intpacked_for_each_reflog_ent_reverse(struct ref_store *ref_store,1606const char*refname,1607 each_reflog_ent_fn fn,1608void*cb_data)1609{1610return0;1611}16121613static intpacked_reflog_exists(struct ref_store *ref_store,1614const char*refname)1615{1616return0;1617}16181619static intpacked_create_reflog(struct ref_store *ref_store,1620const char*refname,int force_create,1621struct strbuf *err)1622{1623BUG("packed reference store does not support reflogs");1624}16251626static intpacked_delete_reflog(struct ref_store *ref_store,1627const char*refname)1628{1629return0;1630}16311632static intpacked_reflog_expire(struct ref_store *ref_store,1633const char*refname,const struct object_id *oid,1634unsigned int flags,1635 reflog_expiry_prepare_fn prepare_fn,1636 reflog_expiry_should_prune_fn should_prune_fn,1637 reflog_expiry_cleanup_fn 
cleanup_fn,1638void*policy_cb_data)1639{1640return0;1641}16421643struct ref_storage_be refs_be_packed = {1644 NULL,1645"packed",1646 packed_ref_store_create,1647 packed_init_db,1648 packed_transaction_prepare,1649 packed_transaction_finish,1650 packed_transaction_abort,1651 packed_initial_transaction_commit,16521653 packed_pack_refs,1654 packed_create_symref,1655 packed_delete_refs,1656 packed_rename_ref,1657 packed_copy_ref,16581659 packed_ref_iterator_begin,1660 packed_read_raw_ref,16611662 packed_reflog_iterator_begin,1663 packed_for_each_reflog_ent,1664 packed_for_each_reflog_ent_reverse,1665 packed_reflog_exists,1666 packed_create_reflog,1667 packed_delete_reflog,1668 packed_reflog_expire1669};