#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

enum mmap_strategy {
	/*
	 * Don't use mmap() at all for reading `packed-refs`.
	 */
	MMAP_NONE,

	/*
	 * Can use mmap() for reading `packed-refs`, but the file must
	 * not remain mmapped. This is the usual option on Windows,
	 * where you cannot rename a new version of a file onto a file
	 * that is currently mmapped.
	 */
	MMAP_TEMPORARY,

	/*
	 * It is OK to leave the `packed-refs` file mmapped while
	 * arbitrary other code is running.
	 */
	MMAP_OK
};

#if defined(NO_MMAP)
static enum mmap_strategy mmap_strategy = MMAP_NONE;
#elif defined(MMAP_PREVENTS_DELETE)
static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
#else
static enum mmap_strategy mmap_strategy = MMAP_OK;
#endif

struct packed_ref_store;

struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	/* Is the `packed-refs` file currently mmapped? */
	int mmapped;

	/*
	 * The contents of the `packed-refs` file. If the file was
	 * already sorted, this points at the mmapped contents of the
	 * file. If not, this points at heap-allocated memory
	 * containing the contents, sorted. If there were no contents
	 * (e.g., because the file didn't exist), `buf` and `eof` are
	 * both NULL.
	 */
	char *buf, *eof;

	/* The size of the header line, if any; otherwise, 0: */
	size_t header_len;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to this data structure, including the
	 * pointer from `packed_ref_store::cache`, if any. The data
	 * will not be freed as long as the reference count is
	 * nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * A container for `packed-refs`-related data. It is a subclass of
 * `ref_store` (note the embedded `base`), so it can be downcast via
 * `packed_downcast()`.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}
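/*
 * For orientation, an illustrative sketch of the on-disk format that
 * the code below reads and writes (see write_packed_entry() and
 * next_record() for the authoritative writer/reader; the refnames and
 * object names here are made up):
 *
 *     # pack-refs with: peeled fully-peeled sorted
 *     8fe0f34ac9da01587c9fa8eae2f45d37871e1b9e refs/heads/master
 *     d6602ec5194c87b0fc87103ca4d67251c76f233a refs/tags/v0.99
 *     ^f25a265a342aed6041ab0cc484224d9ca54b6f41
 *
 * Each record is `<hex-oid> SP <refname> LF`, optionally followed by
 * a peeled line `^<hex-oid> LF` naming the object that a tag record
 * peels to.
 */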
/*
 * If the buffer in `packed_refs` is active, then either munmap the
 * memory and close the file, or free the memory. Then set the buffer
 * pointers to NULL.
 */
static void release_packed_ref_buffer(struct packed_ref_cache *packed_refs)
{
	if (packed_refs->mmapped) {
		if (munmap(packed_refs->buf,
			   packed_refs->eof - packed_refs->buf))
			die_errno("error munmapping packed-refs file %s",
				  packed_refs->refs->path);
		packed_refs->mmapped = 0;
	} else {
		free(packed_refs->buf);
	}
	packed_refs->buf = packed_refs->eof = NULL;
	packed_refs->header_len = 0;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		stat_validity_clear(&packed_refs->validity);
		release_packed_ref_buffer(packed_refs);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: disallowed operation (%s), requires %x, has %x",
		    caller, required_flags, refs->store_flags);

	return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

static NORETURN void die_unterminated_line(const char *path,
					   const char *p, size_t len)
{
	if (len < 80)
		die("unterminated line in %s: %.*s", path, (int)len, p);
	else
		die("unterminated line in %s: %.75s...", path, p);
}

static NORETURN void die_invalid_line(const char *path,
				      const char *p, size_t len)
{
	const char *eol = memchr(p, '\n', len);

	if (!eol)
		die_unterminated_line(path, p, len);
	else if (eol - p < 80)
		die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
	else
		die("unexpected line in %s: %.75s...", path, p);
}

struct packed_ref_entry {
	const char *start;
	size_t len;
};

static int cmp_packed_ref_entries(const void *v1, const void *v2)
{
	const struct packed_ref_entry *e1 = v1, *e2 = v2;
	const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
	const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;

	while (1) {
		if (*r1 == '\n')
			return *r2 == '\n' ? 0 : -1;
		if (*r1 != *r2) {
			if (*r2 == '\n')
				return 1;
			else
				return (unsigned char)*r1 < (unsigned char)*r2 ?
					-1 : +1;
		}
		r1++;
		r2++;
	}
}
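/*
 * An illustrative note for the comparison above (hypothetical
 * records): the terminating LF sorts before any byte that may appear
 * in a legal refname, so the record for "refs/heads/master" orders
 * before the one for "refs/heads/master-2" — a refname that is a
 * proper prefix of another sorts first, just as strcmp() would order
 * the NUL-terminated refnames.
 */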
/*
 * Compare a packed-refs record pointed to by `rec` to the specified
 * NUL-terminated refname.
 */
static int cmp_entry_to_refname(const char *rec, const char *refname)
{
	const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
	const char *r2 = refname;

	while (1) {
		if (*r1 == '\n')
			return *r2 ? -1 : 0;
		if (!*r2)
			return 1;
		if (*r1 != *r2)
			return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
		r1++;
		r2++;
	}
}

/*
 * `packed_refs->buf` is not known to be sorted. Check whether it is,
 * and if not, sort it into new memory and munmap/free the old
 * storage.
 */
static void sort_packed_refs(struct packed_ref_cache *packed_refs)
{
	struct packed_ref_entry *entries = NULL;
	size_t alloc = 0, nr = 0;
	int sorted = 1;
	const char *pos, *eof, *eol;
	size_t len, i;
	char *new_buffer, *dst;

	pos = packed_refs->buf + packed_refs->header_len;
	eof = packed_refs->eof;
	len = eof - pos;

	if (!len)
		return;

	/*
	 * Initialize entries based on a crude estimate of the number
	 * of references in the file (we'll grow it below if needed):
	 */
	ALLOC_GROW(entries, len / 80 + 20, alloc);

	while (pos < eof) {
		eol = memchr(pos, '\n', eof - pos);
		if (!eol)
			/* The safety check should prevent this. */
			BUG("unterminated line found in packed-refs");
		if (eol - pos < GIT_SHA1_HEXSZ + 2)
			die_invalid_line(packed_refs->refs->path,
					 pos, eof - pos);
		eol++;
		if (eol < eof && *eol == '^') {
			/*
			 * Keep any peeled line together with its
			 * reference:
			 */
			const char *peeled_start = eol;

			eol = memchr(peeled_start, '\n', eof - peeled_start);
			if (!eol)
				/* The safety check should prevent this. */
				BUG("unterminated peeled line found in packed-refs");
			eol++;
		}

		ALLOC_GROW(entries, nr + 1, alloc);
		entries[nr].start = pos;
		entries[nr].len = eol - pos;
		nr++;

		if (sorted &&
		    nr > 1 &&
		    cmp_packed_ref_entries(&entries[nr - 2],
					   &entries[nr - 1]) >= 0)
			sorted = 0;

		pos = eol;
	}

	if (sorted)
		goto cleanup;

	/* We need to sort the memory. First we sort the entries array: */
	QSORT(entries, nr, cmp_packed_ref_entries);

	/*
	 * Allocate a new chunk of memory, and copy the old memory to
	 * the new in the order indicated by `entries` (not bothering
	 * with the header line):
	 */
	new_buffer = xmalloc(len);
	for (dst = new_buffer, i = 0; i < nr; i++) {
		memcpy(dst, entries[i].start, entries[i].len);
		dst += entries[i].len;
	}

	/*
	 * Now munmap the old buffer and use the sorted buffer in its
	 * place:
	 */
	release_packed_ref_buffer(packed_refs);
	packed_refs->buf = new_buffer;
	packed_refs->eof = new_buffer + len;
	packed_refs->header_len = 0;

cleanup:
	free(entries);
}

/*
 * Return a pointer to the start of the record that contains the
 * character `*p` (which must be within the buffer). If no other
 * record start is found, return `buf`.
 */
static const char *find_start_of_record(const char *buf, const char *p)
{
	while (p > buf && (p[-1] != '\n' || p[0] == '^'))
		p--;
	return p;
}
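/*
 * A worked example for the record-boundary helpers (hypothetical
 * buffer contents): in
 *
 *     <oid> refs/tags/v1.0\n^<oid>\n<oid> refs/tags/v2.0\n
 *
 * a peeled "^" line never starts a record, so find_start_of_record()
 * called with `p` inside the "^" line backs up to the start of the
 * "refs/tags/v1.0" record, and find_end_of_record() (defined next)
 * skips over the "^" line and returns the start of "refs/tags/v2.0".
 */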
/*
 * Return a pointer to the start of the record following the record
 * that contains `*p`. If none is found before `end`, return `end`.
 */
static const char *find_end_of_record(const char *p, const char *end)
{
	while (++p < end && (p[-1] != '\n' || p[0] == '^'))
		;
	return p;
}

/*
 * We want to be able to compare mmapped reference records quickly,
 * without totally parsing them. We can do so because the records are
 * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
 * + 1) bytes past the beginning of the record.
 *
 * But what if the `packed-refs` file contains garbage? We're willing
 * to tolerate not detecting the problem, as long as we don't produce
 * totally garbled output (we can't afford to check the integrity of
 * the whole file during every Git invocation). But we do want to be
 * sure that we never read past the end of the buffer in memory and
 * perform an illegal memory access.
 *
 * Guarantee that minimum level of safety by verifying that the last
 * record in the file is LF-terminated, and that it has at least
 * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
 * these checks fails.
 */
static void verify_buffer_safe(struct packed_ref_cache *packed_refs)
{
	const char *buf = packed_refs->buf + packed_refs->header_len;
	const char *eof = packed_refs->eof;
	const char *last_line;

	if (buf == eof)
		return;

	last_line = find_start_of_record(buf, eof - 1);
	if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
		die_invalid_line(packed_refs->refs->path,
				 last_line, eof - last_line);
}

/*
 * Depending on `mmap_strategy`, either mmap or read the contents of
 * the `packed-refs` file into the `packed_refs` instance. Return 1 if
 * the file existed and was read, or 0 if the file was absent. Die on
 * errors.
 */
static int load_contents(struct packed_ref_cache *packed_refs)
{
	int fd;
	struct stat st;
	size_t size;
	ssize_t bytes_read;

	fd = open(packed_refs->refs->path, O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty,
			 * which is its state when initialized with
			 * zeros.
			 */
			return 0;
		} else {
			die_errno("couldn't read %s", packed_refs->refs->path);
		}
	}

	stat_validity_update(&packed_refs->validity, fd);

	if (fstat(fd, &st) < 0)
		die_errno("couldn't stat %s", packed_refs->refs->path);
	size = xsize_t(st.st_size);

	if (!size) {
		/*
		 * An empty file is equivalent to an absent one (and
		 * mmapping a zero-length file would fail anyway):
		 */
		close(fd);
		return 0;
	}

	switch (mmap_strategy) {
	case MMAP_NONE:
		packed_refs->buf = xmalloc(size);
		bytes_read = read_in_full(fd, packed_refs->buf, size);
		if (bytes_read < 0 || bytes_read != size)
			die_errno("couldn't read %s", packed_refs->refs->path);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 0;
		break;
	case MMAP_TEMPORARY:
	case MMAP_OK:
		packed_refs->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
		packed_refs->eof = packed_refs->buf + size;
		packed_refs->mmapped = 1;
		break;
	}
	close(fd);

	return 1;
}
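/*
 * An illustrative note for the binary search below (hypothetical
 * data): if the raw midpoint `mid` lands inside the record
 * "<oid> refs/bb\n", it is first snapped back to that record's start
 * with find_start_of_record() and the comparison is done there. If
 * `refname` sorts after "refs/bb", the search resumes just past that
 * record (find_end_of_record()); if before, the record start becomes
 * the new `hi`.
 */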
/*
 * Find the place in `cache->buf` where the start of the record for
 * `refname` starts. If `mustexist` is true and the reference doesn't
 * exist, then return NULL. If `mustexist` is false and the reference
 * doesn't exist, then return the point where that reference would be
 * inserted. In the latter mode, `refname` doesn't have to be a proper
 * reference name; for example, one could search for "refs/replace/"
 * to find the start of any replace references.
 *
 * The record is sought using a binary search, so `cache->buf` must be
 * sorted.
 */
static const char *find_reference_location(struct packed_ref_cache *cache,
					   const char *refname, int mustexist)
{
	/*
	 * This is not *quite* a garden-variety binary search, because
	 * the data we're searching is made up of records, and we
	 * always need to find the beginning of a record to do a
	 * comparison. A "record" here is one line for the reference
	 * itself and zero or one peel lines that start with '^'. Our
	 * loop invariant is described in the next two comments.
	 */

	/*
	 * A pointer to the character at the start of a record whose
	 * preceding records all have reference names that come
	 * *before* `refname`.
	 */
	const char *lo = cache->buf + cache->header_len;

	/*
	 * A pointer to the first character of a record whose
	 * reference name comes *after* `refname`.
	 */
	const char *hi = cache->eof;

	while (lo < hi) {
		const char *mid, *rec;
		int cmp;

		mid = lo + (hi - lo) / 2;
		rec = find_start_of_record(lo, mid);
		cmp = cmp_entry_to_refname(rec, refname);
		if (cmp < 0) {
			lo = find_end_of_record(mid, hi);
		} else if (cmp > 0) {
			hi = rec;
		} else {
			return rec;
		}
	}

	if (mustexist)
		return NULL;
	else
		return lo;
}
/*
 * Read from the `packed-refs` file into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * Neither `peeled` nor `fully-peeled`:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 * `peeled`:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we find
 *      a peeled value for such a reference we will use it.
 *
 * `fully-peeled`:
 *
 *      All references in the file that can be peeled are peeled.
 *      Conversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable.
 *      This trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 *
 * `sorted`:
 *
 *      The references in this file are known to be sorted by refname.
 */
static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
{
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	int sorted = 0;

	packed_refs->refs = refs;
	acquire_packed_ref_cache(packed_refs);
	packed_refs->peeled = PEELED_NONE;

	if (!load_contents(packed_refs))
		return packed_refs;

	/* If the file has a header line, process it: */
	if (packed_refs->buf < packed_refs->eof && *packed_refs->buf == '#') {
		struct strbuf tmp = STRBUF_INIT;
		char *p;
		const char *eol;
		struct string_list traits = STRING_LIST_INIT_NODUP;

		eol = memchr(packed_refs->buf, '\n',
			     packed_refs->eof - packed_refs->buf);
		if (!eol)
			die_unterminated_line(refs->path,
					      packed_refs->buf,
					      packed_refs->eof - packed_refs->buf);

		strbuf_add(&tmp, packed_refs->buf, eol - packed_refs->buf);

		if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
			die_invalid_line(refs->path,
					 packed_refs->buf,
					 packed_refs->eof - packed_refs->buf);

		string_list_split_in_place(&traits, p, ' ', -1);

		if (unsorted_string_list_has_string(&traits, "fully-peeled"))
			packed_refs->peeled = PEELED_FULLY;
		else if (unsorted_string_list_has_string(&traits, "peeled"))
			packed_refs->peeled = PEELED_TAGS;

		sorted = unsorted_string_list_has_string(&traits, "sorted");

		/* perhaps other traits later as well */

		/* The "+ 1" is for the LF character. */
		packed_refs->header_len = eol + 1 - packed_refs->buf;

		string_list_clear(&traits, 0);
		strbuf_release(&tmp);
	}

	verify_buffer_safe(packed_refs);

	if (!sorted) {
		sort_packed_refs(packed_refs);

		/*
		 * Reordering the records might have moved a short one
		 * to the end of the buffer, so verify the buffer's
		 * safety again:
		 */
		verify_buffer_safe(packed_refs);
	}

	if (mmap_strategy != MMAP_OK && packed_refs->mmapped) {
		/*
		 * We don't want to leave the file mmapped, so we are
		 * forced to make a copy now:
		 */
		size_t size = packed_refs->eof -
			(packed_refs->buf + packed_refs->header_len);
		char *buf_copy = xmalloc(size);

		memcpy(buf_copy, packed_refs->buf + packed_refs->header_len, size);
		release_packed_ref_buffer(packed_refs);
		packed_refs->buf = buf_copy;
		packed_refs->eof = buf_copy + size;
	}

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}
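/*
 * For orientation, the usual read path through this file (a summary
 * of the calls below, not a separate API):
 *
 *     packed_read_raw_ref()
 *       -> get_packed_ref_cache()       reuse or revalidate the cache
 *            -> read_packed_refs()      on a cache miss
 *                 -> load_contents()    mmap or read the file
 *       -> find_reference_location()    binary search in the buffer
 */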
/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs);

	return refs->cache;
}

static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
	struct packed_ref_cache *packed_refs = get_packed_ref_cache(refs);
	const char *rec;

	*type = 0;

	rec = find_reference_location(packed_refs, refname, 1);

	if (!rec) {
		/* refname is not a packed reference. */
		errno = ENOENT;
		return -1;
	}

	if (get_sha1_hex(rec, sha1))
		die_invalid_line(refs->path, rec, packed_refs->eof - rec);

	*type = REF_ISPACKED;
	return 0;
}

/*
 * This value is set in `base.flags` if the peeled value of the
 * current reference is known. In that case, `peeled` contains the
 * correct peeled value for the reference, which might be `null_sha1`
 * if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x40

/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	struct object_id oid, peeled;

	struct strbuf refname_buf;

	unsigned int flags;
};

static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = next_record(iter)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->base.refname, &iter->oid,
					    iter->flags))
			continue;

		return ITER_OK;
	}

	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	if ((iter->base.flags & REF_KNOWS_PEELED)) {
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
		return -1;
	} else {
		return !!peel_object(iter->oid.hash, peeled->hash);
	}
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	strbuf_release(&iter->refname_buf);
	release_packed_ref_cache(iter->packed_refs);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
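/*
 * A minimal caller-side sketch of how the vtable above is consumed
 * (the generic protocol comes from refs-internal.h; error handling
 * elided, and the "refs/" prefix is just an example):
 *
 *     struct ref_iterator *it =
 *             packed_ref_iterator_begin(ref_store, "refs/", 0);
 *
 *     while (ref_iterator_advance(it) == ITER_OK)
 *             printf("%s\n", it->refname);
 *
 * When ref_iterator_advance() returns ITER_DONE or ITER_ERROR, the
 * iterator has already freed itself (see packed_ref_iterator_abort()
 * above); call ref_iterator_abort() only to stop early.
 */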
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_cache *packed_refs;
	const char *start;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	packed_refs = get_packed_ref_cache(refs);

	if (!packed_refs->buf)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */
	iter->packed_refs = packed_refs;
	acquire_packed_ref_cache(packed_refs);

	if (prefix && *prefix)
		start = find_reference_location(packed_refs, prefix, 0);
	else
		start = packed_refs->buf + packed_refs->header_len;

	iter->pos = start;
	iter->eof = packed_refs->eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	iter->flags = flags;

	if (prefix && *prefix)
		/* Stop iteration after we've gone *past* prefix: */
		ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}
/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled sorted \n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, roll back the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}
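/*
 * For orientation: a transaction against this store normally flows
 * through packed_transaction_prepare() (sort the updates, take the
 * packed-refs lock if the caller doesn't already hold it, and write
 * the merged result to a tempfile via write_with_updates()), then
 * either packed_transaction_finish() (rename the tempfile into place)
 * or packed_transaction_abort() (delete the tempfile). This
 * summarizes the functions below rather than adding any new contract.
 */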
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	clear_packed_ref_cache(refs);

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};