#include "../cache.h"
#include "../refs.h"
#include "refs-internal.h"
#include "../iterator.h"
#include "../dir-iterator.h"
#include "../lockfile.h"
#include "../object.h"
#include "../dir.h"

/*
 * State for a loose reference that is held under lock: the ref's
 * name, the lockfile guarding it, and an object id (presumably the
 * value observed when the lock was taken -- confirmed by the locking
 * code, which is outside this view).
 */
struct ref_lock {
	char *ref_name;
	struct lock_file *lk;
	struct object_id old_oid;
};

struct ref_entry;

/*
 * Information used (along with the information in ref_entry) to
 * describe a single cached reference. This data structure only
 * occurs embedded in a union in struct ref_entry, and only when
 * (ref_entry->flag & REF_DIR) is zero.
 */
struct ref_value {
	/*
	 * The name of the object to which this reference resolves
	 * (which may be a tag object). If REF_ISBROKEN, this is
	 * null. If REF_ISSYMREF, then this is the name of the object
	 * referred to by the last reference in the symlink chain.
	 */
	struct object_id oid;

	/*
	 * If REF_KNOWS_PEELED, then this field holds the peeled value
	 * of this reference, or null if the reference is known not to
	 * be peelable. See the documentation for peel_ref() for an
	 * exact definition of "peelable".
	 */
	struct object_id peeled;
};

struct files_ref_store;

/*
 * Information used (along with the information in ref_entry) to
 * describe a level in the hierarchy of references. This data
 * structure only occurs embedded in a union in struct ref_entry, and
 * only when (ref_entry.flag & REF_DIR) is set. In that case,
 * (ref_entry.flag & REF_INCOMPLETE) determines whether the references
 * in the directory have already been read:
 *
 * (ref_entry.flag & REF_INCOMPLETE) unset -- a directory of loose
 * or packed references, already read.
 *
 * (ref_entry.flag & REF_INCOMPLETE) set -- a directory of loose
 * references that hasn't been read yet (nor has any of its
 * subdirectories).
 *
 * Entries within a directory are stored within a growable array of
 * pointers to ref_entries (entries, nr, alloc). Entries 0 <= i <
 * sorted are sorted by their component name in strcmp() order and the
 * remaining entries are unsorted.
 *
 * Loose references are read lazily, one directory at a time. When a
 * directory of loose references is read, then all of the references
 * in that directory are stored, and REF_INCOMPLETE stubs are created
 * for any subdirectories, but the subdirectories themselves are not
 * read. The reading is triggered by get_ref_dir().
 */
struct ref_dir {
	int nr, alloc;

	/*
	 * Entries with index 0 <= i < sorted are sorted by name. New
	 * entries are appended to the list unsorted, and are sorted
	 * only when required; thus we avoid the need to sort the list
	 * after the addition of every reference.
	 */
	int sorted;

	/* A pointer to the files_ref_store that contains this ref_dir. */
	struct files_ref_store *ref_store;

	struct ref_entry **entries;
};

/*
 * Bit values for ref_entry::flag. REF_ISSYMREF=0x01,
 * REF_ISPACKED=0x02, REF_ISBROKEN=0x04 and REF_BAD_NAME=0x08 are
 * public values; see refs.h.
 */

/*
 * The field ref_entry->u.value.peeled of this value entry contains
 * the correct peeled value for the reference, which might be
 * null_sha1 if the reference is not a tag or if it is broken.
 */
#define REF_KNOWS_PEELED 0x10

/* ref_entry represents a directory of references */
#define REF_DIR 0x20

/*
 * Entry has not yet been read from disk (used only for REF_DIR
 * entries representing loose references)
 */
#define REF_INCOMPLETE 0x40

/*
 * A ref_entry represents either a reference or a "subdirectory" of
 * references.
 *
 * Each directory in the reference namespace is represented by a
 * ref_entry with (flags & REF_DIR) set and containing a subdir member
 * that holds the entries in that directory that have been read so
 * far. If (flags & REF_INCOMPLETE) is set, then the directory and
 * its subdirectories haven't been read yet. REF_INCOMPLETE is only
 * used for loose reference directories.
 *
 * References are represented by a ref_entry with (flags & REF_DIR)
 * unset and a value member that describes the reference's value. The
 * flag member is at the ref_entry level, but it is also needed to
 * interpret the contents of the value field (in other words, a
 * ref_value object is not very much use without the enclosing
 * ref_entry).
 *
 * Reference names cannot end with slash and directories' names are
 * always stored with a trailing slash (except for the top-level
 * directory, which is always denoted by ""). This has two nice
 * consequences: (1) when the entries in each subdir are sorted
 * lexicographically by name (as they usually are), the references in
 * a whole tree can be generated in lexicographic order by traversing
 * the tree in left-to-right, depth-first order; (2) the names of
 * references and subdirectories cannot conflict, and therefore the
 * presence of an empty subdirectory does not block the creation of a
 * similarly-named reference. (The fact that reference names with the
 * same leading components can conflict *with each other* is a
 * separate issue that is regulated by verify_refname_available().)
 *
 * Please note that the name field contains the fully-qualified
 * reference (or subdirectory) name. Space could be saved by only
 * storing the relative names. But that would require the full names
 * to be generated on the fly when iterating in do_for_each_ref(), and
 * would break callback functions, who have always been able to assume
 * that the name strings that they are passed will not be freed during
 * the iteration.
 */
struct ref_entry {
	unsigned char flag; /* ISSYMREF? ISPACKED? */
	union {
		struct ref_value value; /* if not (flags&REF_DIR) */
		struct ref_dir subdir; /* if (flags&REF_DIR) */
	} u;
	/*
	 * The full name of the reference (e.g., "refs/heads/master")
	 * or the full name of the directory with a trailing slash
	 * (e.g., "refs/heads/"):
	 */
	char name[FLEX_ARRAY];
};

static void read_loose_refs(const char *dirname, struct ref_dir *dir);
static int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len);
static struct ref_entry *create_dir_entry(struct files_ref_store *ref_store,
					  const char *dirname, size_t len,
					  int incomplete);
static void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry);
static int files_log_ref_write(struct files_ref_store *refs,
			       const char *refname, const unsigned char *old_sha1,
			       const unsigned char *new_sha1, const char *msg,
			       int flags, struct strbuf *err);

/*
 * Return the ref_dir stored in entry (which must have REF_DIR set).
 * If the directory has not been read from disk yet (REF_INCOMPLETE),
 * read it via read_loose_refs() first and clear the flag.
 */
static struct ref_dir *get_ref_dir(struct ref_entry *entry)
{
	struct ref_dir *dir;
	assert(entry->flag & REF_DIR);
	dir = &entry->u.subdir;
	if (entry->flag & REF_INCOMPLETE) {
		read_loose_refs(entry->name, dir);

		/*
		 * Manually add refs/bisect, which, being
		 * per-worktree, might not appear in the directory
		 * listing for refs/ in the main repo.
		 */
		if (!strcmp(entry->name, "refs/")) {
			int pos = search_ref_dir(dir, "refs/bisect/", 12);
			if (pos < 0) {
				struct ref_entry *child_entry;
				child_entry = create_dir_entry(dir->ref_store,
							       "refs/bisect/",
							       12, 1);
				add_entry_to_dir(dir, child_entry);
			}
		}
		entry->flag &= ~REF_INCOMPLETE;
	}
	return dir;
}

/*
 * Allocate and return a new non-directory ref_entry named refname
 * whose value is sha1. The peeled value is cleared (i.e., not yet
 * known). If check_name is set, die if refname has an invalid format.
 */
static struct ref_entry *create_ref_entry(const char *refname,
					  const unsigned char *sha1, int flag,
					  int check_name)
{
	struct ref_entry *ref;

	if (check_name &&
	    check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
		die("Reference has invalid format: '%s'", refname);
	FLEX_ALLOC_STR(ref, name, refname);
	hashcpy(ref->u.value.oid.hash, sha1);
	oidclr(&ref->u.value.peeled);
	ref->flag = flag;
	return ref;
}

static void clear_ref_dir(struct ref_dir *dir);

/*
 * Free entry, recursively freeing the contents of a REF_DIR entry
 * without triggering the lazy reading of loose refs.
 */
static void free_ref_entry(struct ref_entry *entry)
{
	if (entry->flag & REF_DIR) {
		/*
		 * Do not use get_ref_dir() here, as that might
		 * trigger the reading of loose refs.
		 */
		clear_ref_dir(&entry->u.subdir);
	}
	free(entry);
}

/*
 * Add a ref_entry to the end of dir (unsorted). Entry is always
 * stored directly in dir; no recursion into subdirectories is
 * done.
 */
static void add_entry_to_dir(struct ref_dir *dir, struct ref_entry *entry)
{
	ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
	dir->entries[dir->nr++] = entry;
	/* optimize for the case that entries are added in order */
	if (dir->nr == 1 ||
	    (dir->nr == dir->sorted + 1 &&
	     strcmp(dir->entries[dir->nr - 2]->name,
		    dir->entries[dir->nr - 1]->name) < 0))
		dir->sorted = dir->nr;
}

/*
 * Clear and free all entries in dir, recursively.
 */
static void clear_ref_dir(struct ref_dir *dir)
{
	int i;
	for (i = 0; i < dir->nr; i++)
		free_ref_entry(dir->entries[i]);
	free(dir->entries);
	dir->sorted = dir->nr = dir->alloc = 0;
	dir->entries = NULL;
}

/*
 * Create a struct ref_entry object for the specified dirname.
 * dirname is the name of the directory with a trailing slash (e.g.,
 * "refs/heads/") or "" for the top-level directory.
 */
static struct ref_entry *create_dir_entry(struct files_ref_store *ref_store,
					  const char *dirname, size_t len,
					  int incomplete)
{
	struct ref_entry *direntry;
	FLEX_ALLOC_MEM(direntry, name, dirname, len);
	direntry->u.subdir.ref_store = ref_store;
	direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
	return direntry;
}

/* qsort() comparator: order two ref_entry pointers by entry name. */
static int ref_entry_cmp(const void *a, const void *b)
{
	struct ref_entry *one = *(struct ref_entry **)a;
	struct ref_entry *two = *(struct ref_entry **)b;
	return strcmp(one->name, two->name);
}

static void sort_ref_dir(struct ref_dir *dir);

/* A length-limited string view, used as the bsearch() key below. */
struct string_slice {
	size_t len;
	const char *str;
};

/*
 * bsearch() comparator: compare a string_slice key against a
 * ref_entry. After the first key->len characters match, the entry is
 * considered equal only if its name ends exactly there ('\0');
 * otherwise the (shorter) key orders before the entry.
 */
static int ref_entry_cmp_sslice(const void *key_, const void *ent_)
{
	const struct string_slice *key = key_;
	const struct ref_entry *ent = *(const struct ref_entry *const *)ent_;
	int cmp = strncmp(key->str, ent->name, key->len);
	if (cmp)
		return cmp;
	return '\0' - (unsigned char)ent->name[key->len];
}

/*
 * Return the index of the entry with the given refname from the
 * ref_dir (non-recursively), sorting dir if necessary. Return -1 if
 * no such entry is found. dir must already be complete.
 */
static int search_ref_dir(struct ref_dir *dir, const char *refname, size_t len)
{
	struct ref_entry **r;
	struct string_slice key;

	if (refname == NULL || !dir->nr)
		return -1;

	sort_ref_dir(dir);
	key.len = len;
	key.str = refname;
	r = bsearch(&key, dir->entries, dir->nr, sizeof(*dir->entries),
		    ref_entry_cmp_sslice);

	if (r == NULL)
		return -1;

	return r - dir->entries;
}

/*
 * Search for a directory entry directly within dir (without
 * recursing). Sort dir if necessary. subdirname must be a directory
 * name (i.e., end in '/'). If mkdir is set, then create the
 * directory if it is missing; otherwise, return NULL if the desired
 * directory cannot be found. dir must already be complete.
 */
static struct ref_dir *search_for_subdir(struct ref_dir *dir,
					 const char *subdirname, size_t len,
					 int mkdir)
{
	int entry_index = search_ref_dir(dir, subdirname, len);
	struct ref_entry *entry;
	if (entry_index == -1) {
		if (!mkdir)
			return NULL;
		/*
		 * Since dir is complete, the absence of a subdir
		 * means that the subdir really doesn't exist;
		 * therefore, create an empty record for it but mark
		 * the record complete.
		 */
		entry = create_dir_entry(dir->ref_store, subdirname, len, 0);
		add_entry_to_dir(dir, entry);
	} else {
		entry = dir->entries[entry_index];
	}
	return get_ref_dir(entry);
}

/*
 * If refname is a reference name, find the ref_dir within the dir
 * tree that should hold refname. If refname is a directory name
 * (i.e., ends in '/'), then return that ref_dir itself. dir must
 * represent the top-level directory and must already be complete.
 * Sort ref_dirs and recurse into subdirectories as necessary. If
 * mkdir is set, then create any missing directories; otherwise,
 * return NULL if the desired directory cannot be found.
 */
static struct ref_dir *find_containing_dir(struct ref_dir *dir,
					   const char *refname, int mkdir)
{
	const char *slash;
	for (slash = strchr(refname, '/'); slash; slash = strchr(slash + 1, '/')) {
		size_t dirnamelen = slash - refname + 1;
		struct ref_dir *subdir;
		subdir = search_for_subdir(dir, refname, dirnamelen, mkdir);
		if (!subdir) {
			dir = NULL;
			break;
		}
		dir = subdir;
	}

	return dir;
}

/*
 * Find the value entry with the given name in dir, sorting ref_dirs
 * and recursing into subdirectories as necessary. If the name is not
 * found or it corresponds to a directory entry, return NULL.
 */
static struct ref_entry *find_ref_entry(struct ref_dir *dir, const char *refname)
{
	int entry_index;
	struct ref_entry *entry;
	dir = find_containing_dir(dir, refname, 0);
	if (!dir)
		return NULL;
	entry_index = search_ref_dir(dir, refname, strlen(refname));
	if (entry_index == -1)
		return NULL;
	entry = dir->entries[entry_index];
	return (entry->flag & REF_DIR) ? NULL : entry;
}

/*
 * Remove the entry with the given name from dir, recursing into
 * subdirectories as necessary. If refname is the name of a directory
 * (i.e., ends with '/'), then remove the directory and its contents.
 * If the removal was successful, return the number of entries
 * remaining in the directory entry that contained the deleted entry.
 * If the name was not found, return -1. Please note that this
 * function only deletes the entry from the cache; it does not delete
 * it from the filesystem or ensure that other cache entries (which
 * might be symbolic references to the removed entry) are updated.
 * Nor does it remove any containing dir entries that might be made
 * empty by the removal. dir must represent the top-level directory
 * and must already be complete.
 */
static int remove_entry(struct ref_dir *dir, const char *refname)
{
	int refname_len = strlen(refname);
	int entry_index;
	struct ref_entry *entry;
	int is_dir = refname[refname_len - 1] == '/';
	if (is_dir) {
		/*
		 * refname represents a reference directory. Remove
		 * the trailing slash; otherwise we will get the
		 * directory *representing* refname rather than the
		 * one *containing* it.
		 */
		char *dirname = xmemdupz(refname, refname_len - 1);
		dir = find_containing_dir(dir, dirname, 0);
		free(dirname);
	} else {
		dir = find_containing_dir(dir, refname, 0);
	}
	if (!dir)
		return -1;
	entry_index = search_ref_dir(dir, refname, refname_len);
	if (entry_index == -1)
		return -1;
	entry = dir->entries[entry_index];

	/* Close the gap left by the removed entry. */
	memmove(&dir->entries[entry_index],
		&dir->entries[entry_index + 1],
		(dir->nr - entry_index - 1) * sizeof(*dir->entries)
		);
	dir->nr--;
	/* The removed slot was within the sorted prefix; shrink it. */
	if (dir->sorted > entry_index)
		dir->sorted--;
	free_ref_entry(entry);
	return dir->nr;
}

/*
 * Add a ref_entry to the ref_dir (unsorted), recursing into
 * subdirectories as necessary. dir must represent the top-level
 * directory. Return 0 on success.
 */
static int add_ref_entry(struct ref_dir *dir, struct ref_entry *ref)
{
	dir = find_containing_dir(dir, ref->name, 1);
	if (!dir)
		return -1;
	add_entry_to_dir(dir, ref);
	return 0;
}

/*
 * Emit a warning and return true iff ref1 and ref2 have the same name
 * and the same sha1. Die if they have the same name but different
 * sha1s.
 */
static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
{
	if (strcmp(ref1->name, ref2->name))
		return 0;

	/* Duplicate name; make sure that they don't conflict: */

	if ((ref1->flag & REF_DIR) || (ref2->flag & REF_DIR))
		/* This is impossible by construction */
		die("Reference directory conflict:%s", ref1->name);

	if (oidcmp(&ref1->u.value.oid, &ref2->u.value.oid))
		die("Duplicated ref, and SHA1s don't match:%s", ref1->name);

	warning("Duplicated ref:%s", ref1->name);
	return 1;
}

/*
 * Sort the entries in dir non-recursively (if they are not already
 * sorted) and remove any duplicate entries.
 */
static void sort_ref_dir(struct ref_dir *dir)
{
	int i, j;
	struct ref_entry *last = NULL;

	/*
	 * This check also prevents passing a zero-length array to qsort(),
	 * which is a problem on some platforms.
	 */
	if (dir->sorted == dir->nr)
		return;

	QSORT(dir->entries, dir->nr, ref_entry_cmp);

	/* Remove any duplicates: */
	for (i = 0, j = 0; j < dir->nr; j++) {
		struct ref_entry *entry = dir->entries[j];
		if (last && is_dup_ref(last, entry))
			free_ref_entry(entry);
		else
			last = dir->entries[i++] = entry;
	}
	dir->sorted = dir->nr = i;
}

/*
 * Return true if refname, which has the specified oid and flags, can
 * be resolved to an object in the database. If the referred-to object
 * does not exist, emit a warning and return false.
 */
static int ref_resolves_to_object(const char *refname,
				  const struct object_id *oid,
				  unsigned int flags)
{
	if (flags & REF_ISBROKEN)
		return 0;
	if (!has_sha1_file(oid->hash)) {
		error("%sdoes not point to a valid object!", refname);
		return 0;
	}
	return 1;
}

/*
 * Return true if the reference described by entry can be resolved to
 * an object in the database; otherwise, emit a warning and return
 * false.
 */
static int entry_resolves_to_object(struct ref_entry *entry)
{
	return ref_resolves_to_object(entry->name,
				      &entry->u.value.oid, entry->flag);
}

/* Callback type used by do_for_each_entry_in_dir(). */
typedef int each_ref_entry_fn(struct ref_entry *entry, void *cb_data);

/*
 * Call fn for each reference in dir that has index in the range
 * offset <= index < dir->nr. Recurse into subdirectories that are in
 * that index range, sorting them before iterating. This function
 * does not sort dir itself; it should be sorted beforehand. fn is
 * called for all references, including broken ones.
 */
static int do_for_each_entry_in_dir(struct ref_dir *dir, int offset,
				    each_ref_entry_fn fn, void *cb_data)
{
	int i;
	assert(dir->sorted == dir->nr);
	for (i = offset; i < dir->nr; i++) {
		struct ref_entry *entry = dir->entries[i];
		int retval;
		if (entry->flag & REF_DIR) {
			struct ref_dir *subdir = get_ref_dir(entry);
			sort_ref_dir(subdir);
			retval = do_for_each_entry_in_dir(subdir, 0, fn, cb_data);
		} else {
			retval = fn(entry, cb_data);
		}
		/* A nonzero return from fn aborts the whole iteration. */
		if (retval)
			return retval;
	}
	return 0;
}

/*
 * Load all of the refs from the dir into our in-memory cache. The hard work
 * of loading loose refs is done by get_ref_dir(), so we just need to recurse
 * through all of the sub-directories. We do not even need to care about
 * sorting, as traversal order does not matter to us.
 */
static void prime_ref_dir(struct ref_dir *dir)
{
	int i;
	for (i = 0; i < dir->nr; i++) {
		struct ref_entry *entry = dir->entries[i];
		if (entry->flag & REF_DIR)
			prime_ref_dir(get_ref_dir(entry));
	}
}

/*
 * A level in the reference hierarchy that is currently being iterated
 * through.
 */
struct cache_ref_iterator_level {
	/*
	 * The ref_dir being iterated over at this level. The ref_dir
	 * is sorted before being stored here.
	 */
	struct ref_dir *dir;

	/*
	 * The index of the current entry within dir (which might
	 * itself be a directory). If index == -1, then the iteration
	 * hasn't yet begun. If index == dir->nr, then the iteration
	 * through this level is over.
	 */
	int index;
};

/*
 * Represent an iteration through a ref_dir in the memory cache. The
 * iteration recurses through subdirectories.
 */
struct cache_ref_iterator {
	struct ref_iterator base;

	/*
	 * The number of levels currently on the stack. This is always
	 * at least 1, because when it becomes zero the iteration is
	 * ended and this struct is freed.
	 */
	size_t levels_nr;

	/* The number of levels that have been allocated on the stack */
	size_t levels_alloc;

	/*
	 * A stack of levels. levels[0] is the uppermost level that is
	 * being iterated over in this iteration. (This is not
	 * necessarily the top level in the references hierarchy. If we
	 * are iterating through a subtree, then levels[0] will hold
	 * the ref_dir for that subtree, and subsequent levels will go
	 * on from there.)
	 */
	struct cache_ref_iterator_level *levels;
};

/*
 * ref_iterator vtable method: advance to the next reference entry,
 * pushing a new level when a subdirectory is encountered and popping
 * exhausted levels. Returns ITER_OK with base.refname/oid/flags set,
 * or the result of ref_iterator_abort() when iteration is done.
 */
static int cache_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	while (1) {
		struct cache_ref_iterator_level *level =
			&iter->levels[iter->levels_nr - 1];
		struct ref_dir *dir = level->dir;
		struct ref_entry *entry;

		/* First visit to this level: make sure it is sorted. */
		if (level->index == -1)
			sort_ref_dir(dir);

		if (++level->index == level->dir->nr) {
			/* This level is exhausted; pop up a level */
			if (--iter->levels_nr == 0)
				return ref_iterator_abort(ref_iterator);

			continue;
		}

		entry = dir->entries[level->index];

		if (entry->flag & REF_DIR) {
			/* push down a level */
			ALLOC_GROW(iter->levels, iter->levels_nr + 1,
				   iter->levels_alloc);

			level = &iter->levels[iter->levels_nr++];
			level->dir = get_ref_dir(entry);
			level->index = -1;
		} else {
			iter->base.refname = entry->name;
			iter->base.oid = &entry->u.value.oid;
			iter->base.flags = entry->flag;
			return ITER_OK;
		}
	}
}

static enum peel_status peel_entry(struct ref_entry *entry, int repeel);

/*
 * ref_iterator vtable method: store the peeled value of the current
 * entry into *peeled. Must only be called after a successful
 * advance(); returns -1 if the entry cannot be peeled.
 */
static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
				   struct object_id *peeled)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;
	struct cache_ref_iterator_level *level;
	struct ref_entry *entry;

	level = &iter->levels[iter->levels_nr - 1];

	if (level->index == -1)
		die("BUG: peel called before advance for cache iterator");

	entry = level->dir->entries[level->index];

	if (peel_entry(entry, 0))
		return -1;
	oidcpy(peeled, &entry->u.value.peeled);
	return 0;
}

/*
 * ref_iterator vtable method: free the iterator's level stack and the
 * iterator itself.
 */
static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct cache_ref_iterator *iter =
		(struct cache_ref_iterator *)ref_iterator;

	free(iter->levels);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}

static struct
ref_iterator_vtable cache_ref_iterator_vtable = {
	cache_ref_iterator_advance,
	cache_ref_iterator_peel,
	cache_ref_iterator_abort
};

/*
 * Start an iteration over dir (recursing into subdirectories). The
 * caller drives it via the ref_iterator API; the iterator frees
 * itself when iteration ends or is aborted.
 */
static struct ref_iterator *cache_ref_iterator_begin(struct ref_dir *dir)
{
	struct cache_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	struct cache_ref_iterator_level *level;

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
	/* Preallocate room for a plausible nesting depth. */
	ALLOC_GROW(iter->levels, 10, iter->levels_alloc);

	iter->levels_nr = 1;
	level = &iter->levels[0];
	level->index = -1;
	level->dir = dir;

	return ref_iterator;
}

/* An in-memory, reference-counted snapshot of the packed-refs file. */
struct packed_ref_cache {
	struct ref_entry *root;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/*
	 * Iff the packed-refs file associated with this instance is
	 * currently locked for writing, this points at the associated
	 * lock (which is owned by somebody else). The referrer count
	 * is also incremented when the file is locked and decremented
	 * when it is unlocked.
	 */
	struct lock_file *lock;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Future: need to be in "struct repository"
 * when doing a full libification.
 */
struct files_ref_store {
	struct ref_store base;
	unsigned int store_flags;

	char *gitdir;
	char *gitcommondir;
	char *packed_refs_path;

	struct ref_entry *loose;
	struct packed_ref_cache *packed;
};

/* Lock used for the main packed-refs file: */
static struct lock_file packlock;

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_entry(packed_refs->root);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

/*
 * Drop the ref store's pointer to its packed-ref cache (releasing one
 * reference). It is a bug to call this while the packed-refs file is
 * locked.
 */
static void clear_packed_ref_cache(struct files_ref_store *refs)
{
	if (refs->packed) {
		struct packed_ref_cache *packed_refs = refs->packed;

		if (packed_refs->lock)
			die("internal error: packed-ref cache cleared while locked");
		refs->packed = NULL;
		release_packed_ref_cache(packed_refs);
	}
}

/* Free the in-memory cache of loose refs, forcing a re-read on next use. */
static void clear_loose_ref_cache(struct files_ref_store *refs)
{
	if (refs->loose) {
		free_ref_entry(refs->loose);
		refs->loose = NULL;
	}
}

/*
 * Create and return a new files_ref_store for the repository at
 * gitdir, deriving the common dir and the packed-refs path from it.
 * flags become the store's store_flags (capabilities).
 */
static struct ref_store *files_ref_store_create(const char *gitdir,
						unsigned int flags)
{
	struct files_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;
	struct strbuf sb = STRBUF_INIT;

	base_ref_store_init(ref_store, &refs_be_files);
	refs->store_flags = flags;

	refs->gitdir = xstrdup(gitdir);
	get_common_dir_noenv(&sb, gitdir);
	refs->gitcommondir = strbuf_detach(&sb, NULL);
	strbuf_addf(&sb, "%s/packed-refs", refs->gitcommondir);
	refs->packed_refs_path = strbuf_detach(&sb, NULL);

	return ref_store;
}

/*
 * Die if refs is not the main ref store. caller is used in any
 * necessary error messages.
 */
static void files_assert_main_repository(struct files_ref_store *refs,
					 const char *caller)
{
	if (refs->store_flags & REF_STORE_MAIN)
		return;

	die("BUG: operation%sonly allowed for main ref store", caller);
}

/*
 * Downcast ref_store to files_ref_store. Die if ref_store is not a
 * files_ref_store. required_flags is compared with ref_store's
 * store_flags to ensure the ref_store has all required capabilities.
 * "caller" is used in any necessary error messages.
 */
static struct files_ref_store *files_downcast(struct ref_store *ref_store,
					      unsigned int required_flags,
					      const char *caller)
{
	struct files_ref_store *refs;

	if (ref_store->be != &refs_be_files)
		die("BUG: ref_store is type\"%s\"not\"files\"in%s",
		    ref_store->be->name, caller);

	refs = (struct files_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: operation%srequires abilities 0x%x, but only have 0x%x",
		    caller, required_flags, refs->store_flags);

	return refs;
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled\n";

/*
 * Parse one line from a packed-refs file. Write the SHA1 to sha1.
 * Return a pointer to the refname within the line (null-terminated),
 * or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, unsigned char *sha1)
{
	const char *ref;

	/*
	 * 42: the answer to everything.
	 *
	 * In this case, it happens to be the answer to
	 *  40 (length of sha1 hex representation)
	 *  +1 (space in between hex and name)
	 *  +1 (newline at the end of the line)
	 */
	if (line->len <= 42)
		return NULL;

	if (get_sha1_hex(line->buf, sha1) < 0)
		return NULL;
	if (!isspace(line->buf[40]))
		return NULL;

	ref = line->buf + 41;
	if (isspace(*ref))
		return NULL;

	if (line->buf[line->len - 1] != '\n')
		return NULL;
	/* Strip the trailing newline in place. */
	line->buf[--line->len] = 0;

	return ref;
}

/*
 * Read f, which is a packed-refs file, into dir.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * No traits:
 *
 *  Probably no references are peeled. But if the file contains a
 *  peeled value for a reference, we will use it.
 *
 * peeled:
 *
 *  References under "refs/tags/", if they *can* be peeled, *are*
 *  peeled in this file. References outside of "refs/tags/" are
 *  probably not peeled even if they could have been, but if we find
 *  a peeled value for such a reference we will use it.
 *
 * fully-peeled:
 *
 *  All references in the file that can be peeled are peeled.
 *  Inversely (and this is more important), any references in the
 *  file for which no peeled value is recorded is not peelable. This
 *  trait should typically be written alongside "peeled" for
 *  compatibility with older clients, but we do not require it
 *  (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static void read_packed_refs(FILE *f, struct ref_dir *dir)
{
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;

	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		unsigned char sha1[20];
		const char *refname;
		const char *traits;

		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, sha1);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous:%s", refname);
				hashclr(sha1);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, sha1, flag, 0);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
			continue;
		}
		/*
		 * A "^<hex>" line gives the peeled value of the
		 * immediately preceding reference:
		 */
		if (last &&
		    line.buf[0] == '^' &&
		    line.len == PEELED_LINE_LENGTH &&
		    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
		    !get_sha1_hex(line.buf + 1, sha1)) {
			hashcpy(last->u.value.peeled.hash, sha1);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		}
	}

	strbuf_release(&line);
}

/* Return the path of the packed-refs file for this store. */
static const char *files_packed_refs_path(struct files_ref_store *refs)
{
	return refs->packed_refs_path;
}

/*
 * Append to sb the path of the reflog for refname (or the logs
 * directory itself when refname is NULL). Per-worktree refs and
 * pseudorefs live under gitdir; normal refs under the common dir.
 */
static void files_reflog_path(struct files_ref_store *refs,
			      struct strbuf *sb,
			      const char *refname)
{
	if (!refname) {
		/*
		 * FIXME: of course this is wrong in multi worktree
		 * setting. To be fixed real soon.
		 */
		strbuf_addf(sb, "%s/logs", refs->gitcommondir);
		return;
	}

	switch (ref_type(refname)) {
	case REF_TYPE_PER_WORKTREE:
	case REF_TYPE_PSEUDOREF:
		strbuf_addf(sb, "%s/logs/%s", refs->gitdir, refname);
		break;
	case REF_TYPE_NORMAL:
		strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
		break;
	default:
		die("BUG: unknown ref type%dof ref%s",
		    ref_type(refname), refname);
	}
}

/*
 * Append to sb the on-disk path of the loose ref refname, choosing
 * gitdir or the common dir by ref type (same policy as the reflogs
 * above).
 */
static void files_ref_path(struct files_ref_store *refs,
			   struct strbuf *sb,
			   const char *refname)
{
	switch (ref_type(refname)) {
	case REF_TYPE_PER_WORKTREE:
	case REF_TYPE_PSEUDOREF:
		strbuf_addf(sb, "%s/%s", refs->gitdir, refname);
		break;
	case REF_TYPE_NORMAL:
		strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
		break;
	default:
		die("BUG: unknown ref type%dof ref%s",
		    ref_type(refname), refname);
	}
}

/*
 * Get the packed_ref_cache for the specified files_ref_store,
 * creating it if necessary.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct files_ref_store *refs)
{
	const char *packed_refs_file = files_packed_refs_path(refs);

	/* Discard a stale cache if the file changed on disk. */
	if (refs->packed &&
	    !stat_validity_check(&refs->packed->validity, packed_refs_file))
		clear_packed_ref_cache(refs);

	if (!refs->packed) {
		FILE *f;

		refs->packed = xcalloc(1, sizeof(*refs->packed));
		acquire_packed_ref_cache(refs->packed);
		refs->packed->root = create_dir_entry(refs, "", 0, 0);
		/* A missing packed-refs file just means an empty cache. */
		f = fopen(packed_refs_file, "r");
		if (f) {
			stat_validity_update(&refs->packed->validity, fileno(f));
			read_packed_refs(f, get_ref_dir(refs->packed->root));
			fclose(f);
		}
	}
	return refs->packed;
}

/* Return the top-level ref_dir of the given packed-ref cache. */
static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->root);
}

static struct ref_dir *get_packed_refs(struct files_ref_store
*refs)1101{1102returnget_packed_ref_dir(get_packed_ref_cache(refs));1103}11041105/*1106 * Add a reference to the in-memory packed reference cache. This may1107 * only be called while the packed-refs file is locked (see1108 * lock_packed_refs()). To actually write the packed-refs file, call1109 * commit_packed_refs().1110 */1111static voidadd_packed_ref(struct files_ref_store *refs,1112const char*refname,const unsigned char*sha1)1113{1114struct packed_ref_cache *packed_ref_cache =get_packed_ref_cache(refs);11151116if(!packed_ref_cache->lock)1117die("internal error: packed refs not locked");1118add_ref_entry(get_packed_ref_dir(packed_ref_cache),1119create_ref_entry(refname, sha1, REF_ISPACKED,1));1120}11211122/*1123 * Read the loose references from the namespace dirname into dir1124 * (without recursing). dirname must end with '/'. dir must be the1125 * directory entry corresponding to dirname.1126 */1127static voidread_loose_refs(const char*dirname,struct ref_dir *dir)1128{1129struct files_ref_store *refs = dir->ref_store;1130DIR*d;1131struct dirent *de;1132int dirnamelen =strlen(dirname);1133struct strbuf refname;1134struct strbuf path = STRBUF_INIT;1135size_t path_baselen;11361137files_ref_path(refs, &path, dirname);1138 path_baselen = path.len;11391140 d =opendir(path.buf);1141if(!d) {1142strbuf_release(&path);1143return;1144}11451146strbuf_init(&refname, dirnamelen +257);1147strbuf_add(&refname, dirname, dirnamelen);11481149while((de =readdir(d)) != NULL) {1150unsigned char sha1[20];1151struct stat st;1152int flag;11531154if(de->d_name[0] =='.')1155continue;1156if(ends_with(de->d_name,".lock"))1157continue;1158strbuf_addstr(&refname, de->d_name);1159strbuf_addstr(&path, de->d_name);1160if(stat(path.buf, &st) <0) {1161;/* silently ignore */1162}else if(S_ISDIR(st.st_mode)) {1163strbuf_addch(&refname,'/');1164add_entry_to_dir(dir,1165create_dir_entry(refs, refname.buf,1166 refname.len,1));1167}else{1168if(!refs_resolve_ref_unsafe(&refs->base,1169 refname.buf,1170 
RESOLVE_REF_READING,1171 sha1, &flag)) {1172hashclr(sha1);1173 flag |= REF_ISBROKEN;1174}else if(is_null_sha1(sha1)) {1175/*1176 * It is so astronomically unlikely1177 * that NULL_SHA1 is the SHA-1 of an1178 * actual object that we consider its1179 * appearance in a loose reference1180 * file to be repo corruption1181 * (probably due to a software bug).1182 */1183 flag |= REF_ISBROKEN;1184}11851186if(check_refname_format(refname.buf,1187 REFNAME_ALLOW_ONELEVEL)) {1188if(!refname_is_safe(refname.buf))1189die("loose refname is dangerous:%s", refname.buf);1190hashclr(sha1);1191 flag |= REF_BAD_NAME | REF_ISBROKEN;1192}1193add_entry_to_dir(dir,1194create_ref_entry(refname.buf, sha1, flag,0));1195}1196strbuf_setlen(&refname, dirnamelen);1197strbuf_setlen(&path, path_baselen);1198}1199strbuf_release(&refname);1200strbuf_release(&path);1201closedir(d);1202}12031204static struct ref_dir *get_loose_refs(struct files_ref_store *refs)1205{1206if(!refs->loose) {1207/*1208 * Mark the top-level directory complete because we1209 * are about to read the only subdirectory that can1210 * hold references:1211 */1212 refs->loose =create_dir_entry(refs,"",0,0);1213/*1214 * Create an incomplete entry for "refs/":1215 */1216add_entry_to_dir(get_ref_dir(refs->loose),1217create_dir_entry(refs,"refs/",5,1));1218}1219returnget_ref_dir(refs->loose);1220}12211222/*1223 * Return the ref_entry for the given refname from the packed1224 * references. 
If it does not exist, return NULL.1225 */1226static struct ref_entry *get_packed_ref(struct files_ref_store *refs,1227const char*refname)1228{1229returnfind_ref_entry(get_packed_refs(refs), refname);1230}12311232/*1233 * A loose ref file doesn't exist; check for a packed ref.1234 */1235static intresolve_packed_ref(struct files_ref_store *refs,1236const char*refname,1237unsigned char*sha1,unsigned int*flags)1238{1239struct ref_entry *entry;12401241/*1242 * The loose reference file does not exist; check for a packed1243 * reference.1244 */1245 entry =get_packed_ref(refs, refname);1246if(entry) {1247hashcpy(sha1, entry->u.value.oid.hash);1248*flags |= REF_ISPACKED;1249return0;1250}1251/* refname is not a packed reference. */1252return-1;1253}12541255static intfiles_read_raw_ref(struct ref_store *ref_store,1256const char*refname,unsigned char*sha1,1257struct strbuf *referent,unsigned int*type)1258{1259struct files_ref_store *refs =1260files_downcast(ref_store, REF_STORE_READ,"read_raw_ref");1261struct strbuf sb_contents = STRBUF_INIT;1262struct strbuf sb_path = STRBUF_INIT;1263const char*path;1264const char*buf;1265struct stat st;1266int fd;1267int ret = -1;1268int save_errno;1269int remaining_retries =3;12701271*type =0;1272strbuf_reset(&sb_path);12731274files_ref_path(refs, &sb_path, refname);12751276 path = sb_path.buf;12771278stat_ref:1279/*1280 * We might have to loop back here to avoid a race1281 * condition: first we lstat() the file, then we try1282 * to read it as a link or as a file. 
But if somebody1283 * changes the type of the file (file <-> directory1284 * <-> symlink) between the lstat() and reading, then1285 * we don't want to report that as an error but rather1286 * try again starting with the lstat().1287 *1288 * We'll keep a count of the retries, though, just to avoid1289 * any confusing situation sending us into an infinite loop.1290 */12911292if(remaining_retries-- <=0)1293goto out;12941295if(lstat(path, &st) <0) {1296if(errno != ENOENT)1297goto out;1298if(resolve_packed_ref(refs, refname, sha1, type)) {1299 errno = ENOENT;1300goto out;1301}1302 ret =0;1303goto out;1304}13051306/* Follow "normalized" - ie "refs/.." symlinks by hand */1307if(S_ISLNK(st.st_mode)) {1308strbuf_reset(&sb_contents);1309if(strbuf_readlink(&sb_contents, path,0) <0) {1310if(errno == ENOENT || errno == EINVAL)1311/* inconsistent with lstat; retry */1312goto stat_ref;1313else1314goto out;1315}1316if(starts_with(sb_contents.buf,"refs/") &&1317!check_refname_format(sb_contents.buf,0)) {1318strbuf_swap(&sb_contents, referent);1319*type |= REF_ISSYMREF;1320 ret =0;1321goto out;1322}1323/*1324 * It doesn't look like a refname; fall through to just1325 * treating it like a non-symlink, and reading whatever it1326 * points to.1327 */1328}13291330/* Is it a directory? 
*/1331if(S_ISDIR(st.st_mode)) {1332/*1333 * Even though there is a directory where the loose1334 * ref is supposed to be, there could still be a1335 * packed ref:1336 */1337if(resolve_packed_ref(refs, refname, sha1, type)) {1338 errno = EISDIR;1339goto out;1340}1341 ret =0;1342goto out;1343}13441345/*1346 * Anything else, just open it and try to use it as1347 * a ref1348 */1349 fd =open(path, O_RDONLY);1350if(fd <0) {1351if(errno == ENOENT && !S_ISLNK(st.st_mode))1352/* inconsistent with lstat; retry */1353goto stat_ref;1354else1355goto out;1356}1357strbuf_reset(&sb_contents);1358if(strbuf_read(&sb_contents, fd,256) <0) {1359int save_errno = errno;1360close(fd);1361 errno = save_errno;1362goto out;1363}1364close(fd);1365strbuf_rtrim(&sb_contents);1366 buf = sb_contents.buf;1367if(starts_with(buf,"ref:")) {1368 buf +=4;1369while(isspace(*buf))1370 buf++;13711372strbuf_reset(referent);1373strbuf_addstr(referent, buf);1374*type |= REF_ISSYMREF;1375 ret =0;1376goto out;1377}13781379/*1380 * Please note that FETCH_HEAD has additional1381 * data after the sha.1382 */1383if(get_sha1_hex(buf, sha1) ||1384(buf[40] !='\0'&& !isspace(buf[40]))) {1385*type |= REF_ISBROKEN;1386 errno = EINVAL;1387goto out;1388}13891390 ret =0;13911392out:1393 save_errno = errno;1394strbuf_release(&sb_path);1395strbuf_release(&sb_contents);1396 errno = save_errno;1397return ret;1398}13991400static voidunlock_ref(struct ref_lock *lock)1401{1402/* Do not free lock->lk -- atexit() still looks at them */1403if(lock->lk)1404rollback_lock_file(lock->lk);1405free(lock->ref_name);1406free(lock);1407}14081409/*1410 * Lock refname, without following symrefs, and set *lock_p to point1411 * at a newly-allocated lock object. 
Fill in lock->old_oid, referent,1412 * and type similarly to read_raw_ref().1413 *1414 * The caller must verify that refname is a "safe" reference name (in1415 * the sense of refname_is_safe()) before calling this function.1416 *1417 * If the reference doesn't already exist, verify that refname doesn't1418 * have a D/F conflict with any existing references. extras and skip1419 * are passed to refs_verify_refname_available() for this check.1420 *1421 * If mustexist is not set and the reference is not found or is1422 * broken, lock the reference anyway but clear sha1.1423 *1424 * Return 0 on success. On failure, write an error message to err and1425 * return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.1426 *1427 * Implementation note: This function is basically1428 *1429 * lock reference1430 * read_raw_ref()1431 *1432 * but it includes a lot more code to1433 * - Deal with possible races with other processes1434 * - Avoid calling refs_verify_refname_available() when it can be1435 * avoided, namely if we were successfully able to read the ref1436 * - Generate informative error messages in the case of failure1437 */1438static intlock_raw_ref(struct files_ref_store *refs,1439const char*refname,int mustexist,1440const struct string_list *extras,1441const struct string_list *skip,1442struct ref_lock **lock_p,1443struct strbuf *referent,1444unsigned int*type,1445struct strbuf *err)1446{1447struct ref_lock *lock;1448struct strbuf ref_file = STRBUF_INIT;1449int attempts_remaining =3;1450int ret = TRANSACTION_GENERIC_ERROR;14511452assert(err);1453files_assert_main_repository(refs,"lock_raw_ref");14541455*type =0;14561457/* First lock the file so it can't change out from under us. 
*/14581459*lock_p = lock =xcalloc(1,sizeof(*lock));14601461 lock->ref_name =xstrdup(refname);1462files_ref_path(refs, &ref_file, refname);14631464retry:1465switch(safe_create_leading_directories(ref_file.buf)) {1466case SCLD_OK:1467break;/* success */1468case SCLD_EXISTS:1469/*1470 * Suppose refname is "refs/foo/bar". We just failed1471 * to create the containing directory, "refs/foo",1472 * because there was a non-directory in the way. This1473 * indicates a D/F conflict, probably because of1474 * another reference such as "refs/foo". There is no1475 * reason to expect this error to be transitory.1476 */1477if(refs_verify_refname_available(&refs->base, refname,1478 extras, skip, err)) {1479if(mustexist) {1480/*1481 * To the user the relevant error is1482 * that the "mustexist" reference is1483 * missing:1484 */1485strbuf_reset(err);1486strbuf_addf(err,"unable to resolve reference '%s'",1487 refname);1488}else{1489/*1490 * The error message set by1491 * refs_verify_refname_available() is1492 * OK.1493 */1494 ret = TRANSACTION_NAME_CONFLICT;1495}1496}else{1497/*1498 * The file that is in the way isn't a loose1499 * reference. Report it as a low-level1500 * failure.1501 */1502strbuf_addf(err,"unable to create lock file%s.lock; "1503"non-directory in the way",1504 ref_file.buf);1505}1506goto error_return;1507case SCLD_VANISHED:1508/* Maybe another process was tidying up. Try again. */1509if(--attempts_remaining >0)1510goto retry;1511/* fall through */1512default:1513strbuf_addf(err,"unable to create directory for%s",1514 ref_file.buf);1515goto error_return;1516}15171518if(!lock->lk)1519 lock->lk =xcalloc(1,sizeof(struct lock_file));15201521if(hold_lock_file_for_update(lock->lk, ref_file.buf, LOCK_NO_DEREF) <0) {1522if(errno == ENOENT && --attempts_remaining >0) {1523/*1524 * Maybe somebody just deleted one of the1525 * directories leading to ref_file. 
Try1526 * again:1527 */1528goto retry;1529}else{1530unable_to_lock_message(ref_file.buf, errno, err);1531goto error_return;1532}1533}15341535/*1536 * Now we hold the lock and can read the reference without1537 * fear that its value will change.1538 */15391540if(files_read_raw_ref(&refs->base, refname,1541 lock->old_oid.hash, referent, type)) {1542if(errno == ENOENT) {1543if(mustexist) {1544/* Garden variety missing reference. */1545strbuf_addf(err,"unable to resolve reference '%s'",1546 refname);1547goto error_return;1548}else{1549/*1550 * Reference is missing, but that's OK. We1551 * know that there is not a conflict with1552 * another loose reference because1553 * (supposing that we are trying to lock1554 * reference "refs/foo/bar"):1555 *1556 * - We were successfully able to create1557 * the lockfile refs/foo/bar.lock, so we1558 * know there cannot be a loose reference1559 * named "refs/foo".1560 *1561 * - We got ENOENT and not EISDIR, so we1562 * know that there cannot be a loose1563 * reference named "refs/foo/bar/baz".1564 */1565}1566}else if(errno == EISDIR) {1567/*1568 * There is a directory in the way. It might have1569 * contained references that have been deleted. If1570 * we don't require that the reference already1571 * exists, try to remove the directory so that it1572 * doesn't cause trouble when we want to rename the1573 * lockfile into place later.1574 */1575if(mustexist) {1576/* Garden variety missing reference. 
*/1577strbuf_addf(err,"unable to resolve reference '%s'",1578 refname);1579goto error_return;1580}else if(remove_dir_recursively(&ref_file,1581 REMOVE_DIR_EMPTY_ONLY)) {1582if(refs_verify_refname_available(1583&refs->base, refname,1584 extras, skip, err)) {1585/*1586 * The error message set by1587 * verify_refname_available() is OK.1588 */1589 ret = TRANSACTION_NAME_CONFLICT;1590goto error_return;1591}else{1592/*1593 * We can't delete the directory,1594 * but we also don't know of any1595 * references that it should1596 * contain.1597 */1598strbuf_addf(err,"there is a non-empty directory '%s' "1599"blocking reference '%s'",1600 ref_file.buf, refname);1601goto error_return;1602}1603}1604}else if(errno == EINVAL && (*type & REF_ISBROKEN)) {1605strbuf_addf(err,"unable to resolve reference '%s': "1606"reference broken", refname);1607goto error_return;1608}else{1609strbuf_addf(err,"unable to resolve reference '%s':%s",1610 refname,strerror(errno));1611goto error_return;1612}16131614/*1615 * If the ref did not exist and we are creating it,1616 * make sure there is no existing ref that conflicts1617 * with refname:1618 */1619if(refs_verify_refname_available(1620&refs->base, refname,1621 extras, skip, err))1622goto error_return;1623}16241625 ret =0;1626goto out;16271628error_return:1629unlock_ref(lock);1630*lock_p = NULL;16311632out:1633strbuf_release(&ref_file);1634return ret;1635}16361637/*1638 * Peel the entry (if possible) and return its new peel_status. If1639 * repeel is true, re-peel the entry even if there is an old peeled1640 * value that is already stored in it.1641 *1642 * It is OK to call this function with a packed reference entry that1643 * might be stale and might even refer to an object that has since1644 * been garbage-collected. 
In such a case, if the entry has1645 * REF_KNOWS_PEELED then leave the status unchanged and return1646 * PEEL_PEELED or PEEL_NON_TAG; otherwise, return PEEL_INVALID.1647 */1648static enum peel_status peel_entry(struct ref_entry *entry,int repeel)1649{1650enum peel_status status;16511652if(entry->flag & REF_KNOWS_PEELED) {1653if(repeel) {1654 entry->flag &= ~REF_KNOWS_PEELED;1655oidclr(&entry->u.value.peeled);1656}else{1657returnis_null_oid(&entry->u.value.peeled) ?1658 PEEL_NON_TAG : PEEL_PEELED;1659}1660}1661if(entry->flag & REF_ISBROKEN)1662return PEEL_BROKEN;1663if(entry->flag & REF_ISSYMREF)1664return PEEL_IS_SYMREF;16651666 status =peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);1667if(status == PEEL_PEELED || status == PEEL_NON_TAG)1668 entry->flag |= REF_KNOWS_PEELED;1669return status;1670}16711672static intfiles_peel_ref(struct ref_store *ref_store,1673const char*refname,unsigned char*sha1)1674{1675struct files_ref_store *refs =1676files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,1677"peel_ref");1678int flag;1679unsigned char base[20];16801681if(current_ref_iter && current_ref_iter->refname == refname) {1682struct object_id peeled;16831684if(ref_iterator_peel(current_ref_iter, &peeled))1685return-1;1686hashcpy(sha1, peeled.hash);1687return0;1688}16891690if(refs_read_ref_full(ref_store, refname,1691 RESOLVE_REF_READING, base, &flag))1692return-1;16931694/*1695 * If the reference is packed, read its ref_entry from the1696 * cache in the hope that we already know its peeled value.1697 * We only try this optimization on packed references because1698 * (a) forcing the filling of the loose reference cache could1699 * be expensive and (b) loose references anyway usually do not1700 * have REF_KNOWS_PEELED.1701 */1702if(flag & REF_ISPACKED) {1703struct ref_entry *r =get_packed_ref(refs, refname);1704if(r) {1705if(peel_entry(r,0))1706return-1;1707hashcpy(sha1, r->u.value.peeled.hash);1708return0;1709}1710}17111712returnpeel_object(base, 
sha1);1713}17141715struct files_ref_iterator {1716struct ref_iterator base;17171718struct packed_ref_cache *packed_ref_cache;1719struct ref_iterator *iter0;1720unsigned int flags;1721};17221723static intfiles_ref_iterator_advance(struct ref_iterator *ref_iterator)1724{1725struct files_ref_iterator *iter =1726(struct files_ref_iterator *)ref_iterator;1727int ok;17281729while((ok =ref_iterator_advance(iter->iter0)) == ITER_OK) {1730if(iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&1731ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)1732continue;17331734if(!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&1735!ref_resolves_to_object(iter->iter0->refname,1736 iter->iter0->oid,1737 iter->iter0->flags))1738continue;17391740 iter->base.refname = iter->iter0->refname;1741 iter->base.oid = iter->iter0->oid;1742 iter->base.flags = iter->iter0->flags;1743return ITER_OK;1744}17451746 iter->iter0 = NULL;1747if(ref_iterator_abort(ref_iterator) != ITER_DONE)1748 ok = ITER_ERROR;17491750return ok;1751}17521753static intfiles_ref_iterator_peel(struct ref_iterator *ref_iterator,1754struct object_id *peeled)1755{1756struct files_ref_iterator *iter =1757(struct files_ref_iterator *)ref_iterator;17581759returnref_iterator_peel(iter->iter0, peeled);1760}17611762static intfiles_ref_iterator_abort(struct ref_iterator *ref_iterator)1763{1764struct files_ref_iterator *iter =1765(struct files_ref_iterator *)ref_iterator;1766int ok = ITER_DONE;17671768if(iter->iter0)1769 ok =ref_iterator_abort(iter->iter0);17701771release_packed_ref_cache(iter->packed_ref_cache);1772base_ref_iterator_free(ref_iterator);1773return ok;1774}17751776static struct ref_iterator_vtable files_ref_iterator_vtable = {1777 files_ref_iterator_advance,1778 files_ref_iterator_peel,1779 files_ref_iterator_abort1780};17811782static struct ref_iterator *files_ref_iterator_begin(1783struct ref_store *ref_store,1784const char*prefix,unsigned int flags)1785{1786struct files_ref_store *refs;1787struct ref_dir *loose_dir, 
*packed_dir;1788struct ref_iterator *loose_iter, *packed_iter;1789struct files_ref_iterator *iter;1790struct ref_iterator *ref_iterator;17911792if(ref_paranoia <0)1793 ref_paranoia =git_env_bool("GIT_REF_PARANOIA",0);1794if(ref_paranoia)1795 flags |= DO_FOR_EACH_INCLUDE_BROKEN;17961797 refs =files_downcast(ref_store,1798 REF_STORE_READ | (ref_paranoia ?0: REF_STORE_ODB),1799"ref_iterator_begin");18001801 iter =xcalloc(1,sizeof(*iter));1802 ref_iterator = &iter->base;1803base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);18041805/*1806 * We must make sure that all loose refs are read before1807 * accessing the packed-refs file; this avoids a race1808 * condition if loose refs are migrated to the packed-refs1809 * file by a simultaneous process, but our in-memory view is1810 * from before the migration. We ensure this as follows:1811 * First, we call prime_ref_dir(), which pre-reads the loose1812 * references for the subtree into the cache. (If they've1813 * already been read, that's OK; we only need to guarantee1814 * that they're read before the packed refs, not *how much*1815 * before.) After that, we call get_packed_ref_cache(), which1816 * internally checks whether the packed-ref cache is up to1817 * date with what is on disk, and re-reads it if not.1818 */18191820 loose_dir =get_loose_refs(refs);18211822if(prefix && *prefix)1823 loose_dir =find_containing_dir(loose_dir, prefix,0);18241825if(loose_dir) {1826prime_ref_dir(loose_dir);1827 loose_iter =cache_ref_iterator_begin(loose_dir);1828}else{1829/* There's nothing to iterate over. 
*/1830 loose_iter =empty_ref_iterator_begin();1831}18321833 iter->packed_ref_cache =get_packed_ref_cache(refs);1834acquire_packed_ref_cache(iter->packed_ref_cache);1835 packed_dir =get_packed_ref_dir(iter->packed_ref_cache);18361837if(prefix && *prefix)1838 packed_dir =find_containing_dir(packed_dir, prefix,0);18391840if(packed_dir) {1841 packed_iter =cache_ref_iterator_begin(packed_dir);1842}else{1843/* There's nothing to iterate over. */1844 packed_iter =empty_ref_iterator_begin();1845}18461847 iter->iter0 =overlay_ref_iterator_begin(loose_iter, packed_iter);1848 iter->flags = flags;18491850return ref_iterator;1851}18521853/*1854 * Verify that the reference locked by lock has the value old_sha1.1855 * Fail if the reference doesn't exist and mustexist is set. Return 01856 * on success. On error, write an error message to err, set errno, and1857 * return a negative value.1858 */1859static intverify_lock(struct ref_store *ref_store,struct ref_lock *lock,1860const unsigned char*old_sha1,int mustexist,1861struct strbuf *err)1862{1863assert(err);18641865if(refs_read_ref_full(ref_store, lock->ref_name,1866 mustexist ? 
RESOLVE_REF_READING :0,1867 lock->old_oid.hash, NULL)) {1868if(old_sha1) {1869int save_errno = errno;1870strbuf_addf(err,"can't verify ref '%s'", lock->ref_name);1871 errno = save_errno;1872return-1;1873}else{1874oidclr(&lock->old_oid);1875return0;1876}1877}1878if(old_sha1 &&hashcmp(lock->old_oid.hash, old_sha1)) {1879strbuf_addf(err,"ref '%s' is at%sbut expected%s",1880 lock->ref_name,1881oid_to_hex(&lock->old_oid),1882sha1_to_hex(old_sha1));1883 errno = EBUSY;1884return-1;1885}1886return0;1887}18881889static intremove_empty_directories(struct strbuf *path)1890{1891/*1892 * we want to create a file but there is a directory there;1893 * if that is an empty directory (or a directory that contains1894 * only empty directories), remove them.1895 */1896returnremove_dir_recursively(path, REMOVE_DIR_EMPTY_ONLY);1897}18981899static intcreate_reflock(const char*path,void*cb)1900{1901struct lock_file *lk = cb;19021903returnhold_lock_file_for_update(lk, path, LOCK_NO_DEREF) <0? -1:0;1904}19051906/*1907 * Locks a ref returning the lock on success and NULL on failure.1908 * On failure errno is set to something meaningful.1909 */1910static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,1911const char*refname,1912const unsigned char*old_sha1,1913const struct string_list *extras,1914const struct string_list *skip,1915unsigned int flags,int*type,1916struct strbuf *err)1917{1918struct strbuf ref_file = STRBUF_INIT;1919struct ref_lock *lock;1920int last_errno =0;1921int mustexist = (old_sha1 && !is_null_sha1(old_sha1));1922int resolve_flags = RESOLVE_REF_NO_RECURSE;1923int resolved;19241925files_assert_main_repository(refs,"lock_ref_sha1_basic");1926assert(err);19271928 lock =xcalloc(1,sizeof(struct ref_lock));19291930if(mustexist)1931 resolve_flags |= RESOLVE_REF_READING;1932if(flags & REF_DELETING)1933 resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME;19341935files_ref_path(refs, &ref_file, refname);1936 resolved = !!refs_resolve_ref_unsafe(&refs->base,1937 refname, 
resolve_flags,1938 lock->old_oid.hash, type);1939if(!resolved && errno == EISDIR) {1940/*1941 * we are trying to lock foo but we used to1942 * have foo/bar which now does not exist;1943 * it is normal for the empty directory 'foo'1944 * to remain.1945 */1946if(remove_empty_directories(&ref_file)) {1947 last_errno = errno;1948if(!refs_verify_refname_available(1949&refs->base,1950 refname, extras, skip, err))1951strbuf_addf(err,"there are still refs under '%s'",1952 refname);1953goto error_return;1954}1955 resolved = !!refs_resolve_ref_unsafe(&refs->base,1956 refname, resolve_flags,1957 lock->old_oid.hash, type);1958}1959if(!resolved) {1960 last_errno = errno;1961if(last_errno != ENOTDIR ||1962!refs_verify_refname_available(&refs->base, refname,1963 extras, skip, err))1964strbuf_addf(err,"unable to resolve reference '%s':%s",1965 refname,strerror(last_errno));19661967goto error_return;1968}19691970/*1971 * If the ref did not exist and we are creating it, make sure1972 * there is no existing packed ref whose name begins with our1973 * refname, nor a packed ref whose name is a proper prefix of1974 * our refname.1975 */1976if(is_null_oid(&lock->old_oid) &&1977refs_verify_refname_available(&refs->base, refname,1978 extras, skip, err)) {1979 last_errno = ENOTDIR;1980goto error_return;1981}19821983 lock->lk =xcalloc(1,sizeof(struct lock_file));19841985 lock->ref_name =xstrdup(refname);19861987if(raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {1988 last_errno = errno;1989unable_to_lock_message(ref_file.buf, errno, err);1990goto error_return;1991}19921993if(verify_lock(&refs->base, lock, old_sha1, mustexist, err)) {1994 last_errno = errno;1995goto error_return;1996}1997goto out;19981999 error_return:2000unlock_ref(lock);2001 lock = NULL;20022003 out:2004strbuf_release(&ref_file);2005 errno = last_errno;2006return lock;2007}20082009/*2010 * Write an entry to the packed-refs file for the specified refname.2011 * If peeled is non-NULL, write it as the entry's 
peeled value.2012 */2013static voidwrite_packed_entry(FILE*fh,char*refname,unsigned char*sha1,2014unsigned char*peeled)2015{2016fprintf_or_die(fh,"%s %s\n",sha1_to_hex(sha1), refname);2017if(peeled)2018fprintf_or_die(fh,"^%s\n",sha1_to_hex(peeled));2019}20202021/*2022 * An each_ref_entry_fn that writes the entry to a packed-refs file.2023 */2024static intwrite_packed_entry_fn(struct ref_entry *entry,void*cb_data)2025{2026enum peel_status peel_status =peel_entry(entry,0);20272028if(peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)2029error("internal error:%sis not a valid packed reference!",2030 entry->name);2031write_packed_entry(cb_data, entry->name, entry->u.value.oid.hash,2032 peel_status == PEEL_PEELED ?2033 entry->u.value.peeled.hash : NULL);2034return0;2035}20362037/*2038 * Lock the packed-refs file for writing. Flags is passed to2039 * hold_lock_file_for_update(). Return 0 on success. On errors, set2040 * errno appropriately and return a nonzero value.2041 */2042static intlock_packed_refs(struct files_ref_store *refs,int flags)2043{2044static int timeout_configured =0;2045static int timeout_value =1000;2046struct packed_ref_cache *packed_ref_cache;20472048files_assert_main_repository(refs,"lock_packed_refs");20492050if(!timeout_configured) {2051git_config_get_int("core.packedrefstimeout", &timeout_value);2052 timeout_configured =1;2053}20542055if(hold_lock_file_for_update_timeout(2056&packlock,files_packed_refs_path(refs),2057 flags, timeout_value) <0)2058return-1;2059/*2060 * Get the current packed-refs while holding the lock. 
If the2061 * packed-refs file has been modified since we last read it,2062 * this will automatically invalidate the cache and re-read2063 * the packed-refs file.2064 */2065 packed_ref_cache =get_packed_ref_cache(refs);2066 packed_ref_cache->lock = &packlock;2067/* Increment the reference count to prevent it from being freed: */2068acquire_packed_ref_cache(packed_ref_cache);2069return0;2070}20712072/*2073 * Write the current version of the packed refs cache from memory to2074 * disk. The packed-refs file must already be locked for writing (see2075 * lock_packed_refs()). Return zero on success. On errors, set errno2076 * and return a nonzero value2077 */2078static intcommit_packed_refs(struct files_ref_store *refs)2079{2080struct packed_ref_cache *packed_ref_cache =2081get_packed_ref_cache(refs);2082int error =0;2083int save_errno =0;2084FILE*out;20852086files_assert_main_repository(refs,"commit_packed_refs");20872088if(!packed_ref_cache->lock)2089die("internal error: packed-refs not locked");20902091 out =fdopen_lock_file(packed_ref_cache->lock,"w");2092if(!out)2093die_errno("unable to fdopen packed-refs descriptor");20942095fprintf_or_die(out,"%s", PACKED_REFS_HEADER);2096do_for_each_entry_in_dir(get_packed_ref_dir(packed_ref_cache),20970, write_packed_entry_fn, out);20982099if(commit_lock_file(packed_ref_cache->lock)) {2100 save_errno = errno;2101 error = -1;2102}2103 packed_ref_cache->lock = NULL;2104release_packed_ref_cache(packed_ref_cache);2105 errno = save_errno;2106return error;2107}21082109/*2110 * Rollback the lockfile for the packed-refs file, and discard the2111 * in-memory packed reference cache. 
(The packed-refs file will be2112 * read anew if it is needed again after this function is called.)2113 */2114static voidrollback_packed_refs(struct files_ref_store *refs)2115{2116struct packed_ref_cache *packed_ref_cache =2117get_packed_ref_cache(refs);21182119files_assert_main_repository(refs,"rollback_packed_refs");21202121if(!packed_ref_cache->lock)2122die("internal error: packed-refs not locked");2123rollback_lock_file(packed_ref_cache->lock);2124 packed_ref_cache->lock = NULL;2125release_packed_ref_cache(packed_ref_cache);2126clear_packed_ref_cache(refs);2127}21282129struct ref_to_prune {2130struct ref_to_prune *next;2131unsigned char sha1[20];2132char name[FLEX_ARRAY];2133};21342135struct pack_refs_cb_data {2136unsigned int flags;2137struct ref_dir *packed_refs;2138struct ref_to_prune *ref_to_prune;2139};21402141/*2142 * An each_ref_entry_fn that is run over loose references only. If2143 * the loose reference can be packed, add an entry in the packed ref2144 * cache. If the reference should be pruned, also add it to2145 * ref_to_prune in the pack_refs_cb_data.2146 */2147static intpack_if_possible_fn(struct ref_entry *entry,void*cb_data)2148{2149struct pack_refs_cb_data *cb = cb_data;2150enum peel_status peel_status;2151struct ref_entry *packed_entry;2152int is_tag_ref =starts_with(entry->name,"refs/tags/");21532154/* Do not pack per-worktree refs: */2155if(ref_type(entry->name) != REF_TYPE_NORMAL)2156return0;21572158/* ALWAYS pack tags */2159if(!(cb->flags & PACK_REFS_ALL) && !is_tag_ref)2160return0;21612162/* Do not pack symbolic or broken refs: */2163if((entry->flag & REF_ISSYMREF) || !entry_resolves_to_object(entry))2164return0;21652166/* Add a packed ref cache entry equivalent to the loose entry. 
 */
	peel_status = peel_entry(entry, 1);
	if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
		die("internal error peeling reference %s (%s)",
		    entry->name, oid_to_hex(&entry->u.value.oid));
	packed_entry = find_ref_entry(cb->packed_refs, entry->name);
	if (packed_entry) {
		/* Overwrite existing packed entry with info from loose entry */
		packed_entry->flag = REF_ISPACKED | REF_KNOWS_PEELED;
		oidcpy(&packed_entry->u.value.oid, &entry->u.value.oid);
	} else {
		packed_entry = create_ref_entry(entry->name, entry->u.value.oid.hash,
						REF_ISPACKED | REF_KNOWS_PEELED,
						0);
		add_ref_entry(cb->packed_refs, packed_entry);
	}
	oidcpy(&packed_entry->u.value.peeled, &entry->u.value.peeled);

	/* Schedule the loose reference for pruning if requested. */
	if ((cb->flags & PACK_REFS_PRUNE)) {
		struct ref_to_prune *n;
		FLEX_ALLOC_STR(n, name, entry->name);
		hashcpy(n->sha1, entry->u.value.oid.hash);
		n->next = cb->ref_to_prune;
		cb->ref_to_prune = n;
	}
	return 0;
}

/*
 * Flags for try_remove_empty_parents(): which directory hierarchies
 * (the loose ref tree and/or the reflog tree) should be cleaned up.
 */
enum {
	REMOVE_EMPTY_PARENTS_REF = 0x01,
	REMOVE_EMPTY_PARENTS_REFLOG = 0x02
};

/*
 * Remove empty parent directories associated with the specified
 * reference and/or its reflog, but spare [logs/]refs/ and immediate
 * subdirs. flags is a combination of REMOVE_EMPTY_PARENTS_REF and/or
 * REMOVE_EMPTY_PARENTS_REFLOG.
 */
static void try_remove_empty_parents(struct files_ref_store *refs,
				     const char *refname,
				     unsigned int flags)
{
	struct strbuf buf = STRBUF_INIT;
	struct strbuf sb = STRBUF_INIT;
	char *p, *q;
	int i;

	strbuf_addstr(&buf, refname);
	p = buf.buf;
	/*
	 * Skip the first two path components ("refs/" and the
	 * category, e.g. "heads/"), which must never be removed.
	 */
	for (i = 0; i < 2; i++) { /* refs/{heads,tags,...}/ */
		while (*p && *p != '/')
			p++;
		/* tolerate duplicate slashes; see check_refname_format() */
		while (*p == '/')
			p++;
	}
	/*
	 * Walk backwards from the leaf, truncating one path component
	 * at a time and attempting rmdir() on each prefix in each of
	 * the two hierarchies. Once rmdir() fails for a hierarchy
	 * (directory non-empty or missing), stop trying that
	 * hierarchy by clearing its flag bit; quit when both bits are
	 * gone or we reach the protected prefix.
	 */
	q = buf.buf + buf.len;
	while (flags & (REMOVE_EMPTY_PARENTS_REF | REMOVE_EMPTY_PARENTS_REFLOG)) {
		while (q > p && *q != '/')
			q--;
		while (q > p && *(q-1) == '/')
			q--;
		if (q == p)
			break;
		strbuf_setlen(&buf, q - buf.buf);

		strbuf_reset(&sb);
		files_ref_path(refs, &sb, buf.buf);
		if ((flags & REMOVE_EMPTY_PARENTS_REF) && rmdir(sb.buf))
			flags &= ~REMOVE_EMPTY_PARENTS_REF;

		strbuf_reset(&sb);
		files_reflog_path(refs, &sb, buf.buf);
		if ((flags & REMOVE_EMPTY_PARENTS_REFLOG) && rmdir(sb.buf))
			flags &= ~REMOVE_EMPTY_PARENTS_REFLOG;
	}
	strbuf_release(&buf);
	strbuf_release(&sb);
}

/* make sure nobody touched the ref, and unlink */
static void prune_ref(struct files_ref_store *refs, struct ref_to_prune *r)
{
	struct ref_transaction *transaction;
	struct strbuf err = STRBUF_INIT;

	if (check_refname_format(r->name, 0))
		return;

	/*
	 * Delete through a transaction with the recorded old value
	 * (r->sha1) so that a concurrent update of the loose ref
	 * since it was packed aborts the deletion. Errors are
	 * reported but non-fatal: a leftover loose ref is harmless.
	 */
	transaction = ref_store_transaction_begin(&refs->base, &err);
	if (!transaction ||
	    ref_transaction_delete(transaction, r->name, r->sha1,
				   REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
	    ref_transaction_commit(transaction, &err)) {
		ref_transaction_free(transaction);
		error("%s", err.buf);
		strbuf_release(&err);
		return;
	}
	ref_transaction_free(transaction);
	strbuf_release(&err);
}

/* Prune each entry in the singly-linked list of refs scheduled for pruning. */
static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r)
{
	while (r) {
		prune_ref(refs, r);
		r = r->next;
	}
}

/*
 * Move all loose refs into the packed-refs file (via
 * pack_if_possible_fn), then optionally prune the now-redundant loose
 * refs if PACK_REFS_PRUNE is in flags. Dies on failure to write the
 * new packed-refs file.
 */
static int files_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE | REF_STORE_ODB,
			       "pack_refs");
	struct pack_refs_cb_data cbdata;

	memset(&cbdata, 0, sizeof(cbdata));
	cbdata.flags = flags;

	lock_packed_refs(refs, LOCK_DIE_ON_ERROR);
	cbdata.packed_refs = get_packed_refs(refs);

	do_for_each_entry_in_dir(get_loose_refs(refs), 0,
				 pack_if_possible_fn, &cbdata);

	if (commit_packed_refs(refs))
		die_errno("unable to overwrite old ref-pack file");

	prune_refs(refs, cbdata.ref_to_prune);
	return 0;
}

/*
 * Rewrite the packed-refs file, omitting any refs listed in
 * 'refnames'. On error, leave packed-refs unchanged, write an error
 * message to 'err', and return a nonzero value.
 *
 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
 */
static int repack_without_refs(struct files_ref_store *refs,
			       struct string_list *refnames, struct strbuf *err)
{
	struct ref_dir *packed;
	struct string_list_item *refname;
	int ret, needs_repacking = 0, removed = 0;

	files_assert_main_repository(refs, "repack_without_refs");
	assert(err);

	/* Look for a packed ref */
	for_each_string_list_item(refname, refnames) {
		if (get_packed_ref(refs, refname->string)) {
			needs_repacking = 1;
			break;
		}
	}

	/* Avoid locking if we have nothing to do */
	if (!needs_repacking)
		return 0; /* no refname exists in packed refs */

	if (lock_packed_refs(refs, 0)) {
		unable_to_lock_message(files_packed_refs_path(refs), errno, err);
		return -1;
	}
	packed = get_packed_refs(refs);

	/* Remove refnames from the cache */
	for_each_string_list_item(refname, refnames)
		if (remove_entry(packed, refname->string) != -1)
			removed = 1;
	if (!removed) {
		/*
		 * All packed entries disappeared while we were
		 * acquiring the lock.
		 */
		rollback_packed_refs(refs);
		return 0;
	}

	/* Write what remains */
	ret = commit_packed_refs(refs);
	if (ret)
		strbuf_addf(err, "unable to overwrite old ref-pack file: %s",
			    strerror(errno));
	return ret;
}

/*
 * Delete every ref in 'refnames', first from packed-refs (in one
 * rewrite), then each corresponding loose ref. If the packed-refs
 * rewrite fails, the loose refs are deliberately left alone: deleting
 * them could expose stale packed values. Returns nonzero if anything
 * failed.
 */
static int files_delete_refs(struct ref_store *ref_store,
			     struct string_list *refnames, unsigned int flags)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	int i, result = 0;

	if (!refnames->nr)
		return 0;

	result = repack_without_refs(refs, refnames, &err);
	if (result) {
		/*
		 * If we failed to rewrite the packed-refs file, then
		 * it is unsafe to try to remove loose refs, because
		 * doing so might expose an obsolete packed value for
		 * a reference that might even point at an object that
		 * has been garbage collected.
		 */
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);

		goto out;
	}

	for (i = 0; i < refnames->nr; i++) {
		const char *refname = refnames->items[i].string;

		if (refs_delete_ref(&refs->base, NULL, refname, NULL, flags))
			result |= error(_("could not remove reference %s"), refname);
	}

out:
	strbuf_release(&err);
	return result;
}

/*
 * People using contrib's git-new-workdir have .git/logs/refs ->
 * /some/other/path/.git/logs/refs, and that may live on another device.
 *
 * IOW, to avoid cross device rename errors, the temporary renamed log must
 * live into logs/refs.
 */
#define TMP_RENAMED_LOG  "refs/.tmp-renamed-log"

/* Callback state for rename_tmp_log_callback(). */
struct rename_cb {
	const char *tmp_renamed_log;	/* path the log was parked at */
	int true_errno;			/* errno from the failed rename() */
};

/*
 * raceproof_create_file() callback: try to rename the parked reflog
 * into its final location at 'path'. Returns 0 on success, -1 on
 * failure with errno set (normalized to EISDIR where needed).
 */
static int rename_tmp_log_callback(const char *path, void *cb_data)
{
	struct rename_cb *cb = cb_data;

	if (rename(cb->tmp_renamed_log, path)) {
		/*
		 * rename(a, b) when b is an existing directory ought
		 * to result in ISDIR, but Solaris 5.8 gives ENOTDIR.
		 * Sheesh. Record the true errno for error reporting,
		 * but report EISDIR to raceproof_create_file() so
		 * that it knows to retry.
		 */
		cb->true_errno = errno;
		if (errno == ENOTDIR)
			errno = EISDIR;
		return -1;
	} else {
		return 0;
	}
}

/*
 * Move the reflog previously parked at TMP_RENAMED_LOG into place as
 * the reflog for 'newrefname', creating leading directories and
 * retrying around races as needed. Returns 0 on success; on failure
 * reports an error and returns nonzero.
 */
static int rename_tmp_log(struct files_ref_store *refs, const char *newrefname)
{
	struct strbuf path = STRBUF_INIT;
	struct strbuf tmp = STRBUF_INIT;
	struct rename_cb cb;
	int ret;

	files_reflog_path(refs, &path, newrefname);
	files_reflog_path(refs, &tmp, TMP_RENAMED_LOG);
	cb.tmp_renamed_log = tmp.buf;
	ret = raceproof_create_file(path.buf, rename_tmp_log_callback, &cb);
	if (ret) {
		if (errno == EISDIR)
			error("directory not empty: %s", path.buf);
		else
			error("unable to move logfile %s to %s: %s",
			      tmp.buf, path.buf,
			      strerror(cb.true_errno));
	}

	strbuf_release(&path);
	strbuf_release(&tmp);
	return ret;
}

static int write_ref_to_lockfile(struct ref_lock *lock,
				 const unsigned char *sha1, struct strbuf *err);
static int commit_ref_update(struct files_ref_store *refs,
			     struct ref_lock *lock,
			     const unsigned char *sha1, const char *logmsg,
			     struct strbuf *err);

/*
 * Rename the reference 'oldrefname' to 'newrefname', carrying its
 * reflog along and writing 'logmsg' to the reflogs. The sequence is:
 * park the old reflog, delete the old ref, delete any pre-existing
 * new ref, move the reflog into place, then write the new ref. On any
 * failure after the old ref is deleted, attempt to roll back by
 * recreating the old ref and restoring its reflog. Returns 0 on
 * success, nonzero on error.
 */
static int files_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
	unsigned char sha1[20], orig_sha1[20];
	int flag = 0, logmoved = 0;
	struct ref_lock *lock;
	struct stat loginfo;
	struct strbuf sb_oldref = STRBUF_INIT;
	struct strbuf sb_newref = STRBUF_INIT;
	struct strbuf tmp_renamed_log = STRBUF_INIT;
	int log, ret;
	struct strbuf err = STRBUF_INIT;

	files_reflog_path(refs, &sb_oldref, oldrefname);
	files_reflog_path(refs, &sb_newref, newrefname);
	files_reflog_path(refs, &tmp_renamed_log, TMP_RENAMED_LOG);

	/* 'log' records whether the old ref has a reflog to migrate. */
	log = !lstat(sb_oldref.buf, &loginfo);
	if (log && S_ISLNK(loginfo.st_mode)) {
		ret = error("reflog for %s is a symlink", oldrefname);
		goto out;
	}

	if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
				     RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
				     orig_sha1, &flag)) {
		ret = error("refname %s not found", oldrefname);
		goto out;
	}

	if (flag & REF_ISSYMREF) {
		ret = error("refname %s is a symbolic ref, renaming it is not supported",
			    oldrefname);
		goto out;
	}
	if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
		ret = 1;
		goto out;
	}

	/* Park the old reflog so deleting the old ref doesn't remove it. */
	if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
		ret = error("unable to move logfile logs/%s to logs/" TMP_RENAMED_LOG ": %s",
			    oldrefname, strerror(errno));
		goto out;
	}

	if (refs_delete_ref(&refs->base, logmsg, oldrefname,
			    orig_sha1, REF_NODEREF)) {
		error("unable to delete old %s", oldrefname);
		goto rollback;
	}

	/*
	 * Since we are doing a shallow lookup, sha1 is not the
	 * correct value to pass to delete_ref as old_sha1. But that
	 * doesn't matter, because an old_sha1 check wouldn't add to
	 * the safety anyway; we want to delete the reference whatever
	 * its current value.
	 */
	if (!refs_read_ref_full(&refs->base, newrefname,
				RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
				sha1, NULL) &&
	    refs_delete_ref(&refs->base, NULL, newrefname,
			    NULL, REF_NODEREF)) {
		if (errno == EISDIR) {
			/*
			 * The new name is blocked by an (ideally
			 * empty) directory of loose refs; try to
			 * clear it out of the way.
			 */
			struct strbuf path = STRBUF_INIT;
			int result;

			files_ref_path(refs, &path, newrefname);
			result = remove_empty_directories(&path);
			strbuf_release(&path);

			if (result) {
				error("Directory not empty: %s", newrefname);
				goto rollback;
			}
		} else {
			error("unable to delete existing %s", newrefname);
			goto rollback;
		}
	}

	if (log && rename_tmp_log(refs, newrefname))
		goto rollback;

	logmoved = log;

	lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
				   REF_NODEREF, NULL, &err);
	if (!lock) {
		error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
		strbuf_release(&err);
		goto rollback;
	}
	hashcpy(lock->old_oid.hash, orig_sha1);

	if (write_ref_to_lockfile(lock, orig_sha1, &err) ||
	    commit_ref_update(refs, lock, orig_sha1, logmsg, &err)) {
		error("unable to write current sha1 into %s: %s", newrefname, err.buf);
		strbuf_release(&err);
		goto rollback;
	}

	ret = 0;
	goto out;

 rollback:
	/* Try to restore the old ref to its original value... */
	lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
				   REF_NODEREF, NULL, &err);
	if (!lock) {
		error("unable to lock %s for rollback: %s", oldrefname, err.buf);
		strbuf_release(&err);
		goto rollbacklog;
	}

	/*
	 * Suppress reflog writing during the rollback: the rollback
	 * itself should not appear as a ref update in the log.
	 */
	flag = log_all_ref_updates;
	log_all_ref_updates = LOG_REFS_NONE;
	if (write_ref_to_lockfile(lock, orig_sha1, &err) ||
	    commit_ref_update(refs, lock, orig_sha1, NULL, &err)) {
		error("unable to write current sha1 into %s: %s", oldrefname, err.buf);
		strbuf_release(&err);
	}
	log_all_ref_updates = flag;

 rollbacklog:
	/* ...and put the reflog back wherever it currently lives. */
	if (logmoved && rename(sb_newref.buf, sb_oldref.buf))
		error("unable to restore logfile %s from %s: %s",
		      oldrefname, newrefname, strerror(errno));
	if (!logmoved && log &&
	    rename(tmp_renamed_log.buf, sb_oldref.buf))
		error("unable to restore logfile %s from logs/" TMP_RENAMED_LOG ": %s",
		      oldrefname, strerror(errno));
	ret = 1;
 out:
	strbuf_release(&sb_newref);
	strbuf_release(&sb_oldref);
	strbuf_release(&tmp_renamed_log);

	return ret;
}

/* Close the lockfile's fd without committing it. 0 on success, -1 on error. */
static int close_ref(struct ref_lock *lock)
{
	if (close_lock_file(lock->lk))
		return -1;
	return 0;
}

/*
 * Commit the lockfile into place as the loose ref file. If a
 * directory currently occupies the target path, try to remove it
 * first. Returns 0 on success, -1 on error.
 */
static int commit_ref(struct ref_lock *lock)
{
	char *path = get_locked_file_path(lock->lk);
	struct stat st;

	if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
		/*
		 * There is a directory at the path we want to rename
		 * the lockfile to. Hopefully it is empty; try to
		 * delete it.
		 */
		size_t len = strlen(path);
		struct strbuf sb_path = STRBUF_INIT;

		/* sb_path takes ownership of 'path'; no free needed here. */
		strbuf_attach(&sb_path, path, len, len);

		/*
		 * If this fails, commit_lock_file() will also fail
		 * and will report the problem.
		 */
		remove_empty_directories(&sb_path);
		strbuf_release(&sb_path);
	} else {
		free(path);
	}

	if (commit_lock_file(lock->lk))
		return -1;
	return 0;
}

/*
 * raceproof_create_file() callback: open 'path' for appending,
 * creating it if needed, and store the fd through 'cb' (an int *).
 */
static int open_or_create_logfile(const char *path, void *cb)
{
	int *fd = cb;

	*fd = open(path, O_APPEND | O_WRONLY | O_CREAT, 0666);
	return (*fd < 0) ? -1 : 0;
}

/*
 * Create a reflog for a ref. If force_create = 0, only create the
 * reflog for certain refs (those for which should_autocreate_reflog
 * returns non-zero). Otherwise, create it regardless of the reference
 * name. If the logfile already existed or was created, return 0 and
 * set *logfd to the file descriptor opened for appending to the file.
 * If no logfile exists and we decided not to create one, return 0 and
 * set *logfd to -1. On failure, fill in *err, set *logfd to -1, and
 * return -1.
 */
static int log_ref_setup(struct files_ref_store *refs,
			 const char *refname, int force_create,
			 int *logfd, struct strbuf *err)
{
	struct strbuf logfile_sb = STRBUF_INIT;
	char *logfile;

	files_reflog_path(refs, &logfile_sb, refname);
	logfile = strbuf_detach(&logfile_sb, NULL);

	if (force_create || should_autocreate_reflog(refname)) {
		if (raceproof_create_file(logfile, open_or_create_logfile, logfd)) {
			if (errno == ENOENT)
				strbuf_addf(err, "unable to create directory for '%s': "
					    "%s", logfile, strerror(errno));
			else if (errno == EISDIR)
				strbuf_addf(err, "there are still logs under '%s'",
					    logfile);
			else
				strbuf_addf(err, "unable to append to '%s': %s",
					    logfile, strerror(errno));

			goto error;
		}
	} else {
		*logfd = open(logfile, O_APPEND | O_WRONLY, 0666);
		if (*logfd < 0) {
			if (errno == ENOENT || errno == EISDIR) {
				/*
				 * The logfile doesn't already exist,
				 * but that is not an error; it only
				 * means that we won't write log
				 * entries to it.
				 */
				;
			} else {
				strbuf_addf(err, "unable to append to '%s': %s",
					    logfile, strerror(errno));
				goto error;
			}
		}
	}

	if (*logfd >= 0)
		adjust_shared_perm(logfile);

	free(logfile);
	return 0;

error:
	free(logfile);
	return -1;
}

/*
 * "create_reflog" ref_store method: create the reflog for 'refname'
 * (see log_ref_setup() for the force_create semantics), then close
 * the fd since the caller only wants the file to exist.
 */
static int files_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "create_reflog");
	int fd;

	if (log_ref_setup(refs, refname, force_create, &fd, err))
		return -1;

	if (fd >= 0)
		close(fd);

	return 0;
}

/*
 * Format one reflog record ("old SP new SP committer [TAB msg] LF")
 * and append it to the already-open fd. Returns 0 on success, -1 on
 * a short or failed write.
 */
static int log_ref_write_fd(int fd, const unsigned char *old_sha1,
			    const unsigned char *new_sha1,
			    const char *committer, const char *msg)
{
	int msglen, written;
	unsigned maxlen, len;
	char *logrec;

	msglen = msg ? strlen(msg) : 0;
	/* 100 covers two 40-hex hashes, separators and the trailing LF. */
	maxlen = strlen(committer) + msglen + 100;
	logrec = xmalloc(maxlen);
	len = xsnprintf(logrec, maxlen, "%s %s %s\n",
			sha1_to_hex(old_sha1),
			sha1_to_hex(new_sha1),
			committer);
	if (msglen)
		/* Overwrite the trailing LF; copy_reflog_msg() adds its own. */
		len += copy_reflog_msg(logrec + len - 1, msg) - 1;

	written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
	free(logrec);
	if (written != len)
		return -1;

	return 0;
}

/*
 * Append a reflog entry for 'refname' recording the old_sha1 ->
 * new_sha1 transition with message 'msg'. Honors the global
 * log_all_ref_updates policy and REF_FORCE_CREATE_REFLOG in 'flags'.
 * On failure, fill in *err and return -1; if no reflog exists and
 * none should be created, return 0 silently.
 */
static int files_log_ref_write(struct files_ref_store *refs,
			       const char *refname, const unsigned char *old_sha1,
			       const unsigned char *new_sha1, const char *msg,
			       int flags, struct strbuf *err)
{
	int logfd, result;

	if (log_all_ref_updates == LOG_REFS_UNSET)
		log_all_ref_updates = is_bare_repository() ? LOG_REFS_NONE : LOG_REFS_NORMAL;

	result = log_ref_setup(refs, refname,
			       flags & REF_FORCE_CREATE_REFLOG,
			       &logfd, err);

	if (result)
		return result;

	if (logfd < 0)
		return 0;
	result = log_ref_write_fd(logfd, old_sha1, new_sha1,
				  git_committer_info(0), msg);
	if (result) {
		struct strbuf sb = STRBUF_INIT;
		int save_errno = errno;

		files_reflog_path(refs, &sb, refname);
		strbuf_addf(err, "unable to append to '%s': %s",
			    sb.buf, strerror(save_errno));
		strbuf_release(&sb);
		close(logfd);
		return -1;
	}
	if (close(logfd)) {
		struct strbuf sb = STRBUF_INIT;
		int save_errno = errno;

		files_reflog_path(refs, &sb, refname);
		strbuf_addf(err, "unable to append to '%s': %s",
			    sb.buf, strerror(save_errno));
		strbuf_release(&sb);
		return -1;
	}
	return 0;
}

/*
 * Write sha1 into the open lockfile, then close the lockfile. On
 * errors, rollback the lockfile, fill in *err and
 * return -1.
 */
static int write_ref_to_lockfile(struct ref_lock *lock,
				 const unsigned char *sha1, struct strbuf *err)
{
	static char term = '\n';
	struct object *o;
	int fd;

	o = parse_object(sha1);
	if (!o) {
		strbuf_addf(err,
			    "trying to write ref '%s' with nonexistent object %s",
			    lock->ref_name, sha1_to_hex(sha1));
		unlock_ref(lock);
		return -1;
	}
	if (o->type != OBJ_COMMIT && is_branch(lock->ref_name)) {
		strbuf_addf(err,
			    "trying to write non-commit object %s to branch '%s'",
			    sha1_to_hex(sha1), lock->ref_name);
		unlock_ref(lock);
		return -1;
	}
	fd = get_lock_file_fd(lock->lk);
	if (write_in_full(fd, sha1_to_hex(sha1), 40) != 40 ||
	    write_in_full(fd, &term, 1) != 1 ||
	    close_ref(lock) < 0) {
		strbuf_addf(err,
			    "couldn't write '%s'", get_lock_file_path(lock->lk));
		unlock_ref(lock);
		return -1;
	}
	return 0;
}

/*
 * Commit a change to a loose reference that has already been written
 * to the loose reference lockfile. Also update the reflogs if
 * necessary, using the specified lockmsg (which can be NULL).
 */
static int commit_ref_update(struct files_ref_store *refs,
			     struct ref_lock *lock,
			     const unsigned char *sha1, const char *logmsg,
			     struct strbuf *err)
{
	files_assert_main_repository(refs, "commit_ref_update");

	clear_loose_ref_cache(refs);
	if (files_log_ref_write(refs, lock->ref_name,
				lock->old_oid.hash, sha1,
				logmsg, 0, err)) {
		char *old_msg = strbuf_detach(err, NULL);
		strbuf_addf(err, "cannot update the ref '%s': %s",
			    lock->ref_name, old_msg);
		free(old_msg);
		unlock_ref(lock);
		return -1;
	}

	if (strcmp(lock->ref_name, "HEAD") != 0) {
		/*
		 * Special hack: If a branch is updated directly and HEAD
		 * points to it (may happen on the remote side of a push
		 * for example) then logically the HEAD reflog should be
		 * updated too.
		 * A generic solution implies reverse symref information,
		 * but finding all symrefs pointing to the given branch
		 * would be rather costly for this rare event (the direct
		 * update of a branch) to be worth it. So let's cheat and
		 * check with HEAD only which should cover 99% of all usage
		 * scenarios (even 100% of the default ones).
		 */
		unsigned char head_sha1[20];
		int head_flag;
		const char *head_ref;

		head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
						   RESOLVE_REF_READING,
						   head_sha1, &head_flag);
		if (head_ref && (head_flag & REF_ISSYMREF) &&
		    !strcmp(head_ref, lock->ref_name)) {
			struct strbuf log_err = STRBUF_INIT;
			if (files_log_ref_write(refs, "HEAD",
						lock->old_oid.hash, sha1,
						logmsg, 0, &log_err)) {
				error("%s", log_err.buf);
				strbuf_release(&log_err);
			}
		}
	}

	if (commit_ref(lock)) {
		strbuf_addf(err, "couldn't set '%s'", lock->ref_name);
		unlock_ref(lock);
		return -1;
	}

	unlock_ref(lock);
	return 0;
}

/*
 * Replace the locked ref file with a symbolic link to 'target'.
 * Compiled to a no-op (returns -1) on platforms without symlink
 * support. Returns 0 on success.
 */
static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
	int ret = -1;
#ifndef NO_SYMLINK_HEAD
	char *ref_path = get_locked_file_path(lock->lk);
	unlink(ref_path);
	ret = symlink(target, ref_path);
	free(ref_path);

	if (ret)
		fprintf(stderr, "no symlink - falling back to symbolic ref\n");
#endif
	return ret;
}

/*
 * Write a reflog entry for the symref update refname -> target, using
 * the value 'target' currently resolves to as the new value. Errors
 * are reported but not propagated.
 */
static void update_symref_reflog(struct files_ref_store *refs,
				 struct ref_lock *lock, const char *refname,
				 const char *target, const char *logmsg)
{
	struct strbuf err = STRBUF_INIT;
	unsigned char new_sha1[20];
	if (logmsg &&
	    !refs_read_ref_full(&refs->base, target,
				RESOLVE_REF_READING, new_sha1, NULL) &&
	    files_log_ref_write(refs, refname, lock->old_oid.hash,
				new_sha1, logmsg, 0, &err)) {
		error("%s", err.buf);
		strbuf_release(&err);
	}
}

/*
 * With the ref already locked, write it as a symref to 'target' —
 * either as an actual symlink (if prefer_symlink_refs and the
 * platform allows) or as a regular "ref: ..." file — and log the
 * update. Returns 0 on success.
 */
static int create_symref_locked(struct files_ref_store *refs,
				struct ref_lock *lock, const char *refname,
				const char *target, const char *logmsg)
{
	if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
		update_symref_reflog(refs, lock, refname, target, logmsg);
		return 0;
	}

	if (!fdopen_lock_file(lock->lk, "w"))
		return error("unable to fdopen %s: %s",
			     lock->lk->tempfile.filename.buf, strerror(errno));

	update_symref_reflog(refs, lock, refname, target, logmsg);

	/* no error check; commit_ref will check ferror */
	fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
	if (commit_ref(lock) < 0)
		return error("unable to write symref for %s: %s", refname,
			     strerror(errno));
	return 0;
}

/*
 * "create_symref" ref_store method: lock 'refname', then write it as
 * a symref pointing at 'target', logging 'logmsg'. Returns 0 on
 * success, -1 on failure.
 */
static int files_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "create_symref");
	struct strbuf err = STRBUF_INIT;
	struct ref_lock *lock;
	int ret;

	lock = lock_ref_sha1_basic(refs, refname, NULL,
				   NULL, NULL, REF_NODEREF, NULL,
				   &err);
	if (!lock) {
		error("%s", err.buf);
		strbuf_release(&err);
		return -1;
	}

	ret = create_symref_locked(refs, lock, refname, target, logmsg);
	unlock_ref(lock);
	return ret;
}

/*
 * Point the HEAD of the worktree at 'gitdir' at 'target', locking it
 * via LOCK_NO_DEREF and recording 'logmsg' in the reflog. Works for
 * both the main worktree ("HEAD") and linked worktrees
 * ("worktrees/<wt>/HEAD"). Returns 0 on success, -1 on failure.
 */
int set_worktree_head_symref(const char *gitdir, const char *target, const char *logmsg)
{
	/*
	 * FIXME: this obviously will not work well for future refs
	 * backends. This function needs to die.
	 */
	struct files_ref_store *refs =
		files_downcast(get_main_ref_store(),
			       REF_STORE_WRITE,
			       "set_head_symref");

	static struct lock_file head_lock;
	struct ref_lock *lock;
	struct strbuf head_path = STRBUF_INIT;
	const char *head_rel;
	int ret;

	strbuf_addf(&head_path, "%s/HEAD", absolute_path(gitdir));
	if (hold_lock_file_for_update(&head_lock, head_path.buf,
				      LOCK_NO_DEREF) < 0) {
		struct strbuf err = STRBUF_INIT;
		unable_to_lock_message(head_path.buf, errno, &err);
		error("%s", err.buf);
		strbuf_release(&err);
		strbuf_release(&head_path);
		return -1;
	}

	/* head_rel will be "HEAD" for the main tree, "worktrees/wt/HEAD" for
	   linked trees */
	head_rel = remove_leading_path(head_path.buf,
				       absolute_path(get_git_common_dir()));
	/* to make use of create_symref_locked(), initialize ref_lock */
	lock = xcalloc(1, sizeof(struct ref_lock));
	lock->lk = &head_lock;
	lock->ref_name = xstrdup(head_rel);

	ret = create_symref_locked(refs, lock, head_rel, target, logmsg);

	unlock_ref(lock); /* will free lock */
	strbuf_release(&head_path);
	return ret;
}

/*
 * "reflog_exists" ref_store method: nonzero iff a regular reflog file
 * exists for 'refname'.
 */
static int files_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ, "reflog_exists");
	struct strbuf sb = STRBUF_INIT;
	struct stat st;
	int ret;

	files_reflog_path(refs, &sb, refname);
	ret = !lstat(sb.buf, &st) && S_ISREG(st.st_mode);
	strbuf_release(&sb);
	return ret;
}

/*
 * "delete_reflog" ref_store method: remove the reflog file for
 * 'refname' (and any resulting empty parent directories, via
 * remove_path()).
 */
static int files_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_WRITE, "delete_reflog");
	struct strbuf sb = STRBUF_INIT;
	int ret;

	files_reflog_path(refs, &sb, refname);
	ret = remove_path(sb.buf);
	strbuf_release(&sb);
	return ret;
}

/*
 * Parse one reflog line held in 'sb' and, if well-formed, invoke 'fn'
 * with its fields. A malformed line is silently skipped (returns 0);
 * otherwise returns whatever 'fn' returns.
 */
static int show_one_reflog_ent(struct strbuf *sb, each_reflog_ent_fn fn, void *cb_data)
{
	struct object_id ooid, noid;
	char *email_end, *message;
	unsigned long timestamp;
	int tz;
	const char *p = sb->buf;

	/* old SP new SP name <email> SP time TAB msg LF */
	if (!sb->len || sb->buf[sb->len - 1] != '\n' ||
	    parse_oid_hex(p, &ooid, &p) || *p++ != ' ' ||
	    parse_oid_hex(p, &noid, &p) || *p++ != ' ' ||
	    !(email_end = strchr(p, '>')) ||
	    email_end[1] != ' ' ||
	    !(timestamp = strtoul(email_end + 2, &message, 10)) ||
	    !message || message[0] != ' ' ||
	    (message[1] != '+' && message[1] != '-') ||
	    !isdigit(message[2]) || !isdigit(message[3]) ||
	    !isdigit(message[4]) || !isdigit(message[5]))
		return 0; /* corrupt? */
	/* Terminate the identity string just after the '>'. */
	email_end[1] = '\0';
	tz = strtol(message + 1, NULL, 10);
	/* Skip " +zzzz" (and the TAB, if any) to reach the message text. */
	if (message[6] != '\t')
		message += 6;
	else
		message += 7;
	return fn(&ooid, &noid, p, timestamp, tz, message, cb_data);
}

/*
 * Scan backwards from 'scan' (exclusive) towards 'bob' (beginning of
 * buffer) looking for a newline.
 */
static char *find_beginning_of_line(char *bob, char *scan)
{
	while (bob < scan && *(--scan) != '\n')
		; /* keep scanning backwards */
	/*
	 * Return either beginning of the buffer, or LF at the end of
	 * the previous line.
	 */
	return scan;
}

/*
 * "for_each_reflog_ent_reverse" ref_store method: iterate over the
 * reflog entries of 'refname' newest-first. Reads the file backwards
 * in BUFSIZ chunks, reassembling lines that straddle chunk
 * boundaries in 'sb'. Stops early if 'fn' returns nonzero.
 */
static int files_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					     const char *refname,
					     each_reflog_ent_fn fn,
					     void *cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "for_each_reflog_ent_reverse");
	struct strbuf sb = STRBUF_INIT;
	FILE *logfp;
	long pos;
	int ret = 0, at_tail = 1;

	files_reflog_path(refs, &sb, refname);
	logfp = fopen(sb.buf, "r");
	strbuf_release(&sb);
	if (!logfp)
		return -1;

	/* Jump to the end */
	if (fseek(logfp, 0, SEEK_END) < 0)
		return error("cannot seek back reflog for %s: %s",
			     refname, strerror(errno));
	pos = ftell(logfp);
	while (!ret && 0 < pos) {
		int cnt;
		size_t nread;
		char buf[BUFSIZ];
		char *endp, *scanp;

		/* Fill next block from the end */
		cnt = (sizeof(buf) < pos) ? sizeof(buf) : pos;
		if (fseek(logfp, pos - cnt, SEEK_SET))
			return error("cannot seek back reflog for %s: %s",
				     refname, strerror(errno));
		nread = fread(buf, cnt, 1, logfp);
		if (nread != 1)
			return error("cannot read %d bytes from reflog for %s: %s",
				     cnt, refname, strerror(errno));
		pos -= cnt;

		scanp = endp = buf + cnt;
		if (at_tail && scanp[-1] == '\n')
			/* Looking at the final LF at the end of the file */
			scanp--;
		at_tail = 0;

		while (buf < scanp) {
			/*
			 * terminating LF of the previous line, or the beginning
			 * of the buffer.
			 */
			char *bp;

			bp = find_beginning_of_line(buf, scanp);

			if (*bp == '\n') {
				/*
				 * The newline is the end of the previous line,
				 * so we know we have complete line starting
				 * at (bp + 1). Prefix it onto any prior data
				 * we collected for the line and process it.
				 */
				strbuf_splice(&sb, 0, 0, bp + 1, endp - (bp + 1));
				scanp = bp;
				endp = bp + 1;
				ret = show_one_reflog_ent(&sb, fn, cb_data);
				strbuf_reset(&sb);
				if (ret)
					break;
			} else if (!pos) {
				/*
				 * We are at the start of the buffer, and the
				 * start of the file; there is no previous
				 * line, and we have everything for this one.
				 * Process it, and we can end the loop.
				 */
				strbuf_splice(&sb, 0, 0, buf, endp - buf);
				ret = show_one_reflog_ent(&sb, fn, cb_data);
				strbuf_reset(&sb);
				break;
			}

			if (bp == buf) {
				/*
				 * We are at the start of the buffer, and there
				 * is more file to read backwards. Which means
				 * we are in the middle of a line. Note that we
				 * may get here even if *bp was a newline; that
				 * just means we are at the exact end of the
				 * previous line, rather than some spot in the
				 * middle.
				 *
				 * Save away what we have to be combined with
				 * the data from the next read.
				 */
				strbuf_splice(&sb, 0, 0, buf, endp - buf);
				break;
			}
		}

	}
	if (!ret && sb.len)
		die("BUG: reverse reflog parser had leftover data");

	fclose(logfp);
	strbuf_release(&sb);
	return ret;
}

/*
 * "for_each_reflog_ent" ref_store method: iterate over the reflog
 * entries of 'refname' in file (oldest-first) order, stopping early
 * if 'fn' returns nonzero.
 */
static int files_for_each_reflog_ent(struct ref_store *ref_store,
				     const char *refname,
				     each_reflog_ent_fn fn, void *cb_data)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "for_each_reflog_ent");
	FILE *logfp;
	struct strbuf sb = STRBUF_INIT;
	int ret = 0;

	files_reflog_path(refs, &sb, refname);
	logfp = fopen(sb.buf, "r");
	strbuf_release(&sb);
	if (!logfp)
		return -1;

	while (!ret && !strbuf_getwholeline(&sb, logfp, '\n'))
		ret = show_one_reflog_ent(&sb, fn, cb_data);
	fclose(logfp);
	strbuf_release(&sb);
	return ret;
}

/*
 * Iterator over all refs that have reflogs, implemented on top of a
 * dir_iterator walking the logs/ directory.
 */
struct files_reflog_iterator {
	struct ref_iterator base;

	struct ref_store *ref_store;
	struct dir_iterator *dir_iterator;
	struct object_id oid;	/* storage for base.oid of the current entry */
};

/*
 * ref_iterator "advance" method: step to the next reflog file,
 * skipping directories, dotfiles and *.lock files, and resolve the
 * corresponding ref.
 */
static int files_reflog_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct files_reflog_iterator *iter =
		(struct files_reflog_iterator *)ref_iterator;
	struct dir_iterator *diter = iter->dir_iterator;
	int ok;

	while ((ok = dir_iterator_advance(diter)) == ITER_OK) {
		int flags;

		if (!S_ISREG(diter->st.st_mode))
			continue;
		if (diter->basename[0] == '.')
			continue;
		if (ends_with(diter->basename, ".lock"))
			continue;

		if (refs_read_ref_full(iter->ref_store,
				       diter->relative_path, 0,
				       iter->oid.hash, &flags)) {
			error("bad ref for %s", diter->path.buf);
			continue;
		}

		iter->base.refname = diter->relative_path;
		iter->base.oid = &iter->oid;
		iter->base.flags = flags;
		return ITER_OK;
	}

	iter->dir_iterator = NULL;
	if (ref_iterator_abort(ref_iterator) == ITER_ERROR)
		ok = ITER_ERROR;
	return ok;
}

/* ref_iterator "peel" method: peeling makes no sense for reflogs. */
static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
				      struct object_id *peeled)
{
	die("BUG: ref_iterator_peel() called for reflog_iterator");
}

/* ref_iterator "abort" method: tear down the dir_iterator and free. */
static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct files_reflog_iterator *iter =
		(struct files_reflog_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->dir_iterator)
		ok = dir_iterator_abort(iter->dir_iterator);

	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable files_reflog_iterator_vtable = {
	files_reflog_iterator_advance,
	files_reflog_iterator_peel,
	files_reflog_iterator_abort
};

/*
 * "reflog_iterator_begin" ref_store method: return an iterator over
 * all refs that have a reflog.
 */
static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ,
			       "reflog_iterator_begin");
	struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
	struct ref_iterator *ref_iterator = &iter->base;
	struct strbuf sb = STRBUF_INIT;

	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
	files_reflog_path(refs, &sb, NULL);
	iter->dir_iterator = dir_iterator_begin(sb.buf);
	iter->ref_store = ref_store;
	strbuf_release(&sb);
	return ref_iterator;
}

/*
 * Given a sorted string_list of refnames, reject the transaction if
 * any name appears more than once. Returns 0 if all names are
 * distinct, 1 (with *err filled in) otherwise.
 */
static int ref_update_reject_duplicates(struct string_list *refnames,
					struct strbuf *err)
{
	int i, n = refnames->nr;

	assert(err);

	for (i = 1; i < n; i++)
		if (!strcmp(refnames->items[i - 1].string, refnames->items[i].string)) {
			strbuf_addf(err,
				    "multiple updates for ref '%s' not allowed.",
				    refnames->items[i].string);
			return 1;
		}
	return 0;
}

/*
 * If update is a direct update of head_ref (the reference pointed to
 * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
 */
static int split_head_update(struct ref_update *update,
			     struct ref_transaction *transaction,
			     const char *head_ref,
			     struct string_list *affected_refnames,
			     struct strbuf *err)
{
	struct string_list_item *item;
	struct ref_update *new_update;

	if ((update->flags & REF_LOG_ONLY) ||
	    (update->flags & REF_ISPRUNING) ||
	    (update->flags & REF_UPDATE_VIA_HEAD))
		return 0;

	if (strcmp(update->refname, head_ref))
		return 0;

	/*
	 * First make sure that HEAD is not already in the
	 * transaction. This insertion is O(N) in the transaction
	 * size, but it happens at most once per transaction.
	 */
	item = string_list_insert(affected_refnames, "HEAD");
	if (item->util) {
		/* An entry already existed */
		strbuf_addf(err,
			    "multiple updates for 'HEAD' (including one "
			    "via its referent '%s') are not allowed",
			    update->refname);
		return TRANSACTION_NAME_CONFLICT;
	}

	new_update = ref_transaction_add_update(
			transaction, "HEAD",
			update->flags | REF_LOG_ONLY | REF_NODEREF,
			update->new_sha1, update->old_sha1,
			update->msg);

	item->util = new_update;

	return 0;
}

/*
 * update is for a symref that points at referent and doesn't have
 * REF_NODEREF set. Split it into two updates:
 * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
 * - A new, separate update for the referent reference
 * Note that the new update will itself be subject to splitting when
 * the iteration gets to it.
 */
static int split_symref_update(struct files_ref_store *refs,
			       struct ref_update *update,
			       const char *referent,
			       struct ref_transaction *transaction,
			       struct string_list *affected_refnames,
			       struct strbuf *err)
{
	struct string_list_item *item;
	struct ref_update *new_update;
	unsigned int new_flags;

	/*
	 * First make sure that referent is not already in the
	 * transaction. This insertion is O(N) in the transaction
	 * size, but it happens at most once per symref in a
	 * transaction.
	 */
	item = string_list_insert(affected_refnames, referent);
	if (item->util) {
		/* An entry already existed */
		strbuf_addf(err,
			    "multiple updates for '%s' (including one "
			    "via symref '%s') are not allowed",
			    referent, update->refname);
		return TRANSACTION_NAME_CONFLICT;
	}

	new_flags = update->flags;
	if (!strcmp(update->refname, "HEAD")) {
		/*
		 * Record that the new update came via HEAD, so that
		 * when we process it, split_head_update() doesn't try
		 * to add another reflog update for HEAD. Note that
		 * this bit will be propagated if the new_update
		 * itself needs to be split.
		 */
		new_flags |= REF_UPDATE_VIA_HEAD;
	}

	new_update = ref_transaction_add_update(
			transaction, referent, new_flags,
			update->new_sha1, update->old_sha1,
			update->msg);

	new_update->parent_update = update;

	/*
	 * Change the symbolic ref update to log only. Also, it
	 * doesn't need to check its old SHA-1 value, as that will be
	 * done when new_update is processed.
	 */
	update->flags |= REF_LOG_ONLY | REF_NODEREF;
	update->flags &= ~REF_HAVE_OLD;

	item->util = new_update;

	return 0;
}

/*
 * Return the refname under which update was originally requested.
 */
static const char *original_update_refname(struct ref_update *update)
{
	while (update->parent_update)
		update = update->parent_update;

	return update->refname;
}

/*
 * Check whether the REF_HAVE_OLD and old_oid values stored in update
 * are consistent with oid, which is the reference's current value. If
 * everything is OK, return 0; otherwise, write an error message to
 * err and return -1.
 */
static int check_old_oid(struct ref_update *update, struct object_id *oid,
			 struct strbuf *err)
{
	if (!(update->flags & REF_HAVE_OLD) ||
	    !hashcmp(oid->hash, update->old_sha1))
		return 0;

	if (is_null_sha1(update->old_sha1))
		strbuf_addf(err, "cannot lock ref '%s': "
			    "reference already exists",
			    original_update_refname(update));
	else if (is_null_oid(oid))
		strbuf_addf(err, "cannot lock ref '%s': "
			    "reference is missing but expected %s",
			    original_update_refname(update),
			    sha1_to_hex(update->old_sha1));
	else
		strbuf_addf(err, "cannot lock ref '%s': "
			    "is at %s but expected %s",
			    original_update_refname(update),
			    oid_to_hex(oid),
			    sha1_to_hex(update->old_sha1));

	return -1;
}

/*
 * Prepare for carrying out update:
 * - Lock the reference referred to by update.
 * - Read the reference under lock.
 * - Check that its old SHA-1 value (if specified) is correct, and in
 *   any case record it in update->lock->old_oid for later use when
 *   writing the reflog.
 * - If it is a symref update without REF_NODEREF, split it up into a
 *   REF_LOG_ONLY update of the symref and add a separate update for
 *   the referent to transaction.
 * - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
 *   update of HEAD.
 */
static int lock_ref_for_update(struct files_ref_store *refs,
			       struct ref_update *update,
			       struct ref_transaction *transaction,
			       const char *head_ref,
			       struct string_list *affected_refnames,
			       struct strbuf *err)
{
	struct strbuf referent = STRBUF_INIT;
	int mustexist = (update->flags & REF_HAVE_OLD) &&
		!is_null_sha1(update->old_sha1);
	int ret;
	struct ref_lock *lock;

	files_assert_main_repository(refs, "lock_ref_for_update");

	if ((update->flags & REF_HAVE_NEW) && is_null_sha1(update->new_sha1))
		update->flags |= REF_DELETING;

	if (head_ref) {
		ret = split_head_update(update, transaction, head_ref,
					affected_refnames, err);
		if (ret)
			return ret;
	}

	ret = lock_raw_ref(refs, update->refname, mustexist,
			   affected_refnames, NULL,
			   &lock, &referent,
			   &update->type, err);
	if (ret) {
		char *reason;

		reason = strbuf_detach(err, NULL);
		strbuf_addf(err, "cannot lock ref '%s': %s",
			    original_update_refname(update), reason);
		free(reason);
		return ret;
	}

	update->backend_data = lock;

	if (update->type & REF_ISSYMREF) {
		if (update->flags & REF_NODEREF) {
			/*
			 * We won't be reading the referent as part of
			 * the transaction, so we have to read it here
			 * to record and possibly check old_sha1:
			 */
			if (refs_read_ref_full(&refs->base,
					       referent.buf, 0,
					       lock->old_oid.hash, NULL)) {
				if (update->flags & REF_HAVE_OLD) {
					strbuf_addf(err, "cannot lock ref '%s': "
						    "error reading reference",
						    original_update_refname(update));
					return -1;
				}
			} else if (check_old_oid(update, &lock->old_oid, err)) {
				return TRANSACTION_GENERIC_ERROR;
			}
		} else {
			/*
			 * Create a new update for the reference this
			 * symref is pointing at.
Also, we will record3574 * and verify old_sha1 for this update as part3575 * of processing the split-off update, so we3576 * don't have to do it here.3577 */3578 ret =split_symref_update(refs, update,3579 referent.buf, transaction,3580 affected_refnames, err);3581if(ret)3582return ret;3583}3584}else{3585struct ref_update *parent_update;35863587if(check_old_oid(update, &lock->old_oid, err))3588return TRANSACTION_GENERIC_ERROR;35893590/*3591 * If this update is happening indirectly because of a3592 * symref update, record the old SHA-1 in the parent3593 * update:3594 */3595for(parent_update = update->parent_update;3596 parent_update;3597 parent_update = parent_update->parent_update) {3598struct ref_lock *parent_lock = parent_update->backend_data;3599oidcpy(&parent_lock->old_oid, &lock->old_oid);3600}3601}36023603if((update->flags & REF_HAVE_NEW) &&3604!(update->flags & REF_DELETING) &&3605!(update->flags & REF_LOG_ONLY)) {3606if(!(update->type & REF_ISSYMREF) &&3607!hashcmp(lock->old_oid.hash, update->new_sha1)) {3608/*3609 * The reference already has the desired3610 * value, so we don't need to write it.3611 */3612}else if(write_ref_to_lockfile(lock, update->new_sha1,3613 err)) {3614char*write_err =strbuf_detach(err, NULL);36153616/*3617 * The lock was freed upon failure of3618 * write_ref_to_lockfile():3619 */3620 update->backend_data = NULL;3621strbuf_addf(err,3622"cannot update ref '%s':%s",3623 update->refname, write_err);3624free(write_err);3625return TRANSACTION_GENERIC_ERROR;3626}else{3627 update->flags |= REF_NEEDS_COMMIT;3628}3629}3630if(!(update->flags & REF_NEEDS_COMMIT)) {3631/*3632 * We didn't call write_ref_to_lockfile(), so3633 * the lockfile is still open. 
Close it to3634 * free up the file descriptor:3635 */3636if(close_ref(lock)) {3637strbuf_addf(err,"couldn't close '%s.lock'",3638 update->refname);3639return TRANSACTION_GENERIC_ERROR;3640}3641}3642return0;3643}36443645static intfiles_transaction_commit(struct ref_store *ref_store,3646struct ref_transaction *transaction,3647struct strbuf *err)3648{3649struct files_ref_store *refs =3650files_downcast(ref_store, REF_STORE_WRITE,3651"ref_transaction_commit");3652int ret =0, i;3653struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;3654struct string_list_item *ref_to_delete;3655struct string_list affected_refnames = STRING_LIST_INIT_NODUP;3656char*head_ref = NULL;3657int head_type;3658struct object_id head_oid;3659struct strbuf sb = STRBUF_INIT;36603661assert(err);36623663if(transaction->state != REF_TRANSACTION_OPEN)3664die("BUG: commit called for transaction that is not open");36653666if(!transaction->nr) {3667 transaction->state = REF_TRANSACTION_CLOSED;3668return0;3669}36703671/*3672 * Fail if a refname appears more than once in the3673 * transaction. 
(If we end up splitting up any updates using3674 * split_symref_update() or split_head_update(), those3675 * functions will check that the new updates don't have the3676 * same refname as any existing ones.)3677 */3678for(i =0; i < transaction->nr; i++) {3679struct ref_update *update = transaction->updates[i];3680struct string_list_item *item =3681string_list_append(&affected_refnames, update->refname);36823683/*3684 * We store a pointer to update in item->util, but at3685 * the moment we never use the value of this field3686 * except to check whether it is non-NULL.3687 */3688 item->util = update;3689}3690string_list_sort(&affected_refnames);3691if(ref_update_reject_duplicates(&affected_refnames, err)) {3692 ret = TRANSACTION_GENERIC_ERROR;3693goto cleanup;3694}36953696/*3697 * Special hack: If a branch is updated directly and HEAD3698 * points to it (may happen on the remote side of a push3699 * for example) then logically the HEAD reflog should be3700 * updated too.3701 *3702 * A generic solution would require reverse symref lookups,3703 * but finding all symrefs pointing to a given branch would be3704 * rather costly for this rare event (the direct update of a3705 * branch) to be worth it. So let's cheat and check with HEAD3706 * only, which should cover 99% of all usage scenarios (even3707 * 100% of the default ones).3708 *3709 * So if HEAD is a symbolic reference, then record the name of3710 * the reference that it points to. If we see an update of3711 * head_ref within the transaction, then split_head_update()3712 * arranges for the reflog of HEAD to be updated, too.3713 */3714 head_ref =refs_resolve_refdup(ref_store,"HEAD",3715 RESOLVE_REF_NO_RECURSE,3716 head_oid.hash, &head_type);37173718if(head_ref && !(head_type & REF_ISSYMREF)) {3719free(head_ref);3720 head_ref = NULL;3721}37223723/*3724 * Acquire all locks, verify old values if provided, check3725 * that new values are valid, and write new values to the3726 * lockfiles, ready to be activated. 
Only keep one lockfile3727 * open at a time to avoid running out of file descriptors.3728 */3729for(i =0; i < transaction->nr; i++) {3730struct ref_update *update = transaction->updates[i];37313732 ret =lock_ref_for_update(refs, update, transaction,3733 head_ref, &affected_refnames, err);3734if(ret)3735goto cleanup;3736}37373738/* Perform updates first so live commits remain referenced */3739for(i =0; i < transaction->nr; i++) {3740struct ref_update *update = transaction->updates[i];3741struct ref_lock *lock = update->backend_data;37423743if(update->flags & REF_NEEDS_COMMIT ||3744 update->flags & REF_LOG_ONLY) {3745if(files_log_ref_write(refs,3746 lock->ref_name,3747 lock->old_oid.hash,3748 update->new_sha1,3749 update->msg, update->flags,3750 err)) {3751char*old_msg =strbuf_detach(err, NULL);37523753strbuf_addf(err,"cannot update the ref '%s':%s",3754 lock->ref_name, old_msg);3755free(old_msg);3756unlock_ref(lock);3757 update->backend_data = NULL;3758 ret = TRANSACTION_GENERIC_ERROR;3759goto cleanup;3760}3761}3762if(update->flags & REF_NEEDS_COMMIT) {3763clear_loose_ref_cache(refs);3764if(commit_ref(lock)) {3765strbuf_addf(err,"couldn't set '%s'", lock->ref_name);3766unlock_ref(lock);3767 update->backend_data = NULL;3768 ret = TRANSACTION_GENERIC_ERROR;3769goto cleanup;3770}3771}3772}3773/* Perform deletes now that updates are safely completed */3774for(i =0; i < transaction->nr; i++) {3775struct ref_update *update = transaction->updates[i];3776struct ref_lock *lock = update->backend_data;37773778if(update->flags & REF_DELETING &&3779!(update->flags & REF_LOG_ONLY)) {3780if(!(update->type & REF_ISPACKED) ||3781 update->type & REF_ISSYMREF) {3782/* It is a loose reference. 
*/3783strbuf_reset(&sb);3784files_ref_path(refs, &sb, lock->ref_name);3785if(unlink_or_msg(sb.buf, err)) {3786 ret = TRANSACTION_GENERIC_ERROR;3787goto cleanup;3788}3789 update->flags |= REF_DELETED_LOOSE;3790}37913792if(!(update->flags & REF_ISPRUNING))3793string_list_append(&refs_to_delete,3794 lock->ref_name);3795}3796}37973798if(repack_without_refs(refs, &refs_to_delete, err)) {3799 ret = TRANSACTION_GENERIC_ERROR;3800goto cleanup;3801}38023803/* Delete the reflogs of any references that were deleted: */3804for_each_string_list_item(ref_to_delete, &refs_to_delete) {3805strbuf_reset(&sb);3806files_reflog_path(refs, &sb, ref_to_delete->string);3807if(!unlink_or_warn(sb.buf))3808try_remove_empty_parents(refs, ref_to_delete->string,3809 REMOVE_EMPTY_PARENTS_REFLOG);3810}38113812clear_loose_ref_cache(refs);38133814cleanup:3815strbuf_release(&sb);3816 transaction->state = REF_TRANSACTION_CLOSED;38173818for(i =0; i < transaction->nr; i++) {3819struct ref_update *update = transaction->updates[i];3820struct ref_lock *lock = update->backend_data;38213822if(lock)3823unlock_ref(lock);38243825if(update->flags & REF_DELETED_LOOSE) {3826/*3827 * The loose reference was deleted. Delete any3828 * empty parent directories. 
(Note that this3829 * can only work because we have already3830 * removed the lockfile.)3831 */3832try_remove_empty_parents(refs, update->refname,3833 REMOVE_EMPTY_PARENTS_REF);3834}3835}38363837string_list_clear(&refs_to_delete,0);3838free(head_ref);3839string_list_clear(&affected_refnames,0);38403841return ret;3842}38433844static intref_present(const char*refname,3845const struct object_id *oid,int flags,void*cb_data)3846{3847struct string_list *affected_refnames = cb_data;38483849returnstring_list_has_string(affected_refnames, refname);3850}38513852static intfiles_initial_transaction_commit(struct ref_store *ref_store,3853struct ref_transaction *transaction,3854struct strbuf *err)3855{3856struct files_ref_store *refs =3857files_downcast(ref_store, REF_STORE_WRITE,3858"initial_ref_transaction_commit");3859int ret =0, i;3860struct string_list affected_refnames = STRING_LIST_INIT_NODUP;38613862assert(err);38633864if(transaction->state != REF_TRANSACTION_OPEN)3865die("BUG: commit called for transaction that is not open");38663867/* Fail if a refname appears more than once in the transaction: */3868for(i =0; i < transaction->nr; i++)3869string_list_append(&affected_refnames,3870 transaction->updates[i]->refname);3871string_list_sort(&affected_refnames);3872if(ref_update_reject_duplicates(&affected_refnames, err)) {3873 ret = TRANSACTION_GENERIC_ERROR;3874goto cleanup;3875}38763877/*3878 * It's really undefined to call this function in an active3879 * repository or when there are existing references: we are3880 * only locking and changing packed-refs, so (1) any3881 * simultaneous processes might try to change a reference at3882 * the same time we do, and (2) any existing loose versions of3883 * the references that we are setting would have precedence3884 * over our values. 
But some remote helpers create the remote3885 * "HEAD" and "master" branches before calling this function,3886 * so here we really only check that none of the references3887 * that we are creating already exists.3888 */3889if(refs_for_each_rawref(&refs->base, ref_present,3890&affected_refnames))3891die("BUG: initial ref transaction called with existing refs");38923893for(i =0; i < transaction->nr; i++) {3894struct ref_update *update = transaction->updates[i];38953896if((update->flags & REF_HAVE_OLD) &&3897!is_null_sha1(update->old_sha1))3898die("BUG: initial ref transaction with old_sha1 set");3899if(refs_verify_refname_available(&refs->base, update->refname,3900&affected_refnames, NULL,3901 err)) {3902 ret = TRANSACTION_NAME_CONFLICT;3903goto cleanup;3904}3905}39063907if(lock_packed_refs(refs,0)) {3908strbuf_addf(err,"unable to lock packed-refs file:%s",3909strerror(errno));3910 ret = TRANSACTION_GENERIC_ERROR;3911goto cleanup;3912}39133914for(i =0; i < transaction->nr; i++) {3915struct ref_update *update = transaction->updates[i];39163917if((update->flags & REF_HAVE_NEW) &&3918!is_null_sha1(update->new_sha1))3919add_packed_ref(refs, update->refname, update->new_sha1);3920}39213922if(commit_packed_refs(refs)) {3923strbuf_addf(err,"unable to commit packed-refs file:%s",3924strerror(errno));3925 ret = TRANSACTION_GENERIC_ERROR;3926goto cleanup;3927}39283929cleanup:3930 transaction->state = REF_TRANSACTION_CLOSED;3931string_list_clear(&affected_refnames,0);3932return ret;3933}39343935struct expire_reflog_cb {3936unsigned int flags;3937 reflog_expiry_should_prune_fn *should_prune_fn;3938void*policy_cb;3939FILE*newlog;3940struct object_id last_kept_oid;3941};39423943static intexpire_reflog_ent(struct object_id *ooid,struct object_id *noid,3944const char*email,unsigned long timestamp,int tz,3945const char*message,void*cb_data)3946{3947struct expire_reflog_cb *cb = cb_data;3948struct expire_reflog_policy_cb *policy_cb = cb->policy_cb;39493950if(cb->flags & 
EXPIRE_REFLOGS_REWRITE)3951 ooid = &cb->last_kept_oid;39523953if((*cb->should_prune_fn)(ooid->hash, noid->hash, email, timestamp, tz,3954 message, policy_cb)) {3955if(!cb->newlog)3956printf("would prune%s", message);3957else if(cb->flags & EXPIRE_REFLOGS_VERBOSE)3958printf("prune%s", message);3959}else{3960if(cb->newlog) {3961fprintf(cb->newlog,"%s %s %s %lu %+05d\t%s",3962oid_to_hex(ooid),oid_to_hex(noid),3963 email, timestamp, tz, message);3964oidcpy(&cb->last_kept_oid, noid);3965}3966if(cb->flags & EXPIRE_REFLOGS_VERBOSE)3967printf("keep%s", message);3968}3969return0;3970}39713972static intfiles_reflog_expire(struct ref_store *ref_store,3973const char*refname,const unsigned char*sha1,3974unsigned int flags,3975 reflog_expiry_prepare_fn prepare_fn,3976 reflog_expiry_should_prune_fn should_prune_fn,3977 reflog_expiry_cleanup_fn cleanup_fn,3978void*policy_cb_data)3979{3980struct files_ref_store *refs =3981files_downcast(ref_store, REF_STORE_WRITE,"reflog_expire");3982static struct lock_file reflog_lock;3983struct expire_reflog_cb cb;3984struct ref_lock *lock;3985struct strbuf log_file_sb = STRBUF_INIT;3986char*log_file;3987int status =0;3988int type;3989struct strbuf err = STRBUF_INIT;39903991memset(&cb,0,sizeof(cb));3992 cb.flags = flags;3993 cb.policy_cb = policy_cb_data;3994 cb.should_prune_fn = should_prune_fn;39953996/*3997 * The reflog file is locked by holding the lock on the3998 * reference itself, plus we might need to update the3999 * reference if --updateref was specified:4000 */4001 lock =lock_ref_sha1_basic(refs, refname, sha1,4002 NULL, NULL, REF_NODEREF,4003&type, &err);4004if(!lock) {4005error("cannot lock ref '%s':%s", refname, err.buf);4006strbuf_release(&err);4007return-1;4008}4009if(!refs_reflog_exists(ref_store, refname)) {4010unlock_ref(lock);4011return0;4012}40134014files_reflog_path(refs, &log_file_sb, refname);4015 log_file =strbuf_detach(&log_file_sb, NULL);4016if(!(flags & EXPIRE_REFLOGS_DRY_RUN)) {4017/*4018 * Even though holding 
$GIT_DIR/logs/$reflog.lock has4019 * no locking implications, we use the lock_file4020 * machinery here anyway because it does a lot of the4021 * work we need, including cleaning up if the program4022 * exits unexpectedly.4023 */4024if(hold_lock_file_for_update(&reflog_lock, log_file,0) <0) {4025struct strbuf err = STRBUF_INIT;4026unable_to_lock_message(log_file, errno, &err);4027error("%s", err.buf);4028strbuf_release(&err);4029goto failure;4030}4031 cb.newlog =fdopen_lock_file(&reflog_lock,"w");4032if(!cb.newlog) {4033error("cannot fdopen%s(%s)",4034get_lock_file_path(&reflog_lock),strerror(errno));4035goto failure;4036}4037}40384039(*prepare_fn)(refname, sha1, cb.policy_cb);4040refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);4041(*cleanup_fn)(cb.policy_cb);40424043if(!(flags & EXPIRE_REFLOGS_DRY_RUN)) {4044/*4045 * It doesn't make sense to adjust a reference pointed4046 * to by a symbolic ref based on expiring entries in4047 * the symbolic reference's reflog. Nor can we update4048 * a reference if there are no remaining reflog4049 * entries.4050 */4051int update = (flags & EXPIRE_REFLOGS_UPDATE_REF) &&4052!(type & REF_ISSYMREF) &&4053!is_null_oid(&cb.last_kept_oid);40544055if(close_lock_file(&reflog_lock)) {4056 status |=error("couldn't write%s:%s", log_file,4057strerror(errno));4058}else if(update &&4059(write_in_full(get_lock_file_fd(lock->lk),4060oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||4061write_str_in_full(get_lock_file_fd(lock->lk),"\n") !=1||4062close_ref(lock) <0)) {4063 status |=error("couldn't write%s",4064get_lock_file_path(lock->lk));4065rollback_lock_file(&reflog_lock);4066}else if(commit_lock_file(&reflog_lock)) {4067 status |=error("unable to write reflog '%s' (%s)",4068 log_file,strerror(errno));4069}else if(update &&commit_ref(lock)) {4070 status |=error("couldn't set%s", lock->ref_name);4071}4072}4073free(log_file);4074unlock_ref(lock);4075return status;40764077 
failure:4078rollback_lock_file(&reflog_lock);4079free(log_file);4080unlock_ref(lock);4081return-1;4082}40834084static intfiles_init_db(struct ref_store *ref_store,struct strbuf *err)4085{4086struct files_ref_store *refs =4087files_downcast(ref_store, REF_STORE_WRITE,"init_db");4088struct strbuf sb = STRBUF_INIT;40894090/*4091 * Create .git/refs/{heads,tags}4092 */4093files_ref_path(refs, &sb,"refs/heads");4094safe_create_dir(sb.buf,1);40954096strbuf_reset(&sb);4097files_ref_path(refs, &sb,"refs/tags");4098safe_create_dir(sb.buf,1);40994100strbuf_release(&sb);4101return0;4102}41034104struct ref_storage_be refs_be_files = {4105 NULL,4106"files",4107 files_ref_store_create,4108 files_init_db,4109 files_transaction_commit,4110 files_initial_transaction_commit,41114112 files_pack_refs,4113 files_peel_ref,4114 files_create_symref,4115 files_delete_refs,4116 files_rename_ref,41174118 files_ref_iterator_begin,4119 files_read_raw_ref,41204121 files_reflog_iterator_begin,4122 files_for_each_reflog_ent,4123 files_for_each_reflog_ent_reverse,4124 files_reflog_exists,4125 files_create_reflog,4126 files_delete_reflog,4127 files_reflog_expire4128};