1#include"../cache.h" 2#include"../config.h" 3#include"../refs.h" 4#include"refs-internal.h" 5#include"ref-cache.h" 6#include"packed-backend.h" 7#include"../iterator.h" 8#include"../lockfile.h" 9 10struct packed_ref_cache { 11struct ref_cache *cache; 12 13/* 14 * Count of references to the data structure in this instance, 15 * including the pointer from files_ref_store::packed if any. 16 * The data will not be freed as long as the reference count 17 * is nonzero. 18 */ 19unsigned int referrers; 20 21/* The metadata from when this packed-refs cache was read */ 22struct stat_validity validity; 23}; 24 25/* 26 * Increment the reference count of *packed_refs. 27 */ 28static voidacquire_packed_ref_cache(struct packed_ref_cache *packed_refs) 29{ 30 packed_refs->referrers++; 31} 32 33/* 34 * Decrease the reference count of *packed_refs. If it goes to zero, 35 * free *packed_refs and return true; otherwise return false. 36 */ 37static intrelease_packed_ref_cache(struct packed_ref_cache *packed_refs) 38{ 39if(!--packed_refs->referrers) { 40free_ref_cache(packed_refs->cache); 41stat_validity_clear(&packed_refs->validity); 42free(packed_refs); 43return1; 44}else{ 45return0; 46} 47} 48 49/* 50 * A container for `packed-refs`-related data. It is not (yet) a 51 * `ref_store`. 52 */ 53struct packed_ref_store { 54struct ref_store base; 55 56unsigned int store_flags; 57 58/* The path of the "packed-refs" file: */ 59char*path; 60 61/* 62 * A cache of the values read from the `packed-refs` file, if 63 * it might still be current; otherwise, NULL. 64 */ 65struct packed_ref_cache *cache; 66 67/* 68 * Lock used for the "packed-refs" file. Note that this (and 69 * thus the enclosing `packed_ref_store`) must not be freed. 70 */ 71struct lock_file lock; 72 73/* 74 * Temporary file used when rewriting new contents to the 75 * "packed-refs" file. Note that this (and thus the enclosing 76 * `packed_ref_store`) must not be freed. 77 */ 78struct tempfile *tempfile; 79}; 80 81struct ref_store *packed_ref_store_create(const char*path, 82unsigned int store_flags) 83{ 84struct packed_ref_store *refs =xcalloc(1,sizeof(*refs)); 85struct ref_store *ref_store = (struct ref_store *)refs; 86 87base_ref_store_init(ref_store, &refs_be_packed); 88 refs->store_flags = store_flags; 89 90 refs->path =xstrdup(path); 91return ref_store; 92} 93 94/* 95 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is 96 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't 97 * support at least the flags specified in `required_flags`. `caller` 98 * is used in any necessary error messages. 

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * Parse one line from a packed-refs file. Write the object ID to
 * `oid`. Return a pointer to the refname within the line
 * (null-terminated), or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
	const char *ref;

	if (parse_oid_hex(line->buf, oid, &ref) < 0)
		return NULL;
	if (!isspace(*ref++))
		return NULL;

	if (isspace(*ref))
		return NULL;

	if (line->buf[line->len - 1] != '\n')
		return NULL;
	line->buf[--line->len] = 0;

	return ref;
}
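
/*
 * For reference, a packed-refs file as parsed above looks roughly
 * like this (the object names are placeholders, not real data):
 *
 *     # pack-refs with: peeled fully-peeled
 *     1111111111111111111111111111111111111111 refs/heads/master
 *     2222222222222222222222222222222222222222 refs/tags/v1.0
 *     ^3333333333333333333333333333333333333333
 *
 * A "^" line records the peeled value of the reference on the line
 * before it. Such a line is one '^', 40 hexadecimal characters, and
 * the terminating newline, which is where PEELED_LINE_LENGTH (42)
 * comes from.
 */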

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 * peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we
 *      find a peeled value for such a reference we will use it.
 *
 * fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable.
 *      This trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
	FILE *f;
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
	struct ref_dir *dir;

	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

	f = fopen(packed_refs_file, "r");
	if (!f) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", packed_refs_file);
		}
	}

	stat_validity_update(&packed_refs->validity, fileno(f));

	dir = get_ref_dir(packed_refs->cache->root);
	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		struct object_id oid;
		const char *refname;
		const char *traits;

		if (!line.len || line.buf[line.len - 1] != '\n')
			die("unterminated line in %s: %s", packed_refs_file, line.buf);

		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, &oid);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous: %s", refname);
				oidclr(&oid);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, &oid, flag);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
		} else if (last &&
		    line.buf[0] == '^' &&
		    line.len == PEELED_LINE_LENGTH &&
		    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
		    !get_oid_hex(line.buf + 1, &oid)) {
			oidcpy(&last->u.value.peeled, &oid);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		} else {
			strbuf_setlen(&line, line.len - 1);
			die("unexpected line in %s: %s", packed_refs_file, line.buf);
		}
	}

	fclose(f);
	strbuf_release(&line);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs->path);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}
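
/*
 * Note that get_packed_ref_cache() hands back a borrowed pointer: it
 * does not bump the reference count. A caller that needs the cache to
 * survive a later clear_packed_ref_cache() must take its own
 * reference with acquire_packed_ref_cache() and drop it with
 * release_packed_ref_cache() when done, as packed_ref_iterator_begin()
 * and packed_ref_iterator_abort() do below.
 */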

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}
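
/*
 * An iterator obtained above is consumed with the generic
 * ref_iterator_advance() loop used elsewhere in this file, for
 * example (a sketch only; handle() and the prefix are illustrative):
 *
 *     struct ref_iterator *iter =
 *             packed_ref_iterator_begin(ref_store, "refs/tags/", 0);
 *     while ((ok = ref_iterator_advance(iter)) == ITER_OK)
 *             handle(iter->refname, iter->oid);
 *
 * The iterator frees itself when it returns ITER_DONE or ITER_ERROR,
 * or when the caller gives up early via ref_iterator_abort().
 */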

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file_gently(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		rollback_lock_file(&refs->lock);
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}

void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}
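
/*
 * Sketch of the expected calling sequence (hypothetical caller; a
 * real caller would also check the return values):
 *
 *     struct strbuf err = STRBUF_INIT;
 *
 *     packed_refs_lock(ref_store, 0, &err);
 *     ... queue updates via a ref_transaction on this store ...
 *     packed_refs_unlock(ref_store);
 *
 * packed_transaction_prepare() below takes the lock itself if the
 * caller has not already done so.
 */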

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled\n";

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, roll back the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packed-refs file must be locked before calling this function
 * and will remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	refs->tempfile = create_tempfile(sb.buf);
	if (!refs->tempfile) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;
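
	/*
	 * Within the loop below, `cmp` records which list the current
	 * entry comes from: cmp < 0 means only an existing packed ref
	 * (copy it through unchanged); cmp == 0 means the ref appears
	 * in both lists, so the old value is checked against
	 * update->old_oid and then either the update's new value is
	 * written or, if the update sets no new value, the existing
	 * entry is kept; cmp > 0 means the update names a ref that is
	 * not in the old file.
	 */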

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile_gently(refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		delete_tempfile(&refs->tempfile);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}

struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	struct string_list updates;
};

static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}
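
/*
 * The prepare/finish/abort callbacks below are normally driven by the
 * generic transaction machinery rather than called directly: a commit
 * runs prepare first and, if that succeeds, finish; a failure or an
 * explicit abort ends up in packed_transaction_cleanup() above. (This
 * describes the intended calling pattern, not an additional API.)
 */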

static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	clear_packed_ref_cache(refs);
	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};