#include "../cache.h"
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

/*
 * A reference-counted, in-memory snapshot of the contents of the
 * "packed-refs" file.
 */
struct packed_ref_cache {
	/* The parsed contents of the "packed-refs" file: */
	struct ref_cache *cache;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs.  If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	struct ref_store base;

	/* Which REF_STORE_* operations this instance supports: */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

/*
 * Allocate a new packed ref_store for the "packed-refs" file at
 * `path`, supporting the operations in `store_flags`. The caller owns
 * the returned store (but see the notes above about members that must
 * never be freed).
 */
struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

/*
 * Drop the store's current cache (if any), releasing our reference to
 * it. The next reader will re-read the file from disk.
 */
static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		/* Clear the pointer first so no one sees a stale cache: */
		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

/*
 * Parse one line from a packed-refs file. Write the object ID to
 * `oid`. Return a pointer to the refname within the line
 * (null-terminated), or NULL if there was a problem.
136 */ 137static const char*parse_ref_line(struct strbuf *line,struct object_id *oid) 138{ 139const char*ref; 140 141if(parse_oid_hex(line->buf, oid, &ref) <0) 142return NULL; 143if(!isspace(*ref++)) 144return NULL; 145 146if(isspace(*ref)) 147return NULL; 148 149if(line->buf[line->len -1] !='\n') 150return NULL; 151 line->buf[--line->len] =0; 152 153return ref; 154} 155 156/* 157 * Read from `packed_refs_file` into a newly-allocated 158 * `packed_ref_cache` and return it. The return value will already 159 * have its reference count incremented. 160 * 161 * A comment line of the form "# pack-refs with: " may contain zero or 162 * more traits. We interpret the traits as follows: 163 * 164 * No traits: 165 * 166 * Probably no references are peeled. But if the file contains a 167 * peeled value for a reference, we will use it. 168 * 169 * peeled: 170 * 171 * References under "refs/tags/", if they *can* be peeled, *are* 172 * peeled in this file. References outside of "refs/tags/" are 173 * probably not peeled even if they could have been, but if we find 174 * a peeled value for such a reference we will use it. 175 * 176 * fully-peeled: 177 * 178 * All references in the file that can be peeled are peeled. 179 * Inversely (and this is more important), any references in the 180 * file for which no peeled value is recorded is not peelable. This 181 * trait should typically be written alongside "peeled" for 182 * compatibility with older clients, but we do not require it 183 * (i.e., "peeled" is a no-op if "fully-peeled" is set). 
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
	FILE *f;
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
	struct ref_dir *dir;

	/* The reference we take here is owned by the caller: */
	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	/* We fill the cache eagerly below, so the root is complete: */
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

	f = fopen(packed_refs_file, "r");
	if (!f) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", packed_refs_file);
		}
	}

	/* Record the file metadata so staleness can be detected later: */
	stat_validity_update(&packed_refs->validity, fileno(f));

	dir = get_ref_dir(packed_refs->cache->root);
	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		struct object_id oid;
		const char *refname;
		const char *traits;

		if (!line.len || line.buf[line.len - 1] != '\n')
			die("unterminated line in %s: %s", packed_refs_file, line.buf);

		/* The optional header line tells us how refs were peeled: */
		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, &oid);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous: %s", refname);
				/* Tolerate the bad name, but zero its value: */
				oidclr(&oid);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, &oid, flag);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
		} else if (last &&
		    line.buf[0] == '^' &&
		    line.len == PEELED_LINE_LENGTH &&
		    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
		    !get_oid_hex(line.buf + 1, &oid)) {
			/* A "^<oid>" line peels the immediately preceding ref: */
			oidcpy(&last->u.value.peeled, &oid);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		} else {
			/* Strip the newline before reporting the bogus line: */
			strbuf_setlen(&line, line.len - 1);
			die("unexpected line in %s: %s", packed_refs_file, line.buf);
		}
	}

	fclose(f);
	strbuf_release(&line);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs->path);

	return refs->cache;
}

/* Return the ref_dir at the root of the given cache. */
static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

/* Return the (possibly freshly-read) root ref_dir of this store's cache. */
static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

/*
 * Read the reference named `refname` from the packed store. On
 * success fill in `sha1` and `type` and return 0; if the reference is
 * not present, set errno to ENOENT and return -1.
 */
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

/*
 * Write the peeled value of packed reference `refname` to `sha1`.
 * Return -1 if the reference doesn't exist or cannot be peeled.
 */
static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

struct packed_ref_iterator {
	struct ref_iterator base;

	/* The cache being iterated over; we hold a reference on it. */
	struct packed_ref_cache *cache;

	/* The underlying cache iterator that does the real traversal. */
	struct ref_iterator *iter0;

	/* DO_FOR_EACH_* flags controlling which refs to yield. */
	unsigned int flags;
};

static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		/* Skip refs filtered out by the caller's flags: */
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/*
	 * iter0 freed itself by returning non-OK; forget it so our
	 * abort below doesn't touch it again:
	 */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	/* Delegate to the underlying cache iterator: */
	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	/* Drop the reference taken in packed_ref_iterator_begin(): */
	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

/*
 * Begin iterating over the packed references whose names start with
 * `prefix`, honoring the DO_FOR_EACH_* bits in `flags`. The iterator
 * pins the current cache until it is aborted or exhausted.
 */
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value.
On 460 * error, return a nonzero value and leave errno set at the value left 461 * by the failing call to `fprintf()`. 462 */ 463static intwrite_packed_entry(FILE*fh,const char*refname, 464const unsigned char*sha1, 465const unsigned char*peeled) 466{ 467if(fprintf(fh,"%s %s\n",sha1_to_hex(sha1), refname) <0|| 468(peeled &&fprintf(fh,"^%s\n",sha1_to_hex(peeled)) <0)) 469return-1; 470 471return0; 472} 473 474intpacked_refs_lock(struct ref_store *ref_store,int flags,struct strbuf *err) 475{ 476struct packed_ref_store *refs = 477packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN, 478"packed_refs_lock"); 479static int timeout_configured =0; 480static int timeout_value =1000; 481 482if(!timeout_configured) { 483git_config_get_int("core.packedrefstimeout", &timeout_value); 484 timeout_configured =1; 485} 486 487/* 488 * Note that we close the lockfile immediately because we 489 * don't write new content to it, but rather to a separate 490 * tempfile. 491 */ 492if(hold_lock_file_for_update_timeout( 493&refs->lock, 494 refs->path, 495 flags, timeout_value) <0) { 496unable_to_lock_message(refs->path, errno, err); 497return-1; 498} 499 500if(close_lock_file(&refs->lock)) { 501strbuf_addf(err,"unable to close%s:%s", refs->path,strerror(errno)); 502return-1; 503} 504 505/* 506 * Now that we hold the `packed-refs` lock, make sure that our 507 * cache matches the current version of the file. Normally 508 * `get_packed_ref_cache()` does that for us, but that 509 * function assumes that when the file is locked, any existing 510 * cache is still valid. We've just locked the file, but it 511 * might have changed the moment *before* we locked it. 
512 */ 513validate_packed_ref_cache(refs); 514 515/* 516 * Now make sure that the packed-refs file as it exists in the 517 * locked state is loaded into the cache: 518 */ 519get_packed_ref_cache(refs); 520return0; 521} 522 523voidpacked_refs_unlock(struct ref_store *ref_store) 524{ 525struct packed_ref_store *refs =packed_downcast( 526 ref_store, 527 REF_STORE_READ | REF_STORE_WRITE, 528"packed_refs_unlock"); 529 530if(!is_lock_file_locked(&refs->lock)) 531die("BUG: packed_refs_unlock() called when not locked"); 532rollback_lock_file(&refs->lock); 533} 534 535intpacked_refs_is_locked(struct ref_store *ref_store) 536{ 537struct packed_ref_store *refs =packed_downcast( 538 ref_store, 539 REF_STORE_READ | REF_STORE_WRITE, 540"packed_refs_is_locked"); 541 542returnis_lock_file_locked(&refs->lock); 543} 544 545/* 546 * The packed-refs header line that we write out. Perhaps other 547 * traits will be added later. The trailing space is required. 548 */ 549static const char PACKED_REFS_HEADER[] = 550"# pack-refs with: peeled fully-peeled\n"; 551 552static intpacked_init_db(struct ref_store *ref_store,struct strbuf *err) 553{ 554/* Nothing to do. */ 555return0; 556} 557 558/* 559 * Write the packed-refs from the cache to the packed-refs tempfile, 560 * incorporating any changes from `updates`. `updates` must be a 561 * sorted string list whose keys are the refnames and whose util 562 * values are `struct ref_update *`. On error, rollback the tempfile, 563 * write an error message to `err`, and return a nonzero value. 564 * 565 * The packfile must be locked before calling this function and will 566 * remain locked when it is done. 
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		/* cmp < 0: emit old ref; cmp > 0: emit update; 0: both match */
		int cmp;

		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			/* Write the updated value, peeling it ourselves: */
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		/* NOTE(review): sb was already released above; this is redundant but harmless. */
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}

struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/* The transaction's updates, sorted by refname; util is the ref_update. */
	struct string_list updates;
};

/*
 * Release all resources held by `transaction`: its update list, the
 * staging tempfile (if active), and the packed-refs lock (if this
 * transaction took it). Afterwards the transaction is CLOSED.
 */
static void packed_transaction_cleanup(struct packed_ref_store *refs,
				       struct ref_transaction *transaction)
{
	struct packed_transaction_backend_data *data = transaction->backend_data;

	if (data) {
		string_list_clear(&data->updates, 0);

		if (is_tempfile_active(&refs->tempfile))
			delete_tempfile(&refs->tempfile);

		if (data->own_lock && is_lock_file_locked(&refs->lock)) {
			packed_refs_unlock(&refs->base);
			data->own_lock = 0;
		}

		free(data);
		transaction->backend_data = NULL;
	}

	transaction->state = REF_TRANSACTION_CLOSED;
}

/*
 * Sort and check the transaction's updates, take the packed-refs lock
 * if not already held, and stage the new file contents in the
 * tempfile. On success the transaction is PREPARED; on failure it is
 * cleaned up and a generic transaction error is returned.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		/* Remember that the lock is ours to release: */
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}

/*
 * Commit a PREPARED transaction: atomically rename the staged
 * tempfile over the "packed-refs" file and invalidate the cache.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	/* The on-disk file changed, so the cache is now stale: */
	clear_packed_ref_cache(refs);
	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	/* No special handling needed for an initial commit: */
	return ref_transaction_commit(transaction, err);
}

/*
 * Delete the given references in a single transaction. Individual
 * deletion failures are reported as warnings/errors but do not stop
 * the remaining deletions.
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

/* Packed refs have no reflogs, so iterate over nothing: */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

/* The virtual-method table for the "packed" reference backend. */
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};