#include "builtin.h"
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "delta-islands.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "list.h"
#include "packfile.h"
#include "object-store.h"
#include "dir.h"
#include "midx.h"
#include "trace2.h"

#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
#define SIZE(obj) oe_size(&to_pack, obj)
#define SET_SIZE(obj, size) oe_set_size(&to_pack, obj, size)
#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
#define DELTA(obj) oe_delta(&to_pack, obj)
#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)

static const char *pack_usage[] = {
	N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
	N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
	NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written, nr_seen;
static struct bitmap_index *bitmap_git;
static uint32_t write_layer;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static timestamp_t unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep_on_disk;
static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int sparse;
static int thin;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static int exclude_promisor_objects;

static int use_delta_islands;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

static struct list_objects_filter_options filter_options;

enum missing_action {
	MA_ERROR = 0,      /* fail if any missing objects are encountered */
	MA_ALLOW_ANY,      /* silently allow ALL missing objects */
	MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{
	if (indexed_commits_nr >= indexed_commits_alloc) {
		indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
		REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
	}

	indexed_commits[indexed_commits_nr++] = commit;
}
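/*
 * A quick sketch (illustration only; growth_demo() is not part of this
 * file) of the capacities produced by the (n + 32) * 2 growth above:
 * the array doubles while getting a reasonable starting size for free.
 */
#if 0
static void growth_demo(void)
{
	unsigned int alloc = 0, i;

	for (i = 0; i < 4; i++) {
		alloc = (alloc + 32) * 2;
		printf("%u\n", alloc); /* 64, 192, 448, 960 */
	}
}
#endif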
static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_object_file(&entry->idx.oid, &type, &size);
	if (!buf)
		die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
	base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
				    &base_size);
	if (!base_buf)
		die("unable to read %s",
		    oid_to_hex(&DELTA(entry)->idx.oid));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	/*
	 * We successfully computed this delta once but dropped it for
	 * memory reasons. Something is very wrong if this time we
	 * recompute and create a different delta.
	 */
	if (!delta_buf || delta_size != DELTA_SIZE(entry))
		BUG("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}
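/*
 * Note that do_compress() takes ownership of the buffer passed via
 * *pptr: the original buffer is freed and replaced with a freshly
 * allocated deflated copy. A minimal, hypothetical caller
 * (illustration only; compress_example() is not part of this file):
 */
#if 0
static void compress_example(void)
{
	unsigned long size = 4;
	void *buf = xmemdupz("data", size); /* heap copy; freed by do_compress() */
	unsigned long zlen = do_compress(&buf, size);

	/* buf now points to zlen bytes of zlib-deflated data */
	free(buf);
}
#endif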
static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
					   const struct object_id *oid)
{
	git_zstream stream;
	unsigned char ibuf[1024 * 16];
	unsigned char obuf[1024 * 16];
	unsigned long olen = 0;

	git_deflate_init(&stream, pack_compression_level);

	for (;;) {
		ssize_t readlen;
		int zret = Z_OK;
		readlen = read_istream(st, ibuf, sizeof(ibuf));
		if (readlen == -1)
			die(_("unable to read %s"), oid_to_hex(oid));

		stream.next_in = ibuf;
		stream.avail_in = readlen;
		while ((stream.avail_in || readlen == 0) &&
		       (zret == Z_OK || zret == Z_BUF_ERROR)) {
			stream.next_out = obuf;
			stream.avail_out = sizeof(obuf);
			zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
			hashwrite(f, obuf, stream.next_out - obuf);
			olen += stream.next_out - obuf;
		}
		if (stream.avail_in)
			die(_("deflate error (%d)"), zret);
		if (readlen == 0) {
			if (zret != Z_STREAM_END)
				die(_("deflate error (%d)"), zret);
			break;
		}
	}
	git_deflate_end(&stream);
	return olen;
}

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
			      struct pack_window **w_curs,
			      off_t offset,
			      off_t len,
			      unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct hashfile *f,
			   struct packed_git *p,
			   struct pack_window **w_curs,
			   off_t offset,
			   off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		hashwrite(f, in, avail);
		offset += avail;
		len -= avail;
	}
}
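/*
 * The writers below emit the pack object header described in
 * Documentation/technical/pack-format.txt: the first byte holds a
 * continuation bit, three type bits and the low four size bits; each
 * following byte carries seven more size bits. A worked example
 * (illustration only; header_demo() is not part of this file):
 */
#if 0
static void header_demo(void)
{
	unsigned char hdr[MAX_PACK_OBJECT_HEADER];
	int len = encode_in_pack_object_header(hdr, sizeof(hdr),
					       OBJ_BLOB, 300);
	/*
	 * 300 = 0x12c: the low four bits are 0xc and the remaining bits
	 * are 0x12, so len == 2, hdr[0] == (0x80 | (OBJ_BLOB << 4) | 0xc)
	 * == 0xbc, and hdr[1] == 0x12.
	 */
}
#endif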
/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;
	const unsigned hashsz = the_hash_algo->rawsz;

	if (!usable_delta) {
		if (oe_type(entry) == OBJ_BLOB &&
		    oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
		    (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_object_file(&entry->idx.oid, &type, &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = DELTA_SIZE(entry);
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = DELTA_SIZE(entry);
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * additional bytes for the base object ID.
		 */
		if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
		hdrlen += hashsz;
	} else {
		if (limit && hdrlen + datalen + hashsz >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, &entry->idx.oid);
		close_istream(st);
	} else {
		hashwrite(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}
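/*
 * The OFS_DELTA base-offset encoding above subtracts one on each
 * continuation byte ("--ofs") so that multi-byte encodings never
 * overlap shorter ones: one byte covers offsets 0..127, two bytes
 * 128..16511, and so on. A worked round-trip, matching the decoder in
 * check_object() further below (illustration only):
 */
#if 0
static void ofs_delta_demo(void)
{
	unsigned char dheader[MAX_PACK_OBJECT_HEADER];
	unsigned pos = sizeof(dheader) - 1;
	off_t ofs = 128;

	/* encode: emits the two bytes 0x80, 0x00 */
	dheader[pos] = ofs & 127;
	while (ofs >>= 7)
		dheader[--pos] = 128 | (--ofs & 127);

	/* decode, as check_object() does */
	{
		const unsigned char *buf = dheader + pos;
		unsigned char c = *buf++;
		off_t decoded = c & 127;	/* 0x80 -> 0, continuation set */
		while (c & 128) {
			decoded += 1;		/* undo the encoder's --ofs */
			c = *buf++;
			decoded = (decoded << 7) + (c & 127);
		}
		/* decoded == 128 */
	}
}
#endif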
/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = IN_PACK(entry);
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = oe_type(entry);
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	const unsigned hashsz = the_hash_algo->rawsz;
	unsigned long entry_size = SIZE(entry);

	if (DELTA(entry))
		type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry_size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error(_("bad packed object CRC for %s"),
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
		error(_("corrupt packed object for %s"),
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
		hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
		hdrlen += hashsz;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + hashsz >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		hashwrite(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}
/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct hashfile *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!DELTA(entry))
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (DELTA(entry)->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (DELTA(entry)->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!IN_PACK(entry))
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (oe_type(entry) == OBJ_REF_DELTA ||
		 oe_type(entry) == OBJ_OFS_DELTA)
				/* check_object() decided it for us ... */
		to_reuse = usable_delta;
				/* ... but pack split may override that */
	else if (oe_type(entry) != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (DELTA(entry))
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct hashfile *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning(_("recursive delta detected for object %s"),
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (DELTA(e)) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, DELTA(e), offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			SET_DELTA(e, NULL);
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die(_("pack too large for current definition of off_t"));
	*offset += size;
	return WRITE_ONE_WRITTEN;
}
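/*
 * A simplified sketch of how write_pack_file() below drives write_one()
 * (illustration only; write_all_demo() is not part of this file): bases
 * are written recursively before their deltas, and WRITE_ONE_BREAK
 * signals that the current pack hit pack_size_limit and a new one must
 * be started.
 */
#if 0
static void write_all_demo(struct hashfile *f, struct object_entry **order,
			   uint32_t nr)
{
	off_t offset = sizeof(struct pack_header); /* header already written */
	uint32_t i;

	for (i = 0; i < nr; i++)
		if (write_one(f, order[i], &offset) == WRITE_ONE_BREAK)
			break; /* split: remaining objects go to the next pack */
}
#endif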
static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	struct object_id peeled;
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, &peeled)) {
		entry = packlist_find(&to_pack, peeled.hash, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled || oe_layer(&to_pack, e) != write_layer)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (DELTA_CHILD(e)) {
			add_to_order = 1;
			e = DELTA_CHILD(e);
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (DELTA_SIBLING(e)) {
				e = DELTA_SIBLING(e);
				continue;
			}
			/* go back to our parent node */
			e = DELTA(e);
			while (e && !DELTA_SIBLING(e)) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = DELTA(e);
			}
			if (!e) {
				/* done - we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = DELTA_SIBLING(e);
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; DELTA(root); root = DELTA(root))
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
{
	unsigned int i, last_untagged;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_COMMIT &&
		    oe_type(&objects[i]) != OBJ_TAG)
			continue;
		add_to_write_order(wo, wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (oe_type(&objects[i]) != OBJ_TREE)
			continue;
		add_to_write_order(wo, wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
			add_family_to_write_order(wo, wo_end, &objects[i]);
	}
}
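/*
 * A worked example of the preorder walk above, assuming a small delta
 * family where B and C are deltas against A, and D is a delta against B:
 *
 *           A
 *          / \
 *         B   C
 *        /
 *       D
 *
 * add_family_to_write_order() first climbs from any member up to the
 * root A; add_descendants_to_write_order() then emits A, B, C, D. Each
 * base thus precedes the deltas made against it, so a reader can always
 * resolve a delta from data that appears earlier in the pack.
 */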
static struct object_entry **compute_write_order(void)
{
	uint32_t max_layers = 1;
	unsigned int i, wo_end;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		SET_DELTA_CHILD(&objects[i], NULL);
		SET_DELTA_SIBLING(&objects[i], NULL);
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!DELTA(e))
			continue;
		/* Mark me as the first child */
		e->delta_sibling_idx = DELTA(e)->delta_child_idx;
		SET_DELTA_CHILD(DELTA(e), e);
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	if (use_delta_islands)
		max_layers = compute_pack_layers(&to_pack);

	ALLOC_ARRAY(wo, to_pack.nr_objects);
	wo_end = 0;

	for (; write_layer < max_layers; ++write_layer)
		compute_layer_order(wo, &wo_end);

	if (wo_end != to_pack.nr_objects)
		die(_("ordered %u objects, expected %"PRIu32),
		    wo_end, to_pack.nr_objects);

	return wo;
}

static off_t write_reused_pack(struct hashfile *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die(_("packfile is invalid: %s"), reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno(_("unable to open packfile for reuse: %s"),
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno(_("unable to seek in reused packfile"));

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno(_("unable to read from reused packfile"));

		if (read_pack > to_write)
			read_pack = to_write;

		hashwrite(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);
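/*
 * A quick numeric check of the progress approximation above, with
 * assumed figures: when reusing 1000 objects spread over a 40 MiB
 * segment, having copied 10 MiB so far reports 1000 * (10.0 / 40.0)
 * = 250 objects. Once to_write reaches zero the fraction is exactly
 * 1.0 and the counter lands on the true total.
 */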
static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct hashfile *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		struct object_id oid;
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = hashfd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
		} else {
			int fd = finalize_hashfile(f, oid.hash, 0);
			fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
						 nr_written, oid.hash, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno(_("failed to stat %s"), pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno(_("failed utime() on %s"), pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(oid.hash);
				bitmap_writer_build_type_index(
					&to_pack, written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, oid.hash);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(oid_to_hex(&oid));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
		    written, nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	git_check_attr(the_repository->index, path, check);
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}
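/*
 * no_try_delta() honors the "delta" gitattribute. For example, a
 * .gitattributes entry such as
 *
 *     *.png -delta
 *
 * makes the function return 1 for matching paths, so already-compressed
 * files are stored whole instead of wasting delta-search effort on them.
 */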
/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const struct object_id *oid,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, oid->hash, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * cause us to omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter, we know we want the object in the generated
	 * pack. Otherwise, we signal "-1" at the end to tell the caller that
	 * we do not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep_on_disk &&
	    !ignore_packed_keep_in_core &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (p->pack_local &&
	    ((ignore_packed_keep_on_disk && p->pack_keep) ||
	     (ignore_packed_keep_in_core && p->pack_keep_in_core)))
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const struct object_id *oid,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	int want;
	struct list_head *pos;
	struct multi_pack_index *m;

	if (!exclude && local && has_loose_object_nonlocal(oid))
		return 0;

	/*
	 * If we already know the pack object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	for (m = get_multi_pack_index(the_repository); m; m = m->next) {
		struct pack_entry e;
		if (fill_midx_entry(oid, &e, m)) {
			struct packed_git *p = e.p;
			off_t offset;

			if (p == *found_pack)
				offset = *found_offset;
			else
				offset = find_pack_entry_one(oid->hash, p);

			if (offset) {
				if (!*found_pack) {
					if (!is_pack_valid(p))
						continue;
					*found_offset = offset;
					*found_pack = p;
				}
				want = want_found_object(exclude, p);
				if (want != -1)
					return want;
			}
		}
	}

	list_for_each(pos, get_packed_git_mru(the_repository)) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(oid->hash, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				list_move(&p->mru,
					  get_packed_git_mru(the_repository));
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const struct object_id *oid,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, oid->hash, index_pos);
	entry->hash = hash;
	oe_set_type(entry, type);
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		oe_set_in_pack(&to_pack, entry, found_pack);
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const struct object_id *oid, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	display_progress(progress_state, ++nr_seen);

	if (have_duplicate_entry(oid, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(oid, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);
	return 1;
}

static int add_object_entry_from_bitmap(const struct object_id *oid,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	display_progress(progress_state, ++nr_seen);

	if (have_duplicate_entry(oid, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(oid, 0, &pack, &offset))
		return 0;

	create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
	return 1;
}

struct pbase_tree_cache {
	struct object_id oid;
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const struct object_id *oid)
{
	return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix + 1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;
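/*
 * The cache above is a small open-addressing table: the first byte of
 * the object ID picks one of 256 buckets, and lookups probe linearly
 * with wraparound. For instance, an oid whose hash starts with 0xfe
 * maps to bucket 254, and a full neighborhood scan visits 254, 255, 0,
 * 1, ... (at most 8 slots, per pbase_tree_get() below).
 */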
static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(oid);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && oideq(&ent->oid, oid)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_object_file(oid, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	oidcpy(&nent->oid, oid);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(&entry.oid,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name + cmplen + 1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(&entry.oid);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,
			   done_pbase_paths_num - pos - 1);
	done_pbase_paths[pos] = hash;
	return 0;
}
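/*
 * Note on the binary search above: done_pbase_paths is kept sorted in
 * descending order (the "a[mi] < hash" comparison moves the upper
 * bound), and a miss returns -(insertion point) - 1. check_pbase_path()
 * undoes that encoding with "pos = -pos - 1" before shifting the tail
 * of the array to make room for the new hash.
 */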
static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(struct object_id *oid)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	struct object_id tree_oid;

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (oideq(&it->pcache.oid, &tree_oid)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	oidcpy(&it->pcache.oid, &tree_oid);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *tmp = it;
		it = tmp->next;
		free(tmp->pcache.tree_data);
		free(tmp);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		FREE_AND_NULL(pbase_tree_cache[i]);
	}

	FREE_AND_NULL(done_pbase_paths);
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

/*
 * Return 1 iff the object specified by "delta" can be sent
 * literally as a delta against the base in "base_sha1". If
 * so, then *base_out will point to the entry in our packing
 * list, or NULL if we must use the external-base list.
 *
 * Depth value does not matter - find_deltas() will
 * never consider reused delta as the base object to
 * deltify other objects against, in order to avoid
 * circular deltas.
 */
static int can_reuse_delta(const unsigned char *base_sha1,
			   struct object_entry *delta,
			   struct object_entry **base_out)
{
	struct object_entry *base;

	if (!base_sha1)
		return 0;

	/*
	 * First see if we're already sending the base (or it's explicitly in
	 * our "excluded" list).
	 */
	base = packlist_find(&to_pack, base_sha1, NULL);
	if (base) {
		if (!in_same_island(&delta->idx.oid, &base->idx.oid))
			return 0;
		*base_out = base;
		return 1;
	}

	/*
	 * Otherwise, reachability bitmaps may tell us if the receiver has it,
	 * even if it was buried too deep in history to make it into the
	 * packing list.
	 */
	if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
		if (use_delta_islands) {
			struct object_id base_oid;
			hashcpy(base_oid.hash, base_sha1);
			if (!in_same_island(&delta->idx.oid, &base_oid))
				return 0;
		}
		*base_out = NULL;
		return 1;
	}

	return 0;
}
static void check_object(struct object_entry *entry)
{
	unsigned long canonical_size;

	if (IN_PACK(entry)) {
		struct packed_git *p = IN_PACK(entry);
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;
		enum object_type type;
		unsigned long in_pack_size;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &type,
						   &in_pack_size);
		if (used == 0)
			goto give_up;

		if (type < 0)
			BUG("invalid type %d", type);
		entry->in_pack_type = type;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			oe_set_type(entry, entry->in_pack_type);
			SET_SIZE(entry, in_pack_size);
			entry->in_pack_header_size = used;
			if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						    entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + the_hash_algo->rawsz;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error(_("delta base offset overflow in pack for %s"),
					      oid_to_hex(&entry->idx.oid));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error(_("delta base offset out of bound for %s"),
				      oid_to_hex(&entry->idx.oid));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (can_reuse_delta(base_ref, entry, &base_entry)) {
			oe_set_type(entry, entry->in_pack_type);
			SET_SIZE(entry, in_pack_size); /* delta size */
			SET_DELTA_SIZE(entry, in_pack_size);

			if (base_entry) {
				SET_DELTA(entry, base_entry);
				entry->delta_sibling_idx = base_entry->delta_child_idx;
				SET_DELTA_CHILD(base_entry, entry);
			} else {
				SET_DELTA_EXT(entry, base_ref);
			}

			unuse_pack(&w_curs);
			return;
		}

		if (oe_type(entry)) {
			off_t delta_pos;

			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
			canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
			if (canonical_size == 0)
				goto give_up;
			SET_SIZE(entry, canonical_size);
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with oid_object_info() to find about the object type
		 * at this point...
		 */
give_up:
		unuse_pack(&w_curs);
	}

	oe_set_type(entry,
		    oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
	if (entry->type_valid) {
		SET_SIZE(entry, canonical_size);
	} else {
		/*
		 * Bad object type is checked in prepare_pack().  This is
		 * to permit a missing preferred base object to be ignored
		 * as a preferred base.  Doing so can result in a larger
		 * pack file, but the transfer will still take place.
		 */
	}
}
static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;
	const struct packed_git *a_in_pack = IN_PACK(a);
	const struct packed_git *b_in_pack = IN_PACK(b);

	/* avoid filesystem thrashing with loose objects */
	if (!a_in_pack && !b_in_pack)
		return oidcmp(&a->idx.oid, &b->idx.oid);

	if (a_in_pack < b_in_pack)
		return -1;
	if (a_in_pack > b_in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
	       (a->in_pack_offset > b->in_pack_offset);
}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
	struct object_info oi = OBJECT_INFO_INIT;
	enum object_type type;
	unsigned long size;

	while (*idx) {
		struct object_entry *oe = &to_pack.objects[*idx - 1];

		if (oe == entry)
			*idx = oe->delta_sibling_idx;
		else
			idx = &oe->delta_sibling_idx;
	}
	SET_DELTA(entry, NULL);
	entry->depth = 0;

	oi.sizep = &size;
	oi.typep = &type;
	if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
		/*
		 * We failed to get the info from this pack for some reason;
		 * fall back to oid_object_info, which may find another copy.
		 * And if that fails, the error will be recorded in oe_type(entry)
		 * and dealt with in prepare_pack().
		 */
		oe_set_type(entry,
			    oid_object_info(the_repository, &entry->idx.oid, &size));
	} else {
		oe_set_type(entry, type);
	}
	SET_SIZE(entry, size);
}
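/*
 * The unlink loop above uses the classic "pointer to the previous
 * link" idiom, with 1-based indexes standing in for pointers: by
 * walking a pointer *to* each link field, the victim can be spliced
 * out without tracking a separate "prev" node. A pointer-based
 * equivalent (illustration only; not part of this file):
 */
#if 0
struct node { struct node *next; };

static void unlink_demo(struct node **head, struct node *victim)
{
	struct node **idx = head;

	while (*idx) {
		if (*idx == victim)
			*idx = victim->next;	/* splice out; no "prev" needed */
		else
			idx = &(*idx)->next;
	}
}
#endif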
/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{
	/*
	 * The actual depth of each object we will write is stored as an int,
	 * as it cannot exceed our int "depth" limit. But before we break
	 * chains based on that limit, we may potentially go as deep as the
	 * number of objects, which is elsewhere bounded to a uint32_t.
	 */
	uint32_t total_depth;
	struct object_entry *cur, *next;

	for (cur = entry, total_depth = 0;
	     cur;
	     cur = DELTA(cur), total_depth++) {
		if (cur->dfs_state == DFS_DONE) {
			/*
			 * We've already seen this object and know it isn't
			 * part of a cycle. We do need to append its depth
			 * to our count.
			 */
			total_depth += cur->depth;
			break;
		}

		/*
		 * We break cycles before looping, so an ACTIVE state (or any
		 * other cruft which made its way into the state variable)
		 * is a bug.
		 */
		if (cur->dfs_state != DFS_NONE)
			BUG("confusing delta dfs state in first pass: %d",
			    cur->dfs_state);

		/*
		 * Now we know this is the first time we've seen the object. If
		 * it's not a delta, we're done traversing, but we'll mark it
		 * done to save time on future traversals.
		 */
		if (!DELTA(cur)) {
			cur->dfs_state = DFS_DONE;
			break;
		}

		/*
		 * Mark ourselves as active and see if the next step causes
		 * us to cycle to another active object. It's important to do
		 * this _before_ we loop, because it impacts where we make the
		 * cut, and thus how our total_depth counter works.
		 * E.g., we may see a partial loop like:
		 *
		 *   A -> B -> C -> D -> B
		 *
		 * Cutting B->C breaks the cycle. But now the depth of A is
		 * only 1, and our total_depth counter is at 3. The size of the
		 * error is always one less than the size of the cycle we
		 * broke. Commits C and D were "lost" from A's chain.
		 *
		 * If we instead cut D->B, then the depth of A is correct at 3.
		 * We keep all commits in the chain that we examined.
		 */
		cur->dfs_state = DFS_ACTIVE;
		if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
			drop_reused_delta(cur);
			cur->dfs_state = DFS_DONE;
			break;
		}
	}

	/*
	 * And now that we've gone all the way to the bottom of the chain, we
	 * need to clear the active flags and set the depth fields as
	 * appropriate. Unlike the loop above, which can quit when it drops a
	 * delta, we need to keep going to look for more depth cuts. So we need
	 * an extra "next" pointer to keep going after we reset cur->delta.
	 */
	for (cur = entry; cur; cur = next) {
		next = DELTA(cur);

		/*
		 * We should have a chain of zero or more ACTIVE states down to
		 * a final DONE. We can quit after the DONE, because either it
		 * has no bases, or we've already handled them in a previous
		 * call.
		 */
		if (cur->dfs_state == DFS_DONE)
			break;
		else if (cur->dfs_state != DFS_ACTIVE)
			BUG("confusing delta dfs state in second pass: %d",
			    cur->dfs_state);

		/*
		 * If the total_depth is more than depth, then we need to snip
		 * the chain into two or more smaller chains that don't exceed
		 * the maximum depth. Most of the resulting chains will contain
		 * (depth + 1) entries (i.e., depth deltas plus one base), and
		 * the last chain (i.e., the one containing entry) will contain
		 * whatever entries are left over, namely
		 * (total_depth % (depth + 1)) of them.
		 *
		 * Since we are iterating towards decreasing depth, we need to
		 * decrement total_depth as we go, and we need to write to the
		 * entry what its final depth will be after all of the
		 * snipping. Since we're snipping into chains of length (depth
		 * + 1) entries, the final depth of an entry will be its
		 * original depth modulo (depth + 1). Any time we encounter an
		 * entry whose final depth is supposed to be zero, we snip it
		 * from its delta base, thereby making it so.
		 */
		cur->depth = (total_depth--) % (depth + 1);
		if (!cur->depth)
			drop_reused_delta(cur);

		cur->dfs_state = DFS_DONE;
	}
}
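/*
 * A worked example of the snipping arithmetic above, assuming --depth 3
 * (so a chain may hold depth + 1 = 4 entries) and a reused chain whose
 * starting entry sits at total_depth 7:
 *
 *   total_depth:  7  6  5  4  3  2  1  0
 *   final depth:  3  2  1  0  3  2  1  0
 *
 * The entries whose final depth comes out to zero are snipped from
 * their bases by drop_reused_delta(), leaving independent chains of at
 * most three deltas over a base each.
 */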
static void get_object_details(void)
{
	uint32_t i;
	struct object_entry **sorted_by_offset;

	if (progress)
		progress_state = start_progress(_("Counting objects"),
						to_pack.nr_objects);

	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
	for (i = 0; i < to_pack.nr_objects; i++)
		sorted_by_offset[i] = to_pack.objects + i;
	QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = sorted_by_offset[i];
		check_object(entry);
		if (entry->type_valid &&
		    oe_size_greater_than(&to_pack, entry, big_file_threshold))
			entry->no_try_delta = 1;
		display_progress(progress_state, i + 1);
	}
	stop_progress(&progress_state);

	/*
	 * This must happen in a second pass, since we rely on the delta
	 * information for the whole list being completed.
	 */
	for (i = 0; i < to_pack.nr_objects; i++)
		break_delta_chains(&to_pack.objects[i]);

	free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one. The deepest deltas are therefore the oldest objects, which are
 * less likely to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;
	const enum object_type a_type = oe_type(a);
	const enum object_type b_type = oe_type(b);
	const unsigned long a_size = SIZE(a);
	const unsigned long b_size = SIZE(b);

	if (a_type > b_type)
		return -1;
	if (a_type < b_type)
		return 1;
	if (a->hash > b->hash)
		return -1;
	if (a->hash < b->hash)
		return 1;
	if (a->preferred_base > b->preferred_base)
		return -1;
	if (a->preferred_base < b->preferred_base)
		return 1;
	if (use_delta_islands) {
		const int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
		if (island_cmp)
			return island_cmp;
	}
	if (a_size > b_size)
		return -1;
	if (a_size < b_size)
		return 1;
	return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
	struct object_entry *entry;
	void *data;
	struct delta_index *index;
	unsigned depth;
};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{
	if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
		return 0;

	if (delta_size < cache_max_small_delta_size)
		return 1;

	/* cache delta, if objects are large enough compared to delta size */
	if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
		return 1;

	return 0;
}

/* Protect delta_cache_size */
static pthread_mutex_t cache_mutex;
#define cache_lock()		pthread_mutex_lock(&cache_mutex)
#define cache_unlock()		pthread_mutex_unlock(&cache_mutex)
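/*
 * Rough numbers for the size heuristic above: the shifts compare the
 * delta size in KiB against src_size/1 MiB + trg_size/2 MiB. For a pair
 * of assumed 100 MiB objects, that gives 100 + 50 = 150, so any delta
 * up to ~150 KiB (about 0.15% of the object size) is still considered
 * worth caching even though it exceeds cache_max_small_delta_size.
 */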
/*
 * Protect object list partitioning (e.g. struct thread_param) and
 * progress_state
 */
static pthread_mutex_t progress_mutex;
#define progress_lock()		pthread_mutex_lock(&progress_mutex)
#define progress_unlock()	pthread_mutex_unlock(&progress_mutex)

/*
 * Access to struct object_entry is unprotected since each thread owns
 * a portion of the main object list. Just don't access object entries
 * ahead in the list because they can be stolen and would need
 * progress_mutex for protection.
 */

/*
 * Return the size of the object without doing any delta
 * reconstruction (so non-deltas are true object sizes, but deltas
 * return the size of the delta data).
 */
unsigned long oe_get_size_slow(struct packing_data *pack,
			       const struct object_entry *e)
{
	struct packed_git *p;
	struct pack_window *w_curs;
	unsigned char *buf;
	enum object_type type;
	unsigned long used, avail, size;

	if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
		packing_data_lock(&to_pack);
		if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
			die(_("unable to get size of %s"),
			    oid_to_hex(&e->idx.oid));
		packing_data_unlock(&to_pack);
		return size;
	}

	p = oe_in_pack(pack, e);
	if (!p)
		BUG("when e->type is a delta, it must belong to a pack");

	packing_data_lock(&to_pack);
	w_curs = NULL;
	buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
	used = unpack_object_header_buffer(buf, avail, &type, &size);
	if (used == 0)
		die(_("unable to parse object header of %s"),
		    oid_to_hex(&e->idx.oid));

	unuse_pack(&w_curs);
	packing_data_unlock(&to_pack);
	return size;
}
static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (oe_type(trg_entry) != oe_type(src_entry))
		return -1;

	/*
	 * We do not bother to try a delta that we discarded on an
	 * earlier try, but only when reusing delta data.  Note that
	 * src_entry that is marked as the preferred_base should always
	 * be considered, as even if we produce a suboptimal delta against
	 * it, we will still save the transfer cost, as we already know
	 * the other side has it and we won't send src_entry at all.
	 */
	if (reuse_delta && IN_PACK(trg_entry) &&
	    IN_PACK(trg_entry) == IN_PACK(src_entry) &&
	    !src_entry->preferred_base &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics. */
	trg_size = SIZE(trg_entry);
	if (!DELTA(trg_entry)) {
		max_size = trg_size/2 - the_hash_algo->rawsz;
		ref_depth = 1;
	} else {
		max_size = DELTA_SIZE(trg_entry);
		ref_depth = trg->depth;
	}
	max_size = (uint64_t)max_size * (max_depth - src->depth) /
						(max_depth - ref_depth + 1);
	if (max_size == 0)
		return 0;
	src_size = SIZE(src_entry);
	sizediff = src_size < trg_size ? trg_size - src_size : 0;
	if (sizediff >= max_size)
		return 0;
	if (trg_size < src_size / 32)
		return 0;

	if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
		return 0;

	/* Load data if not already done */
	if (!trg->data) {
		packing_data_lock(&to_pack);
		trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
		packing_data_unlock(&to_pack);
		if (!trg->data)
			die(_("object %s cannot be read"),
			    oid_to_hex(&trg_entry->idx.oid));
		if (sz != trg_size)
			die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
			    oid_to_hex(&trg_entry->idx.oid), (uintmax_t)sz,
			    (uintmax_t)trg_size);
		*mem_usage += sz;
	}
	if (!src->data) {
		packing_data_lock(&to_pack);
		src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
		packing_data_unlock(&to_pack);
		if (!src->data) {
			if (src_entry->preferred_base) {
				static int warned = 0;
				if (!warned++)
					warning(_("object %s cannot be read"),
						oid_to_hex(&src_entry->idx.oid));
				/*
				 * Those objects are not included in the
				 * resulting pack.  Be resilient and ignore
				 * them if they can't be read, in case the
				 * pack could be created nevertheless.
				 */
				return 0;
			}
			die(_("object %s cannot be read"),
			    oid_to_hex(&src_entry->idx.oid));
		}
		if (sz != src_size)
			die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
			    oid_to_hex(&src_entry->idx.oid), (uintmax_t)sz,
			    (uintmax_t)src_size);
		*mem_usage += sz;
	}
	if (!src->index) {
		src->index = create_delta_index(src->data, src_size);
		if (!src->index) {
			static int warned = 0;
			if (!warned++)
				warning(_("suboptimal pack - out of memory"));
			return 0;
		}
		*mem_usage += sizeof_delta_index(src->index);
	}

	delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
	if (!delta_buf)
		return 0;

	if (DELTA(trg_entry)) {
		/* Prefer only shallower same-sized deltas. */
		if (delta_size == DELTA_SIZE(trg_entry) &&
		    src->depth + 1 >= trg->depth) {
			free(delta_buf);
			return 0;
		}
	}
	/*
	 * Handle memory allocation outside of the cache
	 * accounting lock.  The compiler will optimize the strangeness
	 * away when NO_PTHREADS is defined.
	 */
	free(trg_entry->delta_data);
	cache_lock();
	if (trg_entry->delta_data) {
		delta_cache_size -= DELTA_SIZE(trg_entry);
		trg_entry->delta_data = NULL;
	}
	if (delta_cacheable(src_size, trg_size, delta_size)) {
		delta_cache_size += delta_size;
		cache_unlock();
		trg_entry->delta_data = xrealloc(delta_buf, delta_size);
	} else {
		cache_unlock();
		free(delta_buf);
	}

	SET_DELTA(trg_entry, src_entry);
	SET_DELTA_SIZE(trg_entry, delta_size);
	trg->depth = src->depth + 1;

	return 1;
}

static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
	struct object_entry *child = DELTA_CHILD(me);
	unsigned int m = n;
	while (child) {
		const unsigned int c = check_delta_limit(child, n + 1);
		if (m < c)
			m = c;
		child = DELTA_SIBLING(child);
	}
	return m;
}

static unsigned long free_unpacked(struct unpacked *n)
{
	unsigned long freed_mem = sizeof_delta_index(n->index);
	free_delta_index(n->index);
	n->index = NULL;
	if (n->data) {
		freed_mem += SIZE(n->entry);
		FREE_AND_NULL(n->data);
	}
	n->entry = NULL;
	n->depth = 0;
	return freed_mem;
}
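/*
 * find_deltas() below slides a window of up to `window` recently
 * visited objects over its slice of the list, calling try_delta()
 * against each window entry as a candidate base. Slots are recycled
 * as the window wraps around, and evicted early when
 * window_memory_limit is exceeded.
 */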
static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{
	uint32_t i, idx = 0, count = 0;
	struct unpacked *array;
	unsigned long mem_usage = 0;

	array = xcalloc(window, sizeof(struct unpacked));

	for (;;) {
		struct object_entry *entry;
		struct unpacked *n = array + idx;
		int j, max_depth, best_base = -1;

		progress_lock();
		if (!*list_size) {
			progress_unlock();
			break;
		}
		entry = *list++;
		(*list_size)--;
		if (!entry->preferred_base) {
			(*processed)++;
			display_progress(progress_state, *processed);
		}
		progress_unlock();

		mem_usage -= free_unpacked(n);
		n->entry = entry;

		while (window_memory_limit &&
		       mem_usage > window_memory_limit &&
		       count > 1) {
			const uint32_t tail = (idx + window - count) % window;
			mem_usage -= free_unpacked(array + tail);
			count--;
		}

		/* We do not compute deltas to *create* objects we are not
		 * going to pack.
		 */
		if (entry->preferred_base)
			goto next;

		/*
		 * If the current object is at the pack edge, take the depth of
		 * the objects that depend on the current object into account;
		 * otherwise they would become too deep.
		 */
		max_depth = depth;
		if (DELTA_CHILD(entry)) {
			max_depth -= check_delta_limit(entry, 0);
			if (max_depth <= 0)
				goto next;
		}

		j = window;
		while (--j > 0) {
			int ret;
			uint32_t other_idx = idx + j;
			struct unpacked *m;
			if (other_idx >= window)
				other_idx -= window;
			m = array + other_idx;
			if (!m->entry)
				break;
			ret = try_delta(n, m, max_depth, &mem_usage);
			if (ret < 0)
				break;
			else if (ret > 0)
				best_base = other_idx;
		}

		/*
		 * If we decided to cache the delta data, then it is best
		 * to compress it right away.  First because we have to do
		 * it anyway, and doing it here while we're threaded will
		 * save a lot of time in the non-threaded write phase,
		 * as well as allow for caching more deltas within
		 * the same cache size limit.
		 * ...
		 * But only if not writing to stdout, since in that case
		 * the network is most likely throttling writes anyway,
		 * and therefore it is best to go to the write phase ASAP
		 * instead, as we can afford spending more time compressing
		 * between writes at that moment.
		 */
		if (entry->delta_data && !pack_to_stdout) {
			unsigned long size;

			size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
			if (size < (1U << OE_Z_DELTA_BITS)) {
				entry->z_delta_size = size;
				cache_lock();
				delta_cache_size -= DELTA_SIZE(entry);
				delta_cache_size += entry->z_delta_size;
				cache_unlock();
			} else {
				FREE_AND_NULL(entry->delta_data);
				entry->z_delta_size = 0;
			}
		}

		/* if we made n a delta, and if n is already at max
		 * depth, leaving it in the window is pointless.  we
		 * should evict it first.
		 */
		if (DELTA(entry) && max_depth <= n->depth)
			continue;

		/*
		 * Move the best delta base up in the window, after the
		 * currently deltified object, to keep it longer.  It will
		 * be the first base object to be attempted next.
		 */
		if (DELTA(entry)) {
			struct unpacked swap = array[best_base];
			int dist = (window + idx - best_base) % window;
			int dst = best_base;
			while (dist--) {
				int src = (dst + 1) % window;
				array[dst] = array[src];
				dst = src;
			}
			array[dst] = swap;
		}

		next:
		idx++;
		if (count + 1 < window)
			count++;
		if (idx >= window)
			idx = 0;
	}

	for (i = 0; i < window; ++i) {
		free_delta_index(array[i].index);
		free(array[i].data);
	}
	free(array);
}

static void try_to_free_from_threads(size_t size)
{
	packing_data_lock(&to_pack);
	release_pack_memory(size);
	packing_data_unlock(&to_pack);
}

static try_to_free_t old_try_to_free_routine;
/*
 * The main object list is split into smaller lists, each of which is
 * handed to one worker.
 *
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 *
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 *
 * The main thread steals half of the work from the worker that has the
 * most work left and hands it to the idle worker.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

static pthread_cond_t progress_cond;

/*
 * Mutex and condition variable can't be statically initialized on Windows.
 */
static void init_threaded_search(void)
{
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}

static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	progress_lock();
	while (me->remaining) {
		progress_unlock();

		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here.  In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);

		progress_lock();
	}
	progress_unlock();
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}
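/*
 * A rough timeline of one hand-off under this protocol (a sketch of
 * the code above and below, with two workers A and B):
 *
 *   worker A: finishes its slice, sets A.working = 0, signals
 *             progress_cond, then blocks until A.data_ready is 1
 *   main:     wakes, finds A idle, picks the busiest worker B as the
 *             victim and moves half of B's remaining list to A
 *   main:     sets A.data_ready = 1 and signals A.cond
 *   worker A: wakes, resets data_ready to 0, resumes find_deltas()
 */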
static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf_ln(stderr, _("Delta compression using up to %d threads"),
			   delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2*window && i+1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size-1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die(_("unable to create thread: %s"), strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2*window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}
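/*
 * Example of the initial partitioning above (illustrative numbers):
 * 1000 objects across 4 threads gives sub_size = 1000/4 = 250 for the
 * first slice, then 750/3 for the next, and so on; each boundary is
 * then nudged forward while neighbouring entries share the same path
 * hash, so objects from one "path" start out on the same thread.
 */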
static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(the_repository, oid);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die(_("unable to pack objects reachable from tag %s"),
			    oid_to_hex(oid));

		add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") && /* is a tag? */
	    !peel_ref(path, &peeled) && /* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
		add_tag_chain(oid);
	return 0;
}

static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	if (use_delta_islands)
		resolve_tree_islands(the_repository, progress, &to_pack);

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway, so there is no need
	 * to incur the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (DELTA(entry))
			/* This happens if we decided to reuse an existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (!entry->type_valid ||
		    oe_size_less_than(&to_pack, entry, 50))
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (oe_type(entry) < 0)
				die(_("unable to get type of object %s"),
				    oid_to_hex(&entry->idx.oid));
		} else {
			if (oe_type(entry) < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die(_("inconsistency with delta count"));
	}
	free(delta_list);
}

static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		max_delta_cache_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.usesparse")) {
		sparse = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die(_("invalid number of threads specified (%d)"),
			    delta_search_threads);
		if (!HAVE_THREADS && delta_search_threads != 1) {
			warning(_("no threads support, ignoring %s"), k);
			delta_search_threads = 0;
		}
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die(_("bad pack.indexversion=%"PRIu32),
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}

static void read_object_list_from_stdin(void)
{
	char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
	struct object_id oid;
	const char *p;

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("BUG: fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_oid_hex(line+1, &oid))
				die(_("expected edge object ID, got garbage:\n %s"),
				    line);
			add_preferred_base(&oid);
			continue;
		}
		if (parse_oid_hex(line, &oid, &p))
			die(_("expected object ID, got garbage:\n %s"), line);

		add_preferred_base_object(p + 1);
		add_object_entry(&oid, OBJ_NONE, p + 1, 0);
	}
}
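/*
 * The reader above expects one object per line, optionally followed by
 * a name, with a leading '-' marking an edge object the receiver is
 * already known to have. A hypothetical input (hashes made up for
 * illustration):
 *
 *   7f9a271dd4392f0f8c2ecb17a5fe3b6cdbec9102 Makefile
 *   -2c26b46b68ffc68ff99b453c1d30413413422d70
 */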
/* Remember to update object flag allocation in object.h */
#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);

	if (use_delta_islands)
		propagate_island_marks(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(&obj->oid, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;

	if (use_delta_islands) {
		const char *p;
		unsigned depth;
		struct object_entry *ent;

		/* the empty string is a root tree, which is depth 0 */
		depth = *name ? 1 : 0;
		for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
			depth++;

		ent = packlist_find(&to_pack, obj->oid.hash, NULL);
		if (ent && depth > oe_tree_depth(&to_pack, ent))
			oe_set_tree_depth(&to_pack, ent, depth);
	}
}
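/*
 * Example of the tree-depth computation in show_object() above: the
 * empty name (the root tree) has depth 0, "Makefile" has depth 1, and
 * "Documentation/technical/api-trace2.txt" has depth 3 (one for the
 * non-empty name plus one per '/').
 */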
static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
{
	assert(arg_missing_action == MA_ALLOW_ANY);

	/*
	 * Quietly ignore ALL missing objects.  This avoids problems with
	 * staging them now and getting an odd error later.
	 */
	if (!has_object_file(&obj->oid))
		return;

	show_object(obj, name, data);
}

static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
{
	assert(arg_missing_action == MA_ALLOW_PROMISOR);

	/*
	 * Quietly ignore EXPECTED missing objects.  This avoids problems with
	 * staging them now and getting an odd error later.
	 */
	if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
		return;

	show_object(obj, name, data);
}

static int option_parse_missing_action(const struct option *opt,
				       const char *arg, int unset)
{
	assert(arg);
	assert(!unset);

	if (!strcmp(arg, "error")) {
		arg_missing_action = MA_ERROR;
		fn_show_object = show_object;
		return 0;
	}

	if (!strcmp(arg, "allow-any")) {
		arg_missing_action = MA_ALLOW_ANY;
		fetch_if_missing = 0;
		fn_show_object = show_object__ma_allow_any;
		return 0;
	}

	if (!strcmp(arg, "allow-promisor")) {
		arg_missing_action = MA_ALLOW_PROMISOR;
		fetch_if_missing = 0;
		fn_show_object = show_object__ma_allow_promisor;
		return 0;
	}

	die(_("invalid value for --missing"));
	return 0;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(&commit->object.oid);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	unsigned int alloc;
	unsigned int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in the offset order, in order to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = get_all_packs(the_repository); p; p = p->next) {
		struct object_id oid;
		struct object *o;

		if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
			continue;
		if (open_pack_index(p))
			die(_("cannot open pack index"));

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			o = lookup_unknown_object(oid.hash);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(&o->oid, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data)
{
	enum object_type type = oid_object_info(the_repository, oid, NULL);

	if (type < 0) {
		warning(_("loose object at %s could not be examined"), path);
		return 0;
	}

	add_object_entry(oid, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}
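/*
 * The lookup below keeps a one-entry MRU cache (last_found) so that
 * consecutive queries for objects in the same pack skip most of the
 * list walk; the sentinel value (void *)1 means "no pack cached yet".
 * The loop visits last_found first, then the rest of the pack list,
 * skipping last_found when it comes around again.
 */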
static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found :
					get_all_packs(the_repository);

	while (p) {
		if ((!p->pack_local || p->pack_keep ||
		     p->pack_keep_in_core) &&
		    find_pack_entry_one(oid->hash, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = get_all_packs(the_repository);
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

/*
 * Store a list of sha1s that should not be discarded
 * because they are either written too recently, or are
 * reachable from another object that was.
 *
 * This is filled by get_object_list.
 */
static struct oid_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    timestamp_t mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (oid_array_lookup(&recent_objects, oid) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	struct object_id oid;

	for (p = get_all_packs(the_repository); p; p = p->next) {
		if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
			continue;

		if (open_pack_index(p))
			die(_("cannot open pack index"));

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			if (!packlist_find(&to_pack, oid.hash, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(&oid) &&
			    !loosened_object_can_be_discarded(&oid, p->mtime))
				if (force_object_loose(&oid, p->mtime))
					die(_("unable to force loose object"));
		}
	}
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout &&
	       allow_ofs_delta &&
	       !ignore_packed_keep_on_disk &&
	       !ignore_packed_keep_in_core &&
	       (!local || !have_non_local_packs) &&
	       !incremental;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (!(bitmap_git = prepare_bitmap_walk(revs)))
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			bitmap_git,
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	oid_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	oid_array_append(&recent_objects, &commit->object.oid);
}
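/*
 * With --revs, get_object_list() below reads revision arguments from
 * stdin up to the first empty line, e.g. (a hypothetical session):
 *
 *   v2.23.0
 *   --not
 *   v2.22.0
 *
 * "--not" toggles the UNINTERESTING flag for subsequent lines, and
 * "--shallow <oid>" registers a shallow boundary and disables bitmap
 * use.
 */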
static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	struct setup_revision_opt s_r_opt = {
		.allow_exclude_promisor_objects = 1,
	};
	char line[1000];
	int flags = 0;
	int save_warning;

	repo_init_revisions(the_repository, &revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, &s_r_opt);

	/* make sure shallows are read */
	is_repository_shallow(the_repository);

	save_warning = warn_on_object_refname_ambiguity;
	warn_on_object_refname_ambiguity = 0;

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				struct object_id oid;
				if (get_oid_hex(line + 10, &oid))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(the_repository, &oid);
				use_bitmap_index = 0;
				continue;
			}
			die(_("not a rev '%s'"), line);
		}
		if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
			die(_("bad revision '%s'"), line);
	}

	warn_on_object_refname_ambiguity = save_warning;

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (use_delta_islands)
		load_delta_islands(the_repository);

	if (prepare_revision_walk(&revs))
		die(_("revision walk setup failed"));
	mark_edges_uninteresting(&revs, show_edge, sparse);

	if (!fn_show_object)
		fn_show_object = show_object;
	traverse_commit_list_filtered(&filter_options, &revs,
				      show_commit, fn_show_object, NULL,
				      NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die(_("unable to add recent objects"));
		if (prepare_revision_walk(&revs))
			die(_("revision walk setup failed"));
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	oid_array_clear(&recent_objects);
}

static void add_extra_kept_packs(const struct string_list *names)
{
	struct packed_git *p;

	if (!names->nr)
		return;

	for (p = get_all_packs(the_repository); p; p = p->next) {
		const char *name = basename(p->pack_name);
		int i;

		if (!p->pack_local)
			continue;

		for (i = 0; i < names->nr; i++)
			if (!fspathcmp(name, names->items[i].string))
				break;

		if (i < names->nr) {
			p->pack_keep_in_core = 1;
			ignore_packed_keep_in_core = 1;
			continue;
		}
	}
}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{
	char *c;
	const char *val = arg;

	BUG_ON_OPT_NEG(unset);

	pack_idx_opts.version = strtoul(val, &c, 10);
	if (pack_idx_opts.version > 2)
		die(_("unsupported index version %s"), val);
	if (*c == ',' && c[1])
		pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
	if (*c || pack_idx_opts.off32_limit & 0x80000000)
		die(_("bad index version '%s'"), val);
	return 0;
}

static int option_parse_unpack_unreachable(const struct option *opt,
					   const char *arg, int unset)
{
	if (unset) {
		unpack_unreachable = 0;
		unpack_unreachable_expiration = 0;
	}
	else {
		unpack_unreachable = 1;
		if (arg)
			unpack_unreachable_expiration = approxidate(arg);
	}
	return 0;
}
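/*
 * Example arguments for the two option parsers above (illustrative):
 * "--index-version=2,0x40000000" selects idx v2 and stores offsets of
 * 1 GiB and above in the 64-bit offset table, while
 * "--unpack-unreachable=2.weeks.ago" loosens only those unreachable
 * objects newer than that approxidate cutoff.
 */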
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
		  N_("write the pack index file in the specified idx format version"),
		  PARSE_OPT_NONEG, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
			      N_("limit the objects to those that are not yet packed"),
			      1, PARSE_OPT_NONEG),
		OPT_SET_INT_F(0, "all", &rev_list_all,
			      N_("include objects reachable from any reference"),
			      1, PARSE_OPT_NONEG),
		OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
			      N_("include objects referred by reflog entries"),
			      1, PARSE_OPT_NONEG),
		OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
			      N_("include objects referred to by the index"),
			      1, PARSE_OPT_NONEG),
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "sparse", &sparse,
			 N_("use the sparse reachability algorithm")),
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
			 N_("ignore packs that have companion .keep file")),
		OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
				N_("ignore this pack")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
		{ OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
		  N_("handling for missing objects"), PARSE_OPT_NONEG,
		  option_parse_missing_action },
		OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
			 N_("do not pack objects in promisor packfiles")),
		OPT_BOOL(0, "delta-islands", &use_delta_islands,
			 N_("respect islands during delta compression")),
		OPT_END(),
	};

	if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
		BUG("too many dfs states, increase OE_DFS_STATE_BITS");

	read_replace_refs = 0;

	sparse = git_env_bool("GIT_TEST_PACK_SPARSE", 0);
	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

	if (depth >= (1 << OE_DEPTH_BITS)) {
		warning(_("delta chain depth %d is too deep, forcing %d"),
			depth, (1 << OE_DEPTH_BITS) - 1);
		depth = (1 << OE_DEPTH_BITS) - 1;
	}
	if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
		warning(_("pack.deltaCacheLimit is too high, forcing %d"),
			(1U << OE_Z_DELTA_BITS) - 1);
		cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
	}

	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (exclude_promisor_objects) {
		use_internal_rev_list = 1;
		fetch_if_missing = 0;
		argv_array_push(&rp, "--exclude-promisor-objects");
	}
	if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
		use_internal_rev_list = 1;

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die(_("bad pack compression level %d"), pack_compression_level);

	if (!delta_search_threads) /* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

	if (!HAVE_THREADS && delta_search_threads != 1)
		warning(_("no threads support, ignoring --threads"));
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die(_("--max-pack-size cannot be used to build a pack for transfer"));
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning(_("minimum pack size limit is 1 MiB"));
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die(_("--thin cannot be used to build an indexable pack"));
	if (keep_unreachable && unpack_unreachable)
		die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	if (filter_options.choice) {
		if (!pack_to_stdout)
			die(_("cannot use --filter without --stdout"));
		use_bitmap_index = 0;
	}

	/*
	 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed objects
	 *   are packed in suboptimal order).
	 *
	 * - to use a more robust pack-generation codepath (avoiding possible
	 *   bugs in the bitmap code and possible bitmap index corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (use_delta_islands)
		argv_array_push(&rp, "--topo-order");

	if (progress && all_progress_implied)
		progress = 2;

	add_extra_kept_packs(&keep_pack_list);
	if (ignore_packed_keep_on_disk) {
		struct packed_git *p;
		for (p = get_all_packs(the_repository); p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep_on_disk = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep_on_disk above, we do not
		 * want to unset "local" based on looking at packs, as
		 * it also covers non-local objects
		 */
		struct packed_git *p;
		for (p = get_all_packs(the_repository); p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	trace2_region_enter("pack-objects", "enumerate-objects",
			    the_repository);
	prepare_packing_data(the_repository, &to_pack);

	if (progress)
		progress_state = start_progress(_("Enumerating objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);
	trace2_region_leave("pack-objects", "enumerate-objects",
			    the_repository);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result) {
		trace2_region_enter("pack-objects", "prepare-pack",
				    the_repository);
		prepare_pack(window, depth);
		trace2_region_leave("pack-objects", "prepare-pack",
				    the_repository);
	}

	trace2_region_enter("pack-objects", "write-pack-file", the_repository);
	write_pack_file();
	trace2_region_leave("pack-objects", "write-pack-file", the_repository);

	if (progress)
		fprintf_ln(stderr,
			   _("Total %"PRIu32" (delta %"PRIu32"),"
			     " reused %"PRIu32" (delta %"PRIu32")"),
			   written, written_delta, reused, reused_delta);
	return 0;
}
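/*
 * Typical invocations (a sketch, not part of the original file):
 *
 *   # feed an object list on stdin, stream the pack to stdout
 *   git rev-list --objects HEAD | git pack-objects --stdout > tmp.pack
 *
 *   # or let pack-objects run the traversal itself
 *   echo HEAD | git pack-objects --revs .git/objects/pack/pack
 */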