1#include"builtin.h" 2#include"cache.h" 3#include"repository.h" 4#include"config.h" 5#include"attr.h" 6#include"object.h" 7#include"blob.h" 8#include"commit.h" 9#include"tag.h" 10#include"tree.h" 11#include"delta.h" 12#include"pack.h" 13#include"pack-revindex.h" 14#include"csum-file.h" 15#include"tree-walk.h" 16#include"diff.h" 17#include"revision.h" 18#include"list-objects.h" 19#include"list-objects-filter.h" 20#include"list-objects-filter-options.h" 21#include"pack-objects.h" 22#include"progress.h" 23#include"refs.h" 24#include"streaming.h" 25#include"thread-utils.h" 26#include"pack-bitmap.h" 27#include"reachable.h" 28#include"sha1-array.h" 29#include"argv-array.h" 30#include"list.h" 31#include"packfile.h" 32#include"object-store.h" 33#include"dir.h" 34 35#define IN_PACK(obj) oe_in_pack(&to_pack, obj) 36#define SIZE(obj) oe_size(&to_pack, obj) 37#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size) 38#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj) 39#define DELTA(obj) oe_delta(&to_pack, obj) 40#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) 41#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) 42#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) 43#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) 44#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) 45#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) 46 47static const char*pack_usage[] = { 48N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 49N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 50 NULL 51}; 52 53/* 54 * Objects we are going to pack are collected in the `to_pack` structure. 55 * It contains an array (dynamically expanded) of the object data, and a map 56 * that can resolve SHA1s to their position in the array. 57 */ 58static struct packing_data to_pack; 59 60static struct pack_idx_entry **written_list; 61static uint32_t nr_result, nr_written, nr_seen; 62 63static int non_empty; 64static int reuse_delta =1, reuse_object =1; 65static int keep_unreachable, unpack_unreachable, include_tag; 66static timestamp_t unpack_unreachable_expiration; 67static int pack_loose_unreachable; 68static int local; 69static int have_non_local_packs; 70static int incremental; 71static int ignore_packed_keep_on_disk; 72static int ignore_packed_keep_in_core; 73static int allow_ofs_delta; 74static struct pack_idx_option pack_idx_opts; 75static const char*base_name; 76static int progress =1; 77static int window =10; 78static unsigned long pack_size_limit; 79static int depth =50; 80static int delta_search_threads; 81static int pack_to_stdout; 82static int num_preferred_base; 83static struct progress *progress_state; 84 85static struct packed_git *reuse_packfile; 86static uint32_t reuse_packfile_objects; 87static off_t reuse_packfile_offset; 88 89static int use_bitmap_index_default =1; 90static int use_bitmap_index = -1; 91static int write_bitmap_index; 92static uint16_t write_bitmap_options; 93 94static int exclude_promisor_objects; 95 96static unsigned long delta_cache_size =0; 97static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; 98static unsigned long cache_max_small_delta_size =1000; 99 100static unsigned long window_memory_limit =0; 101 102static struct list_objects_filter_options filter_options; 103 104enum missing_action { 105 MA_ERROR =0,/* fail if any missing objects are encountered */ 106 MA_ALLOW_ANY,/* silently allow ALL missing objects */ 107 MA_ALLOW_PROMISOR,/* silently allow all missing PROMISOR objects */ 108}; 109static enum missing_action arg_missing_action; 110static show_object_fn fn_show_object; 111 112/* 113 * stats 114 */ 115static uint32_t written, written_delta; 116static uint32_t reused, reused_delta; 117 118/* 119 * Indexed commits 120 */ 121static struct commit **indexed_commits; 122static unsigned int indexed_commits_nr; 123static unsigned int indexed_commits_alloc; 124 125static voidindex_commit_for_bitmap(struct commit *commit) 126{ 127if(indexed_commits_nr >= indexed_commits_alloc) { 128 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 129REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 130} 131 132 indexed_commits[indexed_commits_nr++] = commit; 133} 134 135static void*get_delta(struct object_entry *entry) 136{ 137unsigned long size, base_size, delta_size; 138void*buf, *base_buf, *delta_buf; 139enum object_type type; 140 141 buf =read_object_file(&entry->idx.oid, &type, &size); 142if(!buf) 143die("unable to read%s",oid_to_hex(&entry->idx.oid)); 144 base_buf =read_object_file(&DELTA(entry)->idx.oid, &type, 145&base_size); 146if(!base_buf) 147die("unable to read%s", 148oid_to_hex(&DELTA(entry)->idx.oid)); 149 delta_buf =diff_delta(base_buf, base_size, 150 buf, size, &delta_size,0); 151if(!delta_buf || delta_size !=DELTA_SIZE(entry)) 152die("delta size changed"); 153free(buf); 154free(base_buf); 155return delta_buf; 156} 157 158static unsigned longdo_compress(void**pptr,unsigned long size) 159{ 160 git_zstream stream; 161void*in, *out; 162unsigned long maxsize; 163 164git_deflate_init(&stream, pack_compression_level); 165 maxsize =git_deflate_bound(&stream, size); 166 167 in = *pptr; 168 out =xmalloc(maxsize); 169*pptr = out; 170 171 stream.next_in = in; 172 stream.avail_in = size; 173 stream.next_out = out; 174 stream.avail_out = maxsize; 175while(git_deflate(&stream, Z_FINISH) == Z_OK) 176;/* nothing */ 177git_deflate_end(&stream); 178 179free(in); 180return stream.total_out; 181} 182 183static unsigned longwrite_large_blob_data(struct git_istream *st,struct hashfile *f, 184const struct object_id *oid) 185{ 186 git_zstream stream; 187unsigned char ibuf[1024*16]; 188unsigned char obuf[1024*16]; 189unsigned long olen =0; 190 191git_deflate_init(&stream, pack_compression_level); 192 193for(;;) { 194 ssize_t readlen; 195int zret = Z_OK; 196 readlen =read_istream(st, ibuf,sizeof(ibuf)); 197if(readlen == -1) 198die(_("unable to read%s"),oid_to_hex(oid)); 199 200 stream.next_in = ibuf; 201 stream.avail_in = readlen; 202while((stream.avail_in || readlen ==0) && 203(zret == Z_OK || zret == Z_BUF_ERROR)) { 204 stream.next_out = obuf; 205 stream.avail_out =sizeof(obuf); 206 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 207hashwrite(f, obuf, stream.next_out - obuf); 208 olen += stream.next_out - obuf; 209} 210if(stream.avail_in) 211die(_("deflate error (%d)"), zret); 212if(readlen ==0) { 213if(zret != Z_STREAM_END) 214die(_("deflate error (%d)"), zret); 215break; 216} 217} 218git_deflate_end(&stream); 219return olen; 220} 221 222/* 223 * we are going to reuse the existing object data as is. make 224 * sure it is not corrupt. 225 */ 226static intcheck_pack_inflate(struct packed_git *p, 227struct pack_window **w_curs, 228 off_t offset, 229 off_t len, 230unsigned long expect) 231{ 232 git_zstream stream; 233unsigned char fakebuf[4096], *in; 234int st; 235 236memset(&stream,0,sizeof(stream)); 237git_inflate_init(&stream); 238do{ 239 in =use_pack(p, w_curs, offset, &stream.avail_in); 240 stream.next_in = in; 241 stream.next_out = fakebuf; 242 stream.avail_out =sizeof(fakebuf); 243 st =git_inflate(&stream, Z_FINISH); 244 offset += stream.next_in - in; 245}while(st == Z_OK || st == Z_BUF_ERROR); 246git_inflate_end(&stream); 247return(st == Z_STREAM_END && 248 stream.total_out == expect && 249 stream.total_in == len) ?0: -1; 250} 251 252static voidcopy_pack_data(struct hashfile *f, 253struct packed_git *p, 254struct pack_window **w_curs, 255 off_t offset, 256 off_t len) 257{ 258unsigned char*in; 259unsigned long avail; 260 261while(len) { 262 in =use_pack(p, w_curs, offset, &avail); 263if(avail > len) 264 avail = (unsigned long)len; 265hashwrite(f, in, avail); 266 offset += avail; 267 len -= avail; 268} 269} 270 271/* Return 0 if we will bust the pack-size limit */ 272static unsigned longwrite_no_reuse_object(struct hashfile *f,struct object_entry *entry, 273unsigned long limit,int usable_delta) 274{ 275unsigned long size, datalen; 276unsigned char header[MAX_PACK_OBJECT_HEADER], 277 dheader[MAX_PACK_OBJECT_HEADER]; 278unsigned hdrlen; 279enum object_type type; 280void*buf; 281struct git_istream *st = NULL; 282const unsigned hashsz = the_hash_algo->rawsz; 283 284if(!usable_delta) { 285if(oe_type(entry) == OBJ_BLOB && 286oe_size_greater_than(&to_pack, entry, big_file_threshold) && 287(st =open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) 288 buf = NULL; 289else{ 290 buf =read_object_file(&entry->idx.oid, &type, &size); 291if(!buf) 292die(_("unable to read%s"), 293oid_to_hex(&entry->idx.oid)); 294} 295/* 296 * make sure no cached delta data remains from a 297 * previous attempt before a pack split occurred. 298 */ 299FREE_AND_NULL(entry->delta_data); 300 entry->z_delta_size =0; 301}else if(entry->delta_data) { 302 size =DELTA_SIZE(entry); 303 buf = entry->delta_data; 304 entry->delta_data = NULL; 305 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 306 OBJ_OFS_DELTA : OBJ_REF_DELTA; 307}else{ 308 buf =get_delta(entry); 309 size =DELTA_SIZE(entry); 310 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 311 OBJ_OFS_DELTA : OBJ_REF_DELTA; 312} 313 314if(st)/* large blob case, just assume we don't compress well */ 315 datalen = size; 316else if(entry->z_delta_size) 317 datalen = entry->z_delta_size; 318else 319 datalen =do_compress(&buf, size); 320 321/* 322 * The object header is a byte of 'type' followed by zero or 323 * more bytes of length. 324 */ 325 hdrlen =encode_in_pack_object_header(header,sizeof(header), 326 type, size); 327 328if(type == OBJ_OFS_DELTA) { 329/* 330 * Deltas with relative base contain an additional 331 * encoding of the relative offset for the delta 332 * base from this object's position in the pack. 333 */ 334 off_t ofs = entry->idx.offset -DELTA(entry)->idx.offset; 335unsigned pos =sizeof(dheader) -1; 336 dheader[pos] = ofs &127; 337while(ofs >>=7) 338 dheader[--pos] =128| (--ofs &127); 339if(limit && hdrlen +sizeof(dheader) - pos + datalen + hashsz >= limit) { 340if(st) 341close_istream(st); 342free(buf); 343return0; 344} 345hashwrite(f, header, hdrlen); 346hashwrite(f, dheader + pos,sizeof(dheader) - pos); 347 hdrlen +=sizeof(dheader) - pos; 348}else if(type == OBJ_REF_DELTA) { 349/* 350 * Deltas with a base reference contain 351 * additional bytes for the base object ID. 352 */ 353if(limit && hdrlen + hashsz + datalen + hashsz >= limit) { 354if(st) 355close_istream(st); 356free(buf); 357return0; 358} 359hashwrite(f, header, hdrlen); 360hashwrite(f,DELTA(entry)->idx.oid.hash, hashsz); 361 hdrlen += hashsz; 362}else{ 363if(limit && hdrlen + datalen + hashsz >= limit) { 364if(st) 365close_istream(st); 366free(buf); 367return0; 368} 369hashwrite(f, header, hdrlen); 370} 371if(st) { 372 datalen =write_large_blob_data(st, f, &entry->idx.oid); 373close_istream(st); 374}else{ 375hashwrite(f, buf, datalen); 376free(buf); 377} 378 379return hdrlen + datalen; 380} 381 382/* Return 0 if we will bust the pack-size limit */ 383static off_t write_reuse_object(struct hashfile *f,struct object_entry *entry, 384unsigned long limit,int usable_delta) 385{ 386struct packed_git *p =IN_PACK(entry); 387struct pack_window *w_curs = NULL; 388struct revindex_entry *revidx; 389 off_t offset; 390enum object_type type =oe_type(entry); 391 off_t datalen; 392unsigned char header[MAX_PACK_OBJECT_HEADER], 393 dheader[MAX_PACK_OBJECT_HEADER]; 394unsigned hdrlen; 395const unsigned hashsz = the_hash_algo->rawsz; 396unsigned long entry_size =SIZE(entry); 397 398if(DELTA(entry)) 399 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 400 OBJ_OFS_DELTA : OBJ_REF_DELTA; 401 hdrlen =encode_in_pack_object_header(header,sizeof(header), 402 type, entry_size); 403 404 offset = entry->in_pack_offset; 405 revidx =find_pack_revindex(p, offset); 406 datalen = revidx[1].offset - offset; 407if(!pack_to_stdout && p->index_version >1&& 408check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 409error("bad packed object CRC for%s", 410oid_to_hex(&entry->idx.oid)); 411unuse_pack(&w_curs); 412returnwrite_no_reuse_object(f, entry, limit, usable_delta); 413} 414 415 offset += entry->in_pack_header_size; 416 datalen -= entry->in_pack_header_size; 417 418if(!pack_to_stdout && p->index_version ==1&& 419check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) { 420error("corrupt packed object for%s", 421oid_to_hex(&entry->idx.oid)); 422unuse_pack(&w_curs); 423returnwrite_no_reuse_object(f, entry, limit, usable_delta); 424} 425 426if(type == OBJ_OFS_DELTA) { 427 off_t ofs = entry->idx.offset -DELTA(entry)->idx.offset; 428unsigned pos =sizeof(dheader) -1; 429 dheader[pos] = ofs &127; 430while(ofs >>=7) 431 dheader[--pos] =128| (--ofs &127); 432if(limit && hdrlen +sizeof(dheader) - pos + datalen + hashsz >= limit) { 433unuse_pack(&w_curs); 434return0; 435} 436hashwrite(f, header, hdrlen); 437hashwrite(f, dheader + pos,sizeof(dheader) - pos); 438 hdrlen +=sizeof(dheader) - pos; 439 reused_delta++; 440}else if(type == OBJ_REF_DELTA) { 441if(limit && hdrlen + hashsz + datalen + hashsz >= limit) { 442unuse_pack(&w_curs); 443return0; 444} 445hashwrite(f, header, hdrlen); 446hashwrite(f,DELTA(entry)->idx.oid.hash, hashsz); 447 hdrlen += hashsz; 448 reused_delta++; 449}else{ 450if(limit && hdrlen + datalen + hashsz >= limit) { 451unuse_pack(&w_curs); 452return0; 453} 454hashwrite(f, header, hdrlen); 455} 456copy_pack_data(f, p, &w_curs, offset, datalen); 457unuse_pack(&w_curs); 458 reused++; 459return hdrlen + datalen; 460} 461 462/* Return 0 if we will bust the pack-size limit */ 463static off_t write_object(struct hashfile *f, 464struct object_entry *entry, 465 off_t write_offset) 466{ 467unsigned long limit; 468 off_t len; 469int usable_delta, to_reuse; 470 471if(!pack_to_stdout) 472crc32_begin(f); 473 474/* apply size limit if limited packsize and not first object */ 475if(!pack_size_limit || !nr_written) 476 limit =0; 477else if(pack_size_limit <= write_offset) 478/* 479 * the earlier object did not fit the limit; avoid 480 * mistaking this with unlimited (i.e. limit = 0). 481 */ 482 limit =1; 483else 484 limit = pack_size_limit - write_offset; 485 486if(!DELTA(entry)) 487 usable_delta =0;/* no delta */ 488else if(!pack_size_limit) 489 usable_delta =1;/* unlimited packfile */ 490else if(DELTA(entry)->idx.offset == (off_t)-1) 491 usable_delta =0;/* base was written to another pack */ 492else if(DELTA(entry)->idx.offset) 493 usable_delta =1;/* base already exists in this pack */ 494else 495 usable_delta =0;/* base could end up in another pack */ 496 497if(!reuse_object) 498 to_reuse =0;/* explicit */ 499else if(!IN_PACK(entry)) 500 to_reuse =0;/* can't reuse what we don't have */ 501else if(oe_type(entry) == OBJ_REF_DELTA || 502oe_type(entry) == OBJ_OFS_DELTA) 503/* check_object() decided it for us ... */ 504 to_reuse = usable_delta; 505/* ... but pack split may override that */ 506else if(oe_type(entry) != entry->in_pack_type) 507 to_reuse =0;/* pack has delta which is unusable */ 508else if(DELTA(entry)) 509 to_reuse =0;/* we want to pack afresh */ 510else 511 to_reuse =1;/* we have it in-pack undeltified, 512 * and we do not need to deltify it. 513 */ 514 515if(!to_reuse) 516 len =write_no_reuse_object(f, entry, limit, usable_delta); 517else 518 len =write_reuse_object(f, entry, limit, usable_delta); 519if(!len) 520return0; 521 522if(usable_delta) 523 written_delta++; 524 written++; 525if(!pack_to_stdout) 526 entry->idx.crc32 =crc32_end(f); 527return len; 528} 529 530enum write_one_status { 531 WRITE_ONE_SKIP = -1,/* already written */ 532 WRITE_ONE_BREAK =0,/* writing this will bust the limit; not written */ 533 WRITE_ONE_WRITTEN =1,/* normal */ 534 WRITE_ONE_RECURSIVE =2/* already scheduled to be written */ 535}; 536 537static enum write_one_status write_one(struct hashfile *f, 538struct object_entry *e, 539 off_t *offset) 540{ 541 off_t size; 542int recursing; 543 544/* 545 * we set offset to 1 (which is an impossible value) to mark 546 * the fact that this object is involved in "write its base 547 * first before writing a deltified object" recursion. 548 */ 549 recursing = (e->idx.offset ==1); 550if(recursing) { 551warning("recursive delta detected for object%s", 552oid_to_hex(&e->idx.oid)); 553return WRITE_ONE_RECURSIVE; 554}else if(e->idx.offset || e->preferred_base) { 555/* offset is non zero if object is written already. */ 556return WRITE_ONE_SKIP; 557} 558 559/* if we are deltified, write out base object first. */ 560if(DELTA(e)) { 561 e->idx.offset =1;/* now recurse */ 562switch(write_one(f,DELTA(e), offset)) { 563case WRITE_ONE_RECURSIVE: 564/* we cannot depend on this one */ 565SET_DELTA(e, NULL); 566break; 567default: 568break; 569case WRITE_ONE_BREAK: 570 e->idx.offset = recursing; 571return WRITE_ONE_BREAK; 572} 573} 574 575 e->idx.offset = *offset; 576 size =write_object(f, e, *offset); 577if(!size) { 578 e->idx.offset = recursing; 579return WRITE_ONE_BREAK; 580} 581 written_list[nr_written++] = &e->idx; 582 583/* make sure off_t is sufficiently large not to wrap */ 584if(signed_add_overflows(*offset, size)) 585die("pack too large for current definition of off_t"); 586*offset += size; 587return WRITE_ONE_WRITTEN; 588} 589 590static intmark_tagged(const char*path,const struct object_id *oid,int flag, 591void*cb_data) 592{ 593struct object_id peeled; 594struct object_entry *entry =packlist_find(&to_pack, oid->hash, NULL); 595 596if(entry) 597 entry->tagged =1; 598if(!peel_ref(path, &peeled)) { 599 entry =packlist_find(&to_pack, peeled.hash, NULL); 600if(entry) 601 entry->tagged =1; 602} 603return0; 604} 605 606staticinlinevoidadd_to_write_order(struct object_entry **wo, 607unsigned int*endp, 608struct object_entry *e) 609{ 610if(e->filled) 611return; 612 wo[(*endp)++] = e; 613 e->filled =1; 614} 615 616static voidadd_descendants_to_write_order(struct object_entry **wo, 617unsigned int*endp, 618struct object_entry *e) 619{ 620int add_to_order =1; 621while(e) { 622if(add_to_order) { 623struct object_entry *s; 624/* add this node... */ 625add_to_write_order(wo, endp, e); 626/* all its siblings... */ 627for(s =DELTA_SIBLING(e); s; s =DELTA_SIBLING(s)) { 628add_to_write_order(wo, endp, s); 629} 630} 631/* drop down a level to add left subtree nodes if possible */ 632if(DELTA_CHILD(e)) { 633 add_to_order =1; 634 e =DELTA_CHILD(e); 635}else{ 636 add_to_order =0; 637/* our sibling might have some children, it is next */ 638if(DELTA_SIBLING(e)) { 639 e =DELTA_SIBLING(e); 640continue; 641} 642/* go back to our parent node */ 643 e =DELTA(e); 644while(e && !DELTA_SIBLING(e)) { 645/* we're on the right side of a subtree, keep 646 * going up until we can go right again */ 647 e =DELTA(e); 648} 649if(!e) { 650/* done- we hit our original root node */ 651return; 652} 653/* pass it off to sibling at this level */ 654 e =DELTA_SIBLING(e); 655} 656}; 657} 658 659static voidadd_family_to_write_order(struct object_entry **wo, 660unsigned int*endp, 661struct object_entry *e) 662{ 663struct object_entry *root; 664 665for(root = e;DELTA(root); root =DELTA(root)) 666;/* nothing */ 667add_descendants_to_write_order(wo, endp, root); 668} 669 670static voidcompute_layer_order(struct object_entry **wo,unsigned int*wo_end) 671{ 672unsigned int i, last_untagged; 673struct object_entry *objects = to_pack.objects; 674 675for(i =0; i < to_pack.nr_objects; i++) { 676if(objects[i].tagged) 677break; 678add_to_write_order(wo, wo_end, &objects[i]); 679} 680 last_untagged = i; 681 682/* 683 * Then fill all the tagged tips. 684 */ 685for(; i < to_pack.nr_objects; i++) { 686if(objects[i].tagged) 687add_to_write_order(wo, wo_end, &objects[i]); 688} 689 690/* 691 * And then all remaining commits and tags. 692 */ 693for(i = last_untagged; i < to_pack.nr_objects; i++) { 694if(oe_type(&objects[i]) != OBJ_COMMIT && 695oe_type(&objects[i]) != OBJ_TAG) 696continue; 697add_to_write_order(wo, wo_end, &objects[i]); 698} 699 700/* 701 * And then all the trees. 702 */ 703for(i = last_untagged; i < to_pack.nr_objects; i++) { 704if(oe_type(&objects[i]) != OBJ_TREE) 705continue; 706add_to_write_order(wo, wo_end, &objects[i]); 707} 708 709/* 710 * Finally all the rest in really tight order 711 */ 712for(i = last_untagged; i < to_pack.nr_objects; i++) { 713if(!objects[i].filled) 714add_family_to_write_order(wo, wo_end, &objects[i]); 715} 716} 717 718static struct object_entry **compute_write_order(void) 719{ 720unsigned int i, wo_end; 721 722struct object_entry **wo; 723struct object_entry *objects = to_pack.objects; 724 725for(i =0; i < to_pack.nr_objects; i++) { 726 objects[i].tagged =0; 727 objects[i].filled =0; 728SET_DELTA_CHILD(&objects[i], NULL); 729SET_DELTA_SIBLING(&objects[i], NULL); 730} 731 732/* 733 * Fully connect delta_child/delta_sibling network. 734 * Make sure delta_sibling is sorted in the original 735 * recency order. 736 */ 737for(i = to_pack.nr_objects; i >0;) { 738struct object_entry *e = &objects[--i]; 739if(!DELTA(e)) 740continue; 741/* Mark me as the first child */ 742 e->delta_sibling_idx =DELTA(e)->delta_child_idx; 743SET_DELTA_CHILD(DELTA(e), e); 744} 745 746/* 747 * Mark objects that are at the tip of tags. 748 */ 749for_each_tag_ref(mark_tagged, NULL); 750 751/* 752 * Give the objects in the original recency order until 753 * we see a tagged tip. 754 */ 755ALLOC_ARRAY(wo, to_pack.nr_objects); 756 wo_end =0; 757 758compute_layer_order(wo, &wo_end); 759 760if(wo_end != to_pack.nr_objects) 761die("ordered%uobjects, expected %"PRIu32, wo_end, to_pack.nr_objects); 762 763return wo; 764} 765 766static off_t write_reused_pack(struct hashfile *f) 767{ 768unsigned char buffer[8192]; 769 off_t to_write, total; 770int fd; 771 772if(!is_pack_valid(reuse_packfile)) 773die("packfile is invalid:%s", reuse_packfile->pack_name); 774 775 fd =git_open(reuse_packfile->pack_name); 776if(fd <0) 777die_errno("unable to open packfile for reuse:%s", 778 reuse_packfile->pack_name); 779 780if(lseek(fd,sizeof(struct pack_header), SEEK_SET) == -1) 781die_errno("unable to seek in reused packfile"); 782 783if(reuse_packfile_offset <0) 784 reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz; 785 786 total = to_write = reuse_packfile_offset -sizeof(struct pack_header); 787 788while(to_write) { 789int read_pack =xread(fd, buffer,sizeof(buffer)); 790 791if(read_pack <=0) 792die_errno("unable to read from reused packfile"); 793 794if(read_pack > to_write) 795 read_pack = to_write; 796 797hashwrite(f, buffer, read_pack); 798 to_write -= read_pack; 799 800/* 801 * We don't know the actual number of objects written, 802 * only how many bytes written, how many bytes total, and 803 * how many objects total. So we can fake it by pretending all 804 * objects we are writing are the same size. This gives us a 805 * smooth progress meter, and at the end it matches the true 806 * answer. 807 */ 808 written = reuse_packfile_objects * 809(((double)(total - to_write)) / total); 810display_progress(progress_state, written); 811} 812 813close(fd); 814 written = reuse_packfile_objects; 815display_progress(progress_state, written); 816return reuse_packfile_offset -sizeof(struct pack_header); 817} 818 819static const char no_split_warning[] =N_( 820"disabling bitmap writing, packs are split due to pack.packSizeLimit" 821); 822 823static voidwrite_pack_file(void) 824{ 825uint32_t i =0, j; 826struct hashfile *f; 827 off_t offset; 828uint32_t nr_remaining = nr_result; 829time_t last_mtime =0; 830struct object_entry **write_order; 831 832if(progress > pack_to_stdout) 833 progress_state =start_progress(_("Writing objects"), nr_result); 834ALLOC_ARRAY(written_list, to_pack.nr_objects); 835 write_order =compute_write_order(); 836 837do{ 838struct object_id oid; 839char*pack_tmp_name = NULL; 840 841if(pack_to_stdout) 842 f =hashfd_throughput(1,"<stdout>", progress_state); 843else 844 f =create_tmp_packfile(&pack_tmp_name); 845 846 offset =write_pack_header(f, nr_remaining); 847 848if(reuse_packfile) { 849 off_t packfile_size; 850assert(pack_to_stdout); 851 852 packfile_size =write_reused_pack(f); 853 offset += packfile_size; 854} 855 856 nr_written =0; 857for(; i < to_pack.nr_objects; i++) { 858struct object_entry *e = write_order[i]; 859if(write_one(f, e, &offset) == WRITE_ONE_BREAK) 860break; 861display_progress(progress_state, written); 862} 863 864/* 865 * Did we write the wrong # entries in the header? 866 * If so, rewrite it like in fast-import 867 */ 868if(pack_to_stdout) { 869finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); 870}else if(nr_written == nr_remaining) { 871finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); 872}else{ 873int fd =finalize_hashfile(f, oid.hash,0); 874fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, 875 nr_written, oid.hash, offset); 876close(fd); 877if(write_bitmap_index) { 878warning(_(no_split_warning)); 879 write_bitmap_index =0; 880} 881} 882 883if(!pack_to_stdout) { 884struct stat st; 885struct strbuf tmpname = STRBUF_INIT; 886 887/* 888 * Packs are runtime accessed in their mtime 889 * order since newer packs are more likely to contain 890 * younger objects. So if we are creating multiple 891 * packs then we should modify the mtime of later ones 892 * to preserve this property. 893 */ 894if(stat(pack_tmp_name, &st) <0) { 895warning_errno("failed to stat%s", pack_tmp_name); 896}else if(!last_mtime) { 897 last_mtime = st.st_mtime; 898}else{ 899struct utimbuf utb; 900 utb.actime = st.st_atime; 901 utb.modtime = --last_mtime; 902if(utime(pack_tmp_name, &utb) <0) 903warning_errno("failed utime() on%s", pack_tmp_name); 904} 905 906strbuf_addf(&tmpname,"%s-", base_name); 907 908if(write_bitmap_index) { 909bitmap_writer_set_checksum(oid.hash); 910bitmap_writer_build_type_index( 911&to_pack, written_list, nr_written); 912} 913 914finish_tmp_packfile(&tmpname, pack_tmp_name, 915 written_list, nr_written, 916&pack_idx_opts, oid.hash); 917 918if(write_bitmap_index) { 919strbuf_addf(&tmpname,"%s.bitmap",oid_to_hex(&oid)); 920 921stop_progress(&progress_state); 922 923bitmap_writer_show_progress(progress); 924bitmap_writer_reuse_bitmaps(&to_pack); 925bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 926bitmap_writer_build(&to_pack); 927bitmap_writer_finish(written_list, nr_written, 928 tmpname.buf, write_bitmap_options); 929 write_bitmap_index =0; 930} 931 932strbuf_release(&tmpname); 933free(pack_tmp_name); 934puts(oid_to_hex(&oid)); 935} 936 937/* mark written objects as written to previous pack */ 938for(j =0; j < nr_written; j++) { 939 written_list[j]->offset = (off_t)-1; 940} 941 nr_remaining -= nr_written; 942}while(nr_remaining && i < to_pack.nr_objects); 943 944free(written_list); 945free(write_order); 946stop_progress(&progress_state); 947if(written != nr_result) 948die("wrote %"PRIu32" objects while expecting %"PRIu32, 949 written, nr_result); 950} 951 952static intno_try_delta(const char*path) 953{ 954static struct attr_check *check; 955 956if(!check) 957 check =attr_check_initl("delta", NULL); 958if(git_check_attr(path, check)) 959return0; 960if(ATTR_FALSE(check->items[0].value)) 961return1; 962return0; 963} 964 965/* 966 * When adding an object, check whether we have already added it 967 * to our packing list. If so, we can skip. However, if we are 968 * being asked to excludei t, but the previous mention was to include 969 * it, make sure to adjust its flags and tweak our numbers accordingly. 970 * 971 * As an optimization, we pass out the index position where we would have 972 * found the item, since that saves us from having to look it up again a 973 * few lines later when we want to add the new entry. 974 */ 975static inthave_duplicate_entry(const struct object_id *oid, 976int exclude, 977uint32_t*index_pos) 978{ 979struct object_entry *entry; 980 981 entry =packlist_find(&to_pack, oid->hash, index_pos); 982if(!entry) 983return0; 984 985if(exclude) { 986if(!entry->preferred_base) 987 nr_result--; 988 entry->preferred_base =1; 989} 990 991return1; 992} 993 994static intwant_found_object(int exclude,struct packed_git *p) 995{ 996if(exclude) 997return1; 998if(incremental) 999return0;10001001/*1002 * When asked to do --local (do not include an object that appears in a1003 * pack we borrow from elsewhere) or --honor-pack-keep (do not include1004 * an object that appears in a pack marked with .keep), finding a pack1005 * that matches the criteria is sufficient for us to decide to omit it.1006 * However, even if this pack does not satisfy the criteria, we need to1007 * make sure no copy of this object appears in _any_ pack that makes us1008 * to omit the object, so we need to check all the packs.1009 *1010 * We can however first check whether these options can possible matter;1011 * if they do not matter we know we want the object in generated pack.1012 * Otherwise, we signal "-1" at the end to tell the caller that we do1013 * not know either way, and it needs to check more packs.1014 */1015if(!ignore_packed_keep_on_disk &&1016!ignore_packed_keep_in_core &&1017(!local || !have_non_local_packs))1018return1;10191020if(local && !p->pack_local)1021return0;1022if(p->pack_local &&1023((ignore_packed_keep_on_disk && p->pack_keep) ||1024(ignore_packed_keep_in_core && p->pack_keep_in_core)))1025return0;10261027/* we don't know yet; keep looking for more packs */1028return-1;1029}10301031/*1032 * Check whether we want the object in the pack (e.g., we do not want1033 * objects found in non-local stores if the "--local" option was used).1034 *1035 * If the caller already knows an existing pack it wants to take the object1036 * from, that is passed in *found_pack and *found_offset; otherwise this1037 * function finds if there is any pack that has the object and returns the pack1038 * and its offset in these variables.1039 */1040static intwant_object_in_pack(const struct object_id *oid,1041int exclude,1042struct packed_git **found_pack,1043 off_t *found_offset)1044{1045int want;1046struct list_head *pos;10471048if(!exclude && local &&has_loose_object_nonlocal(oid))1049return0;10501051/*1052 * If we already know the pack object lives in, start checks from that1053 * pack - in the usual case when neither --local was given nor .keep files1054 * are present we will determine the answer right now.1055 */1056if(*found_pack) {1057 want =want_found_object(exclude, *found_pack);1058if(want != -1)1059return want;1060}1061list_for_each(pos,get_packed_git_mru(the_repository)) {1062struct packed_git *p =list_entry(pos,struct packed_git, mru);1063 off_t offset;10641065if(p == *found_pack)1066 offset = *found_offset;1067else1068 offset =find_pack_entry_one(oid->hash, p);10691070if(offset) {1071if(!*found_pack) {1072if(!is_pack_valid(p))1073continue;1074*found_offset = offset;1075*found_pack = p;1076}1077 want =want_found_object(exclude, p);1078if(!exclude && want >0)1079list_move(&p->mru,1080get_packed_git_mru(the_repository));1081if(want != -1)1082return want;1083}1084}10851086return1;1087}10881089static voidcreate_object_entry(const struct object_id *oid,1090enum object_type type,1091uint32_t hash,1092int exclude,1093int no_try_delta,1094uint32_t index_pos,1095struct packed_git *found_pack,1096 off_t found_offset)1097{1098struct object_entry *entry;10991100 entry =packlist_alloc(&to_pack, oid->hash, index_pos);1101 entry->hash = hash;1102oe_set_type(entry, type);1103if(exclude)1104 entry->preferred_base =1;1105else1106 nr_result++;1107if(found_pack) {1108oe_set_in_pack(&to_pack, entry, found_pack);1109 entry->in_pack_offset = found_offset;1110}11111112 entry->no_try_delta = no_try_delta;1113}11141115static const char no_closure_warning[] =N_(1116"disabling bitmap writing, as some objects are not being packed"1117);11181119static intadd_object_entry(const struct object_id *oid,enum object_type type,1120const char*name,int exclude)1121{1122struct packed_git *found_pack = NULL;1123 off_t found_offset =0;1124uint32_t index_pos;11251126display_progress(progress_state, ++nr_seen);11271128if(have_duplicate_entry(oid, exclude, &index_pos))1129return0;11301131if(!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {1132/* The pack is missing an object, so it will not have closure */1133if(write_bitmap_index) {1134warning(_(no_closure_warning));1135 write_bitmap_index =0;1136}1137return0;1138}11391140create_object_entry(oid, type,pack_name_hash(name),1141 exclude, name &&no_try_delta(name),1142 index_pos, found_pack, found_offset);1143return1;1144}11451146static intadd_object_entry_from_bitmap(const struct object_id *oid,1147enum object_type type,1148int flags,uint32_t name_hash,1149struct packed_git *pack, off_t offset)1150{1151uint32_t index_pos;11521153display_progress(progress_state, ++nr_seen);11541155if(have_duplicate_entry(oid,0, &index_pos))1156return0;11571158if(!want_object_in_pack(oid,0, &pack, &offset))1159return0;11601161create_object_entry(oid, type, name_hash,0,0, index_pos, pack, offset);1162return1;1163}11641165struct pbase_tree_cache {1166struct object_id oid;1167int ref;1168int temporary;1169void*tree_data;1170unsigned long tree_size;1171};11721173static struct pbase_tree_cache *(pbase_tree_cache[256]);1174static intpbase_tree_cache_ix(const struct object_id *oid)1175{1176return oid->hash[0] %ARRAY_SIZE(pbase_tree_cache);1177}1178static intpbase_tree_cache_ix_incr(int ix)1179{1180return(ix+1) %ARRAY_SIZE(pbase_tree_cache);1181}11821183static struct pbase_tree {1184struct pbase_tree *next;1185/* This is a phony "cache" entry; we are not1186 * going to evict it or find it through _get()1187 * mechanism -- this is for the toplevel node that1188 * would almost always change with any commit.1189 */1190struct pbase_tree_cache pcache;1191} *pbase_tree;11921193static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)1194{1195struct pbase_tree_cache *ent, *nent;1196void*data;1197unsigned long size;1198enum object_type type;1199int neigh;1200int my_ix =pbase_tree_cache_ix(oid);1201int available_ix = -1;12021203/* pbase-tree-cache acts as a limited hashtable.1204 * your object will be found at your index or within a few1205 * slots after that slot if it is cached.1206 */1207for(neigh =0; neigh <8; neigh++) {1208 ent = pbase_tree_cache[my_ix];1209if(ent && !oidcmp(&ent->oid, oid)) {1210 ent->ref++;1211return ent;1212}1213else if(((available_ix <0) && (!ent || !ent->ref)) ||1214((0<= available_ix) &&1215(!ent && pbase_tree_cache[available_ix])))1216 available_ix = my_ix;1217if(!ent)1218break;1219 my_ix =pbase_tree_cache_ix_incr(my_ix);1220}12211222/* Did not find one. Either we got a bogus request or1223 * we need to read and perhaps cache.1224 */1225 data =read_object_file(oid, &type, &size);1226if(!data)1227return NULL;1228if(type != OBJ_TREE) {1229free(data);1230return NULL;1231}12321233/* We need to either cache or return a throwaway copy */12341235if(available_ix <0)1236 ent = NULL;1237else{1238 ent = pbase_tree_cache[available_ix];1239 my_ix = available_ix;1240}12411242if(!ent) {1243 nent =xmalloc(sizeof(*nent));1244 nent->temporary = (available_ix <0);1245}1246else{1247/* evict and reuse */1248free(ent->tree_data);1249 nent = ent;1250}1251oidcpy(&nent->oid, oid);1252 nent->tree_data = data;1253 nent->tree_size = size;1254 nent->ref =1;1255if(!nent->temporary)1256 pbase_tree_cache[my_ix] = nent;1257return nent;1258}12591260static voidpbase_tree_put(struct pbase_tree_cache *cache)1261{1262if(!cache->temporary) {1263 cache->ref--;1264return;1265}1266free(cache->tree_data);1267free(cache);1268}12691270static intname_cmp_len(const char*name)1271{1272int i;1273for(i =0; name[i] && name[i] !='\n'&& name[i] !='/'; i++)1274;1275return i;1276}12771278static voidadd_pbase_object(struct tree_desc *tree,1279const char*name,1280int cmplen,1281const char*fullname)1282{1283struct name_entry entry;1284int cmp;12851286while(tree_entry(tree,&entry)) {1287if(S_ISGITLINK(entry.mode))1288continue;1289 cmp =tree_entry_len(&entry) != cmplen ?1:1290memcmp(name, entry.path, cmplen);1291if(cmp >0)1292continue;1293if(cmp <0)1294return;1295if(name[cmplen] !='/') {1296add_object_entry(entry.oid,1297object_type(entry.mode),1298 fullname,1);1299return;1300}1301if(S_ISDIR(entry.mode)) {1302struct tree_desc sub;1303struct pbase_tree_cache *tree;1304const char*down = name+cmplen+1;1305int downlen =name_cmp_len(down);13061307 tree =pbase_tree_get(entry.oid);1308if(!tree)1309return;1310init_tree_desc(&sub, tree->tree_data, tree->tree_size);13111312add_pbase_object(&sub, down, downlen, fullname);1313pbase_tree_put(tree);1314}1315}1316}13171318static unsigned*done_pbase_paths;1319static int done_pbase_paths_num;1320static int done_pbase_paths_alloc;1321static intdone_pbase_path_pos(unsigned hash)1322{1323int lo =0;1324int hi = done_pbase_paths_num;1325while(lo < hi) {1326int mi = lo + (hi - lo) /2;1327if(done_pbase_paths[mi] == hash)1328return mi;1329if(done_pbase_paths[mi] < hash)1330 hi = mi;1331else1332 lo = mi +1;1333}1334return-lo-1;1335}13361337static intcheck_pbase_path(unsigned hash)1338{1339int pos =done_pbase_path_pos(hash);1340if(0<= pos)1341return1;1342 pos = -pos -1;1343ALLOC_GROW(done_pbase_paths,1344 done_pbase_paths_num +1,1345 done_pbase_paths_alloc);1346 done_pbase_paths_num++;1347if(pos < done_pbase_paths_num)1348MOVE_ARRAY(done_pbase_paths + pos +1, done_pbase_paths + pos,1349 done_pbase_paths_num - pos -1);1350 done_pbase_paths[pos] = hash;1351return0;1352}13531354static voidadd_preferred_base_object(const char*name)1355{1356struct pbase_tree *it;1357int cmplen;1358unsigned hash =pack_name_hash(name);13591360if(!num_preferred_base ||check_pbase_path(hash))1361return;13621363 cmplen =name_cmp_len(name);1364for(it = pbase_tree; it; it = it->next) {1365if(cmplen ==0) {1366add_object_entry(&it->pcache.oid, OBJ_TREE, NULL,1);1367}1368else{1369struct tree_desc tree;1370init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1371add_pbase_object(&tree, name, cmplen, name);1372}1373}1374}13751376static voidadd_preferred_base(struct object_id *oid)1377{1378struct pbase_tree *it;1379void*data;1380unsigned long size;1381struct object_id tree_oid;13821383if(window <= num_preferred_base++)1384return;13851386 data =read_object_with_reference(oid, tree_type, &size, &tree_oid);1387if(!data)1388return;13891390for(it = pbase_tree; it; it = it->next) {1391if(!oidcmp(&it->pcache.oid, &tree_oid)) {1392free(data);1393return;1394}1395}13961397 it =xcalloc(1,sizeof(*it));1398 it->next = pbase_tree;1399 pbase_tree = it;14001401oidcpy(&it->pcache.oid, &tree_oid);1402 it->pcache.tree_data = data;1403 it->pcache.tree_size = size;1404}14051406static voidcleanup_preferred_base(void)1407{1408struct pbase_tree *it;1409unsigned i;14101411 it = pbase_tree;1412 pbase_tree = NULL;1413while(it) {1414struct pbase_tree *tmp = it;1415 it = tmp->next;1416free(tmp->pcache.tree_data);1417free(tmp);1418}14191420for(i =0; i <ARRAY_SIZE(pbase_tree_cache); i++) {1421if(!pbase_tree_cache[i])1422continue;1423free(pbase_tree_cache[i]->tree_data);1424FREE_AND_NULL(pbase_tree_cache[i]);1425}14261427FREE_AND_NULL(done_pbase_paths);1428 done_pbase_paths_num = done_pbase_paths_alloc =0;1429}14301431static voidcheck_object(struct object_entry *entry)1432{1433unsigned long canonical_size;14341435if(IN_PACK(entry)) {1436struct packed_git *p =IN_PACK(entry);1437struct pack_window *w_curs = NULL;1438const unsigned char*base_ref = NULL;1439struct object_entry *base_entry;1440unsigned long used, used_0;1441unsigned long avail;1442 off_t ofs;1443unsigned char*buf, c;1444enum object_type type;1445unsigned long in_pack_size;14461447 buf =use_pack(p, &w_curs, entry->in_pack_offset, &avail);14481449/*1450 * We want in_pack_type even if we do not reuse delta1451 * since non-delta representations could still be reused.1452 */1453 used =unpack_object_header_buffer(buf, avail,1454&type,1455&in_pack_size);1456if(used ==0)1457goto give_up;14581459if(type <0)1460BUG("invalid type%d", type);1461 entry->in_pack_type = type;14621463/*1464 * Determine if this is a delta and if so whether we can1465 * reuse it or not. Otherwise let's find out as cheaply as1466 * possible what the actual type and size for this object is.1467 */1468switch(entry->in_pack_type) {1469default:1470/* Not a delta hence we've already got all we need. */1471oe_set_type(entry, entry->in_pack_type);1472SET_SIZE(entry, in_pack_size);1473 entry->in_pack_header_size = used;1474if(oe_type(entry) < OBJ_COMMIT ||oe_type(entry) > OBJ_BLOB)1475goto give_up;1476unuse_pack(&w_curs);1477return;1478case OBJ_REF_DELTA:1479if(reuse_delta && !entry->preferred_base)1480 base_ref =use_pack(p, &w_curs,1481 entry->in_pack_offset + used, NULL);1482 entry->in_pack_header_size = used + the_hash_algo->rawsz;1483break;1484case OBJ_OFS_DELTA:1485 buf =use_pack(p, &w_curs,1486 entry->in_pack_offset + used, NULL);1487 used_0 =0;1488 c = buf[used_0++];1489 ofs = c &127;1490while(c &128) {1491 ofs +=1;1492if(!ofs ||MSB(ofs,7)) {1493error("delta base offset overflow in pack for%s",1494oid_to_hex(&entry->idx.oid));1495goto give_up;1496}1497 c = buf[used_0++];1498 ofs = (ofs <<7) + (c &127);1499}1500 ofs = entry->in_pack_offset - ofs;1501if(ofs <=0|| ofs >= entry->in_pack_offset) {1502error("delta base offset out of bound for%s",1503oid_to_hex(&entry->idx.oid));1504goto give_up;1505}1506if(reuse_delta && !entry->preferred_base) {1507struct revindex_entry *revidx;1508 revidx =find_pack_revindex(p, ofs);1509if(!revidx)1510goto give_up;1511 base_ref =nth_packed_object_sha1(p, revidx->nr);1512}1513 entry->in_pack_header_size = used + used_0;1514break;1515}15161517if(base_ref && (base_entry =packlist_find(&to_pack, base_ref, NULL))) {1518/*1519 * If base_ref was set above that means we wish to1520 * reuse delta data, and we even found that base1521 * in the list of objects we want to pack. Goodie!1522 *1523 * Depth value does not matter - find_deltas() will1524 * never consider reused delta as the base object to1525 * deltify other objects against, in order to avoid1526 * circular deltas.1527 */1528oe_set_type(entry, entry->in_pack_type);1529SET_SIZE(entry, in_pack_size);/* delta size */1530SET_DELTA(entry, base_entry);1531SET_DELTA_SIZE(entry, in_pack_size);1532 entry->delta_sibling_idx = base_entry->delta_child_idx;1533SET_DELTA_CHILD(base_entry, entry);1534unuse_pack(&w_curs);1535return;1536}15371538if(oe_type(entry)) {1539 off_t delta_pos;15401541/*1542 * This must be a delta and we already know what the1543 * final object type is. Let's extract the actual1544 * object size from the delta header.1545 */1546 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;1547 canonical_size =get_size_from_delta(p, &w_curs, delta_pos);1548if(canonical_size ==0)1549goto give_up;1550SET_SIZE(entry, canonical_size);1551unuse_pack(&w_curs);1552return;1553}15541555/*1556 * No choice but to fall back to the recursive delta walk1557 * with sha1_object_info() to find about the object type1558 * at this point...1559 */1560 give_up:1561unuse_pack(&w_curs);1562}15631564oe_set_type(entry,1565oid_object_info(the_repository, &entry->idx.oid, &canonical_size));1566if(entry->type_valid) {1567SET_SIZE(entry, canonical_size);1568}else{1569/*1570 * Bad object type is checked in prepare_pack(). This is1571 * to permit a missing preferred base object to be ignored1572 * as a preferred base. Doing so can result in a larger1573 * pack file, but the transfer will still take place.1574 */1575}1576}15771578static intpack_offset_sort(const void*_a,const void*_b)1579{1580const struct object_entry *a = *(struct object_entry **)_a;1581const struct object_entry *b = *(struct object_entry **)_b;1582const struct packed_git *a_in_pack =IN_PACK(a);1583const struct packed_git *b_in_pack =IN_PACK(b);15841585/* avoid filesystem trashing with loose objects */1586if(!a_in_pack && !b_in_pack)1587returnoidcmp(&a->idx.oid, &b->idx.oid);15881589if(a_in_pack < b_in_pack)1590return-1;1591if(a_in_pack > b_in_pack)1592return1;1593return a->in_pack_offset < b->in_pack_offset ? -1:1594(a->in_pack_offset > b->in_pack_offset);1595}15961597/*1598 * Drop an on-disk delta we were planning to reuse. Naively, this would1599 * just involve blanking out the "delta" field, but we have to deal1600 * with some extra book-keeping:1601 *1602 * 1. Removing ourselves from the delta_sibling linked list.1603 *1604 * 2. Updating our size/type to the non-delta representation. These were1605 * either not recorded initially (size) or overwritten with the delta type1606 * (type) when check_object() decided to reuse the delta.1607 *1608 * 3. Resetting our delta depth, as we are now a base object.1609 */1610static voiddrop_reused_delta(struct object_entry *entry)1611{1612unsigned*idx = &to_pack.objects[entry->delta_idx -1].delta_child_idx;1613struct object_info oi = OBJECT_INFO_INIT;1614enum object_type type;1615unsigned long size;16161617while(*idx) {1618struct object_entry *oe = &to_pack.objects[*idx -1];16191620if(oe == entry)1621*idx = oe->delta_sibling_idx;1622else1623 idx = &oe->delta_sibling_idx;1624}1625SET_DELTA(entry, NULL);1626 entry->depth =0;16271628 oi.sizep = &size;1629 oi.typep = &type;1630if(packed_object_info(the_repository,IN_PACK(entry), entry->in_pack_offset, &oi) <0) {1631/*1632 * We failed to get the info from this pack for some reason;1633 * fall back to sha1_object_info, which may find another copy.1634 * And if that fails, the error will be recorded in oe_type(entry)1635 * and dealt with in prepare_pack().1636 */1637oe_set_type(entry,1638oid_object_info(the_repository, &entry->idx.oid, &size));1639}else{1640oe_set_type(entry, type);1641}1642SET_SIZE(entry, size);1643}16441645/*1646 * Follow the chain of deltas from this entry onward, throwing away any links1647 * that cause us to hit a cycle (as determined by the DFS state flags in1648 * the entries).1649 *1650 * We also detect too-long reused chains that would violate our --depth1651 * limit.1652 */1653static voidbreak_delta_chains(struct object_entry *entry)1654{1655/*1656 * The actual depth of each object we will write is stored as an int,1657 * as it cannot exceed our int "depth" limit. But before we break1658 * changes based no that limit, we may potentially go as deep as the1659 * number of objects, which is elsewhere bounded to a uint32_t.1660 */1661uint32_t total_depth;1662struct object_entry *cur, *next;16631664for(cur = entry, total_depth =0;1665 cur;1666 cur =DELTA(cur), total_depth++) {1667if(cur->dfs_state == DFS_DONE) {1668/*1669 * We've already seen this object and know it isn't1670 * part of a cycle. We do need to append its depth1671 * to our count.1672 */1673 total_depth += cur->depth;1674break;1675}16761677/*1678 * We break cycles before looping, so an ACTIVE state (or any1679 * other cruft which made its way into the state variable)1680 * is a bug.1681 */1682if(cur->dfs_state != DFS_NONE)1683BUG("confusing delta dfs state in first pass:%d",1684 cur->dfs_state);16851686/*1687 * Now we know this is the first time we've seen the object. If1688 * it's not a delta, we're done traversing, but we'll mark it1689 * done to save time on future traversals.1690 */1691if(!DELTA(cur)) {1692 cur->dfs_state = DFS_DONE;1693break;1694}16951696/*1697 * Mark ourselves as active and see if the next step causes1698 * us to cycle to another active object. It's important to do1699 * this _before_ we loop, because it impacts where we make the1700 * cut, and thus how our total_depth counter works.1701 * E.g., We may see a partial loop like:1702 *1703 * A -> B -> C -> D -> B1704 *1705 * Cutting B->C breaks the cycle. But now the depth of A is1706 * only 1, and our total_depth counter is at 3. The size of the1707 * error is always one less than the size of the cycle we1708 * broke. Commits C and D were "lost" from A's chain.1709 *1710 * If we instead cut D->B, then the depth of A is correct at 3.1711 * We keep all commits in the chain that we examined.1712 */1713 cur->dfs_state = DFS_ACTIVE;1714if(DELTA(cur)->dfs_state == DFS_ACTIVE) {1715drop_reused_delta(cur);1716 cur->dfs_state = DFS_DONE;1717break;1718}1719}17201721/*1722 * And now that we've gone all the way to the bottom of the chain, we1723 * need to clear the active flags and set the depth fields as1724 * appropriate. Unlike the loop above, which can quit when it drops a1725 * delta, we need to keep going to look for more depth cuts. So we need1726 * an extra "next" pointer to keep going after we reset cur->delta.1727 */1728for(cur = entry; cur; cur = next) {1729 next =DELTA(cur);17301731/*1732 * We should have a chain of zero or more ACTIVE states down to1733 * a final DONE. We can quit after the DONE, because either it1734 * has no bases, or we've already handled them in a previous1735 * call.1736 */1737if(cur->dfs_state == DFS_DONE)1738break;1739else if(cur->dfs_state != DFS_ACTIVE)1740BUG("confusing delta dfs state in second pass:%d",1741 cur->dfs_state);17421743/*1744 * If the total_depth is more than depth, then we need to snip1745 * the chain into two or more smaller chains that don't exceed1746 * the maximum depth. Most of the resulting chains will contain1747 * (depth + 1) entries (i.e., depth deltas plus one base), and1748 * the last chain (i.e., the one containing entry) will contain1749 * whatever entries are left over, namely1750 * (total_depth % (depth + 1)) of them.1751 *1752 * Since we are iterating towards decreasing depth, we need to1753 * decrement total_depth as we go, and we need to write to the1754 * entry what its final depth will be after all of the1755 * snipping. Since we're snipping into chains of length (depth1756 * + 1) entries, the final depth of an entry will be its1757 * original depth modulo (depth + 1). Any time we encounter an1758 * entry whose final depth is supposed to be zero, we snip it1759 * from its delta base, thereby making it so.1760 */1761 cur->depth = (total_depth--) % (depth +1);1762if(!cur->depth)1763drop_reused_delta(cur);17641765 cur->dfs_state = DFS_DONE;1766}1767}17681769static voidget_object_details(void)1770{1771uint32_t i;1772struct object_entry **sorted_by_offset;17731774if(progress)1775 progress_state =start_progress(_("Counting objects"),1776 to_pack.nr_objects);17771778 sorted_by_offset =xcalloc(to_pack.nr_objects,sizeof(struct object_entry *));1779for(i =0; i < to_pack.nr_objects; i++)1780 sorted_by_offset[i] = to_pack.objects + i;1781QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17821783for(i =0; i < to_pack.nr_objects; i++) {1784struct object_entry *entry = sorted_by_offset[i];1785check_object(entry);1786if(entry->type_valid &&1787oe_size_greater_than(&to_pack, entry, big_file_threshold))1788 entry->no_try_delta =1;1789display_progress(progress_state, i +1);1790}1791stop_progress(&progress_state);17921793/*1794 * This must happen in a second pass, since we rely on the delta1795 * information for the whole list being completed.1796 */1797for(i =0; i < to_pack.nr_objects; i++)1798break_delta_chains(&to_pack.objects[i]);17991800free(sorted_by_offset);1801}18021803/*1804 * We search for deltas in a list sorted by type, by filename hash, and then1805 * by size, so that we see progressively smaller and smaller files.1806 * That's because we prefer deltas to be from the bigger file1807 * to the smaller -- deletes are potentially cheaper, but perhaps1808 * more importantly, the bigger file is likely the more recent1809 * one. The deepest deltas are therefore the oldest objects which are1810 * less susceptible to be accessed often.1811 */1812static inttype_size_sort(const void*_a,const void*_b)1813{1814const struct object_entry *a = *(struct object_entry **)_a;1815const struct object_entry *b = *(struct object_entry **)_b;1816enum object_type a_type =oe_type(a);1817enum object_type b_type =oe_type(b);1818unsigned long a_size =SIZE(a);1819unsigned long b_size =SIZE(b);18201821if(a_type > b_type)1822return-1;1823if(a_type < b_type)1824return1;1825if(a->hash > b->hash)1826return-1;1827if(a->hash < b->hash)1828return1;1829if(a->preferred_base > b->preferred_base)1830return-1;1831if(a->preferred_base < b->preferred_base)1832return1;1833if(a_size > b_size)1834return-1;1835if(a_size < b_size)1836return1;1837return a < b ? -1: (a > b);/* newest first */1838}18391840struct unpacked {1841struct object_entry *entry;1842void*data;1843struct delta_index *index;1844unsigned depth;1845};18461847static intdelta_cacheable(unsigned long src_size,unsigned long trg_size,1848unsigned long delta_size)1849{1850if(max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1851return0;18521853if(delta_size < cache_max_small_delta_size)1854return1;18551856/* cache delta, if objects are large enough compared to delta size */1857if((src_size >>20) + (trg_size >>21) > (delta_size >>10))1858return1;18591860return0;1861}18621863#ifndef NO_PTHREADS18641865static pthread_mutex_t read_mutex;1866#define read_lock() pthread_mutex_lock(&read_mutex)1867#define read_unlock() pthread_mutex_unlock(&read_mutex)18681869static pthread_mutex_t cache_mutex;1870#define cache_lock() pthread_mutex_lock(&cache_mutex)1871#define cache_unlock() pthread_mutex_unlock(&cache_mutex)18721873static pthread_mutex_t progress_mutex;1874#define progress_lock() pthread_mutex_lock(&progress_mutex)1875#define progress_unlock() pthread_mutex_unlock(&progress_mutex)18761877#else18781879#define read_lock() (void)01880#define read_unlock() (void)01881#define cache_lock() (void)01882#define cache_unlock() (void)01883#define progress_lock() (void)01884#define progress_unlock() (void)018851886#endif18871888/*1889 * Return the size of the object without doing any delta1890 * reconstruction (so non-deltas are true object sizes, but deltas1891 * return the size of the delta data).1892 */1893unsigned longoe_get_size_slow(struct packing_data *pack,1894const struct object_entry *e)1895{1896struct packed_git *p;1897struct pack_window *w_curs;1898unsigned char*buf;1899enum object_type type;1900unsigned long used, avail, size;19011902if(e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {1903read_lock();1904if(oid_object_info(the_repository, &e->idx.oid, &size) <0)1905die(_("unable to get size of%s"),1906oid_to_hex(&e->idx.oid));1907read_unlock();1908return size;1909}19101911 p =oe_in_pack(pack, e);1912if(!p)1913BUG("when e->type is a delta, it must belong to a pack");19141915read_lock();1916 w_curs = NULL;1917 buf =use_pack(p, &w_curs, e->in_pack_offset, &avail);1918 used =unpack_object_header_buffer(buf, avail, &type, &size);1919if(used ==0)1920die(_("unable to parse object header of%s"),1921oid_to_hex(&e->idx.oid));19221923unuse_pack(&w_curs);1924read_unlock();1925return size;1926}19271928static inttry_delta(struct unpacked *trg,struct unpacked *src,1929unsigned max_depth,unsigned long*mem_usage)1930{1931struct object_entry *trg_entry = trg->entry;1932struct object_entry *src_entry = src->entry;1933unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1934unsigned ref_depth;1935enum object_type type;1936void*delta_buf;19371938/* Don't bother doing diffs between different types */1939if(oe_type(trg_entry) !=oe_type(src_entry))1940return-1;19411942/*1943 * We do not bother to try a delta that we discarded on an1944 * earlier try, but only when reusing delta data. Note that1945 * src_entry that is marked as the preferred_base should always1946 * be considered, as even if we produce a suboptimal delta against1947 * it, we will still save the transfer cost, as we already know1948 * the other side has it and we won't send src_entry at all.1949 */1950if(reuse_delta &&IN_PACK(trg_entry) &&1951IN_PACK(trg_entry) ==IN_PACK(src_entry) &&1952!src_entry->preferred_base &&1953 trg_entry->in_pack_type != OBJ_REF_DELTA &&1954 trg_entry->in_pack_type != OBJ_OFS_DELTA)1955return0;19561957/* Let's not bust the allowed depth. */1958if(src->depth >= max_depth)1959return0;19601961/* Now some size filtering heuristics. */1962 trg_size =SIZE(trg_entry);1963if(!DELTA(trg_entry)) {1964 max_size = trg_size/2- the_hash_algo->rawsz;1965 ref_depth =1;1966}else{1967 max_size =DELTA_SIZE(trg_entry);1968 ref_depth = trg->depth;1969}1970 max_size = (uint64_t)max_size * (max_depth - src->depth) /1971(max_depth - ref_depth +1);1972if(max_size ==0)1973return0;1974 src_size =SIZE(src_entry);1975 sizediff = src_size < trg_size ? trg_size - src_size :0;1976if(sizediff >= max_size)1977return0;1978if(trg_size < src_size /32)1979return0;19801981/* Load data if not already done */1982if(!trg->data) {1983read_lock();1984 trg->data =read_object_file(&trg_entry->idx.oid, &type, &sz);1985read_unlock();1986if(!trg->data)1987die("object%scannot be read",1988oid_to_hex(&trg_entry->idx.oid));1989if(sz != trg_size)1990die("object%sinconsistent object length (%lu vs%lu)",1991oid_to_hex(&trg_entry->idx.oid), sz,1992 trg_size);1993*mem_usage += sz;1994}1995if(!src->data) {1996read_lock();1997 src->data =read_object_file(&src_entry->idx.oid, &type, &sz);1998read_unlock();1999if(!src->data) {2000if(src_entry->preferred_base) {2001static int warned =0;2002if(!warned++)2003warning("object%scannot be read",2004oid_to_hex(&src_entry->idx.oid));2005/*2006 * Those objects are not included in the2007 * resulting pack. Be resilient and ignore2008 * them if they can't be read, in case the2009 * pack could be created nevertheless.2010 */2011return0;2012}2013die("object%scannot be read",2014oid_to_hex(&src_entry->idx.oid));2015}2016if(sz != src_size)2017die("object%sinconsistent object length (%lu vs%lu)",2018oid_to_hex(&src_entry->idx.oid), sz,2019 src_size);2020*mem_usage += sz;2021}2022if(!src->index) {2023 src->index =create_delta_index(src->data, src_size);2024if(!src->index) {2025static int warned =0;2026if(!warned++)2027warning("suboptimal pack - out of memory");2028return0;2029}2030*mem_usage +=sizeof_delta_index(src->index);2031}20322033 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);2034if(!delta_buf)2035return0;2036if(delta_size >= (1U<< OE_DELTA_SIZE_BITS)) {2037free(delta_buf);2038return0;2039}20402041if(DELTA(trg_entry)) {2042/* Prefer only shallower same-sized deltas. */2043if(delta_size ==DELTA_SIZE(trg_entry) &&2044 src->depth +1>= trg->depth) {2045free(delta_buf);2046return0;2047}2048}20492050/*2051 * Handle memory allocation outside of the cache2052 * accounting lock. Compiler will optimize the strangeness2053 * away when NO_PTHREADS is defined.2054 */2055free(trg_entry->delta_data);2056cache_lock();2057if(trg_entry->delta_data) {2058 delta_cache_size -=DELTA_SIZE(trg_entry);2059 trg_entry->delta_data = NULL;2060}2061if(delta_cacheable(src_size, trg_size, delta_size)) {2062 delta_cache_size += delta_size;2063cache_unlock();2064 trg_entry->delta_data =xrealloc(delta_buf, delta_size);2065}else{2066cache_unlock();2067free(delta_buf);2068}20692070SET_DELTA(trg_entry, src_entry);2071SET_DELTA_SIZE(trg_entry, delta_size);2072 trg->depth = src->depth +1;20732074return1;2075}20762077static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)2078{2079struct object_entry *child =DELTA_CHILD(me);2080unsigned int m = n;2081while(child) {2082unsigned int c =check_delta_limit(child, n +1);2083if(m < c)2084 m = c;2085 child =DELTA_SIBLING(child);2086}2087return m;2088}20892090static unsigned longfree_unpacked(struct unpacked *n)2091{2092unsigned long freed_mem =sizeof_delta_index(n->index);2093free_delta_index(n->index);2094 n->index = NULL;2095if(n->data) {2096 freed_mem +=SIZE(n->entry);2097FREE_AND_NULL(n->data);2098}2099 n->entry = NULL;2100 n->depth =0;2101return freed_mem;2102}21032104static voidfind_deltas(struct object_entry **list,unsigned*list_size,2105int window,int depth,unsigned*processed)2106{2107uint32_t i, idx =0, count =0;2108struct unpacked *array;2109unsigned long mem_usage =0;21102111 array =xcalloc(window,sizeof(struct unpacked));21122113for(;;) {2114struct object_entry *entry;2115struct unpacked *n = array + idx;2116int j, max_depth, best_base = -1;21172118progress_lock();2119if(!*list_size) {2120progress_unlock();2121break;2122}2123 entry = *list++;2124(*list_size)--;2125if(!entry->preferred_base) {2126(*processed)++;2127display_progress(progress_state, *processed);2128}2129progress_unlock();21302131 mem_usage -=free_unpacked(n);2132 n->entry = entry;21332134while(window_memory_limit &&2135 mem_usage > window_memory_limit &&2136 count >1) {2137uint32_t tail = (idx + window - count) % window;2138 mem_usage -=free_unpacked(array + tail);2139 count--;2140}21412142/* We do not compute delta to *create* objects we are not2143 * going to pack.2144 */2145if(entry->preferred_base)2146goto next;21472148/*2149 * If the current object is at pack edge, take the depth the2150 * objects that depend on the current object into account2151 * otherwise they would become too deep.2152 */2153 max_depth = depth;2154if(DELTA_CHILD(entry)) {2155 max_depth -=check_delta_limit(entry,0);2156if(max_depth <=0)2157goto next;2158}21592160 j = window;2161while(--j >0) {2162int ret;2163uint32_t other_idx = idx + j;2164struct unpacked *m;2165if(other_idx >= window)2166 other_idx -= window;2167 m = array + other_idx;2168if(!m->entry)2169break;2170 ret =try_delta(n, m, max_depth, &mem_usage);2171if(ret <0)2172break;2173else if(ret >0)2174 best_base = other_idx;2175}21762177/*2178 * If we decided to cache the delta data, then it is best2179 * to compress it right away. First because we have to do2180 * it anyway, and doing it here while we're threaded will2181 * save a lot of time in the non threaded write phase,2182 * as well as allow for caching more deltas within2183 * the same cache size limit.2184 * ...2185 * But only if not writing to stdout, since in that case2186 * the network is most likely throttling writes anyway,2187 * and therefore it is best to go to the write phase ASAP2188 * instead, as we can afford spending more time compressing2189 * between writes at that moment.2190 */2191if(entry->delta_data && !pack_to_stdout) {2192unsigned long size;21932194 size =do_compress(&entry->delta_data,DELTA_SIZE(entry));2195if(size < (1U<< OE_Z_DELTA_BITS)) {2196 entry->z_delta_size = size;2197cache_lock();2198 delta_cache_size -=DELTA_SIZE(entry);2199 delta_cache_size += entry->z_delta_size;2200cache_unlock();2201}else{2202FREE_AND_NULL(entry->delta_data);2203 entry->z_delta_size =0;2204}2205}22062207/* if we made n a delta, and if n is already at max2208 * depth, leaving it in the window is pointless. we2209 * should evict it first.2210 */2211if(DELTA(entry) && max_depth <= n->depth)2212continue;22132214/*2215 * Move the best delta base up in the window, after the2216 * currently deltified object, to keep it longer. It will2217 * be the first base object to be attempted next.2218 */2219if(DELTA(entry)) {2220struct unpacked swap = array[best_base];2221int dist = (window + idx - best_base) % window;2222int dst = best_base;2223while(dist--) {2224int src = (dst +1) % window;2225 array[dst] = array[src];2226 dst = src;2227}2228 array[dst] = swap;2229}22302231 next:2232 idx++;2233if(count +1< window)2234 count++;2235if(idx >= window)2236 idx =0;2237}22382239for(i =0; i < window; ++i) {2240free_delta_index(array[i].index);2241free(array[i].data);2242}2243free(array);2244}22452246#ifndef NO_PTHREADS22472248static voidtry_to_free_from_threads(size_t size)2249{2250read_lock();2251release_pack_memory(size);2252read_unlock();2253}22542255static try_to_free_t old_try_to_free_routine;22562257/*2258 * The main thread waits on the condition that (at least) one of the workers2259 * has stopped working (which is indicated in the .working member of2260 * struct thread_params).2261 * When a work thread has completed its work, it sets .working to 0 and2262 * signals the main thread and waits on the condition that .data_ready2263 * becomes 1.2264 */22652266struct thread_params {2267 pthread_t thread;2268struct object_entry **list;2269unsigned list_size;2270unsigned remaining;2271int window;2272int depth;2273int working;2274int data_ready;2275 pthread_mutex_t mutex;2276 pthread_cond_t cond;2277unsigned*processed;2278};22792280static pthread_cond_t progress_cond;22812282/*2283 * Mutex and conditional variable can't be statically-initialized on Windows.2284 */2285static voidinit_threaded_search(void)2286{2287init_recursive_mutex(&read_mutex);2288pthread_mutex_init(&cache_mutex, NULL);2289pthread_mutex_init(&progress_mutex, NULL);2290pthread_cond_init(&progress_cond, NULL);2291 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2292}22932294static voidcleanup_threaded_search(void)2295{2296set_try_to_free_routine(old_try_to_free_routine);2297pthread_cond_destroy(&progress_cond);2298pthread_mutex_destroy(&read_mutex);2299pthread_mutex_destroy(&cache_mutex);2300pthread_mutex_destroy(&progress_mutex);2301}23022303static void*threaded_find_deltas(void*arg)2304{2305struct thread_params *me = arg;23062307progress_lock();2308while(me->remaining) {2309progress_unlock();23102311find_deltas(me->list, &me->remaining,2312 me->window, me->depth, me->processed);23132314progress_lock();2315 me->working =0;2316pthread_cond_signal(&progress_cond);2317progress_unlock();23182319/*2320 * We must not set ->data_ready before we wait on the2321 * condition because the main thread may have set it to 12322 * before we get here. In order to be sure that new2323 * work is available if we see 1 in ->data_ready, it2324 * was initialized to 0 before this thread was spawned2325 * and we reset it to 0 right away.2326 */2327pthread_mutex_lock(&me->mutex);2328while(!me->data_ready)2329pthread_cond_wait(&me->cond, &me->mutex);2330 me->data_ready =0;2331pthread_mutex_unlock(&me->mutex);23322333progress_lock();2334}2335progress_unlock();2336/* leave ->working 1 so that this doesn't get more work assigned */2337return NULL;2338}23392340static voidll_find_deltas(struct object_entry **list,unsigned list_size,2341int window,int depth,unsigned*processed)2342{2343struct thread_params *p;2344int i, ret, active_threads =0;23452346init_threaded_search();23472348if(delta_search_threads <=1) {2349find_deltas(list, &list_size, window, depth, processed);2350cleanup_threaded_search();2351return;2352}2353if(progress > pack_to_stdout)2354fprintf(stderr,"Delta compression using up to%dthreads.\n",2355 delta_search_threads);2356 p =xcalloc(delta_search_threads,sizeof(*p));23572358/* Partition the work amongst work threads. */2359for(i =0; i < delta_search_threads; i++) {2360unsigned sub_size = list_size / (delta_search_threads - i);23612362/* don't use too small segments or no deltas will be found */2363if(sub_size <2*window && i+1< delta_search_threads)2364 sub_size =0;23652366 p[i].window = window;2367 p[i].depth = depth;2368 p[i].processed = processed;2369 p[i].working =1;2370 p[i].data_ready =0;23712372/* try to split chunks on "path" boundaries */2373while(sub_size && sub_size < list_size &&2374 list[sub_size]->hash &&2375 list[sub_size]->hash == list[sub_size-1]->hash)2376 sub_size++;23772378 p[i].list = list;2379 p[i].list_size = sub_size;2380 p[i].remaining = sub_size;23812382 list += sub_size;2383 list_size -= sub_size;2384}23852386/* Start work threads. */2387for(i =0; i < delta_search_threads; i++) {2388if(!p[i].list_size)2389continue;2390pthread_mutex_init(&p[i].mutex, NULL);2391pthread_cond_init(&p[i].cond, NULL);2392 ret =pthread_create(&p[i].thread, NULL,2393 threaded_find_deltas, &p[i]);2394if(ret)2395die("unable to create thread:%s",strerror(ret));2396 active_threads++;2397}23982399/*2400 * Now let's wait for work completion. Each time a thread is done2401 * with its work, we steal half of the remaining work from the2402 * thread with the largest number of unprocessed objects and give2403 * it to that newly idle thread. This ensure good load balancing2404 * until the remaining object list segments are simply too short2405 * to be worth splitting anymore.2406 */2407while(active_threads) {2408struct thread_params *target = NULL;2409struct thread_params *victim = NULL;2410unsigned sub_size =0;24112412progress_lock();2413for(;;) {2414for(i =0; !target && i < delta_search_threads; i++)2415if(!p[i].working)2416 target = &p[i];2417if(target)2418break;2419pthread_cond_wait(&progress_cond, &progress_mutex);2420}24212422for(i =0; i < delta_search_threads; i++)2423if(p[i].remaining >2*window &&2424(!victim || victim->remaining < p[i].remaining))2425 victim = &p[i];2426if(victim) {2427 sub_size = victim->remaining /2;2428 list = victim->list + victim->list_size - sub_size;2429while(sub_size && list[0]->hash &&2430 list[0]->hash == list[-1]->hash) {2431 list++;2432 sub_size--;2433}2434if(!sub_size) {2435/*2436 * It is possible for some "paths" to have2437 * so many objects that no hash boundary2438 * might be found. Let's just steal the2439 * exact half in that case.2440 */2441 sub_size = victim->remaining /2;2442 list -= sub_size;2443}2444 target->list = list;2445 victim->list_size -= sub_size;2446 victim->remaining -= sub_size;2447}2448 target->list_size = sub_size;2449 target->remaining = sub_size;2450 target->working =1;2451progress_unlock();24522453pthread_mutex_lock(&target->mutex);2454 target->data_ready =1;2455pthread_cond_signal(&target->cond);2456pthread_mutex_unlock(&target->mutex);24572458if(!sub_size) {2459pthread_join(target->thread, NULL);2460pthread_cond_destroy(&target->cond);2461pthread_mutex_destroy(&target->mutex);2462 active_threads--;2463}2464}2465cleanup_threaded_search();2466free(p);2467}24682469#else2470#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2471#endif24722473static voidadd_tag_chain(const struct object_id *oid)2474{2475struct tag *tag;24762477/*2478 * We catch duplicates already in add_object_entry(), but we'd2479 * prefer to do this extra check to avoid having to parse the2480 * tag at all if we already know that it's being packed (e.g., if2481 * it was included via bitmaps, we would not have parsed it2482 * previously).2483 */2484if(packlist_find(&to_pack, oid->hash, NULL))2485return;24862487 tag =lookup_tag(the_repository, oid);2488while(1) {2489if(!tag ||parse_tag(tag) || !tag->tagged)2490die("unable to pack objects reachable from tag%s",2491oid_to_hex(oid));24922493add_object_entry(&tag->object.oid, OBJ_TAG, NULL,0);24942495if(tag->tagged->type != OBJ_TAG)2496return;24972498 tag = (struct tag *)tag->tagged;2499}2500}25012502static intadd_ref_tag(const char*path,const struct object_id *oid,int flag,void*cb_data)2503{2504struct object_id peeled;25052506if(starts_with(path,"refs/tags/") &&/* is a tag? */2507!peel_ref(path, &peeled) &&/* peelable? */2508packlist_find(&to_pack, peeled.hash, NULL))/* object packed? */2509add_tag_chain(oid);2510return0;2511}25122513static voidprepare_pack(int window,int depth)2514{2515struct object_entry **delta_list;2516uint32_t i, nr_deltas;2517unsigned n;25182519get_object_details();25202521/*2522 * If we're locally repacking then we need to be doubly careful2523 * from now on in order to make sure no stealth corruption gets2524 * propagated to the new pack. Clients receiving streamed packs2525 * should validate everything they get anyway so no need to incur2526 * the additional cost here in that case.2527 */2528if(!pack_to_stdout)2529 do_check_packed_object_crc =1;25302531if(!to_pack.nr_objects || !window || !depth)2532return;25332534ALLOC_ARRAY(delta_list, to_pack.nr_objects);2535 nr_deltas = n =0;25362537for(i =0; i < to_pack.nr_objects; i++) {2538struct object_entry *entry = to_pack.objects + i;25392540if(DELTA(entry))2541/* This happens if we decided to reuse existing2542 * delta from a pack. "reuse_delta &&" is implied.2543 */2544continue;25452546if(!entry->type_valid ||2547oe_size_less_than(&to_pack, entry,50))2548continue;25492550if(entry->no_try_delta)2551continue;25522553if(!entry->preferred_base) {2554 nr_deltas++;2555if(oe_type(entry) <0)2556die("unable to get type of object%s",2557oid_to_hex(&entry->idx.oid));2558}else{2559if(oe_type(entry) <0) {2560/*2561 * This object is not found, but we2562 * don't have to include it anyway.2563 */2564continue;2565}2566}25672568 delta_list[n++] = entry;2569}25702571if(nr_deltas && n >1) {2572unsigned nr_done =0;2573if(progress)2574 progress_state =start_progress(_("Compressing objects"),2575 nr_deltas);2576QSORT(delta_list, n, type_size_sort);2577ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2578stop_progress(&progress_state);2579if(nr_done != nr_deltas)2580die("inconsistency with delta count");2581}2582free(delta_list);2583}25842585static intgit_pack_config(const char*k,const char*v,void*cb)2586{2587if(!strcmp(k,"pack.window")) {2588 window =git_config_int(k, v);2589return0;2590}2591if(!strcmp(k,"pack.windowmemory")) {2592 window_memory_limit =git_config_ulong(k, v);2593return0;2594}2595if(!strcmp(k,"pack.depth")) {2596 depth =git_config_int(k, v);2597return0;2598}2599if(!strcmp(k,"pack.deltacachesize")) {2600 max_delta_cache_size =git_config_int(k, v);2601return0;2602}2603if(!strcmp(k,"pack.deltacachelimit")) {2604 cache_max_small_delta_size =git_config_int(k, v);2605return0;2606}2607if(!strcmp(k,"pack.writebitmaphashcache")) {2608if(git_config_bool(k, v))2609 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2610else2611 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2612}2613if(!strcmp(k,"pack.usebitmaps")) {2614 use_bitmap_index_default =git_config_bool(k, v);2615return0;2616}2617if(!strcmp(k,"pack.threads")) {2618 delta_search_threads =git_config_int(k, v);2619if(delta_search_threads <0)2620die("invalid number of threads specified (%d)",2621 delta_search_threads);2622#ifdef NO_PTHREADS2623if(delta_search_threads !=1) {2624warning("no threads support, ignoring%s", k);2625 delta_search_threads =0;2626}2627#endif2628return0;2629}2630if(!strcmp(k,"pack.indexversion")) {2631 pack_idx_opts.version =git_config_int(k, v);2632if(pack_idx_opts.version >2)2633die("bad pack.indexversion=%"PRIu32,2634 pack_idx_opts.version);2635return0;2636}2637returngit_default_config(k, v, cb);2638}26392640static voidread_object_list_from_stdin(void)2641{2642char line[GIT_MAX_HEXSZ +1+ PATH_MAX +2];2643struct object_id oid;2644const char*p;26452646for(;;) {2647if(!fgets(line,sizeof(line), stdin)) {2648if(feof(stdin))2649break;2650if(!ferror(stdin))2651die("fgets returned NULL, not EOF, not error!");2652if(errno != EINTR)2653die_errno("fgets");2654clearerr(stdin);2655continue;2656}2657if(line[0] =='-') {2658if(get_oid_hex(line+1, &oid))2659die("expected edge object ID, got garbage:\n%s",2660 line);2661add_preferred_base(&oid);2662continue;2663}2664if(parse_oid_hex(line, &oid, &p))2665die("expected object ID, got garbage:\n%s", line);26662667add_preferred_base_object(p +1);2668add_object_entry(&oid, OBJ_NONE, p +1,0);2669}2670}26712672/* Remember to update object flag allocation in object.h */2673#define OBJECT_ADDED (1u<<20)26742675static voidshow_commit(struct commit *commit,void*data)2676{2677add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL,0);2678 commit->object.flags |= OBJECT_ADDED;26792680if(write_bitmap_index)2681index_commit_for_bitmap(commit);2682}26832684static voidshow_object(struct object *obj,const char*name,void*data)2685{2686add_preferred_base_object(name);2687add_object_entry(&obj->oid, obj->type, name,0);2688 obj->flags |= OBJECT_ADDED;2689}26902691static voidshow_object__ma_allow_any(struct object *obj,const char*name,void*data)2692{2693assert(arg_missing_action == MA_ALLOW_ANY);26942695/*2696 * Quietly ignore ALL missing objects. This avoids problems with2697 * staging them now and getting an odd error later.2698 */2699if(!has_object_file(&obj->oid))2700return;27012702show_object(obj, name, data);2703}27042705static voidshow_object__ma_allow_promisor(struct object *obj,const char*name,void*data)2706{2707assert(arg_missing_action == MA_ALLOW_PROMISOR);27082709/*2710 * Quietly ignore EXPECTED missing objects. This avoids problems with2711 * staging them now and getting an odd error later.2712 */2713if(!has_object_file(&obj->oid) &&is_promisor_object(&obj->oid))2714return;27152716show_object(obj, name, data);2717}27182719static intoption_parse_missing_action(const struct option *opt,2720const char*arg,int unset)2721{2722assert(arg);2723assert(!unset);27242725if(!strcmp(arg,"error")) {2726 arg_missing_action = MA_ERROR;2727 fn_show_object = show_object;2728return0;2729}27302731if(!strcmp(arg,"allow-any")) {2732 arg_missing_action = MA_ALLOW_ANY;2733 fetch_if_missing =0;2734 fn_show_object = show_object__ma_allow_any;2735return0;2736}27372738if(!strcmp(arg,"allow-promisor")) {2739 arg_missing_action = MA_ALLOW_PROMISOR;2740 fetch_if_missing =0;2741 fn_show_object = show_object__ma_allow_promisor;2742return0;2743}27442745die(_("invalid value for --missing"));2746return0;2747}27482749static voidshow_edge(struct commit *commit)2750{2751add_preferred_base(&commit->object.oid);2752}27532754struct in_pack_object {2755 off_t offset;2756struct object *object;2757};27582759struct in_pack {2760unsigned int alloc;2761unsigned int nr;2762struct in_pack_object *array;2763};27642765static voidmark_in_pack_object(struct object *object,struct packed_git *p,struct in_pack *in_pack)2766{2767 in_pack->array[in_pack->nr].offset =find_pack_entry_one(object->oid.hash, p);2768 in_pack->array[in_pack->nr].object = object;2769 in_pack->nr++;2770}27712772/*2773 * Compare the objects in the offset order, in order to emulate the2774 * "git rev-list --objects" output that produced the pack originally.2775 */2776static intofscmp(const void*a_,const void*b_)2777{2778struct in_pack_object *a = (struct in_pack_object *)a_;2779struct in_pack_object *b = (struct in_pack_object *)b_;27802781if(a->offset < b->offset)2782return-1;2783else if(a->offset > b->offset)2784return1;2785else2786returnoidcmp(&a->object->oid, &b->object->oid);2787}27882789static voidadd_objects_in_unpacked_packs(struct rev_info *revs)2790{2791struct packed_git *p;2792struct in_pack in_pack;2793uint32_t i;27942795memset(&in_pack,0,sizeof(in_pack));27962797for(p =get_packed_git(the_repository); p; p = p->next) {2798struct object_id oid;2799struct object *o;28002801if(!p->pack_local || p->pack_keep || p->pack_keep_in_core)2802continue;2803if(open_pack_index(p))2804die("cannot open pack index");28052806ALLOC_GROW(in_pack.array,2807 in_pack.nr + p->num_objects,2808 in_pack.alloc);28092810for(i =0; i < p->num_objects; i++) {2811nth_packed_object_oid(&oid, p, i);2812 o =lookup_unknown_object(oid.hash);2813if(!(o->flags & OBJECT_ADDED))2814mark_in_pack_object(o, p, &in_pack);2815 o->flags |= OBJECT_ADDED;2816}2817}28182819if(in_pack.nr) {2820QSORT(in_pack.array, in_pack.nr, ofscmp);2821for(i =0; i < in_pack.nr; i++) {2822struct object *o = in_pack.array[i].object;2823add_object_entry(&o->oid, o->type,"",0);2824}2825}2826free(in_pack.array);2827}28282829static intadd_loose_object(const struct object_id *oid,const char*path,2830void*data)2831{2832enum object_type type =oid_object_info(the_repository, oid, NULL);28332834if(type <0) {2835warning("loose object at%scould not be examined", path);2836return0;2837}28382839add_object_entry(oid, type,"",0);2840return0;2841}28422843/*2844 * We actually don't even have to worry about reachability here.2845 * add_object_entry will weed out duplicates, so we just add every2846 * loose object we find.2847 */2848static voidadd_unreachable_loose_objects(void)2849{2850for_each_loose_file_in_objdir(get_object_directory(),2851 add_loose_object,2852 NULL, NULL, NULL);2853}28542855static inthas_sha1_pack_kept_or_nonlocal(const struct object_id *oid)2856{2857static struct packed_git *last_found = (void*)1;2858struct packed_git *p;28592860 p = (last_found != (void*)1) ? last_found :2861get_packed_git(the_repository);28622863while(p) {2864if((!p->pack_local || p->pack_keep ||2865 p->pack_keep_in_core) &&2866find_pack_entry_one(oid->hash, p)) {2867 last_found = p;2868return1;2869}2870if(p == last_found)2871 p =get_packed_git(the_repository);2872else2873 p = p->next;2874if(p == last_found)2875 p = p->next;2876}2877return0;2878}28792880/*2881 * Store a list of sha1s that are should not be discarded2882 * because they are either written too recently, or are2883 * reachable from another object that was.2884 *2885 * This is filled by get_object_list.2886 */2887static struct oid_array recent_objects;28882889static intloosened_object_can_be_discarded(const struct object_id *oid,2890 timestamp_t mtime)2891{2892if(!unpack_unreachable_expiration)2893return0;2894if(mtime > unpack_unreachable_expiration)2895return0;2896if(oid_array_lookup(&recent_objects, oid) >=0)2897return0;2898return1;2899}29002901static voidloosen_unused_packed_objects(struct rev_info *revs)2902{2903struct packed_git *p;2904uint32_t i;2905struct object_id oid;29062907for(p =get_packed_git(the_repository); p; p = p->next) {2908if(!p->pack_local || p->pack_keep || p->pack_keep_in_core)2909continue;29102911if(open_pack_index(p))2912die("cannot open pack index");29132914for(i =0; i < p->num_objects; i++) {2915nth_packed_object_oid(&oid, p, i);2916if(!packlist_find(&to_pack, oid.hash, NULL) &&2917!has_sha1_pack_kept_or_nonlocal(&oid) &&2918!loosened_object_can_be_discarded(&oid, p->mtime))2919if(force_object_loose(&oid, p->mtime))2920die("unable to force loose object");2921}2922}2923}29242925/*2926 * This tracks any options which pack-reuse code expects to be on, or which a2927 * reader of the pack might not understand, and which would therefore prevent2928 * blind reuse of what we have on disk.2929 */2930static intpack_options_allow_reuse(void)2931{2932return pack_to_stdout &&2933 allow_ofs_delta &&2934!ignore_packed_keep_on_disk &&2935!ignore_packed_keep_in_core &&2936(!local || !have_non_local_packs) &&2937!incremental;2938}29392940static intget_object_list_from_bitmap(struct rev_info *revs)2941{2942struct bitmap_index *bitmap_git;2943if(!(bitmap_git =prepare_bitmap_walk(revs)))2944return-1;29452946if(pack_options_allow_reuse() &&2947!reuse_partial_packfile_from_bitmap(2948 bitmap_git,2949&reuse_packfile,2950&reuse_packfile_objects,2951&reuse_packfile_offset)) {2952assert(reuse_packfile_objects);2953 nr_result += reuse_packfile_objects;2954display_progress(progress_state, nr_result);2955}29562957traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);2958free_bitmap_index(bitmap_git);2959return0;2960}29612962static voidrecord_recent_object(struct object *obj,2963const char*name,2964void*data)2965{2966oid_array_append(&recent_objects, &obj->oid);2967}29682969static voidrecord_recent_commit(struct commit *commit,void*data)2970{2971oid_array_append(&recent_objects, &commit->object.oid);2972}29732974static voidget_object_list(int ac,const char**av)2975{2976struct rev_info revs;2977char line[1000];2978int flags =0;29792980init_revisions(&revs, NULL);2981 save_commit_buffer =0;2982setup_revisions(ac, av, &revs, NULL);29832984/* make sure shallows are read */2985is_repository_shallow(the_repository);29862987while(fgets(line,sizeof(line), stdin) != NULL) {2988int len =strlen(line);2989if(len && line[len -1] =='\n')2990 line[--len] =0;2991if(!len)2992break;2993if(*line =='-') {2994if(!strcmp(line,"--not")) {2995 flags ^= UNINTERESTING;2996 write_bitmap_index =0;2997continue;2998}2999if(starts_with(line,"--shallow ")) {3000struct object_id oid;3001if(get_oid_hex(line +10, &oid))3002die("not an SHA-1 '%s'", line +10);3003register_shallow(the_repository, &oid);3004 use_bitmap_index =0;3005continue;3006}3007die("not a rev '%s'", line);3008}3009if(handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))3010die("bad revision '%s'", line);3011}30123013if(use_bitmap_index && !get_object_list_from_bitmap(&revs))3014return;30153016if(prepare_revision_walk(&revs))3017die("revision walk setup failed");3018mark_edges_uninteresting(&revs, show_edge);30193020if(!fn_show_object)3021 fn_show_object = show_object;3022traverse_commit_list_filtered(&filter_options, &revs,3023 show_commit, fn_show_object, NULL,3024 NULL);30253026if(unpack_unreachable_expiration) {3027 revs.ignore_missing_links =1;3028if(add_unseen_recent_objects_to_traversal(&revs,3029 unpack_unreachable_expiration))3030die("unable to add recent objects");3031if(prepare_revision_walk(&revs))3032die("revision walk setup failed");3033traverse_commit_list(&revs, record_recent_commit,3034 record_recent_object, NULL);3035}30363037if(keep_unreachable)3038add_objects_in_unpacked_packs(&revs);3039if(pack_loose_unreachable)3040add_unreachable_loose_objects();3041if(unpack_unreachable)3042loosen_unused_packed_objects(&revs);30433044oid_array_clear(&recent_objects);3045}30463047static voidadd_extra_kept_packs(const struct string_list *names)3048{3049struct packed_git *p;30503051if(!names->nr)3052return;30533054for(p =get_packed_git(the_repository); p; p = p->next) {3055const char*name =basename(p->pack_name);3056int i;30573058if(!p->pack_local)3059continue;30603061for(i =0; i < names->nr; i++)3062if(!fspathcmp(name, names->items[i].string))3063break;30643065if(i < names->nr) {3066 p->pack_keep_in_core =1;3067 ignore_packed_keep_in_core =1;3068continue;3069}3070}3071}30723073static intoption_parse_index_version(const struct option *opt,3074const char*arg,int unset)3075{3076char*c;3077const char*val = arg;3078 pack_idx_opts.version =strtoul(val, &c,10);3079if(pack_idx_opts.version >2)3080die(_("unsupported index version%s"), val);3081if(*c ==','&& c[1])3082 pack_idx_opts.off32_limit =strtoul(c+1, &c,0);3083if(*c || pack_idx_opts.off32_limit &0x80000000)3084die(_("bad index version '%s'"), val);3085return0;3086}30873088static intoption_parse_unpack_unreachable(const struct option *opt,3089const char*arg,int unset)3090{3091if(unset) {3092 unpack_unreachable =0;3093 unpack_unreachable_expiration =0;3094}3095else{3096 unpack_unreachable =1;3097if(arg)3098 unpack_unreachable_expiration =approxidate(arg);3099}3100return0;3101}31023103intcmd_pack_objects(int argc,const char**argv,const char*prefix)3104{3105int use_internal_rev_list =0;3106int thin =0;3107int shallow =0;3108int all_progress_implied =0;3109struct argv_array rp = ARGV_ARRAY_INIT;3110int rev_list_unpacked =0, rev_list_all =0, rev_list_reflog =0;3111int rev_list_index =0;3112struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;3113struct option pack_objects_options[] = {3114OPT_SET_INT('q',"quiet", &progress,3115N_("do not show progress meter"),0),3116OPT_SET_INT(0,"progress", &progress,3117N_("show progress meter"),1),3118OPT_SET_INT(0,"all-progress", &progress,3119N_("show progress meter during object writing phase"),2),3120OPT_BOOL(0,"all-progress-implied",3121&all_progress_implied,3122N_("similar to --all-progress when progress meter is shown")),3123{ OPTION_CALLBACK,0,"index-version", NULL,N_("version[,offset]"),3124N_("write the pack index file in the specified idx format version"),31250, option_parse_index_version },3126OPT_MAGNITUDE(0,"max-pack-size", &pack_size_limit,3127N_("maximum size of each output pack file")),3128OPT_BOOL(0,"local", &local,3129N_("ignore borrowed objects from alternate object store")),3130OPT_BOOL(0,"incremental", &incremental,3131N_("ignore packed objects")),3132OPT_INTEGER(0,"window", &window,3133N_("limit pack window by objects")),3134OPT_MAGNITUDE(0,"window-memory", &window_memory_limit,3135N_("limit pack window by memory in addition to object limit")),3136OPT_INTEGER(0,"depth", &depth,3137N_("maximum length of delta chain allowed in the resulting pack")),3138OPT_BOOL(0,"reuse-delta", &reuse_delta,3139N_("reuse existing deltas")),3140OPT_BOOL(0,"reuse-object", &reuse_object,3141N_("reuse existing objects")),3142OPT_BOOL(0,"delta-base-offset", &allow_ofs_delta,3143N_("use OFS_DELTA objects")),3144OPT_INTEGER(0,"threads", &delta_search_threads,3145N_("use threads when searching for best delta matches")),3146OPT_BOOL(0,"non-empty", &non_empty,3147N_("do not create an empty pack output")),3148OPT_BOOL(0,"revs", &use_internal_rev_list,3149N_("read revision arguments from standard input")),3150OPT_SET_INT_F(0,"unpacked", &rev_list_unpacked,3151N_("limit the objects to those that are not yet packed"),31521, PARSE_OPT_NONEG),3153OPT_SET_INT_F(0,"all", &rev_list_all,3154N_("include objects reachable from any reference"),31551, PARSE_OPT_NONEG),3156OPT_SET_INT_F(0,"reflog", &rev_list_reflog,3157N_("include objects referred by reflog entries"),31581, PARSE_OPT_NONEG),3159OPT_SET_INT_F(0,"indexed-objects", &rev_list_index,3160N_("include objects referred to by the index"),31611, PARSE_OPT_NONEG),3162OPT_BOOL(0,"stdout", &pack_to_stdout,3163N_("output pack to stdout")),3164OPT_BOOL(0,"include-tag", &include_tag,3165N_("include tag objects that refer to objects to be packed")),3166OPT_BOOL(0,"keep-unreachable", &keep_unreachable,3167N_("keep unreachable objects")),3168OPT_BOOL(0,"pack-loose-unreachable", &pack_loose_unreachable,3169N_("pack loose unreachable objects")),3170{ OPTION_CALLBACK,0,"unpack-unreachable", NULL,N_("time"),3171N_("unpack unreachable objects newer than <time>"),3172 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },3173OPT_BOOL(0,"thin", &thin,3174N_("create thin packs")),3175OPT_BOOL(0,"shallow", &shallow,3176N_("create packs suitable for shallow fetches")),3177OPT_BOOL(0,"honor-pack-keep", &ignore_packed_keep_on_disk,3178N_("ignore packs that have companion .keep file")),3179OPT_STRING_LIST(0,"keep-pack", &keep_pack_list,N_("name"),3180N_("ignore this pack")),3181OPT_INTEGER(0,"compression", &pack_compression_level,3182N_("pack compression level")),3183OPT_SET_INT(0,"keep-true-parents", &grafts_replace_parents,3184N_("do not hide commits by grafts"),0),3185OPT_BOOL(0,"use-bitmap-index", &use_bitmap_index,3186N_("use a bitmap index if available to speed up counting objects")),3187OPT_BOOL(0,"write-bitmap-index", &write_bitmap_index,3188N_("write a bitmap index together with the pack index")),3189OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),3190{ OPTION_CALLBACK,0,"missing", NULL,N_("action"),3191N_("handling for missing objects"), PARSE_OPT_NONEG,3192 option_parse_missing_action },3193OPT_BOOL(0,"exclude-promisor-objects", &exclude_promisor_objects,3194N_("do not pack objects in promisor packfiles")),3195OPT_END(),3196};31973198if(DFS_NUM_STATES > (1<< OE_DFS_STATE_BITS))3199BUG("too many dfs states, increase OE_DFS_STATE_BITS");32003201 check_replace_refs =0;32023203reset_pack_idx_option(&pack_idx_opts);3204git_config(git_pack_config, NULL);32053206 progress =isatty(2);3207 argc =parse_options(argc, argv, prefix, pack_objects_options,3208 pack_usage,0);32093210if(argc) {3211 base_name = argv[0];3212 argc--;3213}3214if(pack_to_stdout != !base_name || argc)3215usage_with_options(pack_usage, pack_objects_options);32163217if(depth >= (1<< OE_DEPTH_BITS)) {3218warning(_("delta chain depth%dis too deep, forcing%d"),3219 depth, (1<< OE_DEPTH_BITS) -1);3220 depth = (1<< OE_DEPTH_BITS) -1;3221}3222if(cache_max_small_delta_size >= (1U<< OE_Z_DELTA_BITS)) {3223warning(_("pack.deltaCacheLimit is too high, forcing%d"),3224(1U<< OE_Z_DELTA_BITS) -1);3225 cache_max_small_delta_size = (1U<< OE_Z_DELTA_BITS) -1;3226}32273228argv_array_push(&rp,"pack-objects");3229if(thin) {3230 use_internal_rev_list =1;3231argv_array_push(&rp, shallow3232?"--objects-edge-aggressive"3233:"--objects-edge");3234}else3235argv_array_push(&rp,"--objects");32363237if(rev_list_all) {3238 use_internal_rev_list =1;3239argv_array_push(&rp,"--all");3240}3241if(rev_list_reflog) {3242 use_internal_rev_list =1;3243argv_array_push(&rp,"--reflog");3244}3245if(rev_list_index) {3246 use_internal_rev_list =1;3247argv_array_push(&rp,"--indexed-objects");3248}3249if(rev_list_unpacked) {3250 use_internal_rev_list =1;3251argv_array_push(&rp,"--unpacked");3252}32533254if(exclude_promisor_objects) {3255 use_internal_rev_list =1;3256 fetch_if_missing =0;3257argv_array_push(&rp,"--exclude-promisor-objects");3258}3259if(unpack_unreachable || keep_unreachable || pack_loose_unreachable)3260 use_internal_rev_list =1;32613262if(!reuse_object)3263 reuse_delta =0;3264if(pack_compression_level == -1)3265 pack_compression_level = Z_DEFAULT_COMPRESSION;3266else if(pack_compression_level <0|| pack_compression_level > Z_BEST_COMPRESSION)3267die("bad pack compression level%d", pack_compression_level);32683269if(!delta_search_threads)/* --threads=0 means autodetect */3270 delta_search_threads =online_cpus();32713272#ifdef NO_PTHREADS3273if(delta_search_threads !=1)3274warning("no threads support, ignoring --threads");3275#endif3276if(!pack_to_stdout && !pack_size_limit)3277 pack_size_limit = pack_size_limit_cfg;3278if(pack_to_stdout && pack_size_limit)3279die("--max-pack-size cannot be used to build a pack for transfer.");3280if(pack_size_limit && pack_size_limit <1024*1024) {3281warning("minimum pack size limit is 1 MiB");3282 pack_size_limit =1024*1024;3283}32843285if(!pack_to_stdout && thin)3286die("--thin cannot be used to build an indexable pack.");32873288if(keep_unreachable && unpack_unreachable)3289die("--keep-unreachable and --unpack-unreachable are incompatible.");3290if(!rev_list_all || !rev_list_reflog || !rev_list_index)3291 unpack_unreachable_expiration =0;32923293if(filter_options.choice) {3294if(!pack_to_stdout)3295die("cannot use --filter without --stdout.");3296 use_bitmap_index =0;3297}32983299/*3300 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3301 *3302 * - to produce good pack (with bitmap index not-yet-packed objects are3303 * packed in suboptimal order).3304 *3305 * - to use more robust pack-generation codepath (avoiding possible3306 * bugs in bitmap code and possible bitmap index corruption).3307 */3308if(!pack_to_stdout)3309 use_bitmap_index_default =0;33103311if(use_bitmap_index <0)3312 use_bitmap_index = use_bitmap_index_default;33133314/* "hard" reasons not to use bitmaps; these just won't work at all */3315if(!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) ||is_repository_shallow(the_repository))3316 use_bitmap_index =0;33173318if(pack_to_stdout || !rev_list_all)3319 write_bitmap_index =0;33203321if(progress && all_progress_implied)3322 progress =2;33233324add_extra_kept_packs(&keep_pack_list);3325if(ignore_packed_keep_on_disk) {3326struct packed_git *p;3327for(p =get_packed_git(the_repository); p; p = p->next)3328if(p->pack_local && p->pack_keep)3329break;3330if(!p)/* no keep-able packs found */3331 ignore_packed_keep_on_disk =0;3332}3333if(local) {3334/*3335 * unlike ignore_packed_keep_on_disk above, we do not3336 * want to unset "local" based on looking at packs, as3337 * it also covers non-local objects3338 */3339struct packed_git *p;3340for(p =get_packed_git(the_repository); p; p = p->next) {3341if(!p->pack_local) {3342 have_non_local_packs =1;3343break;3344}3345}3346}33473348prepare_packing_data(&to_pack);33493350if(progress)3351 progress_state =start_progress(_("Enumerating objects"),0);3352if(!use_internal_rev_list)3353read_object_list_from_stdin();3354else{3355get_object_list(rp.argc, rp.argv);3356argv_array_clear(&rp);3357}3358cleanup_preferred_base();3359if(include_tag && nr_result)3360for_each_ref(add_ref_tag, NULL);3361stop_progress(&progress_state);33623363if(non_empty && !nr_result)3364return0;3365if(nr_result)3366prepare_pack(window, depth);3367write_pack_file();3368if(progress)3369fprintf(stderr,"Total %"PRIu32" (delta %"PRIu32"),"3370" reused %"PRIu32" (delta %"PRIu32")\n",3371 written, written_delta, reused, reused_delta);3372return0;3373}