1#include"builtin.h" 2#include"cache.h" 3#include"repository.h" 4#include"config.h" 5#include"attr.h" 6#include"object.h" 7#include"blob.h" 8#include"commit.h" 9#include"tag.h" 10#include"tree.h" 11#include"delta.h" 12#include"pack.h" 13#include"pack-revindex.h" 14#include"csum-file.h" 15#include"tree-walk.h" 16#include"diff.h" 17#include"revision.h" 18#include"list-objects.h" 19#include"list-objects-filter.h" 20#include"list-objects-filter-options.h" 21#include"pack-objects.h" 22#include"progress.h" 23#include"refs.h" 24#include"streaming.h" 25#include"thread-utils.h" 26#include"pack-bitmap.h" 27#include"reachable.h" 28#include"sha1-array.h" 29#include"argv-array.h" 30#include"list.h" 31#include"packfile.h" 32#include"object-store.h" 33#include"dir.h" 34 35#define IN_PACK(obj) oe_in_pack(&to_pack, obj) 36#define SIZE(obj) oe_size(&to_pack, obj) 37#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size) 38#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj) 39#define DELTA(obj) oe_delta(&to_pack, obj) 40#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) 41#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) 42#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) 43#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) 44#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) 45#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) 46 47static const char*pack_usage[] = { 48N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 49N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 50 NULL 51}; 52 53/* 54 * Objects we are going to pack are collected in the `to_pack` structure. 55 * It contains an array (dynamically expanded) of the object data, and a map 56 * that can resolve SHA1s to their position in the array. 57 */ 58static struct packing_data to_pack; 59 60static struct pack_idx_entry **written_list; 61static uint32_t nr_result, nr_written, nr_seen; 62 63static int non_empty; 64static int reuse_delta =1, reuse_object =1; 65static int keep_unreachable, unpack_unreachable, include_tag; 66static timestamp_t unpack_unreachable_expiration; 67static int pack_loose_unreachable; 68static int local; 69static int have_non_local_packs; 70static int incremental; 71static int ignore_packed_keep_on_disk; 72static int ignore_packed_keep_in_core; 73static int allow_ofs_delta; 74static struct pack_idx_option pack_idx_opts; 75static const char*base_name; 76static int progress =1; 77static int window =10; 78static unsigned long pack_size_limit; 79static int depth =50; 80static int delta_search_threads; 81static int pack_to_stdout; 82static int num_preferred_base; 83static struct progress *progress_state; 84 85static struct packed_git *reuse_packfile; 86static uint32_t reuse_packfile_objects; 87static off_t reuse_packfile_offset; 88 89static int use_bitmap_index_default =1; 90static int use_bitmap_index = -1; 91static int write_bitmap_index; 92static uint16_t write_bitmap_options; 93 94static int exclude_promisor_objects; 95 96static unsigned long delta_cache_size =0; 97static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; 98static unsigned long cache_max_small_delta_size =1000; 99 100static unsigned long window_memory_limit =0; 101 102static struct list_objects_filter_options filter_options; 103 104enum missing_action { 105 MA_ERROR =0,/* fail if any missing objects are encountered */ 106 MA_ALLOW_ANY,/* silently allow ALL missing objects */ 107 MA_ALLOW_PROMISOR,/* silently allow all missing PROMISOR objects */ 108}; 109static enum missing_action arg_missing_action; 110static show_object_fn fn_show_object; 111 112/* 113 * stats 114 */ 115static uint32_t written, written_delta; 116static uint32_t reused, reused_delta; 117 118/* 119 * Indexed commits 120 */ 121static struct commit **indexed_commits; 122static unsigned int indexed_commits_nr; 123static unsigned int indexed_commits_alloc; 124 125static voidindex_commit_for_bitmap(struct commit *commit) 126{ 127if(indexed_commits_nr >= indexed_commits_alloc) { 128 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 129REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 130} 131 132 indexed_commits[indexed_commits_nr++] = commit; 133} 134 135static void*get_delta(struct object_entry *entry) 136{ 137unsigned long size, base_size, delta_size; 138void*buf, *base_buf, *delta_buf; 139enum object_type type; 140 141 buf =read_object_file(&entry->idx.oid, &type, &size); 142if(!buf) 143die("unable to read%s",oid_to_hex(&entry->idx.oid)); 144 base_buf =read_object_file(&DELTA(entry)->idx.oid, &type, 145&base_size); 146if(!base_buf) 147die("unable to read%s", 148oid_to_hex(&DELTA(entry)->idx.oid)); 149 delta_buf =diff_delta(base_buf, base_size, 150 buf, size, &delta_size,0); 151/* 152 * We succesfully computed this delta once but dropped it for 153 * memory reasons. Something is very wrong if this time we 154 * recompute and create a different delta. 155 */ 156if(!delta_buf || delta_size !=DELTA_SIZE(entry)) 157BUG("delta size changed"); 158free(buf); 159free(base_buf); 160return delta_buf; 161} 162 163static unsigned longdo_compress(void**pptr,unsigned long size) 164{ 165 git_zstream stream; 166void*in, *out; 167unsigned long maxsize; 168 169git_deflate_init(&stream, pack_compression_level); 170 maxsize =git_deflate_bound(&stream, size); 171 172 in = *pptr; 173 out =xmalloc(maxsize); 174*pptr = out; 175 176 stream.next_in = in; 177 stream.avail_in = size; 178 stream.next_out = out; 179 stream.avail_out = maxsize; 180while(git_deflate(&stream, Z_FINISH) == Z_OK) 181;/* nothing */ 182git_deflate_end(&stream); 183 184free(in); 185return stream.total_out; 186} 187 188static unsigned longwrite_large_blob_data(struct git_istream *st,struct hashfile *f, 189const struct object_id *oid) 190{ 191 git_zstream stream; 192unsigned char ibuf[1024*16]; 193unsigned char obuf[1024*16]; 194unsigned long olen =0; 195 196git_deflate_init(&stream, pack_compression_level); 197 198for(;;) { 199 ssize_t readlen; 200int zret = Z_OK; 201 readlen =read_istream(st, ibuf,sizeof(ibuf)); 202if(readlen == -1) 203die(_("unable to read%s"),oid_to_hex(oid)); 204 205 stream.next_in = ibuf; 206 stream.avail_in = readlen; 207while((stream.avail_in || readlen ==0) && 208(zret == Z_OK || zret == Z_BUF_ERROR)) { 209 stream.next_out = obuf; 210 stream.avail_out =sizeof(obuf); 211 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 212hashwrite(f, obuf, stream.next_out - obuf); 213 olen += stream.next_out - obuf; 214} 215if(stream.avail_in) 216die(_("deflate error (%d)"), zret); 217if(readlen ==0) { 218if(zret != Z_STREAM_END) 219die(_("deflate error (%d)"), zret); 220break; 221} 222} 223git_deflate_end(&stream); 224return olen; 225} 226 227/* 228 * we are going to reuse the existing object data as is. make 229 * sure it is not corrupt. 230 */ 231static intcheck_pack_inflate(struct packed_git *p, 232struct pack_window **w_curs, 233 off_t offset, 234 off_t len, 235unsigned long expect) 236{ 237 git_zstream stream; 238unsigned char fakebuf[4096], *in; 239int st; 240 241memset(&stream,0,sizeof(stream)); 242git_inflate_init(&stream); 243do{ 244 in =use_pack(p, w_curs, offset, &stream.avail_in); 245 stream.next_in = in; 246 stream.next_out = fakebuf; 247 stream.avail_out =sizeof(fakebuf); 248 st =git_inflate(&stream, Z_FINISH); 249 offset += stream.next_in - in; 250}while(st == Z_OK || st == Z_BUF_ERROR); 251git_inflate_end(&stream); 252return(st == Z_STREAM_END && 253 stream.total_out == expect && 254 stream.total_in == len) ?0: -1; 255} 256 257static voidcopy_pack_data(struct hashfile *f, 258struct packed_git *p, 259struct pack_window **w_curs, 260 off_t offset, 261 off_t len) 262{ 263unsigned char*in; 264unsigned long avail; 265 266while(len) { 267 in =use_pack(p, w_curs, offset, &avail); 268if(avail > len) 269 avail = (unsigned long)len; 270hashwrite(f, in, avail); 271 offset += avail; 272 len -= avail; 273} 274} 275 276/* Return 0 if we will bust the pack-size limit */ 277static unsigned longwrite_no_reuse_object(struct hashfile *f,struct object_entry *entry, 278unsigned long limit,int usable_delta) 279{ 280unsigned long size, datalen; 281unsigned char header[MAX_PACK_OBJECT_HEADER], 282 dheader[MAX_PACK_OBJECT_HEADER]; 283unsigned hdrlen; 284enum object_type type; 285void*buf; 286struct git_istream *st = NULL; 287const unsigned hashsz = the_hash_algo->rawsz; 288 289if(!usable_delta) { 290if(oe_type(entry) == OBJ_BLOB && 291oe_size_greater_than(&to_pack, entry, big_file_threshold) && 292(st =open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) 293 buf = NULL; 294else{ 295 buf =read_object_file(&entry->idx.oid, &type, &size); 296if(!buf) 297die(_("unable to read%s"), 298oid_to_hex(&entry->idx.oid)); 299} 300/* 301 * make sure no cached delta data remains from a 302 * previous attempt before a pack split occurred. 303 */ 304FREE_AND_NULL(entry->delta_data); 305 entry->z_delta_size =0; 306}else if(entry->delta_data) { 307 size =DELTA_SIZE(entry); 308 buf = entry->delta_data; 309 entry->delta_data = NULL; 310 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 311 OBJ_OFS_DELTA : OBJ_REF_DELTA; 312}else{ 313 buf =get_delta(entry); 314 size =DELTA_SIZE(entry); 315 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 316 OBJ_OFS_DELTA : OBJ_REF_DELTA; 317} 318 319if(st)/* large blob case, just assume we don't compress well */ 320 datalen = size; 321else if(entry->z_delta_size) 322 datalen = entry->z_delta_size; 323else 324 datalen =do_compress(&buf, size); 325 326/* 327 * The object header is a byte of 'type' followed by zero or 328 * more bytes of length. 329 */ 330 hdrlen =encode_in_pack_object_header(header,sizeof(header), 331 type, size); 332 333if(type == OBJ_OFS_DELTA) { 334/* 335 * Deltas with relative base contain an additional 336 * encoding of the relative offset for the delta 337 * base from this object's position in the pack. 338 */ 339 off_t ofs = entry->idx.offset -DELTA(entry)->idx.offset; 340unsigned pos =sizeof(dheader) -1; 341 dheader[pos] = ofs &127; 342while(ofs >>=7) 343 dheader[--pos] =128| (--ofs &127); 344if(limit && hdrlen +sizeof(dheader) - pos + datalen + hashsz >= limit) { 345if(st) 346close_istream(st); 347free(buf); 348return0; 349} 350hashwrite(f, header, hdrlen); 351hashwrite(f, dheader + pos,sizeof(dheader) - pos); 352 hdrlen +=sizeof(dheader) - pos; 353}else if(type == OBJ_REF_DELTA) { 354/* 355 * Deltas with a base reference contain 356 * additional bytes for the base object ID. 357 */ 358if(limit && hdrlen + hashsz + datalen + hashsz >= limit) { 359if(st) 360close_istream(st); 361free(buf); 362return0; 363} 364hashwrite(f, header, hdrlen); 365hashwrite(f,DELTA(entry)->idx.oid.hash, hashsz); 366 hdrlen += hashsz; 367}else{ 368if(limit && hdrlen + datalen + hashsz >= limit) { 369if(st) 370close_istream(st); 371free(buf); 372return0; 373} 374hashwrite(f, header, hdrlen); 375} 376if(st) { 377 datalen =write_large_blob_data(st, f, &entry->idx.oid); 378close_istream(st); 379}else{ 380hashwrite(f, buf, datalen); 381free(buf); 382} 383 384return hdrlen + datalen; 385} 386 387/* Return 0 if we will bust the pack-size limit */ 388static off_t write_reuse_object(struct hashfile *f,struct object_entry *entry, 389unsigned long limit,int usable_delta) 390{ 391struct packed_git *p =IN_PACK(entry); 392struct pack_window *w_curs = NULL; 393struct revindex_entry *revidx; 394 off_t offset; 395enum object_type type =oe_type(entry); 396 off_t datalen; 397unsigned char header[MAX_PACK_OBJECT_HEADER], 398 dheader[MAX_PACK_OBJECT_HEADER]; 399unsigned hdrlen; 400const unsigned hashsz = the_hash_algo->rawsz; 401unsigned long entry_size =SIZE(entry); 402 403if(DELTA(entry)) 404 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 405 OBJ_OFS_DELTA : OBJ_REF_DELTA; 406 hdrlen =encode_in_pack_object_header(header,sizeof(header), 407 type, entry_size); 408 409 offset = entry->in_pack_offset; 410 revidx =find_pack_revindex(p, offset); 411 datalen = revidx[1].offset - offset; 412if(!pack_to_stdout && p->index_version >1&& 413check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 414error("bad packed object CRC for%s", 415oid_to_hex(&entry->idx.oid)); 416unuse_pack(&w_curs); 417returnwrite_no_reuse_object(f, entry, limit, usable_delta); 418} 419 420 offset += entry->in_pack_header_size; 421 datalen -= entry->in_pack_header_size; 422 423if(!pack_to_stdout && p->index_version ==1&& 424check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) { 425error("corrupt packed object for%s", 426oid_to_hex(&entry->idx.oid)); 427unuse_pack(&w_curs); 428returnwrite_no_reuse_object(f, entry, limit, usable_delta); 429} 430 431if(type == OBJ_OFS_DELTA) { 432 off_t ofs = entry->idx.offset -DELTA(entry)->idx.offset; 433unsigned pos =sizeof(dheader) -1; 434 dheader[pos] = ofs &127; 435while(ofs >>=7) 436 dheader[--pos] =128| (--ofs &127); 437if(limit && hdrlen +sizeof(dheader) - pos + datalen + hashsz >= limit) { 438unuse_pack(&w_curs); 439return0; 440} 441hashwrite(f, header, hdrlen); 442hashwrite(f, dheader + pos,sizeof(dheader) - pos); 443 hdrlen +=sizeof(dheader) - pos; 444 reused_delta++; 445}else if(type == OBJ_REF_DELTA) { 446if(limit && hdrlen + hashsz + datalen + hashsz >= limit) { 447unuse_pack(&w_curs); 448return0; 449} 450hashwrite(f, header, hdrlen); 451hashwrite(f,DELTA(entry)->idx.oid.hash, hashsz); 452 hdrlen += hashsz; 453 reused_delta++; 454}else{ 455if(limit && hdrlen + datalen + hashsz >= limit) { 456unuse_pack(&w_curs); 457return0; 458} 459hashwrite(f, header, hdrlen); 460} 461copy_pack_data(f, p, &w_curs, offset, datalen); 462unuse_pack(&w_curs); 463 reused++; 464return hdrlen + datalen; 465} 466 467/* Return 0 if we will bust the pack-size limit */ 468static off_t write_object(struct hashfile *f, 469struct object_entry *entry, 470 off_t write_offset) 471{ 472unsigned long limit; 473 off_t len; 474int usable_delta, to_reuse; 475 476if(!pack_to_stdout) 477crc32_begin(f); 478 479/* apply size limit if limited packsize and not first object */ 480if(!pack_size_limit || !nr_written) 481 limit =0; 482else if(pack_size_limit <= write_offset) 483/* 484 * the earlier object did not fit the limit; avoid 485 * mistaking this with unlimited (i.e. limit = 0). 486 */ 487 limit =1; 488else 489 limit = pack_size_limit - write_offset; 490 491if(!DELTA(entry)) 492 usable_delta =0;/* no delta */ 493else if(!pack_size_limit) 494 usable_delta =1;/* unlimited packfile */ 495else if(DELTA(entry)->idx.offset == (off_t)-1) 496 usable_delta =0;/* base was written to another pack */ 497else if(DELTA(entry)->idx.offset) 498 usable_delta =1;/* base already exists in this pack */ 499else 500 usable_delta =0;/* base could end up in another pack */ 501 502if(!reuse_object) 503 to_reuse =0;/* explicit */ 504else if(!IN_PACK(entry)) 505 to_reuse =0;/* can't reuse what we don't have */ 506else if(oe_type(entry) == OBJ_REF_DELTA || 507oe_type(entry) == OBJ_OFS_DELTA) 508/* check_object() decided it for us ... */ 509 to_reuse = usable_delta; 510/* ... but pack split may override that */ 511else if(oe_type(entry) != entry->in_pack_type) 512 to_reuse =0;/* pack has delta which is unusable */ 513else if(DELTA(entry)) 514 to_reuse =0;/* we want to pack afresh */ 515else 516 to_reuse =1;/* we have it in-pack undeltified, 517 * and we do not need to deltify it. 518 */ 519 520if(!to_reuse) 521 len =write_no_reuse_object(f, entry, limit, usable_delta); 522else 523 len =write_reuse_object(f, entry, limit, usable_delta); 524if(!len) 525return0; 526 527if(usable_delta) 528 written_delta++; 529 written++; 530if(!pack_to_stdout) 531 entry->idx.crc32 =crc32_end(f); 532return len; 533} 534 535enum write_one_status { 536 WRITE_ONE_SKIP = -1,/* already written */ 537 WRITE_ONE_BREAK =0,/* writing this will bust the limit; not written */ 538 WRITE_ONE_WRITTEN =1,/* normal */ 539 WRITE_ONE_RECURSIVE =2/* already scheduled to be written */ 540}; 541 542static enum write_one_status write_one(struct hashfile *f, 543struct object_entry *e, 544 off_t *offset) 545{ 546 off_t size; 547int recursing; 548 549/* 550 * we set offset to 1 (which is an impossible value) to mark 551 * the fact that this object is involved in "write its base 552 * first before writing a deltified object" recursion. 553 */ 554 recursing = (e->idx.offset ==1); 555if(recursing) { 556warning("recursive delta detected for object%s", 557oid_to_hex(&e->idx.oid)); 558return WRITE_ONE_RECURSIVE; 559}else if(e->idx.offset || e->preferred_base) { 560/* offset is non zero if object is written already. */ 561return WRITE_ONE_SKIP; 562} 563 564/* if we are deltified, write out base object first. */ 565if(DELTA(e)) { 566 e->idx.offset =1;/* now recurse */ 567switch(write_one(f,DELTA(e), offset)) { 568case WRITE_ONE_RECURSIVE: 569/* we cannot depend on this one */ 570SET_DELTA(e, NULL); 571break; 572default: 573break; 574case WRITE_ONE_BREAK: 575 e->idx.offset = recursing; 576return WRITE_ONE_BREAK; 577} 578} 579 580 e->idx.offset = *offset; 581 size =write_object(f, e, *offset); 582if(!size) { 583 e->idx.offset = recursing; 584return WRITE_ONE_BREAK; 585} 586 written_list[nr_written++] = &e->idx; 587 588/* make sure off_t is sufficiently large not to wrap */ 589if(signed_add_overflows(*offset, size)) 590die("pack too large for current definition of off_t"); 591*offset += size; 592return WRITE_ONE_WRITTEN; 593} 594 595static intmark_tagged(const char*path,const struct object_id *oid,int flag, 596void*cb_data) 597{ 598struct object_id peeled; 599struct object_entry *entry =packlist_find(&to_pack, oid->hash, NULL); 600 601if(entry) 602 entry->tagged =1; 603if(!peel_ref(path, &peeled)) { 604 entry =packlist_find(&to_pack, peeled.hash, NULL); 605if(entry) 606 entry->tagged =1; 607} 608return0; 609} 610 611staticinlinevoidadd_to_write_order(struct object_entry **wo, 612unsigned int*endp, 613struct object_entry *e) 614{ 615if(e->filled) 616return; 617 wo[(*endp)++] = e; 618 e->filled =1; 619} 620 621static voidadd_descendants_to_write_order(struct object_entry **wo, 622unsigned int*endp, 623struct object_entry *e) 624{ 625int add_to_order =1; 626while(e) { 627if(add_to_order) { 628struct object_entry *s; 629/* add this node... */ 630add_to_write_order(wo, endp, e); 631/* all its siblings... */ 632for(s =DELTA_SIBLING(e); s; s =DELTA_SIBLING(s)) { 633add_to_write_order(wo, endp, s); 634} 635} 636/* drop down a level to add left subtree nodes if possible */ 637if(DELTA_CHILD(e)) { 638 add_to_order =1; 639 e =DELTA_CHILD(e); 640}else{ 641 add_to_order =0; 642/* our sibling might have some children, it is next */ 643if(DELTA_SIBLING(e)) { 644 e =DELTA_SIBLING(e); 645continue; 646} 647/* go back to our parent node */ 648 e =DELTA(e); 649while(e && !DELTA_SIBLING(e)) { 650/* we're on the right side of a subtree, keep 651 * going up until we can go right again */ 652 e =DELTA(e); 653} 654if(!e) { 655/* done- we hit our original root node */ 656return; 657} 658/* pass it off to sibling at this level */ 659 e =DELTA_SIBLING(e); 660} 661}; 662} 663 664static voidadd_family_to_write_order(struct object_entry **wo, 665unsigned int*endp, 666struct object_entry *e) 667{ 668struct object_entry *root; 669 670for(root = e;DELTA(root); root =DELTA(root)) 671;/* nothing */ 672add_descendants_to_write_order(wo, endp, root); 673} 674 675static struct object_entry **compute_write_order(void) 676{ 677unsigned int i, wo_end, last_untagged; 678 679struct object_entry **wo; 680struct object_entry *objects = to_pack.objects; 681 682for(i =0; i < to_pack.nr_objects; i++) { 683 objects[i].tagged =0; 684 objects[i].filled =0; 685SET_DELTA_CHILD(&objects[i], NULL); 686SET_DELTA_SIBLING(&objects[i], NULL); 687} 688 689/* 690 * Fully connect delta_child/delta_sibling network. 691 * Make sure delta_sibling is sorted in the original 692 * recency order. 693 */ 694for(i = to_pack.nr_objects; i >0;) { 695struct object_entry *e = &objects[--i]; 696if(!DELTA(e)) 697continue; 698/* Mark me as the first child */ 699 e->delta_sibling_idx =DELTA(e)->delta_child_idx; 700SET_DELTA_CHILD(DELTA(e), e); 701} 702 703/* 704 * Mark objects that are at the tip of tags. 705 */ 706for_each_tag_ref(mark_tagged, NULL); 707 708/* 709 * Give the objects in the original recency order until 710 * we see a tagged tip. 711 */ 712ALLOC_ARRAY(wo, to_pack.nr_objects); 713for(i = wo_end =0; i < to_pack.nr_objects; i++) { 714if(objects[i].tagged) 715break; 716add_to_write_order(wo, &wo_end, &objects[i]); 717} 718 last_untagged = i; 719 720/* 721 * Then fill all the tagged tips. 722 */ 723for(; i < to_pack.nr_objects; i++) { 724if(objects[i].tagged) 725add_to_write_order(wo, &wo_end, &objects[i]); 726} 727 728/* 729 * And then all remaining commits and tags. 730 */ 731for(i = last_untagged; i < to_pack.nr_objects; i++) { 732if(oe_type(&objects[i]) != OBJ_COMMIT && 733oe_type(&objects[i]) != OBJ_TAG) 734continue; 735add_to_write_order(wo, &wo_end, &objects[i]); 736} 737 738/* 739 * And then all the trees. 740 */ 741for(i = last_untagged; i < to_pack.nr_objects; i++) { 742if(oe_type(&objects[i]) != OBJ_TREE) 743continue; 744add_to_write_order(wo, &wo_end, &objects[i]); 745} 746 747/* 748 * Finally all the rest in really tight order 749 */ 750for(i = last_untagged; i < to_pack.nr_objects; i++) { 751if(!objects[i].filled) 752add_family_to_write_order(wo, &wo_end, &objects[i]); 753} 754 755if(wo_end != to_pack.nr_objects) 756die("ordered%uobjects, expected %"PRIu32, wo_end, to_pack.nr_objects); 757 758return wo; 759} 760 761static off_t write_reused_pack(struct hashfile *f) 762{ 763unsigned char buffer[8192]; 764 off_t to_write, total; 765int fd; 766 767if(!is_pack_valid(reuse_packfile)) 768die("packfile is invalid:%s", reuse_packfile->pack_name); 769 770 fd =git_open(reuse_packfile->pack_name); 771if(fd <0) 772die_errno("unable to open packfile for reuse:%s", 773 reuse_packfile->pack_name); 774 775if(lseek(fd,sizeof(struct pack_header), SEEK_SET) == -1) 776die_errno("unable to seek in reused packfile"); 777 778if(reuse_packfile_offset <0) 779 reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz; 780 781 total = to_write = reuse_packfile_offset -sizeof(struct pack_header); 782 783while(to_write) { 784int read_pack =xread(fd, buffer,sizeof(buffer)); 785 786if(read_pack <=0) 787die_errno("unable to read from reused packfile"); 788 789if(read_pack > to_write) 790 read_pack = to_write; 791 792hashwrite(f, buffer, read_pack); 793 to_write -= read_pack; 794 795/* 796 * We don't know the actual number of objects written, 797 * only how many bytes written, how many bytes total, and 798 * how many objects total. So we can fake it by pretending all 799 * objects we are writing are the same size. This gives us a 800 * smooth progress meter, and at the end it matches the true 801 * answer. 802 */ 803 written = reuse_packfile_objects * 804(((double)(total - to_write)) / total); 805display_progress(progress_state, written); 806} 807 808close(fd); 809 written = reuse_packfile_objects; 810display_progress(progress_state, written); 811return reuse_packfile_offset -sizeof(struct pack_header); 812} 813 814static const char no_split_warning[] =N_( 815"disabling bitmap writing, packs are split due to pack.packSizeLimit" 816); 817 818static voidwrite_pack_file(void) 819{ 820uint32_t i =0, j; 821struct hashfile *f; 822 off_t offset; 823uint32_t nr_remaining = nr_result; 824time_t last_mtime =0; 825struct object_entry **write_order; 826 827if(progress > pack_to_stdout) 828 progress_state =start_progress(_("Writing objects"), nr_result); 829ALLOC_ARRAY(written_list, to_pack.nr_objects); 830 write_order =compute_write_order(); 831 832do{ 833struct object_id oid; 834char*pack_tmp_name = NULL; 835 836if(pack_to_stdout) 837 f =hashfd_throughput(1,"<stdout>", progress_state); 838else 839 f =create_tmp_packfile(&pack_tmp_name); 840 841 offset =write_pack_header(f, nr_remaining); 842 843if(reuse_packfile) { 844 off_t packfile_size; 845assert(pack_to_stdout); 846 847 packfile_size =write_reused_pack(f); 848 offset += packfile_size; 849} 850 851 nr_written =0; 852for(; i < to_pack.nr_objects; i++) { 853struct object_entry *e = write_order[i]; 854if(write_one(f, e, &offset) == WRITE_ONE_BREAK) 855break; 856display_progress(progress_state, written); 857} 858 859/* 860 * Did we write the wrong # entries in the header? 861 * If so, rewrite it like in fast-import 862 */ 863if(pack_to_stdout) { 864finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); 865}else if(nr_written == nr_remaining) { 866finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); 867}else{ 868int fd =finalize_hashfile(f, oid.hash,0); 869fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, 870 nr_written, oid.hash, offset); 871close(fd); 872if(write_bitmap_index) { 873warning(_(no_split_warning)); 874 write_bitmap_index =0; 875} 876} 877 878if(!pack_to_stdout) { 879struct stat st; 880struct strbuf tmpname = STRBUF_INIT; 881 882/* 883 * Packs are runtime accessed in their mtime 884 * order since newer packs are more likely to contain 885 * younger objects. So if we are creating multiple 886 * packs then we should modify the mtime of later ones 887 * to preserve this property. 888 */ 889if(stat(pack_tmp_name, &st) <0) { 890warning_errno("failed to stat%s", pack_tmp_name); 891}else if(!last_mtime) { 892 last_mtime = st.st_mtime; 893}else{ 894struct utimbuf utb; 895 utb.actime = st.st_atime; 896 utb.modtime = --last_mtime; 897if(utime(pack_tmp_name, &utb) <0) 898warning_errno("failed utime() on%s", pack_tmp_name); 899} 900 901strbuf_addf(&tmpname,"%s-", base_name); 902 903if(write_bitmap_index) { 904bitmap_writer_set_checksum(oid.hash); 905bitmap_writer_build_type_index( 906&to_pack, written_list, nr_written); 907} 908 909finish_tmp_packfile(&tmpname, pack_tmp_name, 910 written_list, nr_written, 911&pack_idx_opts, oid.hash); 912 913if(write_bitmap_index) { 914strbuf_addf(&tmpname,"%s.bitmap",oid_to_hex(&oid)); 915 916stop_progress(&progress_state); 917 918bitmap_writer_show_progress(progress); 919bitmap_writer_reuse_bitmaps(&to_pack); 920bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 921bitmap_writer_build(&to_pack); 922bitmap_writer_finish(written_list, nr_written, 923 tmpname.buf, write_bitmap_options); 924 write_bitmap_index =0; 925} 926 927strbuf_release(&tmpname); 928free(pack_tmp_name); 929puts(oid_to_hex(&oid)); 930} 931 932/* mark written objects as written to previous pack */ 933for(j =0; j < nr_written; j++) { 934 written_list[j]->offset = (off_t)-1; 935} 936 nr_remaining -= nr_written; 937}while(nr_remaining && i < to_pack.nr_objects); 938 939free(written_list); 940free(write_order); 941stop_progress(&progress_state); 942if(written != nr_result) 943die("wrote %"PRIu32" objects while expecting %"PRIu32, 944 written, nr_result); 945} 946 947static intno_try_delta(const char*path) 948{ 949static struct attr_check *check; 950 951if(!check) 952 check =attr_check_initl("delta", NULL); 953if(git_check_attr(path, check)) 954return0; 955if(ATTR_FALSE(check->items[0].value)) 956return1; 957return0; 958} 959 960/* 961 * When adding an object, check whether we have already added it 962 * to our packing list. If so, we can skip. However, if we are 963 * being asked to excludei t, but the previous mention was to include 964 * it, make sure to adjust its flags and tweak our numbers accordingly. 965 * 966 * As an optimization, we pass out the index position where we would have 967 * found the item, since that saves us from having to look it up again a 968 * few lines later when we want to add the new entry. 969 */ 970static inthave_duplicate_entry(const struct object_id *oid, 971int exclude, 972uint32_t*index_pos) 973{ 974struct object_entry *entry; 975 976 entry =packlist_find(&to_pack, oid->hash, index_pos); 977if(!entry) 978return0; 979 980if(exclude) { 981if(!entry->preferred_base) 982 nr_result--; 983 entry->preferred_base =1; 984} 985 986return1; 987} 988 989static intwant_found_object(int exclude,struct packed_git *p) 990{ 991if(exclude) 992return1; 993if(incremental) 994return0; 995 996/* 997 * When asked to do --local (do not include an object that appears in a 998 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 999 * an object that appears in a pack marked with .keep), finding a pack1000 * that matches the criteria is sufficient for us to decide to omit it.1001 * However, even if this pack does not satisfy the criteria, we need to1002 * make sure no copy of this object appears in _any_ pack that makes us1003 * to omit the object, so we need to check all the packs.1004 *1005 * We can however first check whether these options can possible matter;1006 * if they do not matter we know we want the object in generated pack.1007 * Otherwise, we signal "-1" at the end to tell the caller that we do1008 * not know either way, and it needs to check more packs.1009 */1010if(!ignore_packed_keep_on_disk &&1011!ignore_packed_keep_in_core &&1012(!local || !have_non_local_packs))1013return1;10141015if(local && !p->pack_local)1016return0;1017if(p->pack_local &&1018((ignore_packed_keep_on_disk && p->pack_keep) ||1019(ignore_packed_keep_in_core && p->pack_keep_in_core)))1020return0;10211022/* we don't know yet; keep looking for more packs */1023return-1;1024}10251026/*1027 * Check whether we want the object in the pack (e.g., we do not want1028 * objects found in non-local stores if the "--local" option was used).1029 *1030 * If the caller already knows an existing pack it wants to take the object1031 * from, that is passed in *found_pack and *found_offset; otherwise this1032 * function finds if there is any pack that has the object and returns the pack1033 * and its offset in these variables.1034 */1035static intwant_object_in_pack(const struct object_id *oid,1036int exclude,1037struct packed_git **found_pack,1038 off_t *found_offset)1039{1040int want;1041struct list_head *pos;10421043if(!exclude && local &&has_loose_object_nonlocal(oid))1044return0;10451046/*1047 * If we already know the pack object lives in, start checks from that1048 * pack - in the usual case when neither --local was given nor .keep files1049 * are present we will determine the answer right now.1050 */1051if(*found_pack) {1052 want =want_found_object(exclude, *found_pack);1053if(want != -1)1054return want;1055}1056list_for_each(pos,get_packed_git_mru(the_repository)) {1057struct packed_git *p =list_entry(pos,struct packed_git, mru);1058 off_t offset;10591060if(p == *found_pack)1061 offset = *found_offset;1062else1063 offset =find_pack_entry_one(oid->hash, p);10641065if(offset) {1066if(!*found_pack) {1067if(!is_pack_valid(p))1068continue;1069*found_offset = offset;1070*found_pack = p;1071}1072 want =want_found_object(exclude, p);1073if(!exclude && want >0)1074list_move(&p->mru,1075get_packed_git_mru(the_repository));1076if(want != -1)1077return want;1078}1079}10801081return1;1082}10831084static voidcreate_object_entry(const struct object_id *oid,1085enum object_type type,1086uint32_t hash,1087int exclude,1088int no_try_delta,1089uint32_t index_pos,1090struct packed_git *found_pack,1091 off_t found_offset)1092{1093struct object_entry *entry;10941095 entry =packlist_alloc(&to_pack, oid->hash, index_pos);1096 entry->hash = hash;1097oe_set_type(entry, type);1098if(exclude)1099 entry->preferred_base =1;1100else1101 nr_result++;1102if(found_pack) {1103oe_set_in_pack(&to_pack, entry, found_pack);1104 entry->in_pack_offset = found_offset;1105}11061107 entry->no_try_delta = no_try_delta;1108}11091110static const char no_closure_warning[] =N_(1111"disabling bitmap writing, as some objects are not being packed"1112);11131114static intadd_object_entry(const struct object_id *oid,enum object_type type,1115const char*name,int exclude)1116{1117struct packed_git *found_pack = NULL;1118 off_t found_offset =0;1119uint32_t index_pos;11201121display_progress(progress_state, ++nr_seen);11221123if(have_duplicate_entry(oid, exclude, &index_pos))1124return0;11251126if(!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {1127/* The pack is missing an object, so it will not have closure */1128if(write_bitmap_index) {1129warning(_(no_closure_warning));1130 write_bitmap_index =0;1131}1132return0;1133}11341135create_object_entry(oid, type,pack_name_hash(name),1136 exclude, name &&no_try_delta(name),1137 index_pos, found_pack, found_offset);1138return1;1139}11401141static intadd_object_entry_from_bitmap(const struct object_id *oid,1142enum object_type type,1143int flags,uint32_t name_hash,1144struct packed_git *pack, off_t offset)1145{1146uint32_t index_pos;11471148display_progress(progress_state, ++nr_seen);11491150if(have_duplicate_entry(oid,0, &index_pos))1151return0;11521153if(!want_object_in_pack(oid,0, &pack, &offset))1154return0;11551156create_object_entry(oid, type, name_hash,0,0, index_pos, pack, offset);1157return1;1158}11591160struct pbase_tree_cache {1161struct object_id oid;1162int ref;1163int temporary;1164void*tree_data;1165unsigned long tree_size;1166};11671168static struct pbase_tree_cache *(pbase_tree_cache[256]);1169static intpbase_tree_cache_ix(const struct object_id *oid)1170{1171return oid->hash[0] %ARRAY_SIZE(pbase_tree_cache);1172}1173static intpbase_tree_cache_ix_incr(int ix)1174{1175return(ix+1) %ARRAY_SIZE(pbase_tree_cache);1176}11771178static struct pbase_tree {1179struct pbase_tree *next;1180/* This is a phony "cache" entry; we are not1181 * going to evict it or find it through _get()1182 * mechanism -- this is for the toplevel node that1183 * would almost always change with any commit.1184 */1185struct pbase_tree_cache pcache;1186} *pbase_tree;11871188static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)1189{1190struct pbase_tree_cache *ent, *nent;1191void*data;1192unsigned long size;1193enum object_type type;1194int neigh;1195int my_ix =pbase_tree_cache_ix(oid);1196int available_ix = -1;11971198/* pbase-tree-cache acts as a limited hashtable.1199 * your object will be found at your index or within a few1200 * slots after that slot if it is cached.1201 */1202for(neigh =0; neigh <8; neigh++) {1203 ent = pbase_tree_cache[my_ix];1204if(ent && !oidcmp(&ent->oid, oid)) {1205 ent->ref++;1206return ent;1207}1208else if(((available_ix <0) && (!ent || !ent->ref)) ||1209((0<= available_ix) &&1210(!ent && pbase_tree_cache[available_ix])))1211 available_ix = my_ix;1212if(!ent)1213break;1214 my_ix =pbase_tree_cache_ix_incr(my_ix);1215}12161217/* Did not find one. Either we got a bogus request or1218 * we need to read and perhaps cache.1219 */1220 data =read_object_file(oid, &type, &size);1221if(!data)1222return NULL;1223if(type != OBJ_TREE) {1224free(data);1225return NULL;1226}12271228/* We need to either cache or return a throwaway copy */12291230if(available_ix <0)1231 ent = NULL;1232else{1233 ent = pbase_tree_cache[available_ix];1234 my_ix = available_ix;1235}12361237if(!ent) {1238 nent =xmalloc(sizeof(*nent));1239 nent->temporary = (available_ix <0);1240}1241else{1242/* evict and reuse */1243free(ent->tree_data);1244 nent = ent;1245}1246oidcpy(&nent->oid, oid);1247 nent->tree_data = data;1248 nent->tree_size = size;1249 nent->ref =1;1250if(!nent->temporary)1251 pbase_tree_cache[my_ix] = nent;1252return nent;1253}12541255static voidpbase_tree_put(struct pbase_tree_cache *cache)1256{1257if(!cache->temporary) {1258 cache->ref--;1259return;1260}1261free(cache->tree_data);1262free(cache);1263}12641265static intname_cmp_len(const char*name)1266{1267int i;1268for(i =0; name[i] && name[i] !='\n'&& name[i] !='/'; i++)1269;1270return i;1271}12721273static voidadd_pbase_object(struct tree_desc *tree,1274const char*name,1275int cmplen,1276const char*fullname)1277{1278struct name_entry entry;1279int cmp;12801281while(tree_entry(tree,&entry)) {1282if(S_ISGITLINK(entry.mode))1283continue;1284 cmp =tree_entry_len(&entry) != cmplen ?1:1285memcmp(name, entry.path, cmplen);1286if(cmp >0)1287continue;1288if(cmp <0)1289return;1290if(name[cmplen] !='/') {1291add_object_entry(entry.oid,1292object_type(entry.mode),1293 fullname,1);1294return;1295}1296if(S_ISDIR(entry.mode)) {1297struct tree_desc sub;1298struct pbase_tree_cache *tree;1299const char*down = name+cmplen+1;1300int downlen =name_cmp_len(down);13011302 tree =pbase_tree_get(entry.oid);1303if(!tree)1304return;1305init_tree_desc(&sub, tree->tree_data, tree->tree_size);13061307add_pbase_object(&sub, down, downlen, fullname);1308pbase_tree_put(tree);1309}1310}1311}13121313static unsigned*done_pbase_paths;1314static int done_pbase_paths_num;1315static int done_pbase_paths_alloc;1316static intdone_pbase_path_pos(unsigned hash)1317{1318int lo =0;1319int hi = done_pbase_paths_num;1320while(lo < hi) {1321int mi = lo + (hi - lo) /2;1322if(done_pbase_paths[mi] == hash)1323return mi;1324if(done_pbase_paths[mi] < hash)1325 hi = mi;1326else1327 lo = mi +1;1328}1329return-lo-1;1330}13311332static intcheck_pbase_path(unsigned hash)1333{1334int pos =done_pbase_path_pos(hash);1335if(0<= pos)1336return1;1337 pos = -pos -1;1338ALLOC_GROW(done_pbase_paths,1339 done_pbase_paths_num +1,1340 done_pbase_paths_alloc);1341 done_pbase_paths_num++;1342if(pos < done_pbase_paths_num)1343MOVE_ARRAY(done_pbase_paths + pos +1, done_pbase_paths + pos,1344 done_pbase_paths_num - pos -1);1345 done_pbase_paths[pos] = hash;1346return0;1347}13481349static voidadd_preferred_base_object(const char*name)1350{1351struct pbase_tree *it;1352int cmplen;1353unsigned hash =pack_name_hash(name);13541355if(!num_preferred_base ||check_pbase_path(hash))1356return;13571358 cmplen =name_cmp_len(name);1359for(it = pbase_tree; it; it = it->next) {1360if(cmplen ==0) {1361add_object_entry(&it->pcache.oid, OBJ_TREE, NULL,1);1362}1363else{1364struct tree_desc tree;1365init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1366add_pbase_object(&tree, name, cmplen, name);1367}1368}1369}13701371static voidadd_preferred_base(struct object_id *oid)1372{1373struct pbase_tree *it;1374void*data;1375unsigned long size;1376struct object_id tree_oid;13771378if(window <= num_preferred_base++)1379return;13801381 data =read_object_with_reference(oid, tree_type, &size, &tree_oid);1382if(!data)1383return;13841385for(it = pbase_tree; it; it = it->next) {1386if(!oidcmp(&it->pcache.oid, &tree_oid)) {1387free(data);1388return;1389}1390}13911392 it =xcalloc(1,sizeof(*it));1393 it->next = pbase_tree;1394 pbase_tree = it;13951396oidcpy(&it->pcache.oid, &tree_oid);1397 it->pcache.tree_data = data;1398 it->pcache.tree_size = size;1399}14001401static voidcleanup_preferred_base(void)1402{1403struct pbase_tree *it;1404unsigned i;14051406 it = pbase_tree;1407 pbase_tree = NULL;1408while(it) {1409struct pbase_tree *tmp = it;1410 it = tmp->next;1411free(tmp->pcache.tree_data);1412free(tmp);1413}14141415for(i =0; i <ARRAY_SIZE(pbase_tree_cache); i++) {1416if(!pbase_tree_cache[i])1417continue;1418free(pbase_tree_cache[i]->tree_data);1419FREE_AND_NULL(pbase_tree_cache[i]);1420}14211422FREE_AND_NULL(done_pbase_paths);1423 done_pbase_paths_num = done_pbase_paths_alloc =0;1424}14251426static voidcheck_object(struct object_entry *entry)1427{1428unsigned long canonical_size;14291430if(IN_PACK(entry)) {1431struct packed_git *p =IN_PACK(entry);1432struct pack_window *w_curs = NULL;1433const unsigned char*base_ref = NULL;1434struct object_entry *base_entry;1435unsigned long used, used_0;1436unsigned long avail;1437 off_t ofs;1438unsigned char*buf, c;1439enum object_type type;1440unsigned long in_pack_size;14411442 buf =use_pack(p, &w_curs, entry->in_pack_offset, &avail);14431444/*1445 * We want in_pack_type even if we do not reuse delta1446 * since non-delta representations could still be reused.1447 */1448 used =unpack_object_header_buffer(buf, avail,1449&type,1450&in_pack_size);1451if(used ==0)1452goto give_up;14531454if(type <0)1455BUG("invalid type%d", type);1456 entry->in_pack_type = type;14571458/*1459 * Determine if this is a delta and if so whether we can1460 * reuse it or not. Otherwise let's find out as cheaply as1461 * possible what the actual type and size for this object is.1462 */1463switch(entry->in_pack_type) {1464default:1465/* Not a delta hence we've already got all we need. */1466oe_set_type(entry, entry->in_pack_type);1467SET_SIZE(entry, in_pack_size);1468 entry->in_pack_header_size = used;1469if(oe_type(entry) < OBJ_COMMIT ||oe_type(entry) > OBJ_BLOB)1470goto give_up;1471unuse_pack(&w_curs);1472return;1473case OBJ_REF_DELTA:1474if(reuse_delta && !entry->preferred_base)1475 base_ref =use_pack(p, &w_curs,1476 entry->in_pack_offset + used, NULL);1477 entry->in_pack_header_size = used + the_hash_algo->rawsz;1478break;1479case OBJ_OFS_DELTA:1480 buf =use_pack(p, &w_curs,1481 entry->in_pack_offset + used, NULL);1482 used_0 =0;1483 c = buf[used_0++];1484 ofs = c &127;1485while(c &128) {1486 ofs +=1;1487if(!ofs ||MSB(ofs,7)) {1488error("delta base offset overflow in pack for%s",1489oid_to_hex(&entry->idx.oid));1490goto give_up;1491}1492 c = buf[used_0++];1493 ofs = (ofs <<7) + (c &127);1494}1495 ofs = entry->in_pack_offset - ofs;1496if(ofs <=0|| ofs >= entry->in_pack_offset) {1497error("delta base offset out of bound for%s",1498oid_to_hex(&entry->idx.oid));1499goto give_up;1500}1501if(reuse_delta && !entry->preferred_base) {1502struct revindex_entry *revidx;1503 revidx =find_pack_revindex(p, ofs);1504if(!revidx)1505goto give_up;1506 base_ref =nth_packed_object_sha1(p, revidx->nr);1507}1508 entry->in_pack_header_size = used + used_0;1509break;1510}15111512if(base_ref && (base_entry =packlist_find(&to_pack, base_ref, NULL))) {1513/*1514 * If base_ref was set above that means we wish to1515 * reuse delta data, and we even found that base1516 * in the list of objects we want to pack. Goodie!1517 *1518 * Depth value does not matter - find_deltas() will1519 * never consider reused delta as the base object to1520 * deltify other objects against, in order to avoid1521 * circular deltas.1522 */1523oe_set_type(entry, entry->in_pack_type);1524SET_SIZE(entry, in_pack_size);/* delta size */1525SET_DELTA(entry, base_entry);1526SET_DELTA_SIZE(entry, in_pack_size);1527 entry->delta_sibling_idx = base_entry->delta_child_idx;1528SET_DELTA_CHILD(base_entry, entry);1529unuse_pack(&w_curs);1530return;1531}15321533if(oe_type(entry)) {1534 off_t delta_pos;15351536/*1537 * This must be a delta and we already know what the1538 * final object type is. Let's extract the actual1539 * object size from the delta header.1540 */1541 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;1542 canonical_size =get_size_from_delta(p, &w_curs, delta_pos);1543if(canonical_size ==0)1544goto give_up;1545SET_SIZE(entry, canonical_size);1546unuse_pack(&w_curs);1547return;1548}15491550/*1551 * No choice but to fall back to the recursive delta walk1552 * with sha1_object_info() to find about the object type1553 * at this point...1554 */1555 give_up:1556unuse_pack(&w_curs);1557}15581559oe_set_type(entry,1560oid_object_info(the_repository, &entry->idx.oid, &canonical_size));1561if(entry->type_valid) {1562SET_SIZE(entry, canonical_size);1563}else{1564/*1565 * Bad object type is checked in prepare_pack(). This is1566 * to permit a missing preferred base object to be ignored1567 * as a preferred base. Doing so can result in a larger1568 * pack file, but the transfer will still take place.1569 */1570}1571}15721573static intpack_offset_sort(const void*_a,const void*_b)1574{1575const struct object_entry *a = *(struct object_entry **)_a;1576const struct object_entry *b = *(struct object_entry **)_b;1577const struct packed_git *a_in_pack =IN_PACK(a);1578const struct packed_git *b_in_pack =IN_PACK(b);15791580/* avoid filesystem trashing with loose objects */1581if(!a_in_pack && !b_in_pack)1582returnoidcmp(&a->idx.oid, &b->idx.oid);15831584if(a_in_pack < b_in_pack)1585return-1;1586if(a_in_pack > b_in_pack)1587return1;1588return a->in_pack_offset < b->in_pack_offset ? -1:1589(a->in_pack_offset > b->in_pack_offset);1590}15911592/*1593 * Drop an on-disk delta we were planning to reuse. Naively, this would1594 * just involve blanking out the "delta" field, but we have to deal1595 * with some extra book-keeping:1596 *1597 * 1. Removing ourselves from the delta_sibling linked list.1598 *1599 * 2. Updating our size/type to the non-delta representation. These were1600 * either not recorded initially (size) or overwritten with the delta type1601 * (type) when check_object() decided to reuse the delta.1602 *1603 * 3. Resetting our delta depth, as we are now a base object.1604 */1605static voiddrop_reused_delta(struct object_entry *entry)1606{1607unsigned*idx = &to_pack.objects[entry->delta_idx -1].delta_child_idx;1608struct object_info oi = OBJECT_INFO_INIT;1609enum object_type type;1610unsigned long size;16111612while(*idx) {1613struct object_entry *oe = &to_pack.objects[*idx -1];16141615if(oe == entry)1616*idx = oe->delta_sibling_idx;1617else1618 idx = &oe->delta_sibling_idx;1619}1620SET_DELTA(entry, NULL);1621 entry->depth =0;16221623 oi.sizep = &size;1624 oi.typep = &type;1625if(packed_object_info(the_repository,IN_PACK(entry), entry->in_pack_offset, &oi) <0) {1626/*1627 * We failed to get the info from this pack for some reason;1628 * fall back to sha1_object_info, which may find another copy.1629 * And if that fails, the error will be recorded in oe_type(entry)1630 * and dealt with in prepare_pack().1631 */1632oe_set_type(entry,1633oid_object_info(the_repository, &entry->idx.oid, &size));1634}else{1635oe_set_type(entry, type);1636}1637SET_SIZE(entry, size);1638}16391640/*1641 * Follow the chain of deltas from this entry onward, throwing away any links1642 * that cause us to hit a cycle (as determined by the DFS state flags in1643 * the entries).1644 *1645 * We also detect too-long reused chains that would violate our --depth1646 * limit.1647 */1648static voidbreak_delta_chains(struct object_entry *entry)1649{1650/*1651 * The actual depth of each object we will write is stored as an int,1652 * as it cannot exceed our int "depth" limit. But before we break1653 * changes based no that limit, we may potentially go as deep as the1654 * number of objects, which is elsewhere bounded to a uint32_t.1655 */1656uint32_t total_depth;1657struct object_entry *cur, *next;16581659for(cur = entry, total_depth =0;1660 cur;1661 cur =DELTA(cur), total_depth++) {1662if(cur->dfs_state == DFS_DONE) {1663/*1664 * We've already seen this object and know it isn't1665 * part of a cycle. We do need to append its depth1666 * to our count.1667 */1668 total_depth += cur->depth;1669break;1670}16711672/*1673 * We break cycles before looping, so an ACTIVE state (or any1674 * other cruft which made its way into the state variable)1675 * is a bug.1676 */1677if(cur->dfs_state != DFS_NONE)1678BUG("confusing delta dfs state in first pass:%d",1679 cur->dfs_state);16801681/*1682 * Now we know this is the first time we've seen the object. If1683 * it's not a delta, we're done traversing, but we'll mark it1684 * done to save time on future traversals.1685 */1686if(!DELTA(cur)) {1687 cur->dfs_state = DFS_DONE;1688break;1689}16901691/*1692 * Mark ourselves as active and see if the next step causes1693 * us to cycle to another active object. It's important to do1694 * this _before_ we loop, because it impacts where we make the1695 * cut, and thus how our total_depth counter works.1696 * E.g., We may see a partial loop like:1697 *1698 * A -> B -> C -> D -> B1699 *1700 * Cutting B->C breaks the cycle. But now the depth of A is1701 * only 1, and our total_depth counter is at 3. The size of the1702 * error is always one less than the size of the cycle we1703 * broke. Commits C and D were "lost" from A's chain.1704 *1705 * If we instead cut D->B, then the depth of A is correct at 3.1706 * We keep all commits in the chain that we examined.1707 */1708 cur->dfs_state = DFS_ACTIVE;1709if(DELTA(cur)->dfs_state == DFS_ACTIVE) {1710drop_reused_delta(cur);1711 cur->dfs_state = DFS_DONE;1712break;1713}1714}17151716/*1717 * And now that we've gone all the way to the bottom of the chain, we1718 * need to clear the active flags and set the depth fields as1719 * appropriate. Unlike the loop above, which can quit when it drops a1720 * delta, we need to keep going to look for more depth cuts. So we need1721 * an extra "next" pointer to keep going after we reset cur->delta.1722 */1723for(cur = entry; cur; cur = next) {1724 next =DELTA(cur);17251726/*1727 * We should have a chain of zero or more ACTIVE states down to1728 * a final DONE. We can quit after the DONE, because either it1729 * has no bases, or we've already handled them in a previous1730 * call.1731 */1732if(cur->dfs_state == DFS_DONE)1733break;1734else if(cur->dfs_state != DFS_ACTIVE)1735BUG("confusing delta dfs state in second pass:%d",1736 cur->dfs_state);17371738/*1739 * If the total_depth is more than depth, then we need to snip1740 * the chain into two or more smaller chains that don't exceed1741 * the maximum depth. Most of the resulting chains will contain1742 * (depth + 1) entries (i.e., depth deltas plus one base), and1743 * the last chain (i.e., the one containing entry) will contain1744 * whatever entries are left over, namely1745 * (total_depth % (depth + 1)) of them.1746 *1747 * Since we are iterating towards decreasing depth, we need to1748 * decrement total_depth as we go, and we need to write to the1749 * entry what its final depth will be after all of the1750 * snipping. Since we're snipping into chains of length (depth1751 * + 1) entries, the final depth of an entry will be its1752 * original depth modulo (depth + 1). Any time we encounter an1753 * entry whose final depth is supposed to be zero, we snip it1754 * from its delta base, thereby making it so.1755 */1756 cur->depth = (total_depth--) % (depth +1);1757if(!cur->depth)1758drop_reused_delta(cur);17591760 cur->dfs_state = DFS_DONE;1761}1762}17631764static voidget_object_details(void)1765{1766uint32_t i;1767struct object_entry **sorted_by_offset;17681769if(progress)1770 progress_state =start_progress(_("Counting objects"),1771 to_pack.nr_objects);17721773 sorted_by_offset =xcalloc(to_pack.nr_objects,sizeof(struct object_entry *));1774for(i =0; i < to_pack.nr_objects; i++)1775 sorted_by_offset[i] = to_pack.objects + i;1776QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17771778for(i =0; i < to_pack.nr_objects; i++) {1779struct object_entry *entry = sorted_by_offset[i];1780check_object(entry);1781if(entry->type_valid &&1782oe_size_greater_than(&to_pack, entry, big_file_threshold))1783 entry->no_try_delta =1;1784display_progress(progress_state, i +1);1785}1786stop_progress(&progress_state);17871788/*1789 * This must happen in a second pass, since we rely on the delta1790 * information for the whole list being completed.1791 */1792for(i =0; i < to_pack.nr_objects; i++)1793break_delta_chains(&to_pack.objects[i]);17941795free(sorted_by_offset);1796}17971798/*1799 * We search for deltas in a list sorted by type, by filename hash, and then1800 * by size, so that we see progressively smaller and smaller files.1801 * That's because we prefer deltas to be from the bigger file1802 * to the smaller -- deletes are potentially cheaper, but perhaps1803 * more importantly, the bigger file is likely the more recent1804 * one. The deepest deltas are therefore the oldest objects which are1805 * less susceptible to be accessed often.1806 */1807static inttype_size_sort(const void*_a,const void*_b)1808{1809const struct object_entry *a = *(struct object_entry **)_a;1810const struct object_entry *b = *(struct object_entry **)_b;1811enum object_type a_type =oe_type(a);1812enum object_type b_type =oe_type(b);1813unsigned long a_size =SIZE(a);1814unsigned long b_size =SIZE(b);18151816if(a_type > b_type)1817return-1;1818if(a_type < b_type)1819return1;1820if(a->hash > b->hash)1821return-1;1822if(a->hash < b->hash)1823return1;1824if(a->preferred_base > b->preferred_base)1825return-1;1826if(a->preferred_base < b->preferred_base)1827return1;1828if(a_size > b_size)1829return-1;1830if(a_size < b_size)1831return1;1832return a < b ? -1: (a > b);/* newest first */1833}18341835struct unpacked {1836struct object_entry *entry;1837void*data;1838struct delta_index *index;1839unsigned depth;1840};18411842static intdelta_cacheable(unsigned long src_size,unsigned long trg_size,1843unsigned long delta_size)1844{1845if(max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1846return0;18471848if(delta_size < cache_max_small_delta_size)1849return1;18501851/* cache delta, if objects are large enough compared to delta size */1852if((src_size >>20) + (trg_size >>21) > (delta_size >>10))1853return1;18541855return0;1856}18571858#ifndef NO_PTHREADS18591860static pthread_mutex_t read_mutex;1861#define read_lock() pthread_mutex_lock(&read_mutex)1862#define read_unlock() pthread_mutex_unlock(&read_mutex)18631864static pthread_mutex_t cache_mutex;1865#define cache_lock() pthread_mutex_lock(&cache_mutex)1866#define cache_unlock() pthread_mutex_unlock(&cache_mutex)18671868static pthread_mutex_t progress_mutex;1869#define progress_lock() pthread_mutex_lock(&progress_mutex)1870#define progress_unlock() pthread_mutex_unlock(&progress_mutex)18711872#else18731874#define read_lock() (void)01875#define read_unlock() (void)01876#define cache_lock() (void)01877#define cache_unlock() (void)01878#define progress_lock() (void)01879#define progress_unlock() (void)018801881#endif18821883/*1884 * Return the size of the object without doing any delta1885 * reconstruction (so non-deltas are true object sizes, but deltas1886 * return the size of the delta data).1887 */1888unsigned longoe_get_size_slow(struct packing_data *pack,1889const struct object_entry *e)1890{1891struct packed_git *p;1892struct pack_window *w_curs;1893unsigned char*buf;1894enum object_type type;1895unsigned long used, avail, size;18961897if(e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {1898read_lock();1899if(oid_object_info(the_repository, &e->idx.oid, &size) <0)1900die(_("unable to get size of%s"),1901oid_to_hex(&e->idx.oid));1902read_unlock();1903return size;1904}19051906 p =oe_in_pack(pack, e);1907if(!p)1908BUG("when e->type is a delta, it must belong to a pack");19091910read_lock();1911 w_curs = NULL;1912 buf =use_pack(p, &w_curs, e->in_pack_offset, &avail);1913 used =unpack_object_header_buffer(buf, avail, &type, &size);1914if(used ==0)1915die(_("unable to parse object header of%s"),1916oid_to_hex(&e->idx.oid));19171918unuse_pack(&w_curs);1919read_unlock();1920return size;1921}19221923static inttry_delta(struct unpacked *trg,struct unpacked *src,1924unsigned max_depth,unsigned long*mem_usage)1925{1926struct object_entry *trg_entry = trg->entry;1927struct object_entry *src_entry = src->entry;1928unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1929unsigned ref_depth;1930enum object_type type;1931void*delta_buf;19321933/* Don't bother doing diffs between different types */1934if(oe_type(trg_entry) !=oe_type(src_entry))1935return-1;19361937/*1938 * We do not bother to try a delta that we discarded on an1939 * earlier try, but only when reusing delta data. Note that1940 * src_entry that is marked as the preferred_base should always1941 * be considered, as even if we produce a suboptimal delta against1942 * it, we will still save the transfer cost, as we already know1943 * the other side has it and we won't send src_entry at all.1944 */1945if(reuse_delta &&IN_PACK(trg_entry) &&1946IN_PACK(trg_entry) ==IN_PACK(src_entry) &&1947!src_entry->preferred_base &&1948 trg_entry->in_pack_type != OBJ_REF_DELTA &&1949 trg_entry->in_pack_type != OBJ_OFS_DELTA)1950return0;19511952/* Let's not bust the allowed depth. */1953if(src->depth >= max_depth)1954return0;19551956/* Now some size filtering heuristics. */1957 trg_size =SIZE(trg_entry);1958if(!DELTA(trg_entry)) {1959 max_size = trg_size/2- the_hash_algo->rawsz;1960 ref_depth =1;1961}else{1962 max_size =DELTA_SIZE(trg_entry);1963 ref_depth = trg->depth;1964}1965 max_size = (uint64_t)max_size * (max_depth - src->depth) /1966(max_depth - ref_depth +1);1967if(max_size ==0)1968return0;1969 src_size =SIZE(src_entry);1970 sizediff = src_size < trg_size ? trg_size - src_size :0;1971if(sizediff >= max_size)1972return0;1973if(trg_size < src_size /32)1974return0;19751976/* Load data if not already done */1977if(!trg->data) {1978read_lock();1979 trg->data =read_object_file(&trg_entry->idx.oid, &type, &sz);1980read_unlock();1981if(!trg->data)1982die("object%scannot be read",1983oid_to_hex(&trg_entry->idx.oid));1984if(sz != trg_size)1985die("object%sinconsistent object length (%lu vs%lu)",1986oid_to_hex(&trg_entry->idx.oid), sz,1987 trg_size);1988*mem_usage += sz;1989}1990if(!src->data) {1991read_lock();1992 src->data =read_object_file(&src_entry->idx.oid, &type, &sz);1993read_unlock();1994if(!src->data) {1995if(src_entry->preferred_base) {1996static int warned =0;1997if(!warned++)1998warning("object%scannot be read",1999oid_to_hex(&src_entry->idx.oid));2000/*2001 * Those objects are not included in the2002 * resulting pack. Be resilient and ignore2003 * them if they can't be read, in case the2004 * pack could be created nevertheless.2005 */2006return0;2007}2008die("object%scannot be read",2009oid_to_hex(&src_entry->idx.oid));2010}2011if(sz != src_size)2012die("object%sinconsistent object length (%lu vs%lu)",2013oid_to_hex(&src_entry->idx.oid), sz,2014 src_size);2015*mem_usage += sz;2016}2017if(!src->index) {2018 src->index =create_delta_index(src->data, src_size);2019if(!src->index) {2020static int warned =0;2021if(!warned++)2022warning("suboptimal pack - out of memory");2023return0;2024}2025*mem_usage +=sizeof_delta_index(src->index);2026}20272028 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);2029if(!delta_buf)2030return0;2031if(delta_size >= (1U<< OE_DELTA_SIZE_BITS)) {2032free(delta_buf);2033return0;2034}20352036if(DELTA(trg_entry)) {2037/* Prefer only shallower same-sized deltas. */2038if(delta_size ==DELTA_SIZE(trg_entry) &&2039 src->depth +1>= trg->depth) {2040free(delta_buf);2041return0;2042}2043}20442045/*2046 * Handle memory allocation outside of the cache2047 * accounting lock. Compiler will optimize the strangeness2048 * away when NO_PTHREADS is defined.2049 */2050free(trg_entry->delta_data);2051cache_lock();2052if(trg_entry->delta_data) {2053 delta_cache_size -=DELTA_SIZE(trg_entry);2054 trg_entry->delta_data = NULL;2055}2056if(delta_cacheable(src_size, trg_size, delta_size)) {2057 delta_cache_size += delta_size;2058cache_unlock();2059 trg_entry->delta_data =xrealloc(delta_buf, delta_size);2060}else{2061cache_unlock();2062free(delta_buf);2063}20642065SET_DELTA(trg_entry, src_entry);2066SET_DELTA_SIZE(trg_entry, delta_size);2067 trg->depth = src->depth +1;20682069return1;2070}20712072static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)2073{2074struct object_entry *child =DELTA_CHILD(me);2075unsigned int m = n;2076while(child) {2077unsigned int c =check_delta_limit(child, n +1);2078if(m < c)2079 m = c;2080 child =DELTA_SIBLING(child);2081}2082return m;2083}20842085static unsigned longfree_unpacked(struct unpacked *n)2086{2087unsigned long freed_mem =sizeof_delta_index(n->index);2088free_delta_index(n->index);2089 n->index = NULL;2090if(n->data) {2091 freed_mem +=SIZE(n->entry);2092FREE_AND_NULL(n->data);2093}2094 n->entry = NULL;2095 n->depth =0;2096return freed_mem;2097}20982099static voidfind_deltas(struct object_entry **list,unsigned*list_size,2100int window,int depth,unsigned*processed)2101{2102uint32_t i, idx =0, count =0;2103struct unpacked *array;2104unsigned long mem_usage =0;21052106 array =xcalloc(window,sizeof(struct unpacked));21072108for(;;) {2109struct object_entry *entry;2110struct unpacked *n = array + idx;2111int j, max_depth, best_base = -1;21122113progress_lock();2114if(!*list_size) {2115progress_unlock();2116break;2117}2118 entry = *list++;2119(*list_size)--;2120if(!entry->preferred_base) {2121(*processed)++;2122display_progress(progress_state, *processed);2123}2124progress_unlock();21252126 mem_usage -=free_unpacked(n);2127 n->entry = entry;21282129while(window_memory_limit &&2130 mem_usage > window_memory_limit &&2131 count >1) {2132uint32_t tail = (idx + window - count) % window;2133 mem_usage -=free_unpacked(array + tail);2134 count--;2135}21362137/* We do not compute delta to *create* objects we are not2138 * going to pack.2139 */2140if(entry->preferred_base)2141goto next;21422143/*2144 * If the current object is at pack edge, take the depth the2145 * objects that depend on the current object into account2146 * otherwise they would become too deep.2147 */2148 max_depth = depth;2149if(DELTA_CHILD(entry)) {2150 max_depth -=check_delta_limit(entry,0);2151if(max_depth <=0)2152goto next;2153}21542155 j = window;2156while(--j >0) {2157int ret;2158uint32_t other_idx = idx + j;2159struct unpacked *m;2160if(other_idx >= window)2161 other_idx -= window;2162 m = array + other_idx;2163if(!m->entry)2164break;2165 ret =try_delta(n, m, max_depth, &mem_usage);2166if(ret <0)2167break;2168else if(ret >0)2169 best_base = other_idx;2170}21712172/*2173 * If we decided to cache the delta data, then it is best2174 * to compress it right away. First because we have to do2175 * it anyway, and doing it here while we're threaded will2176 * save a lot of time in the non threaded write phase,2177 * as well as allow for caching more deltas within2178 * the same cache size limit.2179 * ...2180 * But only if not writing to stdout, since in that case2181 * the network is most likely throttling writes anyway,2182 * and therefore it is best to go to the write phase ASAP2183 * instead, as we can afford spending more time compressing2184 * between writes at that moment.2185 */2186if(entry->delta_data && !pack_to_stdout) {2187unsigned long size;21882189 size =do_compress(&entry->delta_data,DELTA_SIZE(entry));2190if(size < (1U<< OE_Z_DELTA_BITS)) {2191 entry->z_delta_size = size;2192cache_lock();2193 delta_cache_size -=DELTA_SIZE(entry);2194 delta_cache_size += entry->z_delta_size;2195cache_unlock();2196}else{2197FREE_AND_NULL(entry->delta_data);2198 entry->z_delta_size =0;2199}2200}22012202/* if we made n a delta, and if n is already at max2203 * depth, leaving it in the window is pointless. we2204 * should evict it first.2205 */2206if(DELTA(entry) && max_depth <= n->depth)2207continue;22082209/*2210 * Move the best delta base up in the window, after the2211 * currently deltified object, to keep it longer. It will2212 * be the first base object to be attempted next.2213 */2214if(DELTA(entry)) {2215struct unpacked swap = array[best_base];2216int dist = (window + idx - best_base) % window;2217int dst = best_base;2218while(dist--) {2219int src = (dst +1) % window;2220 array[dst] = array[src];2221 dst = src;2222}2223 array[dst] = swap;2224}22252226 next:2227 idx++;2228if(count +1< window)2229 count++;2230if(idx >= window)2231 idx =0;2232}22332234for(i =0; i < window; ++i) {2235free_delta_index(array[i].index);2236free(array[i].data);2237}2238free(array);2239}22402241#ifndef NO_PTHREADS22422243static voidtry_to_free_from_threads(size_t size)2244{2245read_lock();2246release_pack_memory(size);2247read_unlock();2248}22492250static try_to_free_t old_try_to_free_routine;22512252/*2253 * The main thread waits on the condition that (at least) one of the workers2254 * has stopped working (which is indicated in the .working member of2255 * struct thread_params).2256 * When a work thread has completed its work, it sets .working to 0 and2257 * signals the main thread and waits on the condition that .data_ready2258 * becomes 1.2259 */22602261struct thread_params {2262 pthread_t thread;2263struct object_entry **list;2264unsigned list_size;2265unsigned remaining;2266int window;2267int depth;2268int working;2269int data_ready;2270 pthread_mutex_t mutex;2271 pthread_cond_t cond;2272unsigned*processed;2273};22742275static pthread_cond_t progress_cond;22762277/*2278 * Mutex and conditional variable can't be statically-initialized on Windows.2279 */2280static voidinit_threaded_search(void)2281{2282init_recursive_mutex(&read_mutex);2283pthread_mutex_init(&cache_mutex, NULL);2284pthread_mutex_init(&progress_mutex, NULL);2285pthread_cond_init(&progress_cond, NULL);2286 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2287}22882289static voidcleanup_threaded_search(void)2290{2291set_try_to_free_routine(old_try_to_free_routine);2292pthread_cond_destroy(&progress_cond);2293pthread_mutex_destroy(&read_mutex);2294pthread_mutex_destroy(&cache_mutex);2295pthread_mutex_destroy(&progress_mutex);2296}22972298static void*threaded_find_deltas(void*arg)2299{2300struct thread_params *me = arg;23012302progress_lock();2303while(me->remaining) {2304progress_unlock();23052306find_deltas(me->list, &me->remaining,2307 me->window, me->depth, me->processed);23082309progress_lock();2310 me->working =0;2311pthread_cond_signal(&progress_cond);2312progress_unlock();23132314/*2315 * We must not set ->data_ready before we wait on the2316 * condition because the main thread may have set it to 12317 * before we get here. In order to be sure that new2318 * work is available if we see 1 in ->data_ready, it2319 * was initialized to 0 before this thread was spawned2320 * and we reset it to 0 right away.2321 */2322pthread_mutex_lock(&me->mutex);2323while(!me->data_ready)2324pthread_cond_wait(&me->cond, &me->mutex);2325 me->data_ready =0;2326pthread_mutex_unlock(&me->mutex);23272328progress_lock();2329}2330progress_unlock();2331/* leave ->working 1 so that this doesn't get more work assigned */2332return NULL;2333}23342335static voidll_find_deltas(struct object_entry **list,unsigned list_size,2336int window,int depth,unsigned*processed)2337{2338struct thread_params *p;2339int i, ret, active_threads =0;23402341init_threaded_search();23422343if(delta_search_threads <=1) {2344find_deltas(list, &list_size, window, depth, processed);2345cleanup_threaded_search();2346return;2347}2348if(progress > pack_to_stdout)2349fprintf_ln(stderr,"Delta compression using up to%dthreads",2350 delta_search_threads);2351 p =xcalloc(delta_search_threads,sizeof(*p));23522353/* Partition the work amongst work threads. */2354for(i =0; i < delta_search_threads; i++) {2355unsigned sub_size = list_size / (delta_search_threads - i);23562357/* don't use too small segments or no deltas will be found */2358if(sub_size <2*window && i+1< delta_search_threads)2359 sub_size =0;23602361 p[i].window = window;2362 p[i].depth = depth;2363 p[i].processed = processed;2364 p[i].working =1;2365 p[i].data_ready =0;23662367/* try to split chunks on "path" boundaries */2368while(sub_size && sub_size < list_size &&2369 list[sub_size]->hash &&2370 list[sub_size]->hash == list[sub_size-1]->hash)2371 sub_size++;23722373 p[i].list = list;2374 p[i].list_size = sub_size;2375 p[i].remaining = sub_size;23762377 list += sub_size;2378 list_size -= sub_size;2379}23802381/* Start work threads. */2382for(i =0; i < delta_search_threads; i++) {2383if(!p[i].list_size)2384continue;2385pthread_mutex_init(&p[i].mutex, NULL);2386pthread_cond_init(&p[i].cond, NULL);2387 ret =pthread_create(&p[i].thread, NULL,2388 threaded_find_deltas, &p[i]);2389if(ret)2390die("unable to create thread:%s",strerror(ret));2391 active_threads++;2392}23932394/*2395 * Now let's wait for work completion. Each time a thread is done2396 * with its work, we steal half of the remaining work from the2397 * thread with the largest number of unprocessed objects and give2398 * it to that newly idle thread. This ensure good load balancing2399 * until the remaining object list segments are simply too short2400 * to be worth splitting anymore.2401 */2402while(active_threads) {2403struct thread_params *target = NULL;2404struct thread_params *victim = NULL;2405unsigned sub_size =0;24062407progress_lock();2408for(;;) {2409for(i =0; !target && i < delta_search_threads; i++)2410if(!p[i].working)2411 target = &p[i];2412if(target)2413break;2414pthread_cond_wait(&progress_cond, &progress_mutex);2415}24162417for(i =0; i < delta_search_threads; i++)2418if(p[i].remaining >2*window &&2419(!victim || victim->remaining < p[i].remaining))2420 victim = &p[i];2421if(victim) {2422 sub_size = victim->remaining /2;2423 list = victim->list + victim->list_size - sub_size;2424while(sub_size && list[0]->hash &&2425 list[0]->hash == list[-1]->hash) {2426 list++;2427 sub_size--;2428}2429if(!sub_size) {2430/*2431 * It is possible for some "paths" to have2432 * so many objects that no hash boundary2433 * might be found. Let's just steal the2434 * exact half in that case.2435 */2436 sub_size = victim->remaining /2;2437 list -= sub_size;2438}2439 target->list = list;2440 victim->list_size -= sub_size;2441 victim->remaining -= sub_size;2442}2443 target->list_size = sub_size;2444 target->remaining = sub_size;2445 target->working =1;2446progress_unlock();24472448pthread_mutex_lock(&target->mutex);2449 target->data_ready =1;2450pthread_cond_signal(&target->cond);2451pthread_mutex_unlock(&target->mutex);24522453if(!sub_size) {2454pthread_join(target->thread, NULL);2455pthread_cond_destroy(&target->cond);2456pthread_mutex_destroy(&target->mutex);2457 active_threads--;2458}2459}2460cleanup_threaded_search();2461free(p);2462}24632464#else2465#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2466#endif24672468static voidadd_tag_chain(const struct object_id *oid)2469{2470struct tag *tag;24712472/*2473 * We catch duplicates already in add_object_entry(), but we'd2474 * prefer to do this extra check to avoid having to parse the2475 * tag at all if we already know that it's being packed (e.g., if2476 * it was included via bitmaps, we would not have parsed it2477 * previously).2478 */2479if(packlist_find(&to_pack, oid->hash, NULL))2480return;24812482 tag =lookup_tag(oid);2483while(1) {2484if(!tag ||parse_tag(tag) || !tag->tagged)2485die("unable to pack objects reachable from tag%s",2486oid_to_hex(oid));24872488add_object_entry(&tag->object.oid, OBJ_TAG, NULL,0);24892490if(tag->tagged->type != OBJ_TAG)2491return;24922493 tag = (struct tag *)tag->tagged;2494}2495}24962497static intadd_ref_tag(const char*path,const struct object_id *oid,int flag,void*cb_data)2498{2499struct object_id peeled;25002501if(starts_with(path,"refs/tags/") &&/* is a tag? */2502!peel_ref(path, &peeled) &&/* peelable? */2503packlist_find(&to_pack, peeled.hash, NULL))/* object packed? */2504add_tag_chain(oid);2505return0;2506}25072508static voidprepare_pack(int window,int depth)2509{2510struct object_entry **delta_list;2511uint32_t i, nr_deltas;2512unsigned n;25132514get_object_details();25152516/*2517 * If we're locally repacking then we need to be doubly careful2518 * from now on in order to make sure no stealth corruption gets2519 * propagated to the new pack. Clients receiving streamed packs2520 * should validate everything they get anyway so no need to incur2521 * the additional cost here in that case.2522 */2523if(!pack_to_stdout)2524 do_check_packed_object_crc =1;25252526if(!to_pack.nr_objects || !window || !depth)2527return;25282529ALLOC_ARRAY(delta_list, to_pack.nr_objects);2530 nr_deltas = n =0;25312532for(i =0; i < to_pack.nr_objects; i++) {2533struct object_entry *entry = to_pack.objects + i;25342535if(DELTA(entry))2536/* This happens if we decided to reuse existing2537 * delta from a pack. "reuse_delta &&" is implied.2538 */2539continue;25402541if(!entry->type_valid ||2542oe_size_less_than(&to_pack, entry,50))2543continue;25442545if(entry->no_try_delta)2546continue;25472548if(!entry->preferred_base) {2549 nr_deltas++;2550if(oe_type(entry) <0)2551die("unable to get type of object%s",2552oid_to_hex(&entry->idx.oid));2553}else{2554if(oe_type(entry) <0) {2555/*2556 * This object is not found, but we2557 * don't have to include it anyway.2558 */2559continue;2560}2561}25622563 delta_list[n++] = entry;2564}25652566if(nr_deltas && n >1) {2567unsigned nr_done =0;2568if(progress)2569 progress_state =start_progress(_("Compressing objects"),2570 nr_deltas);2571QSORT(delta_list, n, type_size_sort);2572ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2573stop_progress(&progress_state);2574if(nr_done != nr_deltas)2575die("inconsistency with delta count");2576}2577free(delta_list);2578}25792580static intgit_pack_config(const char*k,const char*v,void*cb)2581{2582if(!strcmp(k,"pack.window")) {2583 window =git_config_int(k, v);2584return0;2585}2586if(!strcmp(k,"pack.windowmemory")) {2587 window_memory_limit =git_config_ulong(k, v);2588return0;2589}2590if(!strcmp(k,"pack.depth")) {2591 depth =git_config_int(k, v);2592return0;2593}2594if(!strcmp(k,"pack.deltacachesize")) {2595 max_delta_cache_size =git_config_int(k, v);2596return0;2597}2598if(!strcmp(k,"pack.deltacachelimit")) {2599 cache_max_small_delta_size =git_config_int(k, v);2600return0;2601}2602if(!strcmp(k,"pack.writebitmaphashcache")) {2603if(git_config_bool(k, v))2604 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2605else2606 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2607}2608if(!strcmp(k,"pack.usebitmaps")) {2609 use_bitmap_index_default =git_config_bool(k, v);2610return0;2611}2612if(!strcmp(k,"pack.threads")) {2613 delta_search_threads =git_config_int(k, v);2614if(delta_search_threads <0)2615die("invalid number of threads specified (%d)",2616 delta_search_threads);2617#ifdef NO_PTHREADS2618if(delta_search_threads !=1) {2619warning("no threads support, ignoring%s", k);2620 delta_search_threads =0;2621}2622#endif2623return0;2624}2625if(!strcmp(k,"pack.indexversion")) {2626 pack_idx_opts.version =git_config_int(k, v);2627if(pack_idx_opts.version >2)2628die("bad pack.indexversion=%"PRIu32,2629 pack_idx_opts.version);2630return0;2631}2632returngit_default_config(k, v, cb);2633}26342635static voidread_object_list_from_stdin(void)2636{2637char line[GIT_MAX_HEXSZ +1+ PATH_MAX +2];2638struct object_id oid;2639const char*p;26402641for(;;) {2642if(!fgets(line,sizeof(line), stdin)) {2643if(feof(stdin))2644break;2645if(!ferror(stdin))2646die("BUG: fgets returned NULL, not EOF, not error!");2647if(errno != EINTR)2648die_errno("fgets");2649clearerr(stdin);2650continue;2651}2652if(line[0] =='-') {2653if(get_oid_hex(line+1, &oid))2654die("expected edge object ID, got garbage:\n%s",2655 line);2656add_preferred_base(&oid);2657continue;2658}2659if(parse_oid_hex(line, &oid, &p))2660die("expected object ID, got garbage:\n%s", line);26612662add_preferred_base_object(p +1);2663add_object_entry(&oid, OBJ_NONE, p +1,0);2664}2665}26662667/* Remember to update object flag allocation in object.h */2668#define OBJECT_ADDED (1u<<20)26692670static voidshow_commit(struct commit *commit,void*data)2671{2672add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL,0);2673 commit->object.flags |= OBJECT_ADDED;26742675if(write_bitmap_index)2676index_commit_for_bitmap(commit);2677}26782679static voidshow_object(struct object *obj,const char*name,void*data)2680{2681add_preferred_base_object(name);2682add_object_entry(&obj->oid, obj->type, name,0);2683 obj->flags |= OBJECT_ADDED;2684}26852686static voidshow_object__ma_allow_any(struct object *obj,const char*name,void*data)2687{2688assert(arg_missing_action == MA_ALLOW_ANY);26892690/*2691 * Quietly ignore ALL missing objects. This avoids problems with2692 * staging them now and getting an odd error later.2693 */2694if(!has_object_file(&obj->oid))2695return;26962697show_object(obj, name, data);2698}26992700static voidshow_object__ma_allow_promisor(struct object *obj,const char*name,void*data)2701{2702assert(arg_missing_action == MA_ALLOW_PROMISOR);27032704/*2705 * Quietly ignore EXPECTED missing objects. This avoids problems with2706 * staging them now and getting an odd error later.2707 */2708if(!has_object_file(&obj->oid) &&is_promisor_object(&obj->oid))2709return;27102711show_object(obj, name, data);2712}27132714static intoption_parse_missing_action(const struct option *opt,2715const char*arg,int unset)2716{2717assert(arg);2718assert(!unset);27192720if(!strcmp(arg,"error")) {2721 arg_missing_action = MA_ERROR;2722 fn_show_object = show_object;2723return0;2724}27252726if(!strcmp(arg,"allow-any")) {2727 arg_missing_action = MA_ALLOW_ANY;2728 fetch_if_missing =0;2729 fn_show_object = show_object__ma_allow_any;2730return0;2731}27322733if(!strcmp(arg,"allow-promisor")) {2734 arg_missing_action = MA_ALLOW_PROMISOR;2735 fetch_if_missing =0;2736 fn_show_object = show_object__ma_allow_promisor;2737return0;2738}27392740die(_("invalid value for --missing"));2741return0;2742}27432744static voidshow_edge(struct commit *commit)2745{2746add_preferred_base(&commit->object.oid);2747}27482749struct in_pack_object {2750 off_t offset;2751struct object *object;2752};27532754struct in_pack {2755unsigned int alloc;2756unsigned int nr;2757struct in_pack_object *array;2758};27592760static voidmark_in_pack_object(struct object *object,struct packed_git *p,struct in_pack *in_pack)2761{2762 in_pack->array[in_pack->nr].offset =find_pack_entry_one(object->oid.hash, p);2763 in_pack->array[in_pack->nr].object = object;2764 in_pack->nr++;2765}27662767/*2768 * Compare the objects in the offset order, in order to emulate the2769 * "git rev-list --objects" output that produced the pack originally.2770 */2771static intofscmp(const void*a_,const void*b_)2772{2773struct in_pack_object *a = (struct in_pack_object *)a_;2774struct in_pack_object *b = (struct in_pack_object *)b_;27752776if(a->offset < b->offset)2777return-1;2778else if(a->offset > b->offset)2779return1;2780else2781returnoidcmp(&a->object->oid, &b->object->oid);2782}27832784static voidadd_objects_in_unpacked_packs(struct rev_info *revs)2785{2786struct packed_git *p;2787struct in_pack in_pack;2788uint32_t i;27892790memset(&in_pack,0,sizeof(in_pack));27912792for(p =get_packed_git(the_repository); p; p = p->next) {2793struct object_id oid;2794struct object *o;27952796if(!p->pack_local || p->pack_keep || p->pack_keep_in_core)2797continue;2798if(open_pack_index(p))2799die("cannot open pack index");28002801ALLOC_GROW(in_pack.array,2802 in_pack.nr + p->num_objects,2803 in_pack.alloc);28042805for(i =0; i < p->num_objects; i++) {2806nth_packed_object_oid(&oid, p, i);2807 o =lookup_unknown_object(oid.hash);2808if(!(o->flags & OBJECT_ADDED))2809mark_in_pack_object(o, p, &in_pack);2810 o->flags |= OBJECT_ADDED;2811}2812}28132814if(in_pack.nr) {2815QSORT(in_pack.array, in_pack.nr, ofscmp);2816for(i =0; i < in_pack.nr; i++) {2817struct object *o = in_pack.array[i].object;2818add_object_entry(&o->oid, o->type,"",0);2819}2820}2821free(in_pack.array);2822}28232824static intadd_loose_object(const struct object_id *oid,const char*path,2825void*data)2826{2827enum object_type type =oid_object_info(the_repository, oid, NULL);28282829if(type <0) {2830warning("loose object at%scould not be examined", path);2831return0;2832}28332834add_object_entry(oid, type,"",0);2835return0;2836}28372838/*2839 * We actually don't even have to worry about reachability here.2840 * add_object_entry will weed out duplicates, so we just add every2841 * loose object we find.2842 */2843static voidadd_unreachable_loose_objects(void)2844{2845for_each_loose_file_in_objdir(get_object_directory(),2846 add_loose_object,2847 NULL, NULL, NULL);2848}28492850static inthas_sha1_pack_kept_or_nonlocal(const struct object_id *oid)2851{2852static struct packed_git *last_found = (void*)1;2853struct packed_git *p;28542855 p = (last_found != (void*)1) ? last_found :2856get_packed_git(the_repository);28572858while(p) {2859if((!p->pack_local || p->pack_keep ||2860 p->pack_keep_in_core) &&2861find_pack_entry_one(oid->hash, p)) {2862 last_found = p;2863return1;2864}2865if(p == last_found)2866 p =get_packed_git(the_repository);2867else2868 p = p->next;2869if(p == last_found)2870 p = p->next;2871}2872return0;2873}28742875/*2876 * Store a list of sha1s that are should not be discarded2877 * because they are either written too recently, or are2878 * reachable from another object that was.2879 *2880 * This is filled by get_object_list.2881 */2882static struct oid_array recent_objects;28832884static intloosened_object_can_be_discarded(const struct object_id *oid,2885 timestamp_t mtime)2886{2887if(!unpack_unreachable_expiration)2888return0;2889if(mtime > unpack_unreachable_expiration)2890return0;2891if(oid_array_lookup(&recent_objects, oid) >=0)2892return0;2893return1;2894}28952896static voidloosen_unused_packed_objects(struct rev_info *revs)2897{2898struct packed_git *p;2899uint32_t i;2900struct object_id oid;29012902for(p =get_packed_git(the_repository); p; p = p->next) {2903if(!p->pack_local || p->pack_keep || p->pack_keep_in_core)2904continue;29052906if(open_pack_index(p))2907die("cannot open pack index");29082909for(i =0; i < p->num_objects; i++) {2910nth_packed_object_oid(&oid, p, i);2911if(!packlist_find(&to_pack, oid.hash, NULL) &&2912!has_sha1_pack_kept_or_nonlocal(&oid) &&2913!loosened_object_can_be_discarded(&oid, p->mtime))2914if(force_object_loose(&oid, p->mtime))2915die("unable to force loose object");2916}2917}2918}29192920/*2921 * This tracks any options which pack-reuse code expects to be on, or which a2922 * reader of the pack might not understand, and which would therefore prevent2923 * blind reuse of what we have on disk.2924 */2925static intpack_options_allow_reuse(void)2926{2927return pack_to_stdout &&2928 allow_ofs_delta &&2929!ignore_packed_keep_on_disk &&2930!ignore_packed_keep_in_core &&2931(!local || !have_non_local_packs) &&2932!incremental;2933}29342935static intget_object_list_from_bitmap(struct rev_info *revs)2936{2937if(prepare_bitmap_walk(revs) <0)2938return-1;29392940if(pack_options_allow_reuse() &&2941!reuse_partial_packfile_from_bitmap(2942&reuse_packfile,2943&reuse_packfile_objects,2944&reuse_packfile_offset)) {2945assert(reuse_packfile_objects);2946 nr_result += reuse_packfile_objects;2947display_progress(progress_state, nr_result);2948}29492950traverse_bitmap_commit_list(&add_object_entry_from_bitmap);2951return0;2952}29532954static voidrecord_recent_object(struct object *obj,2955const char*name,2956void*data)2957{2958oid_array_append(&recent_objects, &obj->oid);2959}29602961static voidrecord_recent_commit(struct commit *commit,void*data)2962{2963oid_array_append(&recent_objects, &commit->object.oid);2964}29652966static voidget_object_list(int ac,const char**av)2967{2968struct rev_info revs;2969char line[1000];2970int flags =0;29712972init_revisions(&revs, NULL);2973 save_commit_buffer =0;2974setup_revisions(ac, av, &revs, NULL);29752976/* make sure shallows are read */2977is_repository_shallow();29782979while(fgets(line,sizeof(line), stdin) != NULL) {2980int len =strlen(line);2981if(len && line[len -1] =='\n')2982 line[--len] =0;2983if(!len)2984break;2985if(*line =='-') {2986if(!strcmp(line,"--not")) {2987 flags ^= UNINTERESTING;2988 write_bitmap_index =0;2989continue;2990}2991if(starts_with(line,"--shallow ")) {2992struct object_id oid;2993if(get_oid_hex(line +10, &oid))2994die("not an SHA-1 '%s'", line +10);2995register_shallow(&oid);2996 use_bitmap_index =0;2997continue;2998}2999die("not a rev '%s'", line);3000}3001if(handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))3002die("bad revision '%s'", line);3003}30043005if(use_bitmap_index && !get_object_list_from_bitmap(&revs))3006return;30073008if(prepare_revision_walk(&revs))3009die("revision walk setup failed");3010mark_edges_uninteresting(&revs, show_edge);30113012if(!fn_show_object)3013 fn_show_object = show_object;3014traverse_commit_list_filtered(&filter_options, &revs,3015 show_commit, fn_show_object, NULL,3016 NULL);30173018if(unpack_unreachable_expiration) {3019 revs.ignore_missing_links =1;3020if(add_unseen_recent_objects_to_traversal(&revs,3021 unpack_unreachable_expiration))3022die("unable to add recent objects");3023if(prepare_revision_walk(&revs))3024die("revision walk setup failed");3025traverse_commit_list(&revs, record_recent_commit,3026 record_recent_object, NULL);3027}30283029if(keep_unreachable)3030add_objects_in_unpacked_packs(&revs);3031if(pack_loose_unreachable)3032add_unreachable_loose_objects();3033if(unpack_unreachable)3034loosen_unused_packed_objects(&revs);30353036oid_array_clear(&recent_objects);3037}30383039static voidadd_extra_kept_packs(const struct string_list *names)3040{3041struct packed_git *p;30423043if(!names->nr)3044return;30453046for(p =get_packed_git(the_repository); p; p = p->next) {3047const char*name =basename(p->pack_name);3048int i;30493050if(!p->pack_local)3051continue;30523053for(i =0; i < names->nr; i++)3054if(!fspathcmp(name, names->items[i].string))3055break;30563057if(i < names->nr) {3058 p->pack_keep_in_core =1;3059 ignore_packed_keep_in_core =1;3060continue;3061}3062}3063}30643065static intoption_parse_index_version(const struct option *opt,3066const char*arg,int unset)3067{3068char*c;3069const char*val = arg;3070 pack_idx_opts.version =strtoul(val, &c,10);3071if(pack_idx_opts.version >2)3072die(_("unsupported index version%s"), val);3073if(*c ==','&& c[1])3074 pack_idx_opts.off32_limit =strtoul(c+1, &c,0);3075if(*c || pack_idx_opts.off32_limit &0x80000000)3076die(_("bad index version '%s'"), val);3077return0;3078}30793080static intoption_parse_unpack_unreachable(const struct option *opt,3081const char*arg,int unset)3082{3083if(unset) {3084 unpack_unreachable =0;3085 unpack_unreachable_expiration =0;3086}3087else{3088 unpack_unreachable =1;3089if(arg)3090 unpack_unreachable_expiration =approxidate(arg);3091}3092return0;3093}30943095intcmd_pack_objects(int argc,const char**argv,const char*prefix)3096{3097int use_internal_rev_list =0;3098int thin =0;3099int shallow =0;3100int all_progress_implied =0;3101struct argv_array rp = ARGV_ARRAY_INIT;3102int rev_list_unpacked =0, rev_list_all =0, rev_list_reflog =0;3103int rev_list_index =0;3104struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;3105struct option pack_objects_options[] = {3106OPT_SET_INT('q',"quiet", &progress,3107N_("do not show progress meter"),0),3108OPT_SET_INT(0,"progress", &progress,3109N_("show progress meter"),1),3110OPT_SET_INT(0,"all-progress", &progress,3111N_("show progress meter during object writing phase"),2),3112OPT_BOOL(0,"all-progress-implied",3113&all_progress_implied,3114N_("similar to --all-progress when progress meter is shown")),3115{ OPTION_CALLBACK,0,"index-version", NULL,N_("version[,offset]"),3116N_("write the pack index file in the specified idx format version"),31170, option_parse_index_version },3118OPT_MAGNITUDE(0,"max-pack-size", &pack_size_limit,3119N_("maximum size of each output pack file")),3120OPT_BOOL(0,"local", &local,3121N_("ignore borrowed objects from alternate object store")),3122OPT_BOOL(0,"incremental", &incremental,3123N_("ignore packed objects")),3124OPT_INTEGER(0,"window", &window,3125N_("limit pack window by objects")),3126OPT_MAGNITUDE(0,"window-memory", &window_memory_limit,3127N_("limit pack window by memory in addition to object limit")),3128OPT_INTEGER(0,"depth", &depth,3129N_("maximum length of delta chain allowed in the resulting pack")),3130OPT_BOOL(0,"reuse-delta", &reuse_delta,3131N_("reuse existing deltas")),3132OPT_BOOL(0,"reuse-object", &reuse_object,3133N_("reuse existing objects")),3134OPT_BOOL(0,"delta-base-offset", &allow_ofs_delta,3135N_("use OFS_DELTA objects")),3136OPT_INTEGER(0,"threads", &delta_search_threads,3137N_("use threads when searching for best delta matches")),3138OPT_BOOL(0,"non-empty", &non_empty,3139N_("do not create an empty pack output")),3140OPT_BOOL(0,"revs", &use_internal_rev_list,3141N_("read revision arguments from standard input")),3142OPT_SET_INT_F(0,"unpacked", &rev_list_unpacked,3143N_("limit the objects to those that are not yet packed"),31441, PARSE_OPT_NONEG),3145OPT_SET_INT_F(0,"all", &rev_list_all,3146N_("include objects reachable from any reference"),31471, PARSE_OPT_NONEG),3148OPT_SET_INT_F(0,"reflog", &rev_list_reflog,3149N_("include objects referred by reflog entries"),31501, PARSE_OPT_NONEG),3151OPT_SET_INT_F(0,"indexed-objects", &rev_list_index,3152N_("include objects referred to by the index"),31531, PARSE_OPT_NONEG),3154OPT_BOOL(0,"stdout", &pack_to_stdout,3155N_("output pack to stdout")),3156OPT_BOOL(0,"include-tag", &include_tag,3157N_("include tag objects that refer to objects to be packed")),3158OPT_BOOL(0,"keep-unreachable", &keep_unreachable,3159N_("keep unreachable objects")),3160OPT_BOOL(0,"pack-loose-unreachable", &pack_loose_unreachable,3161N_("pack loose unreachable objects")),3162{ OPTION_CALLBACK,0,"unpack-unreachable", NULL,N_("time"),3163N_("unpack unreachable objects newer than <time>"),3164 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },3165OPT_BOOL(0,"thin", &thin,3166N_("create thin packs")),3167OPT_BOOL(0,"shallow", &shallow,3168N_("create packs suitable for shallow fetches")),3169OPT_BOOL(0,"honor-pack-keep", &ignore_packed_keep_on_disk,3170N_("ignore packs that have companion .keep file")),3171OPT_STRING_LIST(0,"keep-pack", &keep_pack_list,N_("name"),3172N_("ignore this pack")),3173OPT_INTEGER(0,"compression", &pack_compression_level,3174N_("pack compression level")),3175OPT_SET_INT(0,"keep-true-parents", &grafts_replace_parents,3176N_("do not hide commits by grafts"),0),3177OPT_BOOL(0,"use-bitmap-index", &use_bitmap_index,3178N_("use a bitmap index if available to speed up counting objects")),3179OPT_BOOL(0,"write-bitmap-index", &write_bitmap_index,3180N_("write a bitmap index together with the pack index")),3181OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),3182{ OPTION_CALLBACK,0,"missing", NULL,N_("action"),3183N_("handling for missing objects"), PARSE_OPT_NONEG,3184 option_parse_missing_action },3185OPT_BOOL(0,"exclude-promisor-objects", &exclude_promisor_objects,3186N_("do not pack objects in promisor packfiles")),3187OPT_END(),3188};31893190if(DFS_NUM_STATES > (1<< OE_DFS_STATE_BITS))3191BUG("too many dfs states, increase OE_DFS_STATE_BITS");31923193 check_replace_refs =0;31943195reset_pack_idx_option(&pack_idx_opts);3196git_config(git_pack_config, NULL);31973198 progress =isatty(2);3199 argc =parse_options(argc, argv, prefix, pack_objects_options,3200 pack_usage,0);32013202if(argc) {3203 base_name = argv[0];3204 argc--;3205}3206if(pack_to_stdout != !base_name || argc)3207usage_with_options(pack_usage, pack_objects_options);32083209if(depth >= (1<< OE_DEPTH_BITS)) {3210warning(_("delta chain depth%dis too deep, forcing%d"),3211 depth, (1<< OE_DEPTH_BITS) -1);3212 depth = (1<< OE_DEPTH_BITS) -1;3213}3214if(cache_max_small_delta_size >= (1U<< OE_Z_DELTA_BITS)) {3215warning(_("pack.deltaCacheLimit is too high, forcing%d"),3216(1U<< OE_Z_DELTA_BITS) -1);3217 cache_max_small_delta_size = (1U<< OE_Z_DELTA_BITS) -1;3218}32193220argv_array_push(&rp,"pack-objects");3221if(thin) {3222 use_internal_rev_list =1;3223argv_array_push(&rp, shallow3224?"--objects-edge-aggressive"3225:"--objects-edge");3226}else3227argv_array_push(&rp,"--objects");32283229if(rev_list_all) {3230 use_internal_rev_list =1;3231argv_array_push(&rp,"--all");3232}3233if(rev_list_reflog) {3234 use_internal_rev_list =1;3235argv_array_push(&rp,"--reflog");3236}3237if(rev_list_index) {3238 use_internal_rev_list =1;3239argv_array_push(&rp,"--indexed-objects");3240}3241if(rev_list_unpacked) {3242 use_internal_rev_list =1;3243argv_array_push(&rp,"--unpacked");3244}32453246if(exclude_promisor_objects) {3247 use_internal_rev_list =1;3248 fetch_if_missing =0;3249argv_array_push(&rp,"--exclude-promisor-objects");3250}3251if(unpack_unreachable || keep_unreachable || pack_loose_unreachable)3252 use_internal_rev_list =1;32533254if(!reuse_object)3255 reuse_delta =0;3256if(pack_compression_level == -1)3257 pack_compression_level = Z_DEFAULT_COMPRESSION;3258else if(pack_compression_level <0|| pack_compression_level > Z_BEST_COMPRESSION)3259die("bad pack compression level%d", pack_compression_level);32603261if(!delta_search_threads)/* --threads=0 means autodetect */3262 delta_search_threads =online_cpus();32633264#ifdef NO_PTHREADS3265if(delta_search_threads !=1)3266warning("no threads support, ignoring --threads");3267#endif3268if(!pack_to_stdout && !pack_size_limit)3269 pack_size_limit = pack_size_limit_cfg;3270if(pack_to_stdout && pack_size_limit)3271die("--max-pack-size cannot be used to build a pack for transfer");3272if(pack_size_limit && pack_size_limit <1024*1024) {3273warning("minimum pack size limit is 1 MiB");3274 pack_size_limit =1024*1024;3275}32763277if(!pack_to_stdout && thin)3278die("--thin cannot be used to build an indexable pack.");32793280if(keep_unreachable && unpack_unreachable)3281die("--keep-unreachable and --unpack-unreachable are incompatible");3282if(!rev_list_all || !rev_list_reflog || !rev_list_index)3283 unpack_unreachable_expiration =0;32843285if(filter_options.choice) {3286if(!pack_to_stdout)3287die("cannot use --filter without --stdout");3288 use_bitmap_index =0;3289}32903291/*3292 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3293 *3294 * - to produce good pack (with bitmap index not-yet-packed objects are3295 * packed in suboptimal order).3296 *3297 * - to use more robust pack-generation codepath (avoiding possible3298 * bugs in bitmap code and possible bitmap index corruption).3299 */3300if(!pack_to_stdout)3301 use_bitmap_index_default =0;33023303if(use_bitmap_index <0)3304 use_bitmap_index = use_bitmap_index_default;33053306/* "hard" reasons not to use bitmaps; these just won't work at all */3307if(!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) ||is_repository_shallow())3308 use_bitmap_index =0;33093310if(pack_to_stdout || !rev_list_all)3311 write_bitmap_index =0;33123313if(progress && all_progress_implied)3314 progress =2;33153316add_extra_kept_packs(&keep_pack_list);3317if(ignore_packed_keep_on_disk) {3318struct packed_git *p;3319for(p =get_packed_git(the_repository); p; p = p->next)3320if(p->pack_local && p->pack_keep)3321break;3322if(!p)/* no keep-able packs found */3323 ignore_packed_keep_on_disk =0;3324}3325if(local) {3326/*3327 * unlike ignore_packed_keep_on_disk above, we do not3328 * want to unset "local" based on looking at packs, as3329 * it also covers non-local objects3330 */3331struct packed_git *p;3332for(p =get_packed_git(the_repository); p; p = p->next) {3333if(!p->pack_local) {3334 have_non_local_packs =1;3335break;3336}3337}3338}33393340prepare_packing_data(&to_pack);33413342if(progress)3343 progress_state =start_progress(_("Enumerating objects"),0);3344if(!use_internal_rev_list)3345read_object_list_from_stdin();3346else{3347get_object_list(rp.argc, rp.argv);3348argv_array_clear(&rp);3349}3350cleanup_preferred_base();3351if(include_tag && nr_result)3352for_each_ref(add_ref_tag, NULL);3353stop_progress(&progress_state);33543355if(non_empty && !nr_result)3356return0;3357if(nr_result)3358prepare_pack(window, depth);3359write_pack_file();3360if(progress)3361fprintf_ln(stderr,"Total %"PRIu32" (delta %"PRIu32"),"3362" reused %"PRIu32" (delta %"PRIu32")",3363 written, written_delta, reused, reused_delta);3364return0;3365}