1#include"builtin.h" 2#include"cache.h" 3#include"repository.h" 4#include"config.h" 5#include"attr.h" 6#include"object.h" 7#include"blob.h" 8#include"commit.h" 9#include"tag.h" 10#include"tree.h" 11#include"delta.h" 12#include"pack.h" 13#include"pack-revindex.h" 14#include"csum-file.h" 15#include"tree-walk.h" 16#include"diff.h" 17#include"revision.h" 18#include"list-objects.h" 19#include"list-objects-filter.h" 20#include"list-objects-filter-options.h" 21#include"pack-objects.h" 22#include"progress.h" 23#include"refs.h" 24#include"streaming.h" 25#include"thread-utils.h" 26#include"pack-bitmap.h" 27#include"reachable.h" 28#include"sha1-array.h" 29#include"argv-array.h" 30#include"list.h" 31#include"packfile.h" 32#include"object-store.h" 33#include"dir.h" 34 35#define IN_PACK(obj) oe_in_pack(&to_pack, obj) 36#define SIZE(obj) oe_size(&to_pack, obj) 37#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size) 38#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj) 39#define DELTA(obj) oe_delta(&to_pack, obj) 40#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) 41#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) 42#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) 43#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) 44#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) 45#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) 46 47static const char*pack_usage[] = { 48N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 49N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 50 NULL 51}; 52 53/* 54 * Objects we are going to pack are collected in the `to_pack` structure. 55 * It contains an array (dynamically expanded) of the object data, and a map 56 * that can resolve SHA1s to their position in the array. 57 */ 58static struct packing_data to_pack; 59 60static struct pack_idx_entry **written_list; 61static uint32_t nr_result, nr_written, nr_seen; 62 63static int non_empty; 64static int reuse_delta =1, reuse_object =1; 65static int keep_unreachable, unpack_unreachable, include_tag; 66static timestamp_t unpack_unreachable_expiration; 67static int pack_loose_unreachable; 68static int local; 69static int have_non_local_packs; 70static int incremental; 71static int ignore_packed_keep_on_disk; 72static int ignore_packed_keep_in_core; 73static int allow_ofs_delta; 74static struct pack_idx_option pack_idx_opts; 75static const char*base_name; 76static int progress =1; 77static int window =10; 78static unsigned long pack_size_limit; 79static int depth =50; 80static int delta_search_threads; 81static int pack_to_stdout; 82static int num_preferred_base; 83static struct progress *progress_state; 84 85static struct packed_git *reuse_packfile; 86static uint32_t reuse_packfile_objects; 87static off_t reuse_packfile_offset; 88 89static int use_bitmap_index_default =1; 90static int use_bitmap_index = -1; 91static int write_bitmap_index; 92static uint16_t write_bitmap_options; 93 94static int exclude_promisor_objects; 95 96static unsigned long delta_cache_size =0; 97static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; 98static unsigned long cache_max_small_delta_size =1000; 99 100static unsigned long window_memory_limit =0; 101 102static struct list_objects_filter_options filter_options; 103 104enum missing_action { 105 MA_ERROR =0,/* fail if any missing objects are encountered */ 106 MA_ALLOW_ANY,/* silently allow ALL missing objects */ 107 MA_ALLOW_PROMISOR,/* silently allow all missing PROMISOR objects */ 108}; 109static enum missing_action arg_missing_action; 110static show_object_fn fn_show_object; 111 112/* 113 * stats 114 */ 115static uint32_t written, written_delta; 116static uint32_t reused, reused_delta; 117 118/* 119 * Indexed commits 120 */ 121static struct commit **indexed_commits; 122static unsigned int indexed_commits_nr; 123static unsigned int indexed_commits_alloc; 124 125static voidindex_commit_for_bitmap(struct commit *commit) 126{ 127if(indexed_commits_nr >= indexed_commits_alloc) { 128 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 129REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 130} 131 132 indexed_commits[indexed_commits_nr++] = commit; 133} 134 135static void*get_delta(struct object_entry *entry) 136{ 137unsigned long size, base_size, delta_size; 138void*buf, *base_buf, *delta_buf; 139enum object_type type; 140 141 buf =read_object_file(&entry->idx.oid, &type, &size); 142if(!buf) 143die(_("unable to read%s"),oid_to_hex(&entry->idx.oid)); 144 base_buf =read_object_file(&DELTA(entry)->idx.oid, &type, 145&base_size); 146if(!base_buf) 147die("unable to read%s", 148oid_to_hex(&DELTA(entry)->idx.oid)); 149 delta_buf =diff_delta(base_buf, base_size, 150 buf, size, &delta_size,0); 151/* 152 * We succesfully computed this delta once but dropped it for 153 * memory reasons. Something is very wrong if this time we 154 * recompute and create a different delta. 155 */ 156if(!delta_buf || delta_size !=DELTA_SIZE(entry)) 157BUG("delta size changed"); 158free(buf); 159free(base_buf); 160return delta_buf; 161} 162 163static unsigned longdo_compress(void**pptr,unsigned long size) 164{ 165 git_zstream stream; 166void*in, *out; 167unsigned long maxsize; 168 169git_deflate_init(&stream, pack_compression_level); 170 maxsize =git_deflate_bound(&stream, size); 171 172 in = *pptr; 173 out =xmalloc(maxsize); 174*pptr = out; 175 176 stream.next_in = in; 177 stream.avail_in = size; 178 stream.next_out = out; 179 stream.avail_out = maxsize; 180while(git_deflate(&stream, Z_FINISH) == Z_OK) 181;/* nothing */ 182git_deflate_end(&stream); 183 184free(in); 185return stream.total_out; 186} 187 188static unsigned longwrite_large_blob_data(struct git_istream *st,struct hashfile *f, 189const struct object_id *oid) 190{ 191 git_zstream stream; 192unsigned char ibuf[1024*16]; 193unsigned char obuf[1024*16]; 194unsigned long olen =0; 195 196git_deflate_init(&stream, pack_compression_level); 197 198for(;;) { 199 ssize_t readlen; 200int zret = Z_OK; 201 readlen =read_istream(st, ibuf,sizeof(ibuf)); 202if(readlen == -1) 203die(_("unable to read%s"),oid_to_hex(oid)); 204 205 stream.next_in = ibuf; 206 stream.avail_in = readlen; 207while((stream.avail_in || readlen ==0) && 208(zret == Z_OK || zret == Z_BUF_ERROR)) { 209 stream.next_out = obuf; 210 stream.avail_out =sizeof(obuf); 211 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 212hashwrite(f, obuf, stream.next_out - obuf); 213 olen += stream.next_out - obuf; 214} 215if(stream.avail_in) 216die(_("deflate error (%d)"), zret); 217if(readlen ==0) { 218if(zret != Z_STREAM_END) 219die(_("deflate error (%d)"), zret); 220break; 221} 222} 223git_deflate_end(&stream); 224return olen; 225} 226 227/* 228 * we are going to reuse the existing object data as is. make 229 * sure it is not corrupt. 230 */ 231static intcheck_pack_inflate(struct packed_git *p, 232struct pack_window **w_curs, 233 off_t offset, 234 off_t len, 235unsigned long expect) 236{ 237 git_zstream stream; 238unsigned char fakebuf[4096], *in; 239int st; 240 241memset(&stream,0,sizeof(stream)); 242git_inflate_init(&stream); 243do{ 244 in =use_pack(p, w_curs, offset, &stream.avail_in); 245 stream.next_in = in; 246 stream.next_out = fakebuf; 247 stream.avail_out =sizeof(fakebuf); 248 st =git_inflate(&stream, Z_FINISH); 249 offset += stream.next_in - in; 250}while(st == Z_OK || st == Z_BUF_ERROR); 251git_inflate_end(&stream); 252return(st == Z_STREAM_END && 253 stream.total_out == expect && 254 stream.total_in == len) ?0: -1; 255} 256 257static voidcopy_pack_data(struct hashfile *f, 258struct packed_git *p, 259struct pack_window **w_curs, 260 off_t offset, 261 off_t len) 262{ 263unsigned char*in; 264unsigned long avail; 265 266while(len) { 267 in =use_pack(p, w_curs, offset, &avail); 268if(avail > len) 269 avail = (unsigned long)len; 270hashwrite(f, in, avail); 271 offset += avail; 272 len -= avail; 273} 274} 275 276/* Return 0 if we will bust the pack-size limit */ 277static unsigned longwrite_no_reuse_object(struct hashfile *f,struct object_entry *entry, 278unsigned long limit,int usable_delta) 279{ 280unsigned long size, datalen; 281unsigned char header[MAX_PACK_OBJECT_HEADER], 282 dheader[MAX_PACK_OBJECT_HEADER]; 283unsigned hdrlen; 284enum object_type type; 285void*buf; 286struct git_istream *st = NULL; 287const unsigned hashsz = the_hash_algo->rawsz; 288 289if(!usable_delta) { 290if(oe_type(entry) == OBJ_BLOB && 291oe_size_greater_than(&to_pack, entry, big_file_threshold) && 292(st =open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) 293 buf = NULL; 294else{ 295 buf =read_object_file(&entry->idx.oid, &type, &size); 296if(!buf) 297die(_("unable to read%s"), 298oid_to_hex(&entry->idx.oid)); 299} 300/* 301 * make sure no cached delta data remains from a 302 * previous attempt before a pack split occurred. 303 */ 304FREE_AND_NULL(entry->delta_data); 305 entry->z_delta_size =0; 306}else if(entry->delta_data) { 307 size =DELTA_SIZE(entry); 308 buf = entry->delta_data; 309 entry->delta_data = NULL; 310 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 311 OBJ_OFS_DELTA : OBJ_REF_DELTA; 312}else{ 313 buf =get_delta(entry); 314 size =DELTA_SIZE(entry); 315 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 316 OBJ_OFS_DELTA : OBJ_REF_DELTA; 317} 318 319if(st)/* large blob case, just assume we don't compress well */ 320 datalen = size; 321else if(entry->z_delta_size) 322 datalen = entry->z_delta_size; 323else 324 datalen =do_compress(&buf, size); 325 326/* 327 * The object header is a byte of 'type' followed by zero or 328 * more bytes of length. 329 */ 330 hdrlen =encode_in_pack_object_header(header,sizeof(header), 331 type, size); 332 333if(type == OBJ_OFS_DELTA) { 334/* 335 * Deltas with relative base contain an additional 336 * encoding of the relative offset for the delta 337 * base from this object's position in the pack. 338 */ 339 off_t ofs = entry->idx.offset -DELTA(entry)->idx.offset; 340unsigned pos =sizeof(dheader) -1; 341 dheader[pos] = ofs &127; 342while(ofs >>=7) 343 dheader[--pos] =128| (--ofs &127); 344if(limit && hdrlen +sizeof(dheader) - pos + datalen + hashsz >= limit) { 345if(st) 346close_istream(st); 347free(buf); 348return0; 349} 350hashwrite(f, header, hdrlen); 351hashwrite(f, dheader + pos,sizeof(dheader) - pos); 352 hdrlen +=sizeof(dheader) - pos; 353}else if(type == OBJ_REF_DELTA) { 354/* 355 * Deltas with a base reference contain 356 * additional bytes for the base object ID. 357 */ 358if(limit && hdrlen + hashsz + datalen + hashsz >= limit) { 359if(st) 360close_istream(st); 361free(buf); 362return0; 363} 364hashwrite(f, header, hdrlen); 365hashwrite(f,DELTA(entry)->idx.oid.hash, hashsz); 366 hdrlen += hashsz; 367}else{ 368if(limit && hdrlen + datalen + hashsz >= limit) { 369if(st) 370close_istream(st); 371free(buf); 372return0; 373} 374hashwrite(f, header, hdrlen); 375} 376if(st) { 377 datalen =write_large_blob_data(st, f, &entry->idx.oid); 378close_istream(st); 379}else{ 380hashwrite(f, buf, datalen); 381free(buf); 382} 383 384return hdrlen + datalen; 385} 386 387/* Return 0 if we will bust the pack-size limit */ 388static off_t write_reuse_object(struct hashfile *f,struct object_entry *entry, 389unsigned long limit,int usable_delta) 390{ 391struct packed_git *p =IN_PACK(entry); 392struct pack_window *w_curs = NULL; 393struct revindex_entry *revidx; 394 off_t offset; 395enum object_type type =oe_type(entry); 396 off_t datalen; 397unsigned char header[MAX_PACK_OBJECT_HEADER], 398 dheader[MAX_PACK_OBJECT_HEADER]; 399unsigned hdrlen; 400const unsigned hashsz = the_hash_algo->rawsz; 401unsigned long entry_size =SIZE(entry); 402 403if(DELTA(entry)) 404 type = (allow_ofs_delta &&DELTA(entry)->idx.offset) ? 405 OBJ_OFS_DELTA : OBJ_REF_DELTA; 406 hdrlen =encode_in_pack_object_header(header,sizeof(header), 407 type, entry_size); 408 409 offset = entry->in_pack_offset; 410 revidx =find_pack_revindex(p, offset); 411 datalen = revidx[1].offset - offset; 412if(!pack_to_stdout && p->index_version >1&& 413check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 414error(_("bad packed object CRC for%s"), 415oid_to_hex(&entry->idx.oid)); 416unuse_pack(&w_curs); 417returnwrite_no_reuse_object(f, entry, limit, usable_delta); 418} 419 420 offset += entry->in_pack_header_size; 421 datalen -= entry->in_pack_header_size; 422 423if(!pack_to_stdout && p->index_version ==1&& 424check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) { 425error(_("corrupt packed object for%s"), 426oid_to_hex(&entry->idx.oid)); 427unuse_pack(&w_curs); 428returnwrite_no_reuse_object(f, entry, limit, usable_delta); 429} 430 431if(type == OBJ_OFS_DELTA) { 432 off_t ofs = entry->idx.offset -DELTA(entry)->idx.offset; 433unsigned pos =sizeof(dheader) -1; 434 dheader[pos] = ofs &127; 435while(ofs >>=7) 436 dheader[--pos] =128| (--ofs &127); 437if(limit && hdrlen +sizeof(dheader) - pos + datalen + hashsz >= limit) { 438unuse_pack(&w_curs); 439return0; 440} 441hashwrite(f, header, hdrlen); 442hashwrite(f, dheader + pos,sizeof(dheader) - pos); 443 hdrlen +=sizeof(dheader) - pos; 444 reused_delta++; 445}else if(type == OBJ_REF_DELTA) { 446if(limit && hdrlen + hashsz + datalen + hashsz >= limit) { 447unuse_pack(&w_curs); 448return0; 449} 450hashwrite(f, header, hdrlen); 451hashwrite(f,DELTA(entry)->idx.oid.hash, hashsz); 452 hdrlen += hashsz; 453 reused_delta++; 454}else{ 455if(limit && hdrlen + datalen + hashsz >= limit) { 456unuse_pack(&w_curs); 457return0; 458} 459hashwrite(f, header, hdrlen); 460} 461copy_pack_data(f, p, &w_curs, offset, datalen); 462unuse_pack(&w_curs); 463 reused++; 464return hdrlen + datalen; 465} 466 467/* Return 0 if we will bust the pack-size limit */ 468static off_t write_object(struct hashfile *f, 469struct object_entry *entry, 470 off_t write_offset) 471{ 472unsigned long limit; 473 off_t len; 474int usable_delta, to_reuse; 475 476if(!pack_to_stdout) 477crc32_begin(f); 478 479/* apply size limit if limited packsize and not first object */ 480if(!pack_size_limit || !nr_written) 481 limit =0; 482else if(pack_size_limit <= write_offset) 483/* 484 * the earlier object did not fit the limit; avoid 485 * mistaking this with unlimited (i.e. limit = 0). 486 */ 487 limit =1; 488else 489 limit = pack_size_limit - write_offset; 490 491if(!DELTA(entry)) 492 usable_delta =0;/* no delta */ 493else if(!pack_size_limit) 494 usable_delta =1;/* unlimited packfile */ 495else if(DELTA(entry)->idx.offset == (off_t)-1) 496 usable_delta =0;/* base was written to another pack */ 497else if(DELTA(entry)->idx.offset) 498 usable_delta =1;/* base already exists in this pack */ 499else 500 usable_delta =0;/* base could end up in another pack */ 501 502if(!reuse_object) 503 to_reuse =0;/* explicit */ 504else if(!IN_PACK(entry)) 505 to_reuse =0;/* can't reuse what we don't have */ 506else if(oe_type(entry) == OBJ_REF_DELTA || 507oe_type(entry) == OBJ_OFS_DELTA) 508/* check_object() decided it for us ... */ 509 to_reuse = usable_delta; 510/* ... but pack split may override that */ 511else if(oe_type(entry) != entry->in_pack_type) 512 to_reuse =0;/* pack has delta which is unusable */ 513else if(DELTA(entry)) 514 to_reuse =0;/* we want to pack afresh */ 515else 516 to_reuse =1;/* we have it in-pack undeltified, 517 * and we do not need to deltify it. 518 */ 519 520if(!to_reuse) 521 len =write_no_reuse_object(f, entry, limit, usable_delta); 522else 523 len =write_reuse_object(f, entry, limit, usable_delta); 524if(!len) 525return0; 526 527if(usable_delta) 528 written_delta++; 529 written++; 530if(!pack_to_stdout) 531 entry->idx.crc32 =crc32_end(f); 532return len; 533} 534 535enum write_one_status { 536 WRITE_ONE_SKIP = -1,/* already written */ 537 WRITE_ONE_BREAK =0,/* writing this will bust the limit; not written */ 538 WRITE_ONE_WRITTEN =1,/* normal */ 539 WRITE_ONE_RECURSIVE =2/* already scheduled to be written */ 540}; 541 542static enum write_one_status write_one(struct hashfile *f, 543struct object_entry *e, 544 off_t *offset) 545{ 546 off_t size; 547int recursing; 548 549/* 550 * we set offset to 1 (which is an impossible value) to mark 551 * the fact that this object is involved in "write its base 552 * first before writing a deltified object" recursion. 553 */ 554 recursing = (e->idx.offset ==1); 555if(recursing) { 556warning(_("recursive delta detected for object%s"), 557oid_to_hex(&e->idx.oid)); 558return WRITE_ONE_RECURSIVE; 559}else if(e->idx.offset || e->preferred_base) { 560/* offset is non zero if object is written already. */ 561return WRITE_ONE_SKIP; 562} 563 564/* if we are deltified, write out base object first. */ 565if(DELTA(e)) { 566 e->idx.offset =1;/* now recurse */ 567switch(write_one(f,DELTA(e), offset)) { 568case WRITE_ONE_RECURSIVE: 569/* we cannot depend on this one */ 570SET_DELTA(e, NULL); 571break; 572default: 573break; 574case WRITE_ONE_BREAK: 575 e->idx.offset = recursing; 576return WRITE_ONE_BREAK; 577} 578} 579 580 e->idx.offset = *offset; 581 size =write_object(f, e, *offset); 582if(!size) { 583 e->idx.offset = recursing; 584return WRITE_ONE_BREAK; 585} 586 written_list[nr_written++] = &e->idx; 587 588/* make sure off_t is sufficiently large not to wrap */ 589if(signed_add_overflows(*offset, size)) 590die(_("pack too large for current definition of off_t")); 591*offset += size; 592return WRITE_ONE_WRITTEN; 593} 594 595static intmark_tagged(const char*path,const struct object_id *oid,int flag, 596void*cb_data) 597{ 598struct object_id peeled; 599struct object_entry *entry =packlist_find(&to_pack, oid->hash, NULL); 600 601if(entry) 602 entry->tagged =1; 603if(!peel_ref(path, &peeled)) { 604 entry =packlist_find(&to_pack, peeled.hash, NULL); 605if(entry) 606 entry->tagged =1; 607} 608return0; 609} 610 611staticinlinevoidadd_to_write_order(struct object_entry **wo, 612unsigned int*endp, 613struct object_entry *e) 614{ 615if(e->filled) 616return; 617 wo[(*endp)++] = e; 618 e->filled =1; 619} 620 621static voidadd_descendants_to_write_order(struct object_entry **wo, 622unsigned int*endp, 623struct object_entry *e) 624{ 625int add_to_order =1; 626while(e) { 627if(add_to_order) { 628struct object_entry *s; 629/* add this node... */ 630add_to_write_order(wo, endp, e); 631/* all its siblings... */ 632for(s =DELTA_SIBLING(e); s; s =DELTA_SIBLING(s)) { 633add_to_write_order(wo, endp, s); 634} 635} 636/* drop down a level to add left subtree nodes if possible */ 637if(DELTA_CHILD(e)) { 638 add_to_order =1; 639 e =DELTA_CHILD(e); 640}else{ 641 add_to_order =0; 642/* our sibling might have some children, it is next */ 643if(DELTA_SIBLING(e)) { 644 e =DELTA_SIBLING(e); 645continue; 646} 647/* go back to our parent node */ 648 e =DELTA(e); 649while(e && !DELTA_SIBLING(e)) { 650/* we're on the right side of a subtree, keep 651 * going up until we can go right again */ 652 e =DELTA(e); 653} 654if(!e) { 655/* done- we hit our original root node */ 656return; 657} 658/* pass it off to sibling at this level */ 659 e =DELTA_SIBLING(e); 660} 661}; 662} 663 664static voidadd_family_to_write_order(struct object_entry **wo, 665unsigned int*endp, 666struct object_entry *e) 667{ 668struct object_entry *root; 669 670for(root = e;DELTA(root); root =DELTA(root)) 671;/* nothing */ 672add_descendants_to_write_order(wo, endp, root); 673} 674 675static struct object_entry **compute_write_order(void) 676{ 677unsigned int i, wo_end, last_untagged; 678 679struct object_entry **wo; 680struct object_entry *objects = to_pack.objects; 681 682for(i =0; i < to_pack.nr_objects; i++) { 683 objects[i].tagged =0; 684 objects[i].filled =0; 685SET_DELTA_CHILD(&objects[i], NULL); 686SET_DELTA_SIBLING(&objects[i], NULL); 687} 688 689/* 690 * Fully connect delta_child/delta_sibling network. 691 * Make sure delta_sibling is sorted in the original 692 * recency order. 693 */ 694for(i = to_pack.nr_objects; i >0;) { 695struct object_entry *e = &objects[--i]; 696if(!DELTA(e)) 697continue; 698/* Mark me as the first child */ 699 e->delta_sibling_idx =DELTA(e)->delta_child_idx; 700SET_DELTA_CHILD(DELTA(e), e); 701} 702 703/* 704 * Mark objects that are at the tip of tags. 705 */ 706for_each_tag_ref(mark_tagged, NULL); 707 708/* 709 * Give the objects in the original recency order until 710 * we see a tagged tip. 711 */ 712ALLOC_ARRAY(wo, to_pack.nr_objects); 713for(i = wo_end =0; i < to_pack.nr_objects; i++) { 714if(objects[i].tagged) 715break; 716add_to_write_order(wo, &wo_end, &objects[i]); 717} 718 last_untagged = i; 719 720/* 721 * Then fill all the tagged tips. 722 */ 723for(; i < to_pack.nr_objects; i++) { 724if(objects[i].tagged) 725add_to_write_order(wo, &wo_end, &objects[i]); 726} 727 728/* 729 * And then all remaining commits and tags. 730 */ 731for(i = last_untagged; i < to_pack.nr_objects; i++) { 732if(oe_type(&objects[i]) != OBJ_COMMIT && 733oe_type(&objects[i]) != OBJ_TAG) 734continue; 735add_to_write_order(wo, &wo_end, &objects[i]); 736} 737 738/* 739 * And then all the trees. 740 */ 741for(i = last_untagged; i < to_pack.nr_objects; i++) { 742if(oe_type(&objects[i]) != OBJ_TREE) 743continue; 744add_to_write_order(wo, &wo_end, &objects[i]); 745} 746 747/* 748 * Finally all the rest in really tight order 749 */ 750for(i = last_untagged; i < to_pack.nr_objects; i++) { 751if(!objects[i].filled) 752add_family_to_write_order(wo, &wo_end, &objects[i]); 753} 754 755if(wo_end != to_pack.nr_objects) 756die(_("ordered%uobjects, expected %"PRIu32), 757 wo_end, to_pack.nr_objects); 758 759return wo; 760} 761 762static off_t write_reused_pack(struct hashfile *f) 763{ 764unsigned char buffer[8192]; 765 off_t to_write, total; 766int fd; 767 768if(!is_pack_valid(reuse_packfile)) 769die(_("packfile is invalid:%s"), reuse_packfile->pack_name); 770 771 fd =git_open(reuse_packfile->pack_name); 772if(fd <0) 773die_errno(_("unable to open packfile for reuse:%s"), 774 reuse_packfile->pack_name); 775 776if(lseek(fd,sizeof(struct pack_header), SEEK_SET) == -1) 777die_errno(_("unable to seek in reused packfile")); 778 779if(reuse_packfile_offset <0) 780 reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz; 781 782 total = to_write = reuse_packfile_offset -sizeof(struct pack_header); 783 784while(to_write) { 785int read_pack =xread(fd, buffer,sizeof(buffer)); 786 787if(read_pack <=0) 788die_errno(_("unable to read from reused packfile")); 789 790if(read_pack > to_write) 791 read_pack = to_write; 792 793hashwrite(f, buffer, read_pack); 794 to_write -= read_pack; 795 796/* 797 * We don't know the actual number of objects written, 798 * only how many bytes written, how many bytes total, and 799 * how many objects total. So we can fake it by pretending all 800 * objects we are writing are the same size. This gives us a 801 * smooth progress meter, and at the end it matches the true 802 * answer. 803 */ 804 written = reuse_packfile_objects * 805(((double)(total - to_write)) / total); 806display_progress(progress_state, written); 807} 808 809close(fd); 810 written = reuse_packfile_objects; 811display_progress(progress_state, written); 812return reuse_packfile_offset -sizeof(struct pack_header); 813} 814 815static const char no_split_warning[] =N_( 816"disabling bitmap writing, packs are split due to pack.packSizeLimit" 817); 818 819static voidwrite_pack_file(void) 820{ 821uint32_t i =0, j; 822struct hashfile *f; 823 off_t offset; 824uint32_t nr_remaining = nr_result; 825time_t last_mtime =0; 826struct object_entry **write_order; 827 828if(progress > pack_to_stdout) 829 progress_state =start_progress(_("Writing objects"), nr_result); 830ALLOC_ARRAY(written_list, to_pack.nr_objects); 831 write_order =compute_write_order(); 832 833do{ 834struct object_id oid; 835char*pack_tmp_name = NULL; 836 837if(pack_to_stdout) 838 f =hashfd_throughput(1,"<stdout>", progress_state); 839else 840 f =create_tmp_packfile(&pack_tmp_name); 841 842 offset =write_pack_header(f, nr_remaining); 843 844if(reuse_packfile) { 845 off_t packfile_size; 846assert(pack_to_stdout); 847 848 packfile_size =write_reused_pack(f); 849 offset += packfile_size; 850} 851 852 nr_written =0; 853for(; i < to_pack.nr_objects; i++) { 854struct object_entry *e = write_order[i]; 855if(write_one(f, e, &offset) == WRITE_ONE_BREAK) 856break; 857display_progress(progress_state, written); 858} 859 860/* 861 * Did we write the wrong # entries in the header? 862 * If so, rewrite it like in fast-import 863 */ 864if(pack_to_stdout) { 865finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); 866}else if(nr_written == nr_remaining) { 867finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); 868}else{ 869int fd =finalize_hashfile(f, oid.hash,0); 870fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, 871 nr_written, oid.hash, offset); 872close(fd); 873if(write_bitmap_index) { 874warning(_(no_split_warning)); 875 write_bitmap_index =0; 876} 877} 878 879if(!pack_to_stdout) { 880struct stat st; 881struct strbuf tmpname = STRBUF_INIT; 882 883/* 884 * Packs are runtime accessed in their mtime 885 * order since newer packs are more likely to contain 886 * younger objects. So if we are creating multiple 887 * packs then we should modify the mtime of later ones 888 * to preserve this property. 889 */ 890if(stat(pack_tmp_name, &st) <0) { 891warning_errno(_("failed to stat%s"), pack_tmp_name); 892}else if(!last_mtime) { 893 last_mtime = st.st_mtime; 894}else{ 895struct utimbuf utb; 896 utb.actime = st.st_atime; 897 utb.modtime = --last_mtime; 898if(utime(pack_tmp_name, &utb) <0) 899warning_errno(_("failed utime() on%s"), pack_tmp_name); 900} 901 902strbuf_addf(&tmpname,"%s-", base_name); 903 904if(write_bitmap_index) { 905bitmap_writer_set_checksum(oid.hash); 906bitmap_writer_build_type_index( 907&to_pack, written_list, nr_written); 908} 909 910finish_tmp_packfile(&tmpname, pack_tmp_name, 911 written_list, nr_written, 912&pack_idx_opts, oid.hash); 913 914if(write_bitmap_index) { 915strbuf_addf(&tmpname,"%s.bitmap",oid_to_hex(&oid)); 916 917stop_progress(&progress_state); 918 919bitmap_writer_show_progress(progress); 920bitmap_writer_reuse_bitmaps(&to_pack); 921bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 922bitmap_writer_build(&to_pack); 923bitmap_writer_finish(written_list, nr_written, 924 tmpname.buf, write_bitmap_options); 925 write_bitmap_index =0; 926} 927 928strbuf_release(&tmpname); 929free(pack_tmp_name); 930puts(oid_to_hex(&oid)); 931} 932 933/* mark written objects as written to previous pack */ 934for(j =0; j < nr_written; j++) { 935 written_list[j]->offset = (off_t)-1; 936} 937 nr_remaining -= nr_written; 938}while(nr_remaining && i < to_pack.nr_objects); 939 940free(written_list); 941free(write_order); 942stop_progress(&progress_state); 943if(written != nr_result) 944die(_("wrote %"PRIu32" objects while expecting %"PRIu32), 945 written, nr_result); 946} 947 948static intno_try_delta(const char*path) 949{ 950static struct attr_check *check; 951 952if(!check) 953 check =attr_check_initl("delta", NULL); 954if(git_check_attr(&the_index, path, check)) 955return0; 956if(ATTR_FALSE(check->items[0].value)) 957return1; 958return0; 959} 960 961/* 962 * When adding an object, check whether we have already added it 963 * to our packing list. If so, we can skip. However, if we are 964 * being asked to excludei t, but the previous mention was to include 965 * it, make sure to adjust its flags and tweak our numbers accordingly. 966 * 967 * As an optimization, we pass out the index position where we would have 968 * found the item, since that saves us from having to look it up again a 969 * few lines later when we want to add the new entry. 970 */ 971static inthave_duplicate_entry(const struct object_id *oid, 972int exclude, 973uint32_t*index_pos) 974{ 975struct object_entry *entry; 976 977 entry =packlist_find(&to_pack, oid->hash, index_pos); 978if(!entry) 979return0; 980 981if(exclude) { 982if(!entry->preferred_base) 983 nr_result--; 984 entry->preferred_base =1; 985} 986 987return1; 988} 989 990static intwant_found_object(int exclude,struct packed_git *p) 991{ 992if(exclude) 993return1; 994if(incremental) 995return0; 996 997/* 998 * When asked to do --local (do not include an object that appears in a 999 * pack we borrow from elsewhere) or --honor-pack-keep (do not include1000 * an object that appears in a pack marked with .keep), finding a pack1001 * that matches the criteria is sufficient for us to decide to omit it.1002 * However, even if this pack does not satisfy the criteria, we need to1003 * make sure no copy of this object appears in _any_ pack that makes us1004 * to omit the object, so we need to check all the packs.1005 *1006 * We can however first check whether these options can possible matter;1007 * if they do not matter we know we want the object in generated pack.1008 * Otherwise, we signal "-1" at the end to tell the caller that we do1009 * not know either way, and it needs to check more packs.1010 */1011if(!ignore_packed_keep_on_disk &&1012!ignore_packed_keep_in_core &&1013(!local || !have_non_local_packs))1014return1;10151016if(local && !p->pack_local)1017return0;1018if(p->pack_local &&1019((ignore_packed_keep_on_disk && p->pack_keep) ||1020(ignore_packed_keep_in_core && p->pack_keep_in_core)))1021return0;10221023/* we don't know yet; keep looking for more packs */1024return-1;1025}10261027/*1028 * Check whether we want the object in the pack (e.g., we do not want1029 * objects found in non-local stores if the "--local" option was used).1030 *1031 * If the caller already knows an existing pack it wants to take the object1032 * from, that is passed in *found_pack and *found_offset; otherwise this1033 * function finds if there is any pack that has the object and returns the pack1034 * and its offset in these variables.1035 */1036static intwant_object_in_pack(const struct object_id *oid,1037int exclude,1038struct packed_git **found_pack,1039 off_t *found_offset)1040{1041int want;1042struct list_head *pos;10431044if(!exclude && local &&has_loose_object_nonlocal(oid))1045return0;10461047/*1048 * If we already know the pack object lives in, start checks from that1049 * pack - in the usual case when neither --local was given nor .keep files1050 * are present we will determine the answer right now.1051 */1052if(*found_pack) {1053 want =want_found_object(exclude, *found_pack);1054if(want != -1)1055return want;1056}1057list_for_each(pos,get_packed_git_mru(the_repository)) {1058struct packed_git *p =list_entry(pos,struct packed_git, mru);1059 off_t offset;10601061if(p == *found_pack)1062 offset = *found_offset;1063else1064 offset =find_pack_entry_one(oid->hash, p);10651066if(offset) {1067if(!*found_pack) {1068if(!is_pack_valid(p))1069continue;1070*found_offset = offset;1071*found_pack = p;1072}1073 want =want_found_object(exclude, p);1074if(!exclude && want >0)1075list_move(&p->mru,1076get_packed_git_mru(the_repository));1077if(want != -1)1078return want;1079}1080}10811082return1;1083}10841085static voidcreate_object_entry(const struct object_id *oid,1086enum object_type type,1087uint32_t hash,1088int exclude,1089int no_try_delta,1090uint32_t index_pos,1091struct packed_git *found_pack,1092 off_t found_offset)1093{1094struct object_entry *entry;10951096 entry =packlist_alloc(&to_pack, oid->hash, index_pos);1097 entry->hash = hash;1098oe_set_type(entry, type);1099if(exclude)1100 entry->preferred_base =1;1101else1102 nr_result++;1103if(found_pack) {1104oe_set_in_pack(&to_pack, entry, found_pack);1105 entry->in_pack_offset = found_offset;1106}11071108 entry->no_try_delta = no_try_delta;1109}11101111static const char no_closure_warning[] =N_(1112"disabling bitmap writing, as some objects are not being packed"1113);11141115static intadd_object_entry(const struct object_id *oid,enum object_type type,1116const char*name,int exclude)1117{1118struct packed_git *found_pack = NULL;1119 off_t found_offset =0;1120uint32_t index_pos;11211122display_progress(progress_state, ++nr_seen);11231124if(have_duplicate_entry(oid, exclude, &index_pos))1125return0;11261127if(!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {1128/* The pack is missing an object, so it will not have closure */1129if(write_bitmap_index) {1130warning(_(no_closure_warning));1131 write_bitmap_index =0;1132}1133return0;1134}11351136create_object_entry(oid, type,pack_name_hash(name),1137 exclude, name &&no_try_delta(name),1138 index_pos, found_pack, found_offset);1139return1;1140}11411142static intadd_object_entry_from_bitmap(const struct object_id *oid,1143enum object_type type,1144int flags,uint32_t name_hash,1145struct packed_git *pack, off_t offset)1146{1147uint32_t index_pos;11481149display_progress(progress_state, ++nr_seen);11501151if(have_duplicate_entry(oid,0, &index_pos))1152return0;11531154if(!want_object_in_pack(oid,0, &pack, &offset))1155return0;11561157create_object_entry(oid, type, name_hash,0,0, index_pos, pack, offset);1158return1;1159}11601161struct pbase_tree_cache {1162struct object_id oid;1163int ref;1164int temporary;1165void*tree_data;1166unsigned long tree_size;1167};11681169static struct pbase_tree_cache *(pbase_tree_cache[256]);1170static intpbase_tree_cache_ix(const struct object_id *oid)1171{1172return oid->hash[0] %ARRAY_SIZE(pbase_tree_cache);1173}1174static intpbase_tree_cache_ix_incr(int ix)1175{1176return(ix+1) %ARRAY_SIZE(pbase_tree_cache);1177}11781179static struct pbase_tree {1180struct pbase_tree *next;1181/* This is a phony "cache" entry; we are not1182 * going to evict it or find it through _get()1183 * mechanism -- this is for the toplevel node that1184 * would almost always change with any commit.1185 */1186struct pbase_tree_cache pcache;1187} *pbase_tree;11881189static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)1190{1191struct pbase_tree_cache *ent, *nent;1192void*data;1193unsigned long size;1194enum object_type type;1195int neigh;1196int my_ix =pbase_tree_cache_ix(oid);1197int available_ix = -1;11981199/* pbase-tree-cache acts as a limited hashtable.1200 * your object will be found at your index or within a few1201 * slots after that slot if it is cached.1202 */1203for(neigh =0; neigh <8; neigh++) {1204 ent = pbase_tree_cache[my_ix];1205if(ent && !oidcmp(&ent->oid, oid)) {1206 ent->ref++;1207return ent;1208}1209else if(((available_ix <0) && (!ent || !ent->ref)) ||1210((0<= available_ix) &&1211(!ent && pbase_tree_cache[available_ix])))1212 available_ix = my_ix;1213if(!ent)1214break;1215 my_ix =pbase_tree_cache_ix_incr(my_ix);1216}12171218/* Did not find one. Either we got a bogus request or1219 * we need to read and perhaps cache.1220 */1221 data =read_object_file(oid, &type, &size);1222if(!data)1223return NULL;1224if(type != OBJ_TREE) {1225free(data);1226return NULL;1227}12281229/* We need to either cache or return a throwaway copy */12301231if(available_ix <0)1232 ent = NULL;1233else{1234 ent = pbase_tree_cache[available_ix];1235 my_ix = available_ix;1236}12371238if(!ent) {1239 nent =xmalloc(sizeof(*nent));1240 nent->temporary = (available_ix <0);1241}1242else{1243/* evict and reuse */1244free(ent->tree_data);1245 nent = ent;1246}1247oidcpy(&nent->oid, oid);1248 nent->tree_data = data;1249 nent->tree_size = size;1250 nent->ref =1;1251if(!nent->temporary)1252 pbase_tree_cache[my_ix] = nent;1253return nent;1254}12551256static voidpbase_tree_put(struct pbase_tree_cache *cache)1257{1258if(!cache->temporary) {1259 cache->ref--;1260return;1261}1262free(cache->tree_data);1263free(cache);1264}12651266static intname_cmp_len(const char*name)1267{1268int i;1269for(i =0; name[i] && name[i] !='\n'&& name[i] !='/'; i++)1270;1271return i;1272}12731274static voidadd_pbase_object(struct tree_desc *tree,1275const char*name,1276int cmplen,1277const char*fullname)1278{1279struct name_entry entry;1280int cmp;12811282while(tree_entry(tree,&entry)) {1283if(S_ISGITLINK(entry.mode))1284continue;1285 cmp =tree_entry_len(&entry) != cmplen ?1:1286memcmp(name, entry.path, cmplen);1287if(cmp >0)1288continue;1289if(cmp <0)1290return;1291if(name[cmplen] !='/') {1292add_object_entry(entry.oid,1293object_type(entry.mode),1294 fullname,1);1295return;1296}1297if(S_ISDIR(entry.mode)) {1298struct tree_desc sub;1299struct pbase_tree_cache *tree;1300const char*down = name+cmplen+1;1301int downlen =name_cmp_len(down);13021303 tree =pbase_tree_get(entry.oid);1304if(!tree)1305return;1306init_tree_desc(&sub, tree->tree_data, tree->tree_size);13071308add_pbase_object(&sub, down, downlen, fullname);1309pbase_tree_put(tree);1310}1311}1312}13131314static unsigned*done_pbase_paths;1315static int done_pbase_paths_num;1316static int done_pbase_paths_alloc;1317static intdone_pbase_path_pos(unsigned hash)1318{1319int lo =0;1320int hi = done_pbase_paths_num;1321while(lo < hi) {1322int mi = lo + (hi - lo) /2;1323if(done_pbase_paths[mi] == hash)1324return mi;1325if(done_pbase_paths[mi] < hash)1326 hi = mi;1327else1328 lo = mi +1;1329}1330return-lo-1;1331}13321333static intcheck_pbase_path(unsigned hash)1334{1335int pos =done_pbase_path_pos(hash);1336if(0<= pos)1337return1;1338 pos = -pos -1;1339ALLOC_GROW(done_pbase_paths,1340 done_pbase_paths_num +1,1341 done_pbase_paths_alloc);1342 done_pbase_paths_num++;1343if(pos < done_pbase_paths_num)1344MOVE_ARRAY(done_pbase_paths + pos +1, done_pbase_paths + pos,1345 done_pbase_paths_num - pos -1);1346 done_pbase_paths[pos] = hash;1347return0;1348}13491350static voidadd_preferred_base_object(const char*name)1351{1352struct pbase_tree *it;1353int cmplen;1354unsigned hash =pack_name_hash(name);13551356if(!num_preferred_base ||check_pbase_path(hash))1357return;13581359 cmplen =name_cmp_len(name);1360for(it = pbase_tree; it; it = it->next) {1361if(cmplen ==0) {1362add_object_entry(&it->pcache.oid, OBJ_TREE, NULL,1);1363}1364else{1365struct tree_desc tree;1366init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1367add_pbase_object(&tree, name, cmplen, name);1368}1369}1370}13711372static voidadd_preferred_base(struct object_id *oid)1373{1374struct pbase_tree *it;1375void*data;1376unsigned long size;1377struct object_id tree_oid;13781379if(window <= num_preferred_base++)1380return;13811382 data =read_object_with_reference(oid, tree_type, &size, &tree_oid);1383if(!data)1384return;13851386for(it = pbase_tree; it; it = it->next) {1387if(!oidcmp(&it->pcache.oid, &tree_oid)) {1388free(data);1389return;1390}1391}13921393 it =xcalloc(1,sizeof(*it));1394 it->next = pbase_tree;1395 pbase_tree = it;13961397oidcpy(&it->pcache.oid, &tree_oid);1398 it->pcache.tree_data = data;1399 it->pcache.tree_size = size;1400}14011402static voidcleanup_preferred_base(void)1403{1404struct pbase_tree *it;1405unsigned i;14061407 it = pbase_tree;1408 pbase_tree = NULL;1409while(it) {1410struct pbase_tree *tmp = it;1411 it = tmp->next;1412free(tmp->pcache.tree_data);1413free(tmp);1414}14151416for(i =0; i <ARRAY_SIZE(pbase_tree_cache); i++) {1417if(!pbase_tree_cache[i])1418continue;1419free(pbase_tree_cache[i]->tree_data);1420FREE_AND_NULL(pbase_tree_cache[i]);1421}14221423FREE_AND_NULL(done_pbase_paths);1424 done_pbase_paths_num = done_pbase_paths_alloc =0;1425}14261427static voidcheck_object(struct object_entry *entry)1428{1429unsigned long canonical_size;14301431if(IN_PACK(entry)) {1432struct packed_git *p =IN_PACK(entry);1433struct pack_window *w_curs = NULL;1434const unsigned char*base_ref = NULL;1435struct object_entry *base_entry;1436unsigned long used, used_0;1437unsigned long avail;1438 off_t ofs;1439unsigned char*buf, c;1440enum object_type type;1441unsigned long in_pack_size;14421443 buf =use_pack(p, &w_curs, entry->in_pack_offset, &avail);14441445/*1446 * We want in_pack_type even if we do not reuse delta1447 * since non-delta representations could still be reused.1448 */1449 used =unpack_object_header_buffer(buf, avail,1450&type,1451&in_pack_size);1452if(used ==0)1453goto give_up;14541455if(type <0)1456BUG("invalid type%d", type);1457 entry->in_pack_type = type;14581459/*1460 * Determine if this is a delta and if so whether we can1461 * reuse it or not. Otherwise let's find out as cheaply as1462 * possible what the actual type and size for this object is.1463 */1464switch(entry->in_pack_type) {1465default:1466/* Not a delta hence we've already got all we need. */1467oe_set_type(entry, entry->in_pack_type);1468SET_SIZE(entry, in_pack_size);1469 entry->in_pack_header_size = used;1470if(oe_type(entry) < OBJ_COMMIT ||oe_type(entry) > OBJ_BLOB)1471goto give_up;1472unuse_pack(&w_curs);1473return;1474case OBJ_REF_DELTA:1475if(reuse_delta && !entry->preferred_base)1476 base_ref =use_pack(p, &w_curs,1477 entry->in_pack_offset + used, NULL);1478 entry->in_pack_header_size = used + the_hash_algo->rawsz;1479break;1480case OBJ_OFS_DELTA:1481 buf =use_pack(p, &w_curs,1482 entry->in_pack_offset + used, NULL);1483 used_0 =0;1484 c = buf[used_0++];1485 ofs = c &127;1486while(c &128) {1487 ofs +=1;1488if(!ofs ||MSB(ofs,7)) {1489error(_("delta base offset overflow in pack for%s"),1490oid_to_hex(&entry->idx.oid));1491goto give_up;1492}1493 c = buf[used_0++];1494 ofs = (ofs <<7) + (c &127);1495}1496 ofs = entry->in_pack_offset - ofs;1497if(ofs <=0|| ofs >= entry->in_pack_offset) {1498error(_("delta base offset out of bound for%s"),1499oid_to_hex(&entry->idx.oid));1500goto give_up;1501}1502if(reuse_delta && !entry->preferred_base) {1503struct revindex_entry *revidx;1504 revidx =find_pack_revindex(p, ofs);1505if(!revidx)1506goto give_up;1507 base_ref =nth_packed_object_sha1(p, revidx->nr);1508}1509 entry->in_pack_header_size = used + used_0;1510break;1511}15121513if(base_ref && (base_entry =packlist_find(&to_pack, base_ref, NULL))) {1514/*1515 * If base_ref was set above that means we wish to1516 * reuse delta data, and we even found that base1517 * in the list of objects we want to pack. Goodie!1518 *1519 * Depth value does not matter - find_deltas() will1520 * never consider reused delta as the base object to1521 * deltify other objects against, in order to avoid1522 * circular deltas.1523 */1524oe_set_type(entry, entry->in_pack_type);1525SET_SIZE(entry, in_pack_size);/* delta size */1526SET_DELTA(entry, base_entry);1527SET_DELTA_SIZE(entry, in_pack_size);1528 entry->delta_sibling_idx = base_entry->delta_child_idx;1529SET_DELTA_CHILD(base_entry, entry);1530unuse_pack(&w_curs);1531return;1532}15331534if(oe_type(entry)) {1535 off_t delta_pos;15361537/*1538 * This must be a delta and we already know what the1539 * final object type is. Let's extract the actual1540 * object size from the delta header.1541 */1542 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;1543 canonical_size =get_size_from_delta(p, &w_curs, delta_pos);1544if(canonical_size ==0)1545goto give_up;1546SET_SIZE(entry, canonical_size);1547unuse_pack(&w_curs);1548return;1549}15501551/*1552 * No choice but to fall back to the recursive delta walk1553 * with sha1_object_info() to find about the object type1554 * at this point...1555 */1556 give_up:1557unuse_pack(&w_curs);1558}15591560oe_set_type(entry,1561oid_object_info(the_repository, &entry->idx.oid, &canonical_size));1562if(entry->type_valid) {1563SET_SIZE(entry, canonical_size);1564}else{1565/*1566 * Bad object type is checked in prepare_pack(). This is1567 * to permit a missing preferred base object to be ignored1568 * as a preferred base. Doing so can result in a larger1569 * pack file, but the transfer will still take place.1570 */1571}1572}15731574static intpack_offset_sort(const void*_a,const void*_b)1575{1576const struct object_entry *a = *(struct object_entry **)_a;1577const struct object_entry *b = *(struct object_entry **)_b;1578const struct packed_git *a_in_pack =IN_PACK(a);1579const struct packed_git *b_in_pack =IN_PACK(b);15801581/* avoid filesystem trashing with loose objects */1582if(!a_in_pack && !b_in_pack)1583returnoidcmp(&a->idx.oid, &b->idx.oid);15841585if(a_in_pack < b_in_pack)1586return-1;1587if(a_in_pack > b_in_pack)1588return1;1589return a->in_pack_offset < b->in_pack_offset ? -1:1590(a->in_pack_offset > b->in_pack_offset);1591}15921593/*1594 * Drop an on-disk delta we were planning to reuse. Naively, this would1595 * just involve blanking out the "delta" field, but we have to deal1596 * with some extra book-keeping:1597 *1598 * 1. Removing ourselves from the delta_sibling linked list.1599 *1600 * 2. Updating our size/type to the non-delta representation. These were1601 * either not recorded initially (size) or overwritten with the delta type1602 * (type) when check_object() decided to reuse the delta.1603 *1604 * 3. Resetting our delta depth, as we are now a base object.1605 */1606static voiddrop_reused_delta(struct object_entry *entry)1607{1608unsigned*idx = &to_pack.objects[entry->delta_idx -1].delta_child_idx;1609struct object_info oi = OBJECT_INFO_INIT;1610enum object_type type;1611unsigned long size;16121613while(*idx) {1614struct object_entry *oe = &to_pack.objects[*idx -1];16151616if(oe == entry)1617*idx = oe->delta_sibling_idx;1618else1619 idx = &oe->delta_sibling_idx;1620}1621SET_DELTA(entry, NULL);1622 entry->depth =0;16231624 oi.sizep = &size;1625 oi.typep = &type;1626if(packed_object_info(the_repository,IN_PACK(entry), entry->in_pack_offset, &oi) <0) {1627/*1628 * We failed to get the info from this pack for some reason;1629 * fall back to sha1_object_info, which may find another copy.1630 * And if that fails, the error will be recorded in oe_type(entry)1631 * and dealt with in prepare_pack().1632 */1633oe_set_type(entry,1634oid_object_info(the_repository, &entry->idx.oid, &size));1635}else{1636oe_set_type(entry, type);1637}1638SET_SIZE(entry, size);1639}16401641/*1642 * Follow the chain of deltas from this entry onward, throwing away any links1643 * that cause us to hit a cycle (as determined by the DFS state flags in1644 * the entries).1645 *1646 * We also detect too-long reused chains that would violate our --depth1647 * limit.1648 */1649static voidbreak_delta_chains(struct object_entry *entry)1650{1651/*1652 * The actual depth of each object we will write is stored as an int,1653 * as it cannot exceed our int "depth" limit. But before we break1654 * changes based no that limit, we may potentially go as deep as the1655 * number of objects, which is elsewhere bounded to a uint32_t.1656 */1657uint32_t total_depth;1658struct object_entry *cur, *next;16591660for(cur = entry, total_depth =0;1661 cur;1662 cur =DELTA(cur), total_depth++) {1663if(cur->dfs_state == DFS_DONE) {1664/*1665 * We've already seen this object and know it isn't1666 * part of a cycle. We do need to append its depth1667 * to our count.1668 */1669 total_depth += cur->depth;1670break;1671}16721673/*1674 * We break cycles before looping, so an ACTIVE state (or any1675 * other cruft which made its way into the state variable)1676 * is a bug.1677 */1678if(cur->dfs_state != DFS_NONE)1679BUG("confusing delta dfs state in first pass:%d",1680 cur->dfs_state);16811682/*1683 * Now we know this is the first time we've seen the object. If1684 * it's not a delta, we're done traversing, but we'll mark it1685 * done to save time on future traversals.1686 */1687if(!DELTA(cur)) {1688 cur->dfs_state = DFS_DONE;1689break;1690}16911692/*1693 * Mark ourselves as active and see if the next step causes1694 * us to cycle to another active object. It's important to do1695 * this _before_ we loop, because it impacts where we make the1696 * cut, and thus how our total_depth counter works.1697 * E.g., We may see a partial loop like:1698 *1699 * A -> B -> C -> D -> B1700 *1701 * Cutting B->C breaks the cycle. But now the depth of A is1702 * only 1, and our total_depth counter is at 3. The size of the1703 * error is always one less than the size of the cycle we1704 * broke. Commits C and D were "lost" from A's chain.1705 *1706 * If we instead cut D->B, then the depth of A is correct at 3.1707 * We keep all commits in the chain that we examined.1708 */1709 cur->dfs_state = DFS_ACTIVE;1710if(DELTA(cur)->dfs_state == DFS_ACTIVE) {1711drop_reused_delta(cur);1712 cur->dfs_state = DFS_DONE;1713break;1714}1715}17161717/*1718 * And now that we've gone all the way to the bottom of the chain, we1719 * need to clear the active flags and set the depth fields as1720 * appropriate. Unlike the loop above, which can quit when it drops a1721 * delta, we need to keep going to look for more depth cuts. So we need1722 * an extra "next" pointer to keep going after we reset cur->delta.1723 */1724for(cur = entry; cur; cur = next) {1725 next =DELTA(cur);17261727/*1728 * We should have a chain of zero or more ACTIVE states down to1729 * a final DONE. We can quit after the DONE, because either it1730 * has no bases, or we've already handled them in a previous1731 * call.1732 */1733if(cur->dfs_state == DFS_DONE)1734break;1735else if(cur->dfs_state != DFS_ACTIVE)1736BUG("confusing delta dfs state in second pass:%d",1737 cur->dfs_state);17381739/*1740 * If the total_depth is more than depth, then we need to snip1741 * the chain into two or more smaller chains that don't exceed1742 * the maximum depth. Most of the resulting chains will contain1743 * (depth + 1) entries (i.e., depth deltas plus one base), and1744 * the last chain (i.e., the one containing entry) will contain1745 * whatever entries are left over, namely1746 * (total_depth % (depth + 1)) of them.1747 *1748 * Since we are iterating towards decreasing depth, we need to1749 * decrement total_depth as we go, and we need to write to the1750 * entry what its final depth will be after all of the1751 * snipping. Since we're snipping into chains of length (depth1752 * + 1) entries, the final depth of an entry will be its1753 * original depth modulo (depth + 1). Any time we encounter an1754 * entry whose final depth is supposed to be zero, we snip it1755 * from its delta base, thereby making it so.1756 */1757 cur->depth = (total_depth--) % (depth +1);1758if(!cur->depth)1759drop_reused_delta(cur);17601761 cur->dfs_state = DFS_DONE;1762}1763}17641765static voidget_object_details(void)1766{1767uint32_t i;1768struct object_entry **sorted_by_offset;17691770if(progress)1771 progress_state =start_progress(_("Counting objects"),1772 to_pack.nr_objects);17731774 sorted_by_offset =xcalloc(to_pack.nr_objects,sizeof(struct object_entry *));1775for(i =0; i < to_pack.nr_objects; i++)1776 sorted_by_offset[i] = to_pack.objects + i;1777QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17781779for(i =0; i < to_pack.nr_objects; i++) {1780struct object_entry *entry = sorted_by_offset[i];1781check_object(entry);1782if(entry->type_valid &&1783oe_size_greater_than(&to_pack, entry, big_file_threshold))1784 entry->no_try_delta =1;1785display_progress(progress_state, i +1);1786}1787stop_progress(&progress_state);17881789/*1790 * This must happen in a second pass, since we rely on the delta1791 * information for the whole list being completed.1792 */1793for(i =0; i < to_pack.nr_objects; i++)1794break_delta_chains(&to_pack.objects[i]);17951796free(sorted_by_offset);1797}17981799/*1800 * We search for deltas in a list sorted by type, by filename hash, and then1801 * by size, so that we see progressively smaller and smaller files.1802 * That's because we prefer deltas to be from the bigger file1803 * to the smaller -- deletes are potentially cheaper, but perhaps1804 * more importantly, the bigger file is likely the more recent1805 * one. The deepest deltas are therefore the oldest objects which are1806 * less susceptible to be accessed often.1807 */1808static inttype_size_sort(const void*_a,const void*_b)1809{1810const struct object_entry *a = *(struct object_entry **)_a;1811const struct object_entry *b = *(struct object_entry **)_b;1812enum object_type a_type =oe_type(a);1813enum object_type b_type =oe_type(b);1814unsigned long a_size =SIZE(a);1815unsigned long b_size =SIZE(b);18161817if(a_type > b_type)1818return-1;1819if(a_type < b_type)1820return1;1821if(a->hash > b->hash)1822return-1;1823if(a->hash < b->hash)1824return1;1825if(a->preferred_base > b->preferred_base)1826return-1;1827if(a->preferred_base < b->preferred_base)1828return1;1829if(a_size > b_size)1830return-1;1831if(a_size < b_size)1832return1;1833return a < b ? -1: (a > b);/* newest first */1834}18351836struct unpacked {1837struct object_entry *entry;1838void*data;1839struct delta_index *index;1840unsigned depth;1841};18421843static intdelta_cacheable(unsigned long src_size,unsigned long trg_size,1844unsigned long delta_size)1845{1846if(max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1847return0;18481849if(delta_size < cache_max_small_delta_size)1850return1;18511852/* cache delta, if objects are large enough compared to delta size */1853if((src_size >>20) + (trg_size >>21) > (delta_size >>10))1854return1;18551856return0;1857}18581859#ifndef NO_PTHREADS18601861/* Protect access to object database */1862static pthread_mutex_t read_mutex;1863#define read_lock() pthread_mutex_lock(&read_mutex)1864#define read_unlock() pthread_mutex_unlock(&read_mutex)18651866/* Protect delta_cache_size */1867static pthread_mutex_t cache_mutex;1868#define cache_lock() pthread_mutex_lock(&cache_mutex)1869#define cache_unlock() pthread_mutex_unlock(&cache_mutex)18701871/*1872 * Protect object list partitioning (e.g. struct thread_param) and1873 * progress_state1874 */1875static pthread_mutex_t progress_mutex;1876#define progress_lock() pthread_mutex_lock(&progress_mutex)1877#define progress_unlock() pthread_mutex_unlock(&progress_mutex)18781879/*1880 * Access to struct object_entry is unprotected since each thread owns1881 * a portion of the main object list. Just don't access object entries1882 * ahead in the list because they can be stolen and would need1883 * progress_mutex for protection.1884 */1885#else18861887#define read_lock() (void)01888#define read_unlock() (void)01889#define cache_lock() (void)01890#define cache_unlock() (void)01891#define progress_lock() (void)01892#define progress_unlock() (void)018931894#endif18951896/*1897 * Return the size of the object without doing any delta1898 * reconstruction (so non-deltas are true object sizes, but deltas1899 * return the size of the delta data).1900 */1901unsigned longoe_get_size_slow(struct packing_data *pack,1902const struct object_entry *e)1903{1904struct packed_git *p;1905struct pack_window *w_curs;1906unsigned char*buf;1907enum object_type type;1908unsigned long used, avail, size;19091910if(e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {1911read_lock();1912if(oid_object_info(the_repository, &e->idx.oid, &size) <0)1913die(_("unable to get size of%s"),1914oid_to_hex(&e->idx.oid));1915read_unlock();1916return size;1917}19181919 p =oe_in_pack(pack, e);1920if(!p)1921BUG("when e->type is a delta, it must belong to a pack");19221923read_lock();1924 w_curs = NULL;1925 buf =use_pack(p, &w_curs, e->in_pack_offset, &avail);1926 used =unpack_object_header_buffer(buf, avail, &type, &size);1927if(used ==0)1928die(_("unable to parse object header of%s"),1929oid_to_hex(&e->idx.oid));19301931unuse_pack(&w_curs);1932read_unlock();1933return size;1934}19351936static inttry_delta(struct unpacked *trg,struct unpacked *src,1937unsigned max_depth,unsigned long*mem_usage)1938{1939struct object_entry *trg_entry = trg->entry;1940struct object_entry *src_entry = src->entry;1941unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1942unsigned ref_depth;1943enum object_type type;1944void*delta_buf;19451946/* Don't bother doing diffs between different types */1947if(oe_type(trg_entry) !=oe_type(src_entry))1948return-1;19491950/*1951 * We do not bother to try a delta that we discarded on an1952 * earlier try, but only when reusing delta data. Note that1953 * src_entry that is marked as the preferred_base should always1954 * be considered, as even if we produce a suboptimal delta against1955 * it, we will still save the transfer cost, as we already know1956 * the other side has it and we won't send src_entry at all.1957 */1958if(reuse_delta &&IN_PACK(trg_entry) &&1959IN_PACK(trg_entry) ==IN_PACK(src_entry) &&1960!src_entry->preferred_base &&1961 trg_entry->in_pack_type != OBJ_REF_DELTA &&1962 trg_entry->in_pack_type != OBJ_OFS_DELTA)1963return0;19641965/* Let's not bust the allowed depth. */1966if(src->depth >= max_depth)1967return0;19681969/* Now some size filtering heuristics. */1970 trg_size =SIZE(trg_entry);1971if(!DELTA(trg_entry)) {1972 max_size = trg_size/2- the_hash_algo->rawsz;1973 ref_depth =1;1974}else{1975 max_size =DELTA_SIZE(trg_entry);1976 ref_depth = trg->depth;1977}1978 max_size = (uint64_t)max_size * (max_depth - src->depth) /1979(max_depth - ref_depth +1);1980if(max_size ==0)1981return0;1982 src_size =SIZE(src_entry);1983 sizediff = src_size < trg_size ? trg_size - src_size :0;1984if(sizediff >= max_size)1985return0;1986if(trg_size < src_size /32)1987return0;19881989/* Load data if not already done */1990if(!trg->data) {1991read_lock();1992 trg->data =read_object_file(&trg_entry->idx.oid, &type, &sz);1993read_unlock();1994if(!trg->data)1995die(_("object%scannot be read"),1996oid_to_hex(&trg_entry->idx.oid));1997if(sz != trg_size)1998die(_("object%sinconsistent object length (%lu vs%lu)"),1999oid_to_hex(&trg_entry->idx.oid), sz,2000 trg_size);2001*mem_usage += sz;2002}2003if(!src->data) {2004read_lock();2005 src->data =read_object_file(&src_entry->idx.oid, &type, &sz);2006read_unlock();2007if(!src->data) {2008if(src_entry->preferred_base) {2009static int warned =0;2010if(!warned++)2011warning(_("object%scannot be read"),2012oid_to_hex(&src_entry->idx.oid));2013/*2014 * Those objects are not included in the2015 * resulting pack. Be resilient and ignore2016 * them if they can't be read, in case the2017 * pack could be created nevertheless.2018 */2019return0;2020}2021die(_("object%scannot be read"),2022oid_to_hex(&src_entry->idx.oid));2023}2024if(sz != src_size)2025die(_("object%sinconsistent object length (%lu vs%lu)"),2026oid_to_hex(&src_entry->idx.oid), sz,2027 src_size);2028*mem_usage += sz;2029}2030if(!src->index) {2031 src->index =create_delta_index(src->data, src_size);2032if(!src->index) {2033static int warned =0;2034if(!warned++)2035warning(_("suboptimal pack - out of memory"));2036return0;2037}2038*mem_usage +=sizeof_delta_index(src->index);2039}20402041 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);2042if(!delta_buf)2043return0;2044if(delta_size >= (1U<< OE_DELTA_SIZE_BITS)) {2045free(delta_buf);2046return0;2047}20482049if(DELTA(trg_entry)) {2050/* Prefer only shallower same-sized deltas. */2051if(delta_size ==DELTA_SIZE(trg_entry) &&2052 src->depth +1>= trg->depth) {2053free(delta_buf);2054return0;2055}2056}20572058/*2059 * Handle memory allocation outside of the cache2060 * accounting lock. Compiler will optimize the strangeness2061 * away when NO_PTHREADS is defined.2062 */2063free(trg_entry->delta_data);2064cache_lock();2065if(trg_entry->delta_data) {2066 delta_cache_size -=DELTA_SIZE(trg_entry);2067 trg_entry->delta_data = NULL;2068}2069if(delta_cacheable(src_size, trg_size, delta_size)) {2070 delta_cache_size += delta_size;2071cache_unlock();2072 trg_entry->delta_data =xrealloc(delta_buf, delta_size);2073}else{2074cache_unlock();2075free(delta_buf);2076}20772078SET_DELTA(trg_entry, src_entry);2079SET_DELTA_SIZE(trg_entry, delta_size);2080 trg->depth = src->depth +1;20812082return1;2083}20842085static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)2086{2087struct object_entry *child =DELTA_CHILD(me);2088unsigned int m = n;2089while(child) {2090unsigned int c =check_delta_limit(child, n +1);2091if(m < c)2092 m = c;2093 child =DELTA_SIBLING(child);2094}2095return m;2096}20972098static unsigned longfree_unpacked(struct unpacked *n)2099{2100unsigned long freed_mem =sizeof_delta_index(n->index);2101free_delta_index(n->index);2102 n->index = NULL;2103if(n->data) {2104 freed_mem +=SIZE(n->entry);2105FREE_AND_NULL(n->data);2106}2107 n->entry = NULL;2108 n->depth =0;2109return freed_mem;2110}21112112static voidfind_deltas(struct object_entry **list,unsigned*list_size,2113int window,int depth,unsigned*processed)2114{2115uint32_t i, idx =0, count =0;2116struct unpacked *array;2117unsigned long mem_usage =0;21182119 array =xcalloc(window,sizeof(struct unpacked));21202121for(;;) {2122struct object_entry *entry;2123struct unpacked *n = array + idx;2124int j, max_depth, best_base = -1;21252126progress_lock();2127if(!*list_size) {2128progress_unlock();2129break;2130}2131 entry = *list++;2132(*list_size)--;2133if(!entry->preferred_base) {2134(*processed)++;2135display_progress(progress_state, *processed);2136}2137progress_unlock();21382139 mem_usage -=free_unpacked(n);2140 n->entry = entry;21412142while(window_memory_limit &&2143 mem_usage > window_memory_limit &&2144 count >1) {2145uint32_t tail = (idx + window - count) % window;2146 mem_usage -=free_unpacked(array + tail);2147 count--;2148}21492150/* We do not compute delta to *create* objects we are not2151 * going to pack.2152 */2153if(entry->preferred_base)2154goto next;21552156/*2157 * If the current object is at pack edge, take the depth the2158 * objects that depend on the current object into account2159 * otherwise they would become too deep.2160 */2161 max_depth = depth;2162if(DELTA_CHILD(entry)) {2163 max_depth -=check_delta_limit(entry,0);2164if(max_depth <=0)2165goto next;2166}21672168 j = window;2169while(--j >0) {2170int ret;2171uint32_t other_idx = idx + j;2172struct unpacked *m;2173if(other_idx >= window)2174 other_idx -= window;2175 m = array + other_idx;2176if(!m->entry)2177break;2178 ret =try_delta(n, m, max_depth, &mem_usage);2179if(ret <0)2180break;2181else if(ret >0)2182 best_base = other_idx;2183}21842185/*2186 * If we decided to cache the delta data, then it is best2187 * to compress it right away. First because we have to do2188 * it anyway, and doing it here while we're threaded will2189 * save a lot of time in the non threaded write phase,2190 * as well as allow for caching more deltas within2191 * the same cache size limit.2192 * ...2193 * But only if not writing to stdout, since in that case2194 * the network is most likely throttling writes anyway,2195 * and therefore it is best to go to the write phase ASAP2196 * instead, as we can afford spending more time compressing2197 * between writes at that moment.2198 */2199if(entry->delta_data && !pack_to_stdout) {2200unsigned long size;22012202 size =do_compress(&entry->delta_data,DELTA_SIZE(entry));2203if(size < (1U<< OE_Z_DELTA_BITS)) {2204 entry->z_delta_size = size;2205cache_lock();2206 delta_cache_size -=DELTA_SIZE(entry);2207 delta_cache_size += entry->z_delta_size;2208cache_unlock();2209}else{2210FREE_AND_NULL(entry->delta_data);2211 entry->z_delta_size =0;2212}2213}22142215/* if we made n a delta, and if n is already at max2216 * depth, leaving it in the window is pointless. we2217 * should evict it first.2218 */2219if(DELTA(entry) && max_depth <= n->depth)2220continue;22212222/*2223 * Move the best delta base up in the window, after the2224 * currently deltified object, to keep it longer. It will2225 * be the first base object to be attempted next.2226 */2227if(DELTA(entry)) {2228struct unpacked swap = array[best_base];2229int dist = (window + idx - best_base) % window;2230int dst = best_base;2231while(dist--) {2232int src = (dst +1) % window;2233 array[dst] = array[src];2234 dst = src;2235}2236 array[dst] = swap;2237}22382239 next:2240 idx++;2241if(count +1< window)2242 count++;2243if(idx >= window)2244 idx =0;2245}22462247for(i =0; i < window; ++i) {2248free_delta_index(array[i].index);2249free(array[i].data);2250}2251free(array);2252}22532254#ifndef NO_PTHREADS22552256static voidtry_to_free_from_threads(size_t size)2257{2258read_lock();2259release_pack_memory(size);2260read_unlock();2261}22622263static try_to_free_t old_try_to_free_routine;22642265/*2266 * The main object list is split into smaller lists, each is handed to2267 * one worker.2268 *2269 * The main thread waits on the condition that (at least) one of the workers2270 * has stopped working (which is indicated in the .working member of2271 * struct thread_params).2272 *2273 * When a work thread has completed its work, it sets .working to 0 and2274 * signals the main thread and waits on the condition that .data_ready2275 * becomes 1.2276 *2277 * The main thread steals half of the work from the worker that has2278 * most work left to hand it to the idle worker.2279 */22802281struct thread_params {2282 pthread_t thread;2283struct object_entry **list;2284unsigned list_size;2285unsigned remaining;2286int window;2287int depth;2288int working;2289int data_ready;2290 pthread_mutex_t mutex;2291 pthread_cond_t cond;2292unsigned*processed;2293};22942295static pthread_cond_t progress_cond;22962297/*2298 * Mutex and conditional variable can't be statically-initialized on Windows.2299 */2300static voidinit_threaded_search(void)2301{2302init_recursive_mutex(&read_mutex);2303pthread_mutex_init(&cache_mutex, NULL);2304pthread_mutex_init(&progress_mutex, NULL);2305pthread_cond_init(&progress_cond, NULL);2306 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2307}23082309static voidcleanup_threaded_search(void)2310{2311set_try_to_free_routine(old_try_to_free_routine);2312pthread_cond_destroy(&progress_cond);2313pthread_mutex_destroy(&read_mutex);2314pthread_mutex_destroy(&cache_mutex);2315pthread_mutex_destroy(&progress_mutex);2316}23172318static void*threaded_find_deltas(void*arg)2319{2320struct thread_params *me = arg;23212322progress_lock();2323while(me->remaining) {2324progress_unlock();23252326find_deltas(me->list, &me->remaining,2327 me->window, me->depth, me->processed);23282329progress_lock();2330 me->working =0;2331pthread_cond_signal(&progress_cond);2332progress_unlock();23332334/*2335 * We must not set ->data_ready before we wait on the2336 * condition because the main thread may have set it to 12337 * before we get here. In order to be sure that new2338 * work is available if we see 1 in ->data_ready, it2339 * was initialized to 0 before this thread was spawned2340 * and we reset it to 0 right away.2341 */2342pthread_mutex_lock(&me->mutex);2343while(!me->data_ready)2344pthread_cond_wait(&me->cond, &me->mutex);2345 me->data_ready =0;2346pthread_mutex_unlock(&me->mutex);23472348progress_lock();2349}2350progress_unlock();2351/* leave ->working 1 so that this doesn't get more work assigned */2352return NULL;2353}23542355static voidll_find_deltas(struct object_entry **list,unsigned list_size,2356int window,int depth,unsigned*processed)2357{2358struct thread_params *p;2359int i, ret, active_threads =0;23602361init_threaded_search();23622363if(delta_search_threads <=1) {2364find_deltas(list, &list_size, window, depth, processed);2365cleanup_threaded_search();2366return;2367}2368if(progress > pack_to_stdout)2369fprintf_ln(stderr,_("Delta compression using up to%dthreads"),2370 delta_search_threads);2371 p =xcalloc(delta_search_threads,sizeof(*p));23722373/* Partition the work amongst work threads. */2374for(i =0; i < delta_search_threads; i++) {2375unsigned sub_size = list_size / (delta_search_threads - i);23762377/* don't use too small segments or no deltas will be found */2378if(sub_size <2*window && i+1< delta_search_threads)2379 sub_size =0;23802381 p[i].window = window;2382 p[i].depth = depth;2383 p[i].processed = processed;2384 p[i].working =1;2385 p[i].data_ready =0;23862387/* try to split chunks on "path" boundaries */2388while(sub_size && sub_size < list_size &&2389 list[sub_size]->hash &&2390 list[sub_size]->hash == list[sub_size-1]->hash)2391 sub_size++;23922393 p[i].list = list;2394 p[i].list_size = sub_size;2395 p[i].remaining = sub_size;23962397 list += sub_size;2398 list_size -= sub_size;2399}24002401/* Start work threads. */2402for(i =0; i < delta_search_threads; i++) {2403if(!p[i].list_size)2404continue;2405pthread_mutex_init(&p[i].mutex, NULL);2406pthread_cond_init(&p[i].cond, NULL);2407 ret =pthread_create(&p[i].thread, NULL,2408 threaded_find_deltas, &p[i]);2409if(ret)2410die(_("unable to create thread:%s"),strerror(ret));2411 active_threads++;2412}24132414/*2415 * Now let's wait for work completion. Each time a thread is done2416 * with its work, we steal half of the remaining work from the2417 * thread with the largest number of unprocessed objects and give2418 * it to that newly idle thread. This ensure good load balancing2419 * until the remaining object list segments are simply too short2420 * to be worth splitting anymore.2421 */2422while(active_threads) {2423struct thread_params *target = NULL;2424struct thread_params *victim = NULL;2425unsigned sub_size =0;24262427progress_lock();2428for(;;) {2429for(i =0; !target && i < delta_search_threads; i++)2430if(!p[i].working)2431 target = &p[i];2432if(target)2433break;2434pthread_cond_wait(&progress_cond, &progress_mutex);2435}24362437for(i =0; i < delta_search_threads; i++)2438if(p[i].remaining >2*window &&2439(!victim || victim->remaining < p[i].remaining))2440 victim = &p[i];2441if(victim) {2442 sub_size = victim->remaining /2;2443 list = victim->list + victim->list_size - sub_size;2444while(sub_size && list[0]->hash &&2445 list[0]->hash == list[-1]->hash) {2446 list++;2447 sub_size--;2448}2449if(!sub_size) {2450/*2451 * It is possible for some "paths" to have2452 * so many objects that no hash boundary2453 * might be found. Let's just steal the2454 * exact half in that case.2455 */2456 sub_size = victim->remaining /2;2457 list -= sub_size;2458}2459 target->list = list;2460 victim->list_size -= sub_size;2461 victim->remaining -= sub_size;2462}2463 target->list_size = sub_size;2464 target->remaining = sub_size;2465 target->working =1;2466progress_unlock();24672468pthread_mutex_lock(&target->mutex);2469 target->data_ready =1;2470pthread_cond_signal(&target->cond);2471pthread_mutex_unlock(&target->mutex);24722473if(!sub_size) {2474pthread_join(target->thread, NULL);2475pthread_cond_destroy(&target->cond);2476pthread_mutex_destroy(&target->mutex);2477 active_threads--;2478}2479}2480cleanup_threaded_search();2481free(p);2482}24832484#else2485#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2486#endif24872488static voidadd_tag_chain(const struct object_id *oid)2489{2490struct tag *tag;24912492/*2493 * We catch duplicates already in add_object_entry(), but we'd2494 * prefer to do this extra check to avoid having to parse the2495 * tag at all if we already know that it's being packed (e.g., if2496 * it was included via bitmaps, we would not have parsed it2497 * previously).2498 */2499if(packlist_find(&to_pack, oid->hash, NULL))2500return;25012502 tag =lookup_tag(the_repository, oid);2503while(1) {2504if(!tag ||parse_tag(tag) || !tag->tagged)2505die(_("unable to pack objects reachable from tag%s"),2506oid_to_hex(oid));25072508add_object_entry(&tag->object.oid, OBJ_TAG, NULL,0);25092510if(tag->tagged->type != OBJ_TAG)2511return;25122513 tag = (struct tag *)tag->tagged;2514}2515}25162517static intadd_ref_tag(const char*path,const struct object_id *oid,int flag,void*cb_data)2518{2519struct object_id peeled;25202521if(starts_with(path,"refs/tags/") &&/* is a tag? */2522!peel_ref(path, &peeled) &&/* peelable? */2523packlist_find(&to_pack, peeled.hash, NULL))/* object packed? */2524add_tag_chain(oid);2525return0;2526}25272528static voidprepare_pack(int window,int depth)2529{2530struct object_entry **delta_list;2531uint32_t i, nr_deltas;2532unsigned n;25332534get_object_details();25352536/*2537 * If we're locally repacking then we need to be doubly careful2538 * from now on in order to make sure no stealth corruption gets2539 * propagated to the new pack. Clients receiving streamed packs2540 * should validate everything they get anyway so no need to incur2541 * the additional cost here in that case.2542 */2543if(!pack_to_stdout)2544 do_check_packed_object_crc =1;25452546if(!to_pack.nr_objects || !window || !depth)2547return;25482549ALLOC_ARRAY(delta_list, to_pack.nr_objects);2550 nr_deltas = n =0;25512552for(i =0; i < to_pack.nr_objects; i++) {2553struct object_entry *entry = to_pack.objects + i;25542555if(DELTA(entry))2556/* This happens if we decided to reuse existing2557 * delta from a pack. "reuse_delta &&" is implied.2558 */2559continue;25602561if(!entry->type_valid ||2562oe_size_less_than(&to_pack, entry,50))2563continue;25642565if(entry->no_try_delta)2566continue;25672568if(!entry->preferred_base) {2569 nr_deltas++;2570if(oe_type(entry) <0)2571die(_("unable to get type of object%s"),2572oid_to_hex(&entry->idx.oid));2573}else{2574if(oe_type(entry) <0) {2575/*2576 * This object is not found, but we2577 * don't have to include it anyway.2578 */2579continue;2580}2581}25822583 delta_list[n++] = entry;2584}25852586if(nr_deltas && n >1) {2587unsigned nr_done =0;2588if(progress)2589 progress_state =start_progress(_("Compressing objects"),2590 nr_deltas);2591QSORT(delta_list, n, type_size_sort);2592ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2593stop_progress(&progress_state);2594if(nr_done != nr_deltas)2595die(_("inconsistency with delta count"));2596}2597free(delta_list);2598}25992600static intgit_pack_config(const char*k,const char*v,void*cb)2601{2602if(!strcmp(k,"pack.window")) {2603 window =git_config_int(k, v);2604return0;2605}2606if(!strcmp(k,"pack.windowmemory")) {2607 window_memory_limit =git_config_ulong(k, v);2608return0;2609}2610if(!strcmp(k,"pack.depth")) {2611 depth =git_config_int(k, v);2612return0;2613}2614if(!strcmp(k,"pack.deltacachesize")) {2615 max_delta_cache_size =git_config_int(k, v);2616return0;2617}2618if(!strcmp(k,"pack.deltacachelimit")) {2619 cache_max_small_delta_size =git_config_int(k, v);2620return0;2621}2622if(!strcmp(k,"pack.writebitmaphashcache")) {2623if(git_config_bool(k, v))2624 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2625else2626 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2627}2628if(!strcmp(k,"pack.usebitmaps")) {2629 use_bitmap_index_default =git_config_bool(k, v);2630return0;2631}2632if(!strcmp(k,"pack.threads")) {2633 delta_search_threads =git_config_int(k, v);2634if(delta_search_threads <0)2635die(_("invalid number of threads specified (%d)"),2636 delta_search_threads);2637#ifdef NO_PTHREADS2638if(delta_search_threads !=1) {2639warning(_("no threads support, ignoring%s"), k);2640 delta_search_threads =0;2641}2642#endif2643return0;2644}2645if(!strcmp(k,"pack.indexversion")) {2646 pack_idx_opts.version =git_config_int(k, v);2647if(pack_idx_opts.version >2)2648die(_("bad pack.indexversion=%"PRIu32),2649 pack_idx_opts.version);2650return0;2651}2652returngit_default_config(k, v, cb);2653}26542655static voidread_object_list_from_stdin(void)2656{2657char line[GIT_MAX_HEXSZ +1+ PATH_MAX +2];2658struct object_id oid;2659const char*p;26602661for(;;) {2662if(!fgets(line,sizeof(line), stdin)) {2663if(feof(stdin))2664break;2665if(!ferror(stdin))2666die("BUG: fgets returned NULL, not EOF, not error!");2667if(errno != EINTR)2668die_errno("fgets");2669clearerr(stdin);2670continue;2671}2672if(line[0] =='-') {2673if(get_oid_hex(line+1, &oid))2674die(_("expected edge object ID, got garbage:\n%s"),2675 line);2676add_preferred_base(&oid);2677continue;2678}2679if(parse_oid_hex(line, &oid, &p))2680die(_("expected object ID, got garbage:\n%s"), line);26812682add_preferred_base_object(p +1);2683add_object_entry(&oid, OBJ_NONE, p +1,0);2684}2685}26862687/* Remember to update object flag allocation in object.h */2688#define OBJECT_ADDED (1u<<20)26892690static voidshow_commit(struct commit *commit,void*data)2691{2692add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL,0);2693 commit->object.flags |= OBJECT_ADDED;26942695if(write_bitmap_index)2696index_commit_for_bitmap(commit);2697}26982699static voidshow_object(struct object *obj,const char*name,void*data)2700{2701add_preferred_base_object(name);2702add_object_entry(&obj->oid, obj->type, name,0);2703 obj->flags |= OBJECT_ADDED;2704}27052706static voidshow_object__ma_allow_any(struct object *obj,const char*name,void*data)2707{2708assert(arg_missing_action == MA_ALLOW_ANY);27092710/*2711 * Quietly ignore ALL missing objects. This avoids problems with2712 * staging them now and getting an odd error later.2713 */2714if(!has_object_file(&obj->oid))2715return;27162717show_object(obj, name, data);2718}27192720static voidshow_object__ma_allow_promisor(struct object *obj,const char*name,void*data)2721{2722assert(arg_missing_action == MA_ALLOW_PROMISOR);27232724/*2725 * Quietly ignore EXPECTED missing objects. This avoids problems with2726 * staging them now and getting an odd error later.2727 */2728if(!has_object_file(&obj->oid) &&is_promisor_object(&obj->oid))2729return;27302731show_object(obj, name, data);2732}27332734static intoption_parse_missing_action(const struct option *opt,2735const char*arg,int unset)2736{2737assert(arg);2738assert(!unset);27392740if(!strcmp(arg,"error")) {2741 arg_missing_action = MA_ERROR;2742 fn_show_object = show_object;2743return0;2744}27452746if(!strcmp(arg,"allow-any")) {2747 arg_missing_action = MA_ALLOW_ANY;2748 fetch_if_missing =0;2749 fn_show_object = show_object__ma_allow_any;2750return0;2751}27522753if(!strcmp(arg,"allow-promisor")) {2754 arg_missing_action = MA_ALLOW_PROMISOR;2755 fetch_if_missing =0;2756 fn_show_object = show_object__ma_allow_promisor;2757return0;2758}27592760die(_("invalid value for --missing"));2761return0;2762}27632764static voidshow_edge(struct commit *commit)2765{2766add_preferred_base(&commit->object.oid);2767}27682769struct in_pack_object {2770 off_t offset;2771struct object *object;2772};27732774struct in_pack {2775unsigned int alloc;2776unsigned int nr;2777struct in_pack_object *array;2778};27792780static voidmark_in_pack_object(struct object *object,struct packed_git *p,struct in_pack *in_pack)2781{2782 in_pack->array[in_pack->nr].offset =find_pack_entry_one(object->oid.hash, p);2783 in_pack->array[in_pack->nr].object = object;2784 in_pack->nr++;2785}27862787/*2788 * Compare the objects in the offset order, in order to emulate the2789 * "git rev-list --objects" output that produced the pack originally.2790 */2791static intofscmp(const void*a_,const void*b_)2792{2793struct in_pack_object *a = (struct in_pack_object *)a_;2794struct in_pack_object *b = (struct in_pack_object *)b_;27952796if(a->offset < b->offset)2797return-1;2798else if(a->offset > b->offset)2799return1;2800else2801returnoidcmp(&a->object->oid, &b->object->oid);2802}28032804static voidadd_objects_in_unpacked_packs(struct rev_info *revs)2805{2806struct packed_git *p;2807struct in_pack in_pack;2808uint32_t i;28092810memset(&in_pack,0,sizeof(in_pack));28112812for(p =get_packed_git(the_repository); p; p = p->next) {2813struct object_id oid;2814struct object *o;28152816if(!p->pack_local || p->pack_keep || p->pack_keep_in_core)2817continue;2818if(open_pack_index(p))2819die(_("cannot open pack index"));28202821ALLOC_GROW(in_pack.array,2822 in_pack.nr + p->num_objects,2823 in_pack.alloc);28242825for(i =0; i < p->num_objects; i++) {2826nth_packed_object_oid(&oid, p, i);2827 o =lookup_unknown_object(oid.hash);2828if(!(o->flags & OBJECT_ADDED))2829mark_in_pack_object(o, p, &in_pack);2830 o->flags |= OBJECT_ADDED;2831}2832}28332834if(in_pack.nr) {2835QSORT(in_pack.array, in_pack.nr, ofscmp);2836for(i =0; i < in_pack.nr; i++) {2837struct object *o = in_pack.array[i].object;2838add_object_entry(&o->oid, o->type,"",0);2839}2840}2841free(in_pack.array);2842}28432844static intadd_loose_object(const struct object_id *oid,const char*path,2845void*data)2846{2847enum object_type type =oid_object_info(the_repository, oid, NULL);28482849if(type <0) {2850warning(_("loose object at%scould not be examined"), path);2851return0;2852}28532854add_object_entry(oid, type,"",0);2855return0;2856}28572858/*2859 * We actually don't even have to worry about reachability here.2860 * add_object_entry will weed out duplicates, so we just add every2861 * loose object we find.2862 */2863static voidadd_unreachable_loose_objects(void)2864{2865for_each_loose_file_in_objdir(get_object_directory(),2866 add_loose_object,2867 NULL, NULL, NULL);2868}28692870static inthas_sha1_pack_kept_or_nonlocal(const struct object_id *oid)2871{2872static struct packed_git *last_found = (void*)1;2873struct packed_git *p;28742875 p = (last_found != (void*)1) ? last_found :2876get_packed_git(the_repository);28772878while(p) {2879if((!p->pack_local || p->pack_keep ||2880 p->pack_keep_in_core) &&2881find_pack_entry_one(oid->hash, p)) {2882 last_found = p;2883return1;2884}2885if(p == last_found)2886 p =get_packed_git(the_repository);2887else2888 p = p->next;2889if(p == last_found)2890 p = p->next;2891}2892return0;2893}28942895/*2896 * Store a list of sha1s that are should not be discarded2897 * because they are either written too recently, or are2898 * reachable from another object that was.2899 *2900 * This is filled by get_object_list.2901 */2902static struct oid_array recent_objects;29032904static intloosened_object_can_be_discarded(const struct object_id *oid,2905 timestamp_t mtime)2906{2907if(!unpack_unreachable_expiration)2908return0;2909if(mtime > unpack_unreachable_expiration)2910return0;2911if(oid_array_lookup(&recent_objects, oid) >=0)2912return0;2913return1;2914}29152916static voidloosen_unused_packed_objects(struct rev_info *revs)2917{2918struct packed_git *p;2919uint32_t i;2920struct object_id oid;29212922for(p =get_packed_git(the_repository); p; p = p->next) {2923if(!p->pack_local || p->pack_keep || p->pack_keep_in_core)2924continue;29252926if(open_pack_index(p))2927die(_("cannot open pack index"));29282929for(i =0; i < p->num_objects; i++) {2930nth_packed_object_oid(&oid, p, i);2931if(!packlist_find(&to_pack, oid.hash, NULL) &&2932!has_sha1_pack_kept_or_nonlocal(&oid) &&2933!loosened_object_can_be_discarded(&oid, p->mtime))2934if(force_object_loose(&oid, p->mtime))2935die(_("unable to force loose object"));2936}2937}2938}29392940/*2941 * This tracks any options which pack-reuse code expects to be on, or which a2942 * reader of the pack might not understand, and which would therefore prevent2943 * blind reuse of what we have on disk.2944 */2945static intpack_options_allow_reuse(void)2946{2947return pack_to_stdout &&2948 allow_ofs_delta &&2949!ignore_packed_keep_on_disk &&2950!ignore_packed_keep_in_core &&2951(!local || !have_non_local_packs) &&2952!incremental;2953}29542955static intget_object_list_from_bitmap(struct rev_info *revs)2956{2957struct bitmap_index *bitmap_git;2958if(!(bitmap_git =prepare_bitmap_walk(revs)))2959return-1;29602961if(pack_options_allow_reuse() &&2962!reuse_partial_packfile_from_bitmap(2963 bitmap_git,2964&reuse_packfile,2965&reuse_packfile_objects,2966&reuse_packfile_offset)) {2967assert(reuse_packfile_objects);2968 nr_result += reuse_packfile_objects;2969display_progress(progress_state, nr_result);2970}29712972traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);2973free_bitmap_index(bitmap_git);2974return0;2975}29762977static voidrecord_recent_object(struct object *obj,2978const char*name,2979void*data)2980{2981oid_array_append(&recent_objects, &obj->oid);2982}29832984static voidrecord_recent_commit(struct commit *commit,void*data)2985{2986oid_array_append(&recent_objects, &commit->object.oid);2987}29882989static voidget_object_list(int ac,const char**av)2990{2991struct rev_info revs;2992char line[1000];2993int flags =0;29942995init_revisions(&revs, NULL);2996 save_commit_buffer =0;2997setup_revisions(ac, av, &revs, NULL);29982999/* make sure shallows are read */3000is_repository_shallow(the_repository);30013002while(fgets(line,sizeof(line), stdin) != NULL) {3003int len =strlen(line);3004if(len && line[len -1] =='\n')3005 line[--len] =0;3006if(!len)3007break;3008if(*line =='-') {3009if(!strcmp(line,"--not")) {3010 flags ^= UNINTERESTING;3011 write_bitmap_index =0;3012continue;3013}3014if(starts_with(line,"--shallow ")) {3015struct object_id oid;3016if(get_oid_hex(line +10, &oid))3017die("not an SHA-1 '%s'", line +10);3018register_shallow(the_repository, &oid);3019 use_bitmap_index =0;3020continue;3021}3022die(_("not a rev '%s'"), line);3023}3024if(handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))3025die(_("bad revision '%s'"), line);3026}30273028if(use_bitmap_index && !get_object_list_from_bitmap(&revs))3029return;30303031if(prepare_revision_walk(&revs))3032die(_("revision walk setup failed"));3033mark_edges_uninteresting(&revs, show_edge);30343035if(!fn_show_object)3036 fn_show_object = show_object;3037traverse_commit_list_filtered(&filter_options, &revs,3038 show_commit, fn_show_object, NULL,3039 NULL);30403041if(unpack_unreachable_expiration) {3042 revs.ignore_missing_links =1;3043if(add_unseen_recent_objects_to_traversal(&revs,3044 unpack_unreachable_expiration))3045die(_("unable to add recent objects"));3046if(prepare_revision_walk(&revs))3047die(_("revision walk setup failed"));3048traverse_commit_list(&revs, record_recent_commit,3049 record_recent_object, NULL);3050}30513052if(keep_unreachable)3053add_objects_in_unpacked_packs(&revs);3054if(pack_loose_unreachable)3055add_unreachable_loose_objects();3056if(unpack_unreachable)3057loosen_unused_packed_objects(&revs);30583059oid_array_clear(&recent_objects);3060}30613062static voidadd_extra_kept_packs(const struct string_list *names)3063{3064struct packed_git *p;30653066if(!names->nr)3067return;30683069for(p =get_packed_git(the_repository); p; p = p->next) {3070const char*name =basename(p->pack_name);3071int i;30723073if(!p->pack_local)3074continue;30753076for(i =0; i < names->nr; i++)3077if(!fspathcmp(name, names->items[i].string))3078break;30793080if(i < names->nr) {3081 p->pack_keep_in_core =1;3082 ignore_packed_keep_in_core =1;3083continue;3084}3085}3086}30873088static intoption_parse_index_version(const struct option *opt,3089const char*arg,int unset)3090{3091char*c;3092const char*val = arg;3093 pack_idx_opts.version =strtoul(val, &c,10);3094if(pack_idx_opts.version >2)3095die(_("unsupported index version%s"), val);3096if(*c ==','&& c[1])3097 pack_idx_opts.off32_limit =strtoul(c+1, &c,0);3098if(*c || pack_idx_opts.off32_limit &0x80000000)3099die(_("bad index version '%s'"), val);3100return0;3101}31023103static intoption_parse_unpack_unreachable(const struct option *opt,3104const char*arg,int unset)3105{3106if(unset) {3107 unpack_unreachable =0;3108 unpack_unreachable_expiration =0;3109}3110else{3111 unpack_unreachable =1;3112if(arg)3113 unpack_unreachable_expiration =approxidate(arg);3114}3115return0;3116}31173118intcmd_pack_objects(int argc,const char**argv,const char*prefix)3119{3120int use_internal_rev_list =0;3121int thin =0;3122int shallow =0;3123int all_progress_implied =0;3124struct argv_array rp = ARGV_ARRAY_INIT;3125int rev_list_unpacked =0, rev_list_all =0, rev_list_reflog =0;3126int rev_list_index =0;3127struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;3128struct option pack_objects_options[] = {3129OPT_SET_INT('q',"quiet", &progress,3130N_("do not show progress meter"),0),3131OPT_SET_INT(0,"progress", &progress,3132N_("show progress meter"),1),3133OPT_SET_INT(0,"all-progress", &progress,3134N_("show progress meter during object writing phase"),2),3135OPT_BOOL(0,"all-progress-implied",3136&all_progress_implied,3137N_("similar to --all-progress when progress meter is shown")),3138{ OPTION_CALLBACK,0,"index-version", NULL,N_("<version>[,<offset>]"),3139N_("write the pack index file in the specified idx format version"),31400, option_parse_index_version },3141OPT_MAGNITUDE(0,"max-pack-size", &pack_size_limit,3142N_("maximum size of each output pack file")),3143OPT_BOOL(0,"local", &local,3144N_("ignore borrowed objects from alternate object store")),3145OPT_BOOL(0,"incremental", &incremental,3146N_("ignore packed objects")),3147OPT_INTEGER(0,"window", &window,3148N_("limit pack window by objects")),3149OPT_MAGNITUDE(0,"window-memory", &window_memory_limit,3150N_("limit pack window by memory in addition to object limit")),3151OPT_INTEGER(0,"depth", &depth,3152N_("maximum length of delta chain allowed in the resulting pack")),3153OPT_BOOL(0,"reuse-delta", &reuse_delta,3154N_("reuse existing deltas")),3155OPT_BOOL(0,"reuse-object", &reuse_object,3156N_("reuse existing objects")),3157OPT_BOOL(0,"delta-base-offset", &allow_ofs_delta,3158N_("use OFS_DELTA objects")),3159OPT_INTEGER(0,"threads", &delta_search_threads,3160N_("use threads when searching for best delta matches")),3161OPT_BOOL(0,"non-empty", &non_empty,3162N_("do not create an empty pack output")),3163OPT_BOOL(0,"revs", &use_internal_rev_list,3164N_("read revision arguments from standard input")),3165OPT_SET_INT_F(0,"unpacked", &rev_list_unpacked,3166N_("limit the objects to those that are not yet packed"),31671, PARSE_OPT_NONEG),3168OPT_SET_INT_F(0,"all", &rev_list_all,3169N_("include objects reachable from any reference"),31701, PARSE_OPT_NONEG),3171OPT_SET_INT_F(0,"reflog", &rev_list_reflog,3172N_("include objects referred by reflog entries"),31731, PARSE_OPT_NONEG),3174OPT_SET_INT_F(0,"indexed-objects", &rev_list_index,3175N_("include objects referred to by the index"),31761, PARSE_OPT_NONEG),3177OPT_BOOL(0,"stdout", &pack_to_stdout,3178N_("output pack to stdout")),3179OPT_BOOL(0,"include-tag", &include_tag,3180N_("include tag objects that refer to objects to be packed")),3181OPT_BOOL(0,"keep-unreachable", &keep_unreachable,3182N_("keep unreachable objects")),3183OPT_BOOL(0,"pack-loose-unreachable", &pack_loose_unreachable,3184N_("pack loose unreachable objects")),3185{ OPTION_CALLBACK,0,"unpack-unreachable", NULL,N_("time"),3186N_("unpack unreachable objects newer than <time>"),3187 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },3188OPT_BOOL(0,"thin", &thin,3189N_("create thin packs")),3190OPT_BOOL(0,"shallow", &shallow,3191N_("create packs suitable for shallow fetches")),3192OPT_BOOL(0,"honor-pack-keep", &ignore_packed_keep_on_disk,3193N_("ignore packs that have companion .keep file")),3194OPT_STRING_LIST(0,"keep-pack", &keep_pack_list,N_("name"),3195N_("ignore this pack")),3196OPT_INTEGER(0,"compression", &pack_compression_level,3197N_("pack compression level")),3198OPT_SET_INT(0,"keep-true-parents", &grafts_replace_parents,3199N_("do not hide commits by grafts"),0),3200OPT_BOOL(0,"use-bitmap-index", &use_bitmap_index,3201N_("use a bitmap index if available to speed up counting objects")),3202OPT_BOOL(0,"write-bitmap-index", &write_bitmap_index,3203N_("write a bitmap index together with the pack index")),3204OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),3205{ OPTION_CALLBACK,0,"missing", NULL,N_("action"),3206N_("handling for missing objects"), PARSE_OPT_NONEG,3207 option_parse_missing_action },3208OPT_BOOL(0,"exclude-promisor-objects", &exclude_promisor_objects,3209N_("do not pack objects in promisor packfiles")),3210OPT_END(),3211};32123213if(DFS_NUM_STATES > (1<< OE_DFS_STATE_BITS))3214BUG("too many dfs states, increase OE_DFS_STATE_BITS");32153216 read_replace_refs =0;32173218reset_pack_idx_option(&pack_idx_opts);3219git_config(git_pack_config, NULL);32203221 progress =isatty(2);3222 argc =parse_options(argc, argv, prefix, pack_objects_options,3223 pack_usage,0);32243225if(argc) {3226 base_name = argv[0];3227 argc--;3228}3229if(pack_to_stdout != !base_name || argc)3230usage_with_options(pack_usage, pack_objects_options);32313232if(depth >= (1<< OE_DEPTH_BITS)) {3233warning(_("delta chain depth%dis too deep, forcing%d"),3234 depth, (1<< OE_DEPTH_BITS) -1);3235 depth = (1<< OE_DEPTH_BITS) -1;3236}3237if(cache_max_small_delta_size >= (1U<< OE_Z_DELTA_BITS)) {3238warning(_("pack.deltaCacheLimit is too high, forcing%d"),3239(1U<< OE_Z_DELTA_BITS) -1);3240 cache_max_small_delta_size = (1U<< OE_Z_DELTA_BITS) -1;3241}32423243argv_array_push(&rp,"pack-objects");3244if(thin) {3245 use_internal_rev_list =1;3246argv_array_push(&rp, shallow3247?"--objects-edge-aggressive"3248:"--objects-edge");3249}else3250argv_array_push(&rp,"--objects");32513252if(rev_list_all) {3253 use_internal_rev_list =1;3254argv_array_push(&rp,"--all");3255}3256if(rev_list_reflog) {3257 use_internal_rev_list =1;3258argv_array_push(&rp,"--reflog");3259}3260if(rev_list_index) {3261 use_internal_rev_list =1;3262argv_array_push(&rp,"--indexed-objects");3263}3264if(rev_list_unpacked) {3265 use_internal_rev_list =1;3266argv_array_push(&rp,"--unpacked");3267}32683269if(exclude_promisor_objects) {3270 use_internal_rev_list =1;3271 fetch_if_missing =0;3272argv_array_push(&rp,"--exclude-promisor-objects");3273}3274if(unpack_unreachable || keep_unreachable || pack_loose_unreachable)3275 use_internal_rev_list =1;32763277if(!reuse_object)3278 reuse_delta =0;3279if(pack_compression_level == -1)3280 pack_compression_level = Z_DEFAULT_COMPRESSION;3281else if(pack_compression_level <0|| pack_compression_level > Z_BEST_COMPRESSION)3282die(_("bad pack compression level%d"), pack_compression_level);32833284if(!delta_search_threads)/* --threads=0 means autodetect */3285 delta_search_threads =online_cpus();32863287#ifdef NO_PTHREADS3288if(delta_search_threads !=1)3289warning(_("no threads support, ignoring --threads"));3290#endif3291if(!pack_to_stdout && !pack_size_limit)3292 pack_size_limit = pack_size_limit_cfg;3293if(pack_to_stdout && pack_size_limit)3294die(_("--max-pack-size cannot be used to build a pack for transfer"));3295if(pack_size_limit && pack_size_limit <1024*1024) {3296warning(_("minimum pack size limit is 1 MiB"));3297 pack_size_limit =1024*1024;3298}32993300if(!pack_to_stdout && thin)3301die(_("--thin cannot be used to build an indexable pack"));33023303if(keep_unreachable && unpack_unreachable)3304die(_("--keep-unreachable and --unpack-unreachable are incompatible"));3305if(!rev_list_all || !rev_list_reflog || !rev_list_index)3306 unpack_unreachable_expiration =0;33073308if(filter_options.choice) {3309if(!pack_to_stdout)3310die(_("cannot use --filter without --stdout"));3311 use_bitmap_index =0;3312}33133314/*3315 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3316 *3317 * - to produce good pack (with bitmap index not-yet-packed objects are3318 * packed in suboptimal order).3319 *3320 * - to use more robust pack-generation codepath (avoiding possible3321 * bugs in bitmap code and possible bitmap index corruption).3322 */3323if(!pack_to_stdout)3324 use_bitmap_index_default =0;33253326if(use_bitmap_index <0)3327 use_bitmap_index = use_bitmap_index_default;33283329/* "hard" reasons not to use bitmaps; these just won't work at all */3330if(!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) ||is_repository_shallow(the_repository))3331 use_bitmap_index =0;33323333if(pack_to_stdout || !rev_list_all)3334 write_bitmap_index =0;33353336if(progress && all_progress_implied)3337 progress =2;33383339add_extra_kept_packs(&keep_pack_list);3340if(ignore_packed_keep_on_disk) {3341struct packed_git *p;3342for(p =get_packed_git(the_repository); p; p = p->next)3343if(p->pack_local && p->pack_keep)3344break;3345if(!p)/* no keep-able packs found */3346 ignore_packed_keep_on_disk =0;3347}3348if(local) {3349/*3350 * unlike ignore_packed_keep_on_disk above, we do not3351 * want to unset "local" based on looking at packs, as3352 * it also covers non-local objects3353 */3354struct packed_git *p;3355for(p =get_packed_git(the_repository); p; p = p->next) {3356if(!p->pack_local) {3357 have_non_local_packs =1;3358break;3359}3360}3361}33623363prepare_packing_data(&to_pack);33643365if(progress)3366 progress_state =start_progress(_("Enumerating objects"),0);3367if(!use_internal_rev_list)3368read_object_list_from_stdin();3369else{3370get_object_list(rp.argc, rp.argv);3371argv_array_clear(&rp);3372}3373cleanup_preferred_base();3374if(include_tag && nr_result)3375for_each_ref(add_ref_tag, NULL);3376stop_progress(&progress_state);33773378if(non_empty && !nr_result)3379return0;3380if(nr_result)3381prepare_pack(window, depth);3382write_pack_file();3383if(progress)3384fprintf_ln(stderr,3385_("Total %"PRIu32" (delta %"PRIu32"),"3386" reused %"PRIu32" (delta %"PRIu32")"),3387 written, written_delta, reused, reused_delta);3388return0;3389}