1#include"builtin.h" 2#include"cache.h" 3#include"attr.h" 4#include"object.h" 5#include"blob.h" 6#include"commit.h" 7#include"tag.h" 8#include"tree.h" 9#include"delta.h" 10#include"pack.h" 11#include"pack-revindex.h" 12#include"csum-file.h" 13#include"tree-walk.h" 14#include"diff.h" 15#include"revision.h" 16#include"list-objects.h" 17#include"pack-objects.h" 18#include"progress.h" 19#include"refs.h" 20#include"streaming.h" 21#include"thread-utils.h" 22#include"pack-bitmap.h" 23#include"reachable.h" 24#include"sha1-array.h" 25#include"argv-array.h" 26#include"mru.h" 27 28static const char*pack_usage[] = { 29N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 30N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 31 NULL 32}; 33 34/* 35 * Objects we are going to pack are collected in the `to_pack` structure. 36 * It contains an array (dynamically expanded) of the object data, and a map 37 * that can resolve SHA1s to their position in the array. 38 */ 39static struct packing_data to_pack; 40 41static struct pack_idx_entry **written_list; 42static uint32_t nr_result, nr_written; 43 44static int non_empty; 45static int reuse_delta =1, reuse_object =1; 46static int keep_unreachable, unpack_unreachable, include_tag; 47static timestamp_t unpack_unreachable_expiration; 48static int pack_loose_unreachable; 49static int local; 50static int have_non_local_packs; 51static int incremental; 52static int ignore_packed_keep; 53static int allow_ofs_delta; 54static struct pack_idx_option pack_idx_opts; 55static const char*base_name; 56static int progress =1; 57static int window =10; 58static unsigned long pack_size_limit; 59static int depth =50; 60static int delta_search_threads; 61static int pack_to_stdout; 62static int num_preferred_base; 63static struct progress *progress_state; 64 65static struct packed_git *reuse_packfile; 66static uint32_t reuse_packfile_objects; 67static off_t reuse_packfile_offset; 68 69static int use_bitmap_index_default =1; 70static int use_bitmap_index = -1; 71static int write_bitmap_index; 72static uint16_t write_bitmap_options; 73 74static unsigned long delta_cache_size =0; 75static unsigned long max_delta_cache_size =256*1024*1024; 76static unsigned long cache_max_small_delta_size =1000; 77 78static unsigned long window_memory_limit =0; 79 80/* 81 * stats 82 */ 83static uint32_t written, written_delta; 84static uint32_t reused, reused_delta; 85 86/* 87 * Indexed commits 88 */ 89static struct commit **indexed_commits; 90static unsigned int indexed_commits_nr; 91static unsigned int indexed_commits_alloc; 92 93static voidindex_commit_for_bitmap(struct commit *commit) 94{ 95if(indexed_commits_nr >= indexed_commits_alloc) { 96 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 97REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 98} 99 100 indexed_commits[indexed_commits_nr++] = commit; 101} 102 103static void*get_delta(struct object_entry *entry) 104{ 105unsigned long size, base_size, delta_size; 106void*buf, *base_buf, *delta_buf; 107enum object_type type; 108 109 buf =read_sha1_file(entry->idx.sha1, &type, &size); 110if(!buf) 111die("unable to read%s",sha1_to_hex(entry->idx.sha1)); 112 base_buf =read_sha1_file(entry->delta->idx.sha1, &type, &base_size); 113if(!base_buf) 114die("unable to read%s",sha1_to_hex(entry->delta->idx.sha1)); 115 delta_buf =diff_delta(base_buf, base_size, 116 buf, size, &delta_size,0); 117if(!delta_buf || delta_size != entry->delta_size) 118die("delta size changed"); 119free(buf); 120free(base_buf); 121return delta_buf; 122} 123 124static unsigned longdo_compress(void**pptr,unsigned long size) 125{ 126 git_zstream stream; 127void*in, *out; 128unsigned long maxsize; 129 130git_deflate_init(&stream, pack_compression_level); 131 maxsize =git_deflate_bound(&stream, size); 132 133 in = *pptr; 134 out =xmalloc(maxsize); 135*pptr = out; 136 137 stream.next_in = in; 138 stream.avail_in = size; 139 stream.next_out = out; 140 stream.avail_out = maxsize; 141while(git_deflate(&stream, Z_FINISH) == Z_OK) 142;/* nothing */ 143git_deflate_end(&stream); 144 145free(in); 146return stream.total_out; 147} 148 149static unsigned longwrite_large_blob_data(struct git_istream *st,struct sha1file *f, 150const unsigned char*sha1) 151{ 152 git_zstream stream; 153unsigned char ibuf[1024*16]; 154unsigned char obuf[1024*16]; 155unsigned long olen =0; 156 157git_deflate_init(&stream, pack_compression_level); 158 159for(;;) { 160 ssize_t readlen; 161int zret = Z_OK; 162 readlen =read_istream(st, ibuf,sizeof(ibuf)); 163if(readlen == -1) 164die(_("unable to read%s"),sha1_to_hex(sha1)); 165 166 stream.next_in = ibuf; 167 stream.avail_in = readlen; 168while((stream.avail_in || readlen ==0) && 169(zret == Z_OK || zret == Z_BUF_ERROR)) { 170 stream.next_out = obuf; 171 stream.avail_out =sizeof(obuf); 172 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 173sha1write(f, obuf, stream.next_out - obuf); 174 olen += stream.next_out - obuf; 175} 176if(stream.avail_in) 177die(_("deflate error (%d)"), zret); 178if(readlen ==0) { 179if(zret != Z_STREAM_END) 180die(_("deflate error (%d)"), zret); 181break; 182} 183} 184git_deflate_end(&stream); 185return olen; 186} 187 188/* 189 * we are going to reuse the existing object data as is. make 190 * sure it is not corrupt. 191 */ 192static intcheck_pack_inflate(struct packed_git *p, 193struct pack_window **w_curs, 194 off_t offset, 195 off_t len, 196unsigned long expect) 197{ 198 git_zstream stream; 199unsigned char fakebuf[4096], *in; 200int st; 201 202memset(&stream,0,sizeof(stream)); 203git_inflate_init(&stream); 204do{ 205 in =use_pack(p, w_curs, offset, &stream.avail_in); 206 stream.next_in = in; 207 stream.next_out = fakebuf; 208 stream.avail_out =sizeof(fakebuf); 209 st =git_inflate(&stream, Z_FINISH); 210 offset += stream.next_in - in; 211}while(st == Z_OK || st == Z_BUF_ERROR); 212git_inflate_end(&stream); 213return(st == Z_STREAM_END && 214 stream.total_out == expect && 215 stream.total_in == len) ?0: -1; 216} 217 218static voidcopy_pack_data(struct sha1file *f, 219struct packed_git *p, 220struct pack_window **w_curs, 221 off_t offset, 222 off_t len) 223{ 224unsigned char*in; 225unsigned long avail; 226 227while(len) { 228 in =use_pack(p, w_curs, offset, &avail); 229if(avail > len) 230 avail = (unsigned long)len; 231sha1write(f, in, avail); 232 offset += avail; 233 len -= avail; 234} 235} 236 237/* Return 0 if we will bust the pack-size limit */ 238static unsigned longwrite_no_reuse_object(struct sha1file *f,struct object_entry *entry, 239unsigned long limit,int usable_delta) 240{ 241unsigned long size, datalen; 242unsigned char header[MAX_PACK_OBJECT_HEADER], 243 dheader[MAX_PACK_OBJECT_HEADER]; 244unsigned hdrlen; 245enum object_type type; 246void*buf; 247struct git_istream *st = NULL; 248 249if(!usable_delta) { 250if(entry->type == OBJ_BLOB && 251 entry->size > big_file_threshold && 252(st =open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL) 253 buf = NULL; 254else{ 255 buf =read_sha1_file(entry->idx.sha1, &type, &size); 256if(!buf) 257die(_("unable to read%s"),sha1_to_hex(entry->idx.sha1)); 258} 259/* 260 * make sure no cached delta data remains from a 261 * previous attempt before a pack split occurred. 262 */ 263free(entry->delta_data); 264 entry->delta_data = NULL; 265 entry->z_delta_size =0; 266}else if(entry->delta_data) { 267 size = entry->delta_size; 268 buf = entry->delta_data; 269 entry->delta_data = NULL; 270 type = (allow_ofs_delta && entry->delta->idx.offset) ? 271 OBJ_OFS_DELTA : OBJ_REF_DELTA; 272}else{ 273 buf =get_delta(entry); 274 size = entry->delta_size; 275 type = (allow_ofs_delta && entry->delta->idx.offset) ? 276 OBJ_OFS_DELTA : OBJ_REF_DELTA; 277} 278 279if(st)/* large blob case, just assume we don't compress well */ 280 datalen = size; 281else if(entry->z_delta_size) 282 datalen = entry->z_delta_size; 283else 284 datalen =do_compress(&buf, size); 285 286/* 287 * The object header is a byte of 'type' followed by zero or 288 * more bytes of length. 289 */ 290 hdrlen =encode_in_pack_object_header(header,sizeof(header), 291 type, size); 292 293if(type == OBJ_OFS_DELTA) { 294/* 295 * Deltas with relative base contain an additional 296 * encoding of the relative offset for the delta 297 * base from this object's position in the pack. 298 */ 299 off_t ofs = entry->idx.offset - entry->delta->idx.offset; 300unsigned pos =sizeof(dheader) -1; 301 dheader[pos] = ofs &127; 302while(ofs >>=7) 303 dheader[--pos] =128| (--ofs &127); 304if(limit && hdrlen +sizeof(dheader) - pos + datalen +20>= limit) { 305if(st) 306close_istream(st); 307free(buf); 308return0; 309} 310sha1write(f, header, hdrlen); 311sha1write(f, dheader + pos,sizeof(dheader) - pos); 312 hdrlen +=sizeof(dheader) - pos; 313}else if(type == OBJ_REF_DELTA) { 314/* 315 * Deltas with a base reference contain 316 * an additional 20 bytes for the base sha1. 317 */ 318if(limit && hdrlen +20+ datalen +20>= limit) { 319if(st) 320close_istream(st); 321free(buf); 322return0; 323} 324sha1write(f, header, hdrlen); 325sha1write(f, entry->delta->idx.sha1,20); 326 hdrlen +=20; 327}else{ 328if(limit && hdrlen + datalen +20>= limit) { 329if(st) 330close_istream(st); 331free(buf); 332return0; 333} 334sha1write(f, header, hdrlen); 335} 336if(st) { 337 datalen =write_large_blob_data(st, f, entry->idx.sha1); 338close_istream(st); 339}else{ 340sha1write(f, buf, datalen); 341free(buf); 342} 343 344return hdrlen + datalen; 345} 346 347/* Return 0 if we will bust the pack-size limit */ 348static off_t write_reuse_object(struct sha1file *f,struct object_entry *entry, 349unsigned long limit,int usable_delta) 350{ 351struct packed_git *p = entry->in_pack; 352struct pack_window *w_curs = NULL; 353struct revindex_entry *revidx; 354 off_t offset; 355enum object_type type = entry->type; 356 off_t datalen; 357unsigned char header[MAX_PACK_OBJECT_HEADER], 358 dheader[MAX_PACK_OBJECT_HEADER]; 359unsigned hdrlen; 360 361if(entry->delta) 362 type = (allow_ofs_delta && entry->delta->idx.offset) ? 363 OBJ_OFS_DELTA : OBJ_REF_DELTA; 364 hdrlen =encode_in_pack_object_header(header,sizeof(header), 365 type, entry->size); 366 367 offset = entry->in_pack_offset; 368 revidx =find_pack_revindex(p, offset); 369 datalen = revidx[1].offset - offset; 370if(!pack_to_stdout && p->index_version >1&& 371check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 372error("bad packed object CRC for%s",sha1_to_hex(entry->idx.sha1)); 373unuse_pack(&w_curs); 374returnwrite_no_reuse_object(f, entry, limit, usable_delta); 375} 376 377 offset += entry->in_pack_header_size; 378 datalen -= entry->in_pack_header_size; 379 380if(!pack_to_stdout && p->index_version ==1&& 381check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) { 382error("corrupt packed object for%s",sha1_to_hex(entry->idx.sha1)); 383unuse_pack(&w_curs); 384returnwrite_no_reuse_object(f, entry, limit, usable_delta); 385} 386 387if(type == OBJ_OFS_DELTA) { 388 off_t ofs = entry->idx.offset - entry->delta->idx.offset; 389unsigned pos =sizeof(dheader) -1; 390 dheader[pos] = ofs &127; 391while(ofs >>=7) 392 dheader[--pos] =128| (--ofs &127); 393if(limit && hdrlen +sizeof(dheader) - pos + datalen +20>= limit) { 394unuse_pack(&w_curs); 395return0; 396} 397sha1write(f, header, hdrlen); 398sha1write(f, dheader + pos,sizeof(dheader) - pos); 399 hdrlen +=sizeof(dheader) - pos; 400 reused_delta++; 401}else if(type == OBJ_REF_DELTA) { 402if(limit && hdrlen +20+ datalen +20>= limit) { 403unuse_pack(&w_curs); 404return0; 405} 406sha1write(f, header, hdrlen); 407sha1write(f, entry->delta->idx.sha1,20); 408 hdrlen +=20; 409 reused_delta++; 410}else{ 411if(limit && hdrlen + datalen +20>= limit) { 412unuse_pack(&w_curs); 413return0; 414} 415sha1write(f, header, hdrlen); 416} 417copy_pack_data(f, p, &w_curs, offset, datalen); 418unuse_pack(&w_curs); 419 reused++; 420return hdrlen + datalen; 421} 422 423/* Return 0 if we will bust the pack-size limit */ 424static off_t write_object(struct sha1file *f, 425struct object_entry *entry, 426 off_t write_offset) 427{ 428unsigned long limit; 429 off_t len; 430int usable_delta, to_reuse; 431 432if(!pack_to_stdout) 433crc32_begin(f); 434 435/* apply size limit if limited packsize and not first object */ 436if(!pack_size_limit || !nr_written) 437 limit =0; 438else if(pack_size_limit <= write_offset) 439/* 440 * the earlier object did not fit the limit; avoid 441 * mistaking this with unlimited (i.e. limit = 0). 442 */ 443 limit =1; 444else 445 limit = pack_size_limit - write_offset; 446 447if(!entry->delta) 448 usable_delta =0;/* no delta */ 449else if(!pack_size_limit) 450 usable_delta =1;/* unlimited packfile */ 451else if(entry->delta->idx.offset == (off_t)-1) 452 usable_delta =0;/* base was written to another pack */ 453else if(entry->delta->idx.offset) 454 usable_delta =1;/* base already exists in this pack */ 455else 456 usable_delta =0;/* base could end up in another pack */ 457 458if(!reuse_object) 459 to_reuse =0;/* explicit */ 460else if(!entry->in_pack) 461 to_reuse =0;/* can't reuse what we don't have */ 462else if(entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA) 463/* check_object() decided it for us ... */ 464 to_reuse = usable_delta; 465/* ... but pack split may override that */ 466else if(entry->type != entry->in_pack_type) 467 to_reuse =0;/* pack has delta which is unusable */ 468else if(entry->delta) 469 to_reuse =0;/* we want to pack afresh */ 470else 471 to_reuse =1;/* we have it in-pack undeltified, 472 * and we do not need to deltify it. 473 */ 474 475if(!to_reuse) 476 len =write_no_reuse_object(f, entry, limit, usable_delta); 477else 478 len =write_reuse_object(f, entry, limit, usable_delta); 479if(!len) 480return0; 481 482if(usable_delta) 483 written_delta++; 484 written++; 485if(!pack_to_stdout) 486 entry->idx.crc32 =crc32_end(f); 487return len; 488} 489 490enum write_one_status { 491 WRITE_ONE_SKIP = -1,/* already written */ 492 WRITE_ONE_BREAK =0,/* writing this will bust the limit; not written */ 493 WRITE_ONE_WRITTEN =1,/* normal */ 494 WRITE_ONE_RECURSIVE =2/* already scheduled to be written */ 495}; 496 497static enum write_one_status write_one(struct sha1file *f, 498struct object_entry *e, 499 off_t *offset) 500{ 501 off_t size; 502int recursing; 503 504/* 505 * we set offset to 1 (which is an impossible value) to mark 506 * the fact that this object is involved in "write its base 507 * first before writing a deltified object" recursion. 508 */ 509 recursing = (e->idx.offset ==1); 510if(recursing) { 511warning("recursive delta detected for object%s", 512sha1_to_hex(e->idx.sha1)); 513return WRITE_ONE_RECURSIVE; 514}else if(e->idx.offset || e->preferred_base) { 515/* offset is non zero if object is written already. */ 516return WRITE_ONE_SKIP; 517} 518 519/* if we are deltified, write out base object first. */ 520if(e->delta) { 521 e->idx.offset =1;/* now recurse */ 522switch(write_one(f, e->delta, offset)) { 523case WRITE_ONE_RECURSIVE: 524/* we cannot depend on this one */ 525 e->delta = NULL; 526break; 527default: 528break; 529case WRITE_ONE_BREAK: 530 e->idx.offset = recursing; 531return WRITE_ONE_BREAK; 532} 533} 534 535 e->idx.offset = *offset; 536 size =write_object(f, e, *offset); 537if(!size) { 538 e->idx.offset = recursing; 539return WRITE_ONE_BREAK; 540} 541 written_list[nr_written++] = &e->idx; 542 543/* make sure off_t is sufficiently large not to wrap */ 544if(signed_add_overflows(*offset, size)) 545die("pack too large for current definition of off_t"); 546*offset += size; 547return WRITE_ONE_WRITTEN; 548} 549 550static intmark_tagged(const char*path,const struct object_id *oid,int flag, 551void*cb_data) 552{ 553unsigned char peeled[20]; 554struct object_entry *entry =packlist_find(&to_pack, oid->hash, NULL); 555 556if(entry) 557 entry->tagged =1; 558if(!peel_ref(path, peeled)) { 559 entry =packlist_find(&to_pack, peeled, NULL); 560if(entry) 561 entry->tagged =1; 562} 563return0; 564} 565 566staticinlinevoidadd_to_write_order(struct object_entry **wo, 567unsigned int*endp, 568struct object_entry *e) 569{ 570if(e->filled) 571return; 572 wo[(*endp)++] = e; 573 e->filled =1; 574} 575 576static voidadd_descendants_to_write_order(struct object_entry **wo, 577unsigned int*endp, 578struct object_entry *e) 579{ 580int add_to_order =1; 581while(e) { 582if(add_to_order) { 583struct object_entry *s; 584/* add this node... */ 585add_to_write_order(wo, endp, e); 586/* all its siblings... */ 587for(s = e->delta_sibling; s; s = s->delta_sibling) { 588add_to_write_order(wo, endp, s); 589} 590} 591/* drop down a level to add left subtree nodes if possible */ 592if(e->delta_child) { 593 add_to_order =1; 594 e = e->delta_child; 595}else{ 596 add_to_order =0; 597/* our sibling might have some children, it is next */ 598if(e->delta_sibling) { 599 e = e->delta_sibling; 600continue; 601} 602/* go back to our parent node */ 603 e = e->delta; 604while(e && !e->delta_sibling) { 605/* we're on the right side of a subtree, keep 606 * going up until we can go right again */ 607 e = e->delta; 608} 609if(!e) { 610/* done- we hit our original root node */ 611return; 612} 613/* pass it off to sibling at this level */ 614 e = e->delta_sibling; 615} 616}; 617} 618 619static voidadd_family_to_write_order(struct object_entry **wo, 620unsigned int*endp, 621struct object_entry *e) 622{ 623struct object_entry *root; 624 625for(root = e; root->delta; root = root->delta) 626;/* nothing */ 627add_descendants_to_write_order(wo, endp, root); 628} 629 630static struct object_entry **compute_write_order(void) 631{ 632unsigned int i, wo_end, last_untagged; 633 634struct object_entry **wo; 635struct object_entry *objects = to_pack.objects; 636 637for(i =0; i < to_pack.nr_objects; i++) { 638 objects[i].tagged =0; 639 objects[i].filled =0; 640 objects[i].delta_child = NULL; 641 objects[i].delta_sibling = NULL; 642} 643 644/* 645 * Fully connect delta_child/delta_sibling network. 646 * Make sure delta_sibling is sorted in the original 647 * recency order. 648 */ 649for(i = to_pack.nr_objects; i >0;) { 650struct object_entry *e = &objects[--i]; 651if(!e->delta) 652continue; 653/* Mark me as the first child */ 654 e->delta_sibling = e->delta->delta_child; 655 e->delta->delta_child = e; 656} 657 658/* 659 * Mark objects that are at the tip of tags. 660 */ 661for_each_tag_ref(mark_tagged, NULL); 662 663/* 664 * Give the objects in the original recency order until 665 * we see a tagged tip. 666 */ 667ALLOC_ARRAY(wo, to_pack.nr_objects); 668for(i = wo_end =0; i < to_pack.nr_objects; i++) { 669if(objects[i].tagged) 670break; 671add_to_write_order(wo, &wo_end, &objects[i]); 672} 673 last_untagged = i; 674 675/* 676 * Then fill all the tagged tips. 677 */ 678for(; i < to_pack.nr_objects; i++) { 679if(objects[i].tagged) 680add_to_write_order(wo, &wo_end, &objects[i]); 681} 682 683/* 684 * And then all remaining commits and tags. 685 */ 686for(i = last_untagged; i < to_pack.nr_objects; i++) { 687if(objects[i].type != OBJ_COMMIT && 688 objects[i].type != OBJ_TAG) 689continue; 690add_to_write_order(wo, &wo_end, &objects[i]); 691} 692 693/* 694 * And then all the trees. 695 */ 696for(i = last_untagged; i < to_pack.nr_objects; i++) { 697if(objects[i].type != OBJ_TREE) 698continue; 699add_to_write_order(wo, &wo_end, &objects[i]); 700} 701 702/* 703 * Finally all the rest in really tight order 704 */ 705for(i = last_untagged; i < to_pack.nr_objects; i++) { 706if(!objects[i].filled) 707add_family_to_write_order(wo, &wo_end, &objects[i]); 708} 709 710if(wo_end != to_pack.nr_objects) 711die("ordered%uobjects, expected %"PRIu32, wo_end, to_pack.nr_objects); 712 713return wo; 714} 715 716static off_t write_reused_pack(struct sha1file *f) 717{ 718unsigned char buffer[8192]; 719 off_t to_write, total; 720int fd; 721 722if(!is_pack_valid(reuse_packfile)) 723die("packfile is invalid:%s", reuse_packfile->pack_name); 724 725 fd =git_open(reuse_packfile->pack_name); 726if(fd <0) 727die_errno("unable to open packfile for reuse:%s", 728 reuse_packfile->pack_name); 729 730if(lseek(fd,sizeof(struct pack_header), SEEK_SET) == -1) 731die_errno("unable to seek in reused packfile"); 732 733if(reuse_packfile_offset <0) 734 reuse_packfile_offset = reuse_packfile->pack_size -20; 735 736 total = to_write = reuse_packfile_offset -sizeof(struct pack_header); 737 738while(to_write) { 739int read_pack =xread(fd, buffer,sizeof(buffer)); 740 741if(read_pack <=0) 742die_errno("unable to read from reused packfile"); 743 744if(read_pack > to_write) 745 read_pack = to_write; 746 747sha1write(f, buffer, read_pack); 748 to_write -= read_pack; 749 750/* 751 * We don't know the actual number of objects written, 752 * only how many bytes written, how many bytes total, and 753 * how many objects total. So we can fake it by pretending all 754 * objects we are writing are the same size. This gives us a 755 * smooth progress meter, and at the end it matches the true 756 * answer. 757 */ 758 written = reuse_packfile_objects * 759(((double)(total - to_write)) / total); 760display_progress(progress_state, written); 761} 762 763close(fd); 764 written = reuse_packfile_objects; 765display_progress(progress_state, written); 766return reuse_packfile_offset -sizeof(struct pack_header); 767} 768 769static const char no_split_warning[] =N_( 770"disabling bitmap writing, packs are split due to pack.packSizeLimit" 771); 772 773static voidwrite_pack_file(void) 774{ 775uint32_t i =0, j; 776struct sha1file *f; 777 off_t offset; 778uint32_t nr_remaining = nr_result; 779time_t last_mtime =0; 780struct object_entry **write_order; 781 782if(progress > pack_to_stdout) 783 progress_state =start_progress(_("Writing objects"), nr_result); 784ALLOC_ARRAY(written_list, to_pack.nr_objects); 785 write_order =compute_write_order(); 786 787do{ 788unsigned char sha1[20]; 789char*pack_tmp_name = NULL; 790 791if(pack_to_stdout) 792 f =sha1fd_throughput(1,"<stdout>", progress_state); 793else 794 f =create_tmp_packfile(&pack_tmp_name); 795 796 offset =write_pack_header(f, nr_remaining); 797 798if(reuse_packfile) { 799 off_t packfile_size; 800assert(pack_to_stdout); 801 802 packfile_size =write_reused_pack(f); 803 offset += packfile_size; 804} 805 806 nr_written =0; 807for(; i < to_pack.nr_objects; i++) { 808struct object_entry *e = write_order[i]; 809if(write_one(f, e, &offset) == WRITE_ONE_BREAK) 810break; 811display_progress(progress_state, written); 812} 813 814/* 815 * Did we write the wrong # entries in the header? 816 * If so, rewrite it like in fast-import 817 */ 818if(pack_to_stdout) { 819sha1close(f, sha1, CSUM_CLOSE); 820}else if(nr_written == nr_remaining) { 821sha1close(f, sha1, CSUM_FSYNC); 822}else{ 823int fd =sha1close(f, sha1,0); 824fixup_pack_header_footer(fd, sha1, pack_tmp_name, 825 nr_written, sha1, offset); 826close(fd); 827if(write_bitmap_index) { 828warning(_(no_split_warning)); 829 write_bitmap_index =0; 830} 831} 832 833if(!pack_to_stdout) { 834struct stat st; 835struct strbuf tmpname = STRBUF_INIT; 836 837/* 838 * Packs are runtime accessed in their mtime 839 * order since newer packs are more likely to contain 840 * younger objects. So if we are creating multiple 841 * packs then we should modify the mtime of later ones 842 * to preserve this property. 843 */ 844if(stat(pack_tmp_name, &st) <0) { 845warning_errno("failed to stat%s", pack_tmp_name); 846}else if(!last_mtime) { 847 last_mtime = st.st_mtime; 848}else{ 849struct utimbuf utb; 850 utb.actime = st.st_atime; 851 utb.modtime = --last_mtime; 852if(utime(pack_tmp_name, &utb) <0) 853warning_errno("failed utime() on%s", pack_tmp_name); 854} 855 856strbuf_addf(&tmpname,"%s-", base_name); 857 858if(write_bitmap_index) { 859bitmap_writer_set_checksum(sha1); 860bitmap_writer_build_type_index(written_list, nr_written); 861} 862 863finish_tmp_packfile(&tmpname, pack_tmp_name, 864 written_list, nr_written, 865&pack_idx_opts, sha1); 866 867if(write_bitmap_index) { 868strbuf_addf(&tmpname,"%s.bitmap",sha1_to_hex(sha1)); 869 870stop_progress(&progress_state); 871 872bitmap_writer_show_progress(progress); 873bitmap_writer_reuse_bitmaps(&to_pack); 874bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 875bitmap_writer_build(&to_pack); 876bitmap_writer_finish(written_list, nr_written, 877 tmpname.buf, write_bitmap_options); 878 write_bitmap_index =0; 879} 880 881strbuf_release(&tmpname); 882free(pack_tmp_name); 883puts(sha1_to_hex(sha1)); 884} 885 886/* mark written objects as written to previous pack */ 887for(j =0; j < nr_written; j++) { 888 written_list[j]->offset = (off_t)-1; 889} 890 nr_remaining -= nr_written; 891}while(nr_remaining && i < to_pack.nr_objects); 892 893free(written_list); 894free(write_order); 895stop_progress(&progress_state); 896if(written != nr_result) 897die("wrote %"PRIu32" objects while expecting %"PRIu32, 898 written, nr_result); 899} 900 901static intno_try_delta(const char*path) 902{ 903static struct attr_check *check; 904 905if(!check) 906 check =attr_check_initl("delta", NULL); 907if(git_check_attr(path, check)) 908return0; 909if(ATTR_FALSE(check->items[0].value)) 910return1; 911return0; 912} 913 914/* 915 * When adding an object, check whether we have already added it 916 * to our packing list. If so, we can skip. However, if we are 917 * being asked to excludei t, but the previous mention was to include 918 * it, make sure to adjust its flags and tweak our numbers accordingly. 919 * 920 * As an optimization, we pass out the index position where we would have 921 * found the item, since that saves us from having to look it up again a 922 * few lines later when we want to add the new entry. 923 */ 924static inthave_duplicate_entry(const unsigned char*sha1, 925int exclude, 926uint32_t*index_pos) 927{ 928struct object_entry *entry; 929 930 entry =packlist_find(&to_pack, sha1, index_pos); 931if(!entry) 932return0; 933 934if(exclude) { 935if(!entry->preferred_base) 936 nr_result--; 937 entry->preferred_base =1; 938} 939 940return1; 941} 942 943static intwant_found_object(int exclude,struct packed_git *p) 944{ 945if(exclude) 946return1; 947if(incremental) 948return0; 949 950/* 951 * When asked to do --local (do not include an object that appears in a 952 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 953 * an object that appears in a pack marked with .keep), finding a pack 954 * that matches the criteria is sufficient for us to decide to omit it. 955 * However, even if this pack does not satisfy the criteria, we need to 956 * make sure no copy of this object appears in _any_ pack that makes us 957 * to omit the object, so we need to check all the packs. 958 * 959 * We can however first check whether these options can possible matter; 960 * if they do not matter we know we want the object in generated pack. 961 * Otherwise, we signal "-1" at the end to tell the caller that we do 962 * not know either way, and it needs to check more packs. 963 */ 964if(!ignore_packed_keep && 965(!local || !have_non_local_packs)) 966return1; 967 968if(local && !p->pack_local) 969return0; 970if(ignore_packed_keep && p->pack_local && p->pack_keep) 971return0; 972 973/* we don't know yet; keep looking for more packs */ 974return-1; 975} 976 977/* 978 * Check whether we want the object in the pack (e.g., we do not want 979 * objects found in non-local stores if the "--local" option was used). 980 * 981 * If the caller already knows an existing pack it wants to take the object 982 * from, that is passed in *found_pack and *found_offset; otherwise this 983 * function finds if there is any pack that has the object and returns the pack 984 * and its offset in these variables. 985 */ 986static intwant_object_in_pack(const unsigned char*sha1, 987int exclude, 988struct packed_git **found_pack, 989 off_t *found_offset) 990{ 991struct mru_entry *entry; 992int want; 993 994if(!exclude && local &&has_loose_object_nonlocal(sha1)) 995return0; 996 997/* 998 * If we already know the pack object lives in, start checks from that 999 * pack - in the usual case when neither --local was given nor .keep files1000 * are present we will determine the answer right now.1001 */1002if(*found_pack) {1003 want =want_found_object(exclude, *found_pack);1004if(want != -1)1005return want;1006}10071008for(entry = packed_git_mru->head; entry; entry = entry->next) {1009struct packed_git *p = entry->item;1010 off_t offset;10111012if(p == *found_pack)1013 offset = *found_offset;1014else1015 offset =find_pack_entry_one(sha1, p);10161017if(offset) {1018if(!*found_pack) {1019if(!is_pack_valid(p))1020continue;1021*found_offset = offset;1022*found_pack = p;1023}1024 want =want_found_object(exclude, p);1025if(!exclude && want >0)1026mru_mark(packed_git_mru, entry);1027if(want != -1)1028return want;1029}1030}10311032return1;1033}10341035static voidcreate_object_entry(const unsigned char*sha1,1036enum object_type type,1037uint32_t hash,1038int exclude,1039int no_try_delta,1040uint32_t index_pos,1041struct packed_git *found_pack,1042 off_t found_offset)1043{1044struct object_entry *entry;10451046 entry =packlist_alloc(&to_pack, sha1, index_pos);1047 entry->hash = hash;1048if(type)1049 entry->type = type;1050if(exclude)1051 entry->preferred_base =1;1052else1053 nr_result++;1054if(found_pack) {1055 entry->in_pack = found_pack;1056 entry->in_pack_offset = found_offset;1057}10581059 entry->no_try_delta = no_try_delta;1060}10611062static const char no_closure_warning[] =N_(1063"disabling bitmap writing, as some objects are not being packed"1064);10651066static intadd_object_entry(const unsigned char*sha1,enum object_type type,1067const char*name,int exclude)1068{1069struct packed_git *found_pack = NULL;1070 off_t found_offset =0;1071uint32_t index_pos;10721073if(have_duplicate_entry(sha1, exclude, &index_pos))1074return0;10751076if(!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {1077/* The pack is missing an object, so it will not have closure */1078if(write_bitmap_index) {1079warning(_(no_closure_warning));1080 write_bitmap_index =0;1081}1082return0;1083}10841085create_object_entry(sha1, type,pack_name_hash(name),1086 exclude, name &&no_try_delta(name),1087 index_pos, found_pack, found_offset);10881089display_progress(progress_state, nr_result);1090return1;1091}10921093static intadd_object_entry_from_bitmap(const unsigned char*sha1,1094enum object_type type,1095int flags,uint32_t name_hash,1096struct packed_git *pack, off_t offset)1097{1098uint32_t index_pos;10991100if(have_duplicate_entry(sha1,0, &index_pos))1101return0;11021103if(!want_object_in_pack(sha1,0, &pack, &offset))1104return0;11051106create_object_entry(sha1, type, name_hash,0,0, index_pos, pack, offset);11071108display_progress(progress_state, nr_result);1109return1;1110}11111112struct pbase_tree_cache {1113unsigned char sha1[20];1114int ref;1115int temporary;1116void*tree_data;1117unsigned long tree_size;1118};11191120static struct pbase_tree_cache *(pbase_tree_cache[256]);1121static intpbase_tree_cache_ix(const unsigned char*sha1)1122{1123return sha1[0] %ARRAY_SIZE(pbase_tree_cache);1124}1125static intpbase_tree_cache_ix_incr(int ix)1126{1127return(ix+1) %ARRAY_SIZE(pbase_tree_cache);1128}11291130static struct pbase_tree {1131struct pbase_tree *next;1132/* This is a phony "cache" entry; we are not1133 * going to evict it or find it through _get()1134 * mechanism -- this is for the toplevel node that1135 * would almost always change with any commit.1136 */1137struct pbase_tree_cache pcache;1138} *pbase_tree;11391140static struct pbase_tree_cache *pbase_tree_get(const unsigned char*sha1)1141{1142struct pbase_tree_cache *ent, *nent;1143void*data;1144unsigned long size;1145enum object_type type;1146int neigh;1147int my_ix =pbase_tree_cache_ix(sha1);1148int available_ix = -1;11491150/* pbase-tree-cache acts as a limited hashtable.1151 * your object will be found at your index or within a few1152 * slots after that slot if it is cached.1153 */1154for(neigh =0; neigh <8; neigh++) {1155 ent = pbase_tree_cache[my_ix];1156if(ent && !hashcmp(ent->sha1, sha1)) {1157 ent->ref++;1158return ent;1159}1160else if(((available_ix <0) && (!ent || !ent->ref)) ||1161((0<= available_ix) &&1162(!ent && pbase_tree_cache[available_ix])))1163 available_ix = my_ix;1164if(!ent)1165break;1166 my_ix =pbase_tree_cache_ix_incr(my_ix);1167}11681169/* Did not find one. Either we got a bogus request or1170 * we need to read and perhaps cache.1171 */1172 data =read_sha1_file(sha1, &type, &size);1173if(!data)1174return NULL;1175if(type != OBJ_TREE) {1176free(data);1177return NULL;1178}11791180/* We need to either cache or return a throwaway copy */11811182if(available_ix <0)1183 ent = NULL;1184else{1185 ent = pbase_tree_cache[available_ix];1186 my_ix = available_ix;1187}11881189if(!ent) {1190 nent =xmalloc(sizeof(*nent));1191 nent->temporary = (available_ix <0);1192}1193else{1194/* evict and reuse */1195free(ent->tree_data);1196 nent = ent;1197}1198hashcpy(nent->sha1, sha1);1199 nent->tree_data = data;1200 nent->tree_size = size;1201 nent->ref =1;1202if(!nent->temporary)1203 pbase_tree_cache[my_ix] = nent;1204return nent;1205}12061207static voidpbase_tree_put(struct pbase_tree_cache *cache)1208{1209if(!cache->temporary) {1210 cache->ref--;1211return;1212}1213free(cache->tree_data);1214free(cache);1215}12161217static intname_cmp_len(const char*name)1218{1219int i;1220for(i =0; name[i] && name[i] !='\n'&& name[i] !='/'; i++)1221;1222return i;1223}12241225static voidadd_pbase_object(struct tree_desc *tree,1226const char*name,1227int cmplen,1228const char*fullname)1229{1230struct name_entry entry;1231int cmp;12321233while(tree_entry(tree,&entry)) {1234if(S_ISGITLINK(entry.mode))1235continue;1236 cmp =tree_entry_len(&entry) != cmplen ?1:1237memcmp(name, entry.path, cmplen);1238if(cmp >0)1239continue;1240if(cmp <0)1241return;1242if(name[cmplen] !='/') {1243add_object_entry(entry.oid->hash,1244object_type(entry.mode),1245 fullname,1);1246return;1247}1248if(S_ISDIR(entry.mode)) {1249struct tree_desc sub;1250struct pbase_tree_cache *tree;1251const char*down = name+cmplen+1;1252int downlen =name_cmp_len(down);12531254 tree =pbase_tree_get(entry.oid->hash);1255if(!tree)1256return;1257init_tree_desc(&sub, tree->tree_data, tree->tree_size);12581259add_pbase_object(&sub, down, downlen, fullname);1260pbase_tree_put(tree);1261}1262}1263}12641265static unsigned*done_pbase_paths;1266static int done_pbase_paths_num;1267static int done_pbase_paths_alloc;1268static intdone_pbase_path_pos(unsigned hash)1269{1270int lo =0;1271int hi = done_pbase_paths_num;1272while(lo < hi) {1273int mi = (hi + lo) /2;1274if(done_pbase_paths[mi] == hash)1275return mi;1276if(done_pbase_paths[mi] < hash)1277 hi = mi;1278else1279 lo = mi +1;1280}1281return-lo-1;1282}12831284static intcheck_pbase_path(unsigned hash)1285{1286int pos = (!done_pbase_paths) ? -1:done_pbase_path_pos(hash);1287if(0<= pos)1288return1;1289 pos = -pos -1;1290ALLOC_GROW(done_pbase_paths,1291 done_pbase_paths_num +1,1292 done_pbase_paths_alloc);1293 done_pbase_paths_num++;1294if(pos < done_pbase_paths_num)1295memmove(done_pbase_paths + pos +1,1296 done_pbase_paths + pos,1297(done_pbase_paths_num - pos -1) *sizeof(unsigned));1298 done_pbase_paths[pos] = hash;1299return0;1300}13011302static voidadd_preferred_base_object(const char*name)1303{1304struct pbase_tree *it;1305int cmplen;1306unsigned hash =pack_name_hash(name);13071308if(!num_preferred_base ||check_pbase_path(hash))1309return;13101311 cmplen =name_cmp_len(name);1312for(it = pbase_tree; it; it = it->next) {1313if(cmplen ==0) {1314add_object_entry(it->pcache.sha1, OBJ_TREE, NULL,1);1315}1316else{1317struct tree_desc tree;1318init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1319add_pbase_object(&tree, name, cmplen, name);1320}1321}1322}13231324static voidadd_preferred_base(unsigned char*sha1)1325{1326struct pbase_tree *it;1327void*data;1328unsigned long size;1329unsigned char tree_sha1[20];13301331if(window <= num_preferred_base++)1332return;13331334 data =read_object_with_reference(sha1, tree_type, &size, tree_sha1);1335if(!data)1336return;13371338for(it = pbase_tree; it; it = it->next) {1339if(!hashcmp(it->pcache.sha1, tree_sha1)) {1340free(data);1341return;1342}1343}13441345 it =xcalloc(1,sizeof(*it));1346 it->next = pbase_tree;1347 pbase_tree = it;13481349hashcpy(it->pcache.sha1, tree_sha1);1350 it->pcache.tree_data = data;1351 it->pcache.tree_size = size;1352}13531354static voidcleanup_preferred_base(void)1355{1356struct pbase_tree *it;1357unsigned i;13581359 it = pbase_tree;1360 pbase_tree = NULL;1361while(it) {1362struct pbase_tree *this= it;1363 it =this->next;1364free(this->pcache.tree_data);1365free(this);1366}13671368for(i =0; i <ARRAY_SIZE(pbase_tree_cache); i++) {1369if(!pbase_tree_cache[i])1370continue;1371free(pbase_tree_cache[i]->tree_data);1372free(pbase_tree_cache[i]);1373 pbase_tree_cache[i] = NULL;1374}13751376free(done_pbase_paths);1377 done_pbase_paths = NULL;1378 done_pbase_paths_num = done_pbase_paths_alloc =0;1379}13801381static voidcheck_object(struct object_entry *entry)1382{1383if(entry->in_pack) {1384struct packed_git *p = entry->in_pack;1385struct pack_window *w_curs = NULL;1386const unsigned char*base_ref = NULL;1387struct object_entry *base_entry;1388unsigned long used, used_0;1389unsigned long avail;1390 off_t ofs;1391unsigned char*buf, c;13921393 buf =use_pack(p, &w_curs, entry->in_pack_offset, &avail);13941395/*1396 * We want in_pack_type even if we do not reuse delta1397 * since non-delta representations could still be reused.1398 */1399 used =unpack_object_header_buffer(buf, avail,1400&entry->in_pack_type,1401&entry->size);1402if(used ==0)1403goto give_up;14041405/*1406 * Determine if this is a delta and if so whether we can1407 * reuse it or not. Otherwise let's find out as cheaply as1408 * possible what the actual type and size for this object is.1409 */1410switch(entry->in_pack_type) {1411default:1412/* Not a delta hence we've already got all we need. */1413 entry->type = entry->in_pack_type;1414 entry->in_pack_header_size = used;1415if(entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)1416goto give_up;1417unuse_pack(&w_curs);1418return;1419case OBJ_REF_DELTA:1420if(reuse_delta && !entry->preferred_base)1421 base_ref =use_pack(p, &w_curs,1422 entry->in_pack_offset + used, NULL);1423 entry->in_pack_header_size = used +20;1424break;1425case OBJ_OFS_DELTA:1426 buf =use_pack(p, &w_curs,1427 entry->in_pack_offset + used, NULL);1428 used_0 =0;1429 c = buf[used_0++];1430 ofs = c &127;1431while(c &128) {1432 ofs +=1;1433if(!ofs ||MSB(ofs,7)) {1434error("delta base offset overflow in pack for%s",1435sha1_to_hex(entry->idx.sha1));1436goto give_up;1437}1438 c = buf[used_0++];1439 ofs = (ofs <<7) + (c &127);1440}1441 ofs = entry->in_pack_offset - ofs;1442if(ofs <=0|| ofs >= entry->in_pack_offset) {1443error("delta base offset out of bound for%s",1444sha1_to_hex(entry->idx.sha1));1445goto give_up;1446}1447if(reuse_delta && !entry->preferred_base) {1448struct revindex_entry *revidx;1449 revidx =find_pack_revindex(p, ofs);1450if(!revidx)1451goto give_up;1452 base_ref =nth_packed_object_sha1(p, revidx->nr);1453}1454 entry->in_pack_header_size = used + used_0;1455break;1456}14571458if(base_ref && (base_entry =packlist_find(&to_pack, base_ref, NULL))) {1459/*1460 * If base_ref was set above that means we wish to1461 * reuse delta data, and we even found that base1462 * in the list of objects we want to pack. Goodie!1463 *1464 * Depth value does not matter - find_deltas() will1465 * never consider reused delta as the base object to1466 * deltify other objects against, in order to avoid1467 * circular deltas.1468 */1469 entry->type = entry->in_pack_type;1470 entry->delta = base_entry;1471 entry->delta_size = entry->size;1472 entry->delta_sibling = base_entry->delta_child;1473 base_entry->delta_child = entry;1474unuse_pack(&w_curs);1475return;1476}14771478if(entry->type) {1479/*1480 * This must be a delta and we already know what the1481 * final object type is. Let's extract the actual1482 * object size from the delta header.1483 */1484 entry->size =get_size_from_delta(p, &w_curs,1485 entry->in_pack_offset + entry->in_pack_header_size);1486if(entry->size ==0)1487goto give_up;1488unuse_pack(&w_curs);1489return;1490}14911492/*1493 * No choice but to fall back to the recursive delta walk1494 * with sha1_object_info() to find about the object type1495 * at this point...1496 */1497 give_up:1498unuse_pack(&w_curs);1499}15001501 entry->type =sha1_object_info(entry->idx.sha1, &entry->size);1502/*1503 * The error condition is checked in prepare_pack(). This is1504 * to permit a missing preferred base object to be ignored1505 * as a preferred base. Doing so can result in a larger1506 * pack file, but the transfer will still take place.1507 */1508}15091510static intpack_offset_sort(const void*_a,const void*_b)1511{1512const struct object_entry *a = *(struct object_entry **)_a;1513const struct object_entry *b = *(struct object_entry **)_b;15141515/* avoid filesystem trashing with loose objects */1516if(!a->in_pack && !b->in_pack)1517returnhashcmp(a->idx.sha1, b->idx.sha1);15181519if(a->in_pack < b->in_pack)1520return-1;1521if(a->in_pack > b->in_pack)1522return1;1523return a->in_pack_offset < b->in_pack_offset ? -1:1524(a->in_pack_offset > b->in_pack_offset);1525}15261527/*1528 * Drop an on-disk delta we were planning to reuse. Naively, this would1529 * just involve blanking out the "delta" field, but we have to deal1530 * with some extra book-keeping:1531 *1532 * 1. Removing ourselves from the delta_sibling linked list.1533 *1534 * 2. Updating our size/type to the non-delta representation. These were1535 * either not recorded initially (size) or overwritten with the delta type1536 * (type) when check_object() decided to reuse the delta.1537 *1538 * 3. Resetting our delta depth, as we are now a base object.1539 */1540static voiddrop_reused_delta(struct object_entry *entry)1541{1542struct object_entry **p = &entry->delta->delta_child;1543struct object_info oi = OBJECT_INFO_INIT;15441545while(*p) {1546if(*p == entry)1547*p = (*p)->delta_sibling;1548else1549 p = &(*p)->delta_sibling;1550}1551 entry->delta = NULL;1552 entry->depth =0;15531554 oi.sizep = &entry->size;1555 oi.typep = &entry->type;1556if(packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) <0) {1557/*1558 * We failed to get the info from this pack for some reason;1559 * fall back to sha1_object_info, which may find another copy.1560 * And if that fails, the error will be recorded in entry->type1561 * and dealt with in prepare_pack().1562 */1563 entry->type =sha1_object_info(entry->idx.sha1, &entry->size);1564}1565}15661567/*1568 * Follow the chain of deltas from this entry onward, throwing away any links1569 * that cause us to hit a cycle (as determined by the DFS state flags in1570 * the entries).1571 *1572 * We also detect too-long reused chains that would violate our --depth1573 * limit.1574 */1575static voidbreak_delta_chains(struct object_entry *entry)1576{1577/*1578 * The actual depth of each object we will write is stored as an int,1579 * as it cannot exceed our int "depth" limit. But before we break1580 * changes based no that limit, we may potentially go as deep as the1581 * number of objects, which is elsewhere bounded to a uint32_t.1582 */1583uint32_t total_depth;1584struct object_entry *cur, *next;15851586for(cur = entry, total_depth =0;1587 cur;1588 cur = cur->delta, total_depth++) {1589if(cur->dfs_state == DFS_DONE) {1590/*1591 * We've already seen this object and know it isn't1592 * part of a cycle. We do need to append its depth1593 * to our count.1594 */1595 total_depth += cur->depth;1596break;1597}15981599/*1600 * We break cycles before looping, so an ACTIVE state (or any1601 * other cruft which made its way into the state variable)1602 * is a bug.1603 */1604if(cur->dfs_state != DFS_NONE)1605die("BUG: confusing delta dfs state in first pass:%d",1606 cur->dfs_state);16071608/*1609 * Now we know this is the first time we've seen the object. If1610 * it's not a delta, we're done traversing, but we'll mark it1611 * done to save time on future traversals.1612 */1613if(!cur->delta) {1614 cur->dfs_state = DFS_DONE;1615break;1616}16171618/*1619 * Mark ourselves as active and see if the next step causes1620 * us to cycle to another active object. It's important to do1621 * this _before_ we loop, because it impacts where we make the1622 * cut, and thus how our total_depth counter works.1623 * E.g., We may see a partial loop like:1624 *1625 * A -> B -> C -> D -> B1626 *1627 * Cutting B->C breaks the cycle. But now the depth of A is1628 * only 1, and our total_depth counter is at 3. The size of the1629 * error is always one less than the size of the cycle we1630 * broke. Commits C and D were "lost" from A's chain.1631 *1632 * If we instead cut D->B, then the depth of A is correct at 3.1633 * We keep all commits in the chain that we examined.1634 */1635 cur->dfs_state = DFS_ACTIVE;1636if(cur->delta->dfs_state == DFS_ACTIVE) {1637drop_reused_delta(cur);1638 cur->dfs_state = DFS_DONE;1639break;1640}1641}16421643/*1644 * And now that we've gone all the way to the bottom of the chain, we1645 * need to clear the active flags and set the depth fields as1646 * appropriate. Unlike the loop above, which can quit when it drops a1647 * delta, we need to keep going to look for more depth cuts. So we need1648 * an extra "next" pointer to keep going after we reset cur->delta.1649 */1650for(cur = entry; cur; cur = next) {1651 next = cur->delta;16521653/*1654 * We should have a chain of zero or more ACTIVE states down to1655 * a final DONE. We can quit after the DONE, because either it1656 * has no bases, or we've already handled them in a previous1657 * call.1658 */1659if(cur->dfs_state == DFS_DONE)1660break;1661else if(cur->dfs_state != DFS_ACTIVE)1662die("BUG: confusing delta dfs state in second pass:%d",1663 cur->dfs_state);16641665/*1666 * If the total_depth is more than depth, then we need to snip1667 * the chain into two or more smaller chains that don't exceed1668 * the maximum depth. Most of the resulting chains will contain1669 * (depth + 1) entries (i.e., depth deltas plus one base), and1670 * the last chain (i.e., the one containing entry) will contain1671 * whatever entries are left over, namely1672 * (total_depth % (depth + 1)) of them.1673 *1674 * Since we are iterating towards decreasing depth, we need to1675 * decrement total_depth as we go, and we need to write to the1676 * entry what its final depth will be after all of the1677 * snipping. Since we're snipping into chains of length (depth1678 * + 1) entries, the final depth of an entry will be its1679 * original depth modulo (depth + 1). Any time we encounter an1680 * entry whose final depth is supposed to be zero, we snip it1681 * from its delta base, thereby making it so.1682 */1683 cur->depth = (total_depth--) % (depth +1);1684if(!cur->depth)1685drop_reused_delta(cur);16861687 cur->dfs_state = DFS_DONE;1688}1689}16901691static voidget_object_details(void)1692{1693uint32_t i;1694struct object_entry **sorted_by_offset;16951696 sorted_by_offset =xcalloc(to_pack.nr_objects,sizeof(struct object_entry *));1697for(i =0; i < to_pack.nr_objects; i++)1698 sorted_by_offset[i] = to_pack.objects + i;1699QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17001701for(i =0; i < to_pack.nr_objects; i++) {1702struct object_entry *entry = sorted_by_offset[i];1703check_object(entry);1704if(big_file_threshold < entry->size)1705 entry->no_try_delta =1;1706}17071708/*1709 * This must happen in a second pass, since we rely on the delta1710 * information for the whole list being completed.1711 */1712for(i =0; i < to_pack.nr_objects; i++)1713break_delta_chains(&to_pack.objects[i]);17141715free(sorted_by_offset);1716}17171718/*1719 * We search for deltas in a list sorted by type, by filename hash, and then1720 * by size, so that we see progressively smaller and smaller files.1721 * That's because we prefer deltas to be from the bigger file1722 * to the smaller -- deletes are potentially cheaper, but perhaps1723 * more importantly, the bigger file is likely the more recent1724 * one. The deepest deltas are therefore the oldest objects which are1725 * less susceptible to be accessed often.1726 */1727static inttype_size_sort(const void*_a,const void*_b)1728{1729const struct object_entry *a = *(struct object_entry **)_a;1730const struct object_entry *b = *(struct object_entry **)_b;17311732if(a->type > b->type)1733return-1;1734if(a->type < b->type)1735return1;1736if(a->hash > b->hash)1737return-1;1738if(a->hash < b->hash)1739return1;1740if(a->preferred_base > b->preferred_base)1741return-1;1742if(a->preferred_base < b->preferred_base)1743return1;1744if(a->size > b->size)1745return-1;1746if(a->size < b->size)1747return1;1748return a < b ? -1: (a > b);/* newest first */1749}17501751struct unpacked {1752struct object_entry *entry;1753void*data;1754struct delta_index *index;1755unsigned depth;1756};17571758static intdelta_cacheable(unsigned long src_size,unsigned long trg_size,1759unsigned long delta_size)1760{1761if(max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1762return0;17631764if(delta_size < cache_max_small_delta_size)1765return1;17661767/* cache delta, if objects are large enough compared to delta size */1768if((src_size >>20) + (trg_size >>21) > (delta_size >>10))1769return1;17701771return0;1772}17731774#ifndef NO_PTHREADS17751776static pthread_mutex_t read_mutex;1777#define read_lock() pthread_mutex_lock(&read_mutex)1778#define read_unlock() pthread_mutex_unlock(&read_mutex)17791780static pthread_mutex_t cache_mutex;1781#define cache_lock() pthread_mutex_lock(&cache_mutex)1782#define cache_unlock() pthread_mutex_unlock(&cache_mutex)17831784static pthread_mutex_t progress_mutex;1785#define progress_lock() pthread_mutex_lock(&progress_mutex)1786#define progress_unlock() pthread_mutex_unlock(&progress_mutex)17871788#else17891790#define read_lock() (void)01791#define read_unlock() (void)01792#define cache_lock() (void)01793#define cache_unlock() (void)01794#define progress_lock() (void)01795#define progress_unlock() (void)017961797#endif17981799static inttry_delta(struct unpacked *trg,struct unpacked *src,1800unsigned max_depth,unsigned long*mem_usage)1801{1802struct object_entry *trg_entry = trg->entry;1803struct object_entry *src_entry = src->entry;1804unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1805unsigned ref_depth;1806enum object_type type;1807void*delta_buf;18081809/* Don't bother doing diffs between different types */1810if(trg_entry->type != src_entry->type)1811return-1;18121813/*1814 * We do not bother to try a delta that we discarded on an1815 * earlier try, but only when reusing delta data. Note that1816 * src_entry that is marked as the preferred_base should always1817 * be considered, as even if we produce a suboptimal delta against1818 * it, we will still save the transfer cost, as we already know1819 * the other side has it and we won't send src_entry at all.1820 */1821if(reuse_delta && trg_entry->in_pack &&1822 trg_entry->in_pack == src_entry->in_pack &&1823!src_entry->preferred_base &&1824 trg_entry->in_pack_type != OBJ_REF_DELTA &&1825 trg_entry->in_pack_type != OBJ_OFS_DELTA)1826return0;18271828/* Let's not bust the allowed depth. */1829if(src->depth >= max_depth)1830return0;18311832/* Now some size filtering heuristics. */1833 trg_size = trg_entry->size;1834if(!trg_entry->delta) {1835 max_size = trg_size/2-20;1836 ref_depth =1;1837}else{1838 max_size = trg_entry->delta_size;1839 ref_depth = trg->depth;1840}1841 max_size = (uint64_t)max_size * (max_depth - src->depth) /1842(max_depth - ref_depth +1);1843if(max_size ==0)1844return0;1845 src_size = src_entry->size;1846 sizediff = src_size < trg_size ? trg_size - src_size :0;1847if(sizediff >= max_size)1848return0;1849if(trg_size < src_size /32)1850return0;18511852/* Load data if not already done */1853if(!trg->data) {1854read_lock();1855 trg->data =read_sha1_file(trg_entry->idx.sha1, &type, &sz);1856read_unlock();1857if(!trg->data)1858die("object%scannot be read",1859sha1_to_hex(trg_entry->idx.sha1));1860if(sz != trg_size)1861die("object%sinconsistent object length (%lu vs%lu)",1862sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);1863*mem_usage += sz;1864}1865if(!src->data) {1866read_lock();1867 src->data =read_sha1_file(src_entry->idx.sha1, &type, &sz);1868read_unlock();1869if(!src->data) {1870if(src_entry->preferred_base) {1871static int warned =0;1872if(!warned++)1873warning("object%scannot be read",1874sha1_to_hex(src_entry->idx.sha1));1875/*1876 * Those objects are not included in the1877 * resulting pack. Be resilient and ignore1878 * them if they can't be read, in case the1879 * pack could be created nevertheless.1880 */1881return0;1882}1883die("object%scannot be read",1884sha1_to_hex(src_entry->idx.sha1));1885}1886if(sz != src_size)1887die("object%sinconsistent object length (%lu vs%lu)",1888sha1_to_hex(src_entry->idx.sha1), sz, src_size);1889*mem_usage += sz;1890}1891if(!src->index) {1892 src->index =create_delta_index(src->data, src_size);1893if(!src->index) {1894static int warned =0;1895if(!warned++)1896warning("suboptimal pack - out of memory");1897return0;1898}1899*mem_usage +=sizeof_delta_index(src->index);1900}19011902 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);1903if(!delta_buf)1904return0;19051906if(trg_entry->delta) {1907/* Prefer only shallower same-sized deltas. */1908if(delta_size == trg_entry->delta_size &&1909 src->depth +1>= trg->depth) {1910free(delta_buf);1911return0;1912}1913}19141915/*1916 * Handle memory allocation outside of the cache1917 * accounting lock. Compiler will optimize the strangeness1918 * away when NO_PTHREADS is defined.1919 */1920free(trg_entry->delta_data);1921cache_lock();1922if(trg_entry->delta_data) {1923 delta_cache_size -= trg_entry->delta_size;1924 trg_entry->delta_data = NULL;1925}1926if(delta_cacheable(src_size, trg_size, delta_size)) {1927 delta_cache_size += delta_size;1928cache_unlock();1929 trg_entry->delta_data =xrealloc(delta_buf, delta_size);1930}else{1931cache_unlock();1932free(delta_buf);1933}19341935 trg_entry->delta = src_entry;1936 trg_entry->delta_size = delta_size;1937 trg->depth = src->depth +1;19381939return1;1940}19411942static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)1943{1944struct object_entry *child = me->delta_child;1945unsigned int m = n;1946while(child) {1947unsigned int c =check_delta_limit(child, n +1);1948if(m < c)1949 m = c;1950 child = child->delta_sibling;1951}1952return m;1953}19541955static unsigned longfree_unpacked(struct unpacked *n)1956{1957unsigned long freed_mem =sizeof_delta_index(n->index);1958free_delta_index(n->index);1959 n->index = NULL;1960if(n->data) {1961 freed_mem += n->entry->size;1962free(n->data);1963 n->data = NULL;1964}1965 n->entry = NULL;1966 n->depth =0;1967return freed_mem;1968}19691970static voidfind_deltas(struct object_entry **list,unsigned*list_size,1971int window,int depth,unsigned*processed)1972{1973uint32_t i, idx =0, count =0;1974struct unpacked *array;1975unsigned long mem_usage =0;19761977 array =xcalloc(window,sizeof(struct unpacked));19781979for(;;) {1980struct object_entry *entry;1981struct unpacked *n = array + idx;1982int j, max_depth, best_base = -1;19831984progress_lock();1985if(!*list_size) {1986progress_unlock();1987break;1988}1989 entry = *list++;1990(*list_size)--;1991if(!entry->preferred_base) {1992(*processed)++;1993display_progress(progress_state, *processed);1994}1995progress_unlock();19961997 mem_usage -=free_unpacked(n);1998 n->entry = entry;19992000while(window_memory_limit &&2001 mem_usage > window_memory_limit &&2002 count >1) {2003uint32_t tail = (idx + window - count) % window;2004 mem_usage -=free_unpacked(array + tail);2005 count--;2006}20072008/* We do not compute delta to *create* objects we are not2009 * going to pack.2010 */2011if(entry->preferred_base)2012goto next;20132014/*2015 * If the current object is at pack edge, take the depth the2016 * objects that depend on the current object into account2017 * otherwise they would become too deep.2018 */2019 max_depth = depth;2020if(entry->delta_child) {2021 max_depth -=check_delta_limit(entry,0);2022if(max_depth <=0)2023goto next;2024}20252026 j = window;2027while(--j >0) {2028int ret;2029uint32_t other_idx = idx + j;2030struct unpacked *m;2031if(other_idx >= window)2032 other_idx -= window;2033 m = array + other_idx;2034if(!m->entry)2035break;2036 ret =try_delta(n, m, max_depth, &mem_usage);2037if(ret <0)2038break;2039else if(ret >0)2040 best_base = other_idx;2041}20422043/*2044 * If we decided to cache the delta data, then it is best2045 * to compress it right away. First because we have to do2046 * it anyway, and doing it here while we're threaded will2047 * save a lot of time in the non threaded write phase,2048 * as well as allow for caching more deltas within2049 * the same cache size limit.2050 * ...2051 * But only if not writing to stdout, since in that case2052 * the network is most likely throttling writes anyway,2053 * and therefore it is best to go to the write phase ASAP2054 * instead, as we can afford spending more time compressing2055 * between writes at that moment.2056 */2057if(entry->delta_data && !pack_to_stdout) {2058 entry->z_delta_size =do_compress(&entry->delta_data,2059 entry->delta_size);2060cache_lock();2061 delta_cache_size -= entry->delta_size;2062 delta_cache_size += entry->z_delta_size;2063cache_unlock();2064}20652066/* if we made n a delta, and if n is already at max2067 * depth, leaving it in the window is pointless. we2068 * should evict it first.2069 */2070if(entry->delta && max_depth <= n->depth)2071continue;20722073/*2074 * Move the best delta base up in the window, after the2075 * currently deltified object, to keep it longer. It will2076 * be the first base object to be attempted next.2077 */2078if(entry->delta) {2079struct unpacked swap = array[best_base];2080int dist = (window + idx - best_base) % window;2081int dst = best_base;2082while(dist--) {2083int src = (dst +1) % window;2084 array[dst] = array[src];2085 dst = src;2086}2087 array[dst] = swap;2088}20892090 next:2091 idx++;2092if(count +1< window)2093 count++;2094if(idx >= window)2095 idx =0;2096}20972098for(i =0; i < window; ++i) {2099free_delta_index(array[i].index);2100free(array[i].data);2101}2102free(array);2103}21042105#ifndef NO_PTHREADS21062107static voidtry_to_free_from_threads(size_t size)2108{2109read_lock();2110release_pack_memory(size);2111read_unlock();2112}21132114static try_to_free_t old_try_to_free_routine;21152116/*2117 * The main thread waits on the condition that (at least) one of the workers2118 * has stopped working (which is indicated in the .working member of2119 * struct thread_params).2120 * When a work thread has completed its work, it sets .working to 0 and2121 * signals the main thread and waits on the condition that .data_ready2122 * becomes 1.2123 */21242125struct thread_params {2126 pthread_t thread;2127struct object_entry **list;2128unsigned list_size;2129unsigned remaining;2130int window;2131int depth;2132int working;2133int data_ready;2134 pthread_mutex_t mutex;2135 pthread_cond_t cond;2136unsigned*processed;2137};21382139static pthread_cond_t progress_cond;21402141/*2142 * Mutex and conditional variable can't be statically-initialized on Windows.2143 */2144static voidinit_threaded_search(void)2145{2146init_recursive_mutex(&read_mutex);2147pthread_mutex_init(&cache_mutex, NULL);2148pthread_mutex_init(&progress_mutex, NULL);2149pthread_cond_init(&progress_cond, NULL);2150 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2151}21522153static voidcleanup_threaded_search(void)2154{2155set_try_to_free_routine(old_try_to_free_routine);2156pthread_cond_destroy(&progress_cond);2157pthread_mutex_destroy(&read_mutex);2158pthread_mutex_destroy(&cache_mutex);2159pthread_mutex_destroy(&progress_mutex);2160}21612162static void*threaded_find_deltas(void*arg)2163{2164struct thread_params *me = arg;21652166while(me->remaining) {2167find_deltas(me->list, &me->remaining,2168 me->window, me->depth, me->processed);21692170progress_lock();2171 me->working =0;2172pthread_cond_signal(&progress_cond);2173progress_unlock();21742175/*2176 * We must not set ->data_ready before we wait on the2177 * condition because the main thread may have set it to 12178 * before we get here. In order to be sure that new2179 * work is available if we see 1 in ->data_ready, it2180 * was initialized to 0 before this thread was spawned2181 * and we reset it to 0 right away.2182 */2183pthread_mutex_lock(&me->mutex);2184while(!me->data_ready)2185pthread_cond_wait(&me->cond, &me->mutex);2186 me->data_ready =0;2187pthread_mutex_unlock(&me->mutex);2188}2189/* leave ->working 1 so that this doesn't get more work assigned */2190return NULL;2191}21922193static voidll_find_deltas(struct object_entry **list,unsigned list_size,2194int window,int depth,unsigned*processed)2195{2196struct thread_params *p;2197int i, ret, active_threads =0;21982199init_threaded_search();22002201if(delta_search_threads <=1) {2202find_deltas(list, &list_size, window, depth, processed);2203cleanup_threaded_search();2204return;2205}2206if(progress > pack_to_stdout)2207fprintf(stderr,"Delta compression using up to%dthreads.\n",2208 delta_search_threads);2209 p =xcalloc(delta_search_threads,sizeof(*p));22102211/* Partition the work amongst work threads. */2212for(i =0; i < delta_search_threads; i++) {2213unsigned sub_size = list_size / (delta_search_threads - i);22142215/* don't use too small segments or no deltas will be found */2216if(sub_size <2*window && i+1< delta_search_threads)2217 sub_size =0;22182219 p[i].window = window;2220 p[i].depth = depth;2221 p[i].processed = processed;2222 p[i].working =1;2223 p[i].data_ready =0;22242225/* try to split chunks on "path" boundaries */2226while(sub_size && sub_size < list_size &&2227 list[sub_size]->hash &&2228 list[sub_size]->hash == list[sub_size-1]->hash)2229 sub_size++;22302231 p[i].list = list;2232 p[i].list_size = sub_size;2233 p[i].remaining = sub_size;22342235 list += sub_size;2236 list_size -= sub_size;2237}22382239/* Start work threads. */2240for(i =0; i < delta_search_threads; i++) {2241if(!p[i].list_size)2242continue;2243pthread_mutex_init(&p[i].mutex, NULL);2244pthread_cond_init(&p[i].cond, NULL);2245 ret =pthread_create(&p[i].thread, NULL,2246 threaded_find_deltas, &p[i]);2247if(ret)2248die("unable to create thread:%s",strerror(ret));2249 active_threads++;2250}22512252/*2253 * Now let's wait for work completion. Each time a thread is done2254 * with its work, we steal half of the remaining work from the2255 * thread with the largest number of unprocessed objects and give2256 * it to that newly idle thread. This ensure good load balancing2257 * until the remaining object list segments are simply too short2258 * to be worth splitting anymore.2259 */2260while(active_threads) {2261struct thread_params *target = NULL;2262struct thread_params *victim = NULL;2263unsigned sub_size =0;22642265progress_lock();2266for(;;) {2267for(i =0; !target && i < delta_search_threads; i++)2268if(!p[i].working)2269 target = &p[i];2270if(target)2271break;2272pthread_cond_wait(&progress_cond, &progress_mutex);2273}22742275for(i =0; i < delta_search_threads; i++)2276if(p[i].remaining >2*window &&2277(!victim || victim->remaining < p[i].remaining))2278 victim = &p[i];2279if(victim) {2280 sub_size = victim->remaining /2;2281 list = victim->list + victim->list_size - sub_size;2282while(sub_size && list[0]->hash &&2283 list[0]->hash == list[-1]->hash) {2284 list++;2285 sub_size--;2286}2287if(!sub_size) {2288/*2289 * It is possible for some "paths" to have2290 * so many objects that no hash boundary2291 * might be found. Let's just steal the2292 * exact half in that case.2293 */2294 sub_size = victim->remaining /2;2295 list -= sub_size;2296}2297 target->list = list;2298 victim->list_size -= sub_size;2299 victim->remaining -= sub_size;2300}2301 target->list_size = sub_size;2302 target->remaining = sub_size;2303 target->working =1;2304progress_unlock();23052306pthread_mutex_lock(&target->mutex);2307 target->data_ready =1;2308pthread_cond_signal(&target->cond);2309pthread_mutex_unlock(&target->mutex);23102311if(!sub_size) {2312pthread_join(target->thread, NULL);2313pthread_cond_destroy(&target->cond);2314pthread_mutex_destroy(&target->mutex);2315 active_threads--;2316}2317}2318cleanup_threaded_search();2319free(p);2320}23212322#else2323#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2324#endif23252326static voidadd_tag_chain(const struct object_id *oid)2327{2328struct tag *tag;23292330/*2331 * We catch duplicates already in add_object_entry(), but we'd2332 * prefer to do this extra check to avoid having to parse the2333 * tag at all if we already know that it's being packed (e.g., if2334 * it was included via bitmaps, we would not have parsed it2335 * previously).2336 */2337if(packlist_find(&to_pack, oid->hash, NULL))2338return;23392340 tag =lookup_tag(oid->hash);2341while(1) {2342if(!tag ||parse_tag(tag) || !tag->tagged)2343die("unable to pack objects reachable from tag%s",2344oid_to_hex(oid));23452346add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL,0);23472348if(tag->tagged->type != OBJ_TAG)2349return;23502351 tag = (struct tag *)tag->tagged;2352}2353}23542355static intadd_ref_tag(const char*path,const struct object_id *oid,int flag,void*cb_data)2356{2357struct object_id peeled;23582359if(starts_with(path,"refs/tags/") &&/* is a tag? */2360!peel_ref(path, peeled.hash) &&/* peelable? */2361packlist_find(&to_pack, peeled.hash, NULL))/* object packed? */2362add_tag_chain(oid);2363return0;2364}23652366static voidprepare_pack(int window,int depth)2367{2368struct object_entry **delta_list;2369uint32_t i, nr_deltas;2370unsigned n;23712372get_object_details();23732374/*2375 * If we're locally repacking then we need to be doubly careful2376 * from now on in order to make sure no stealth corruption gets2377 * propagated to the new pack. Clients receiving streamed packs2378 * should validate everything they get anyway so no need to incur2379 * the additional cost here in that case.2380 */2381if(!pack_to_stdout)2382 do_check_packed_object_crc =1;23832384if(!to_pack.nr_objects || !window || !depth)2385return;23862387ALLOC_ARRAY(delta_list, to_pack.nr_objects);2388 nr_deltas = n =0;23892390for(i =0; i < to_pack.nr_objects; i++) {2391struct object_entry *entry = to_pack.objects + i;23922393if(entry->delta)2394/* This happens if we decided to reuse existing2395 * delta from a pack. "reuse_delta &&" is implied.2396 */2397continue;23982399if(entry->size <50)2400continue;24012402if(entry->no_try_delta)2403continue;24042405if(!entry->preferred_base) {2406 nr_deltas++;2407if(entry->type <0)2408die("unable to get type of object%s",2409sha1_to_hex(entry->idx.sha1));2410}else{2411if(entry->type <0) {2412/*2413 * This object is not found, but we2414 * don't have to include it anyway.2415 */2416continue;2417}2418}24192420 delta_list[n++] = entry;2421}24222423if(nr_deltas && n >1) {2424unsigned nr_done =0;2425if(progress)2426 progress_state =start_progress(_("Compressing objects"),2427 nr_deltas);2428QSORT(delta_list, n, type_size_sort);2429ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2430stop_progress(&progress_state);2431if(nr_done != nr_deltas)2432die("inconsistency with delta count");2433}2434free(delta_list);2435}24362437static intgit_pack_config(const char*k,const char*v,void*cb)2438{2439if(!strcmp(k,"pack.window")) {2440 window =git_config_int(k, v);2441return0;2442}2443if(!strcmp(k,"pack.windowmemory")) {2444 window_memory_limit =git_config_ulong(k, v);2445return0;2446}2447if(!strcmp(k,"pack.depth")) {2448 depth =git_config_int(k, v);2449return0;2450}2451if(!strcmp(k,"pack.deltacachesize")) {2452 max_delta_cache_size =git_config_int(k, v);2453return0;2454}2455if(!strcmp(k,"pack.deltacachelimit")) {2456 cache_max_small_delta_size =git_config_int(k, v);2457return0;2458}2459if(!strcmp(k,"pack.writebitmaphashcache")) {2460if(git_config_bool(k, v))2461 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2462else2463 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2464}2465if(!strcmp(k,"pack.usebitmaps")) {2466 use_bitmap_index_default =git_config_bool(k, v);2467return0;2468}2469if(!strcmp(k,"pack.threads")) {2470 delta_search_threads =git_config_int(k, v);2471if(delta_search_threads <0)2472die("invalid number of threads specified (%d)",2473 delta_search_threads);2474#ifdef NO_PTHREADS2475if(delta_search_threads !=1)2476warning("no threads support, ignoring%s", k);2477#endif2478return0;2479}2480if(!strcmp(k,"pack.indexversion")) {2481 pack_idx_opts.version =git_config_int(k, v);2482if(pack_idx_opts.version >2)2483die("bad pack.indexversion=%"PRIu32,2484 pack_idx_opts.version);2485return0;2486}2487returngit_default_config(k, v, cb);2488}24892490static voidread_object_list_from_stdin(void)2491{2492char line[40+1+ PATH_MAX +2];2493unsigned char sha1[20];24942495for(;;) {2496if(!fgets(line,sizeof(line), stdin)) {2497if(feof(stdin))2498break;2499if(!ferror(stdin))2500die("fgets returned NULL, not EOF, not error!");2501if(errno != EINTR)2502die_errno("fgets");2503clearerr(stdin);2504continue;2505}2506if(line[0] =='-') {2507if(get_sha1_hex(line+1, sha1))2508die("expected edge sha1, got garbage:\n%s",2509 line);2510add_preferred_base(sha1);2511continue;2512}2513if(get_sha1_hex(line, sha1))2514die("expected sha1, got garbage:\n%s", line);25152516add_preferred_base_object(line+41);2517add_object_entry(sha1,0, line+41,0);2518}2519}25202521#define OBJECT_ADDED (1u<<20)25222523static voidshow_commit(struct commit *commit,void*data)2524{2525add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL,0);2526 commit->object.flags |= OBJECT_ADDED;25272528if(write_bitmap_index)2529index_commit_for_bitmap(commit);2530}25312532static voidshow_object(struct object *obj,const char*name,void*data)2533{2534add_preferred_base_object(name);2535add_object_entry(obj->oid.hash, obj->type, name,0);2536 obj->flags |= OBJECT_ADDED;2537}25382539static voidshow_edge(struct commit *commit)2540{2541add_preferred_base(commit->object.oid.hash);2542}25432544struct in_pack_object {2545 off_t offset;2546struct object *object;2547};25482549struct in_pack {2550int alloc;2551int nr;2552struct in_pack_object *array;2553};25542555static voidmark_in_pack_object(struct object *object,struct packed_git *p,struct in_pack *in_pack)2556{2557 in_pack->array[in_pack->nr].offset =find_pack_entry_one(object->oid.hash, p);2558 in_pack->array[in_pack->nr].object = object;2559 in_pack->nr++;2560}25612562/*2563 * Compare the objects in the offset order, in order to emulate the2564 * "git rev-list --objects" output that produced the pack originally.2565 */2566static intofscmp(const void*a_,const void*b_)2567{2568struct in_pack_object *a = (struct in_pack_object *)a_;2569struct in_pack_object *b = (struct in_pack_object *)b_;25702571if(a->offset < b->offset)2572return-1;2573else if(a->offset > b->offset)2574return1;2575else2576returnoidcmp(&a->object->oid, &b->object->oid);2577}25782579static voidadd_objects_in_unpacked_packs(struct rev_info *revs)2580{2581struct packed_git *p;2582struct in_pack in_pack;2583uint32_t i;25842585memset(&in_pack,0,sizeof(in_pack));25862587for(p = packed_git; p; p = p->next) {2588const unsigned char*sha1;2589struct object *o;25902591if(!p->pack_local || p->pack_keep)2592continue;2593if(open_pack_index(p))2594die("cannot open pack index");25952596ALLOC_GROW(in_pack.array,2597 in_pack.nr + p->num_objects,2598 in_pack.alloc);25992600for(i =0; i < p->num_objects; i++) {2601 sha1 =nth_packed_object_sha1(p, i);2602 o =lookup_unknown_object(sha1);2603if(!(o->flags & OBJECT_ADDED))2604mark_in_pack_object(o, p, &in_pack);2605 o->flags |= OBJECT_ADDED;2606}2607}26082609if(in_pack.nr) {2610QSORT(in_pack.array, in_pack.nr, ofscmp);2611for(i =0; i < in_pack.nr; i++) {2612struct object *o = in_pack.array[i].object;2613add_object_entry(o->oid.hash, o->type,"",0);2614}2615}2616free(in_pack.array);2617}26182619static intadd_loose_object(const struct object_id *oid,const char*path,2620void*data)2621{2622enum object_type type =sha1_object_info(oid->hash, NULL);26232624if(type <0) {2625warning("loose object at%scould not be examined", path);2626return0;2627}26282629add_object_entry(oid->hash, type,"",0);2630return0;2631}26322633/*2634 * We actually don't even have to worry about reachability here.2635 * add_object_entry will weed out duplicates, so we just add every2636 * loose object we find.2637 */2638static voidadd_unreachable_loose_objects(void)2639{2640for_each_loose_file_in_objdir(get_object_directory(),2641 add_loose_object,2642 NULL, NULL, NULL);2643}26442645static inthas_sha1_pack_kept_or_nonlocal(const unsigned char*sha1)2646{2647static struct packed_git *last_found = (void*)1;2648struct packed_git *p;26492650 p = (last_found != (void*)1) ? last_found : packed_git;26512652while(p) {2653if((!p->pack_local || p->pack_keep) &&2654find_pack_entry_one(sha1, p)) {2655 last_found = p;2656return1;2657}2658if(p == last_found)2659 p = packed_git;2660else2661 p = p->next;2662if(p == last_found)2663 p = p->next;2664}2665return0;2666}26672668/*2669 * Store a list of sha1s that are should not be discarded2670 * because they are either written too recently, or are2671 * reachable from another object that was.2672 *2673 * This is filled by get_object_list.2674 */2675static struct oid_array recent_objects;26762677static intloosened_object_can_be_discarded(const struct object_id *oid,2678 timestamp_t mtime)2679{2680if(!unpack_unreachable_expiration)2681return0;2682if(mtime > unpack_unreachable_expiration)2683return0;2684if(oid_array_lookup(&recent_objects, oid) >=0)2685return0;2686return1;2687}26882689static voidloosen_unused_packed_objects(struct rev_info *revs)2690{2691struct packed_git *p;2692uint32_t i;2693struct object_id oid;26942695for(p = packed_git; p; p = p->next) {2696if(!p->pack_local || p->pack_keep)2697continue;26982699if(open_pack_index(p))2700die("cannot open pack index");27012702for(i =0; i < p->num_objects; i++) {2703nth_packed_object_oid(&oid, p, i);2704if(!packlist_find(&to_pack, oid.hash, NULL) &&2705!has_sha1_pack_kept_or_nonlocal(oid.hash) &&2706!loosened_object_can_be_discarded(&oid, p->mtime))2707if(force_object_loose(oid.hash, p->mtime))2708die("unable to force loose object");2709}2710}2711}27122713/*2714 * This tracks any options which pack-reuse code expects to be on, or which a2715 * reader of the pack might not understand, and which would therefore prevent2716 * blind reuse of what we have on disk.2717 */2718static intpack_options_allow_reuse(void)2719{2720return pack_to_stdout && allow_ofs_delta;2721}27222723static intget_object_list_from_bitmap(struct rev_info *revs)2724{2725if(prepare_bitmap_walk(revs) <0)2726return-1;27272728if(pack_options_allow_reuse() &&2729!reuse_partial_packfile_from_bitmap(2730&reuse_packfile,2731&reuse_packfile_objects,2732&reuse_packfile_offset)) {2733assert(reuse_packfile_objects);2734 nr_result += reuse_packfile_objects;2735display_progress(progress_state, nr_result);2736}27372738traverse_bitmap_commit_list(&add_object_entry_from_bitmap);2739return0;2740}27412742static voidrecord_recent_object(struct object *obj,2743const char*name,2744void*data)2745{2746oid_array_append(&recent_objects, &obj->oid);2747}27482749static voidrecord_recent_commit(struct commit *commit,void*data)2750{2751oid_array_append(&recent_objects, &commit->object.oid);2752}27532754static voidget_object_list(int ac,const char**av)2755{2756struct rev_info revs;2757char line[1000];2758int flags =0;27592760init_revisions(&revs, NULL);2761 save_commit_buffer =0;2762setup_revisions(ac, av, &revs, NULL);27632764/* make sure shallows are read */2765is_repository_shallow();27662767while(fgets(line,sizeof(line), stdin) != NULL) {2768int len =strlen(line);2769if(len && line[len -1] =='\n')2770 line[--len] =0;2771if(!len)2772break;2773if(*line =='-') {2774if(!strcmp(line,"--not")) {2775 flags ^= UNINTERESTING;2776 write_bitmap_index =0;2777continue;2778}2779if(starts_with(line,"--shallow ")) {2780unsigned char sha1[20];2781if(get_sha1_hex(line +10, sha1))2782die("not an SHA-1 '%s'", line +10);2783register_shallow(sha1);2784 use_bitmap_index =0;2785continue;2786}2787die("not a rev '%s'", line);2788}2789if(handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))2790die("bad revision '%s'", line);2791}27922793if(use_bitmap_index && !get_object_list_from_bitmap(&revs))2794return;27952796if(prepare_revision_walk(&revs))2797die("revision walk setup failed");2798mark_edges_uninteresting(&revs, show_edge);2799traverse_commit_list(&revs, show_commit, show_object, NULL);28002801if(unpack_unreachable_expiration) {2802 revs.ignore_missing_links =1;2803if(add_unseen_recent_objects_to_traversal(&revs,2804 unpack_unreachable_expiration))2805die("unable to add recent objects");2806if(prepare_revision_walk(&revs))2807die("revision walk setup failed");2808traverse_commit_list(&revs, record_recent_commit,2809 record_recent_object, NULL);2810}28112812if(keep_unreachable)2813add_objects_in_unpacked_packs(&revs);2814if(pack_loose_unreachable)2815add_unreachable_loose_objects();2816if(unpack_unreachable)2817loosen_unused_packed_objects(&revs);28182819oid_array_clear(&recent_objects);2820}28212822static intoption_parse_index_version(const struct option *opt,2823const char*arg,int unset)2824{2825char*c;2826const char*val = arg;2827 pack_idx_opts.version =strtoul(val, &c,10);2828if(pack_idx_opts.version >2)2829die(_("unsupported index version%s"), val);2830if(*c ==','&& c[1])2831 pack_idx_opts.off32_limit =strtoul(c+1, &c,0);2832if(*c || pack_idx_opts.off32_limit &0x80000000)2833die(_("bad index version '%s'"), val);2834return0;2835}28362837static intoption_parse_unpack_unreachable(const struct option *opt,2838const char*arg,int unset)2839{2840if(unset) {2841 unpack_unreachable =0;2842 unpack_unreachable_expiration =0;2843}2844else{2845 unpack_unreachable =1;2846if(arg)2847 unpack_unreachable_expiration =approxidate(arg);2848}2849return0;2850}28512852intcmd_pack_objects(int argc,const char**argv,const char*prefix)2853{2854int use_internal_rev_list =0;2855int thin =0;2856int shallow =0;2857int all_progress_implied =0;2858struct argv_array rp = ARGV_ARRAY_INIT;2859int rev_list_unpacked =0, rev_list_all =0, rev_list_reflog =0;2860int rev_list_index =0;2861struct option pack_objects_options[] = {2862OPT_SET_INT('q',"quiet", &progress,2863N_("do not show progress meter"),0),2864OPT_SET_INT(0,"progress", &progress,2865N_("show progress meter"),1),2866OPT_SET_INT(0,"all-progress", &progress,2867N_("show progress meter during object writing phase"),2),2868OPT_BOOL(0,"all-progress-implied",2869&all_progress_implied,2870N_("similar to --all-progress when progress meter is shown")),2871{ OPTION_CALLBACK,0,"index-version", NULL,N_("version[,offset]"),2872N_("write the pack index file in the specified idx format version"),28730, option_parse_index_version },2874OPT_MAGNITUDE(0,"max-pack-size", &pack_size_limit,2875N_("maximum size of each output pack file")),2876OPT_BOOL(0,"local", &local,2877N_("ignore borrowed objects from alternate object store")),2878OPT_BOOL(0,"incremental", &incremental,2879N_("ignore packed objects")),2880OPT_INTEGER(0,"window", &window,2881N_("limit pack window by objects")),2882OPT_MAGNITUDE(0,"window-memory", &window_memory_limit,2883N_("limit pack window by memory in addition to object limit")),2884OPT_INTEGER(0,"depth", &depth,2885N_("maximum length of delta chain allowed in the resulting pack")),2886OPT_BOOL(0,"reuse-delta", &reuse_delta,2887N_("reuse existing deltas")),2888OPT_BOOL(0,"reuse-object", &reuse_object,2889N_("reuse existing objects")),2890OPT_BOOL(0,"delta-base-offset", &allow_ofs_delta,2891N_("use OFS_DELTA objects")),2892OPT_INTEGER(0,"threads", &delta_search_threads,2893N_("use threads when searching for best delta matches")),2894OPT_BOOL(0,"non-empty", &non_empty,2895N_("do not create an empty pack output")),2896OPT_BOOL(0,"revs", &use_internal_rev_list,2897N_("read revision arguments from standard input")),2898{ OPTION_SET_INT,0,"unpacked", &rev_list_unpacked, NULL,2899N_("limit the objects to those that are not yet packed"),2900 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2901{ OPTION_SET_INT,0,"all", &rev_list_all, NULL,2902N_("include objects reachable from any reference"),2903 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2904{ OPTION_SET_INT,0,"reflog", &rev_list_reflog, NULL,2905N_("include objects referred by reflog entries"),2906 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2907{ OPTION_SET_INT,0,"indexed-objects", &rev_list_index, NULL,2908N_("include objects referred to by the index"),2909 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2910OPT_BOOL(0,"stdout", &pack_to_stdout,2911N_("output pack to stdout")),2912OPT_BOOL(0,"include-tag", &include_tag,2913N_("include tag objects that refer to objects to be packed")),2914OPT_BOOL(0,"keep-unreachable", &keep_unreachable,2915N_("keep unreachable objects")),2916OPT_BOOL(0,"pack-loose-unreachable", &pack_loose_unreachable,2917N_("pack loose unreachable objects")),2918{ OPTION_CALLBACK,0,"unpack-unreachable", NULL,N_("time"),2919N_("unpack unreachable objects newer than <time>"),2920 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },2921OPT_BOOL(0,"thin", &thin,2922N_("create thin packs")),2923OPT_BOOL(0,"shallow", &shallow,2924N_("create packs suitable for shallow fetches")),2925OPT_BOOL(0,"honor-pack-keep", &ignore_packed_keep,2926N_("ignore packs that have companion .keep file")),2927OPT_INTEGER(0,"compression", &pack_compression_level,2928N_("pack compression level")),2929OPT_SET_INT(0,"keep-true-parents", &grafts_replace_parents,2930N_("do not hide commits by grafts"),0),2931OPT_BOOL(0,"use-bitmap-index", &use_bitmap_index,2932N_("use a bitmap index if available to speed up counting objects")),2933OPT_BOOL(0,"write-bitmap-index", &write_bitmap_index,2934N_("write a bitmap index together with the pack index")),2935OPT_END(),2936};29372938 check_replace_refs =0;29392940reset_pack_idx_option(&pack_idx_opts);2941git_config(git_pack_config, NULL);29422943 progress =isatty(2);2944 argc =parse_options(argc, argv, prefix, pack_objects_options,2945 pack_usage,0);29462947if(argc) {2948 base_name = argv[0];2949 argc--;2950}2951if(pack_to_stdout != !base_name || argc)2952usage_with_options(pack_usage, pack_objects_options);29532954argv_array_push(&rp,"pack-objects");2955if(thin) {2956 use_internal_rev_list =1;2957argv_array_push(&rp, shallow2958?"--objects-edge-aggressive"2959:"--objects-edge");2960}else2961argv_array_push(&rp,"--objects");29622963if(rev_list_all) {2964 use_internal_rev_list =1;2965argv_array_push(&rp,"--all");2966}2967if(rev_list_reflog) {2968 use_internal_rev_list =1;2969argv_array_push(&rp,"--reflog");2970}2971if(rev_list_index) {2972 use_internal_rev_list =1;2973argv_array_push(&rp,"--indexed-objects");2974}2975if(rev_list_unpacked) {2976 use_internal_rev_list =1;2977argv_array_push(&rp,"--unpacked");2978}29792980if(!reuse_object)2981 reuse_delta =0;2982if(pack_compression_level == -1)2983 pack_compression_level = Z_DEFAULT_COMPRESSION;2984else if(pack_compression_level <0|| pack_compression_level > Z_BEST_COMPRESSION)2985die("bad pack compression level%d", pack_compression_level);29862987if(!delta_search_threads)/* --threads=0 means autodetect */2988 delta_search_threads =online_cpus();29892990#ifdef NO_PTHREADS2991if(delta_search_threads !=1)2992warning("no threads support, ignoring --threads");2993#endif2994if(!pack_to_stdout && !pack_size_limit)2995 pack_size_limit = pack_size_limit_cfg;2996if(pack_to_stdout && pack_size_limit)2997die("--max-pack-size cannot be used to build a pack for transfer.");2998if(pack_size_limit && pack_size_limit <1024*1024) {2999warning("minimum pack size limit is 1 MiB");3000 pack_size_limit =1024*1024;3001}30023003if(!pack_to_stdout && thin)3004die("--thin cannot be used to build an indexable pack.");30053006if(keep_unreachable && unpack_unreachable)3007die("--keep-unreachable and --unpack-unreachable are incompatible.");3008if(!rev_list_all || !rev_list_reflog || !rev_list_index)3009 unpack_unreachable_expiration =0;30103011/*3012 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3013 *3014 * - to produce good pack (with bitmap index not-yet-packed objects are3015 * packed in suboptimal order).3016 *3017 * - to use more robust pack-generation codepath (avoiding possible3018 * bugs in bitmap code and possible bitmap index corruption).3019 */3020if(!pack_to_stdout)3021 use_bitmap_index_default =0;30223023if(use_bitmap_index <0)3024 use_bitmap_index = use_bitmap_index_default;30253026/* "hard" reasons not to use bitmaps; these just won't work at all */3027if(!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) ||is_repository_shallow())3028 use_bitmap_index =0;30293030if(pack_to_stdout || !rev_list_all)3031 write_bitmap_index =0;30323033if(progress && all_progress_implied)3034 progress =2;30353036prepare_packed_git();3037if(ignore_packed_keep) {3038struct packed_git *p;3039for(p = packed_git; p; p = p->next)3040if(p->pack_local && p->pack_keep)3041break;3042if(!p)/* no keep-able packs found */3043 ignore_packed_keep =0;3044}3045if(local) {3046/*3047 * unlike ignore_packed_keep above, we do not want to3048 * unset "local" based on looking at packs, as it3049 * also covers non-local objects3050 */3051struct packed_git *p;3052for(p = packed_git; p; p = p->next) {3053if(!p->pack_local) {3054 have_non_local_packs =1;3055break;3056}3057}3058}30593060if(progress)3061 progress_state =start_progress(_("Counting objects"),0);3062if(!use_internal_rev_list)3063read_object_list_from_stdin();3064else{3065get_object_list(rp.argc, rp.argv);3066argv_array_clear(&rp);3067}3068cleanup_preferred_base();3069if(include_tag && nr_result)3070for_each_ref(add_ref_tag, NULL);3071stop_progress(&progress_state);30723073if(non_empty && !nr_result)3074return0;3075if(nr_result)3076prepare_pack(window, depth);3077write_pack_file();3078if(progress)3079fprintf(stderr,"Total %"PRIu32" (delta %"PRIu32"),"3080" reused %"PRIu32" (delta %"PRIu32")\n",3081 written, written_delta, reused, reused_delta);3082return0;3083}