1#include"builtin.h" 2#include"cache.h" 3#include"attr.h" 4#include"object.h" 5#include"blob.h" 6#include"commit.h" 7#include"tag.h" 8#include"tree.h" 9#include"delta.h" 10#include"pack.h" 11#include"pack-revindex.h" 12#include"csum-file.h" 13#include"tree-walk.h" 14#include"diff.h" 15#include"revision.h" 16#include"list-objects.h" 17#include"pack-objects.h" 18#include"progress.h" 19#include"refs.h" 20#include"streaming.h" 21#include"thread-utils.h" 22#include"pack-bitmap.h" 23#include"reachable.h" 24#include"sha1-array.h" 25#include"argv-array.h" 26#include"mru.h" 27 28static const char*pack_usage[] = { 29N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 30N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 31 NULL 32}; 33 34/* 35 * Objects we are going to pack are collected in the `to_pack` structure. 36 * It contains an array (dynamically expanded) of the object data, and a map 37 * that can resolve SHA1s to their position in the array. 38 */ 39static struct packing_data to_pack; 40 41static struct pack_idx_entry **written_list; 42static uint32_t nr_result, nr_written; 43 44static int non_empty; 45static int reuse_delta =1, reuse_object =1; 46static int keep_unreachable, unpack_unreachable, include_tag; 47static unsigned long unpack_unreachable_expiration; 48static int pack_loose_unreachable; 49static int local; 50static int have_non_local_packs; 51static int incremental; 52static int ignore_packed_keep; 53static int allow_ofs_delta; 54static struct pack_idx_option pack_idx_opts; 55static const char*base_name; 56static int progress =1; 57static int window =10; 58static unsigned long pack_size_limit; 59static int depth =50; 60static int delta_search_threads; 61static int pack_to_stdout; 62static int num_preferred_base; 63static struct progress *progress_state; 64 65static struct packed_git *reuse_packfile; 66static uint32_t reuse_packfile_objects; 67static off_t reuse_packfile_offset; 68 69static int use_bitmap_index_default =1; 70static int use_bitmap_index = -1; 71static int write_bitmap_index; 72static uint16_t write_bitmap_options; 73 74static unsigned long delta_cache_size =0; 75static unsigned long max_delta_cache_size =256*1024*1024; 76static unsigned long cache_max_small_delta_size =1000; 77 78static unsigned long window_memory_limit =0; 79 80/* 81 * stats 82 */ 83static uint32_t written, written_delta; 84static uint32_t reused, reused_delta; 85 86/* 87 * Indexed commits 88 */ 89static struct commit **indexed_commits; 90static unsigned int indexed_commits_nr; 91static unsigned int indexed_commits_alloc; 92 93static voidindex_commit_for_bitmap(struct commit *commit) 94{ 95if(indexed_commits_nr >= indexed_commits_alloc) { 96 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 97REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 98} 99 100 indexed_commits[indexed_commits_nr++] = commit; 101} 102 103static void*get_delta(struct object_entry *entry) 104{ 105unsigned long size, base_size, delta_size; 106void*buf, *base_buf, *delta_buf; 107enum object_type type; 108 109 buf =read_sha1_file(entry->idx.sha1, &type, &size); 110if(!buf) 111die("unable to read%s",sha1_to_hex(entry->idx.sha1)); 112 base_buf =read_sha1_file(entry->delta->idx.sha1, &type, &base_size); 113if(!base_buf) 114die("unable to read%s",sha1_to_hex(entry->delta->idx.sha1)); 115 delta_buf =diff_delta(base_buf, base_size, 116 buf, size, &delta_size,0); 117if(!delta_buf || delta_size != entry->delta_size) 118die("delta size changed"); 119free(buf); 120free(base_buf); 121return delta_buf; 122} 123 124static unsigned longdo_compress(void**pptr,unsigned long size) 125{ 126 git_zstream stream; 127void*in, *out; 128unsigned long maxsize; 129 130git_deflate_init(&stream, pack_compression_level); 131 maxsize =git_deflate_bound(&stream, size); 132 133 in = *pptr; 134 out =xmalloc(maxsize); 135*pptr = out; 136 137 stream.next_in = in; 138 stream.avail_in = size; 139 stream.next_out = out; 140 stream.avail_out = maxsize; 141while(git_deflate(&stream, Z_FINISH) == Z_OK) 142;/* nothing */ 143git_deflate_end(&stream); 144 145free(in); 146return stream.total_out; 147} 148 149static unsigned longwrite_large_blob_data(struct git_istream *st,struct sha1file *f, 150const unsigned char*sha1) 151{ 152 git_zstream stream; 153unsigned char ibuf[1024*16]; 154unsigned char obuf[1024*16]; 155unsigned long olen =0; 156 157git_deflate_init(&stream, pack_compression_level); 158 159for(;;) { 160 ssize_t readlen; 161int zret = Z_OK; 162 readlen =read_istream(st, ibuf,sizeof(ibuf)); 163if(readlen == -1) 164die(_("unable to read%s"),sha1_to_hex(sha1)); 165 166 stream.next_in = ibuf; 167 stream.avail_in = readlen; 168while((stream.avail_in || readlen ==0) && 169(zret == Z_OK || zret == Z_BUF_ERROR)) { 170 stream.next_out = obuf; 171 stream.avail_out =sizeof(obuf); 172 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 173sha1write(f, obuf, stream.next_out - obuf); 174 olen += stream.next_out - obuf; 175} 176if(stream.avail_in) 177die(_("deflate error (%d)"), zret); 178if(readlen ==0) { 179if(zret != Z_STREAM_END) 180die(_("deflate error (%d)"), zret); 181break; 182} 183} 184git_deflate_end(&stream); 185return olen; 186} 187 188/* 189 * we are going to reuse the existing object data as is. make 190 * sure it is not corrupt. 191 */ 192static intcheck_pack_inflate(struct packed_git *p, 193struct pack_window **w_curs, 194 off_t offset, 195 off_t len, 196unsigned long expect) 197{ 198 git_zstream stream; 199unsigned char fakebuf[4096], *in; 200int st; 201 202memset(&stream,0,sizeof(stream)); 203git_inflate_init(&stream); 204do{ 205 in =use_pack(p, w_curs, offset, &stream.avail_in); 206 stream.next_in = in; 207 stream.next_out = fakebuf; 208 stream.avail_out =sizeof(fakebuf); 209 st =git_inflate(&stream, Z_FINISH); 210 offset += stream.next_in - in; 211}while(st == Z_OK || st == Z_BUF_ERROR); 212git_inflate_end(&stream); 213return(st == Z_STREAM_END && 214 stream.total_out == expect && 215 stream.total_in == len) ?0: -1; 216} 217 218static voidcopy_pack_data(struct sha1file *f, 219struct packed_git *p, 220struct pack_window **w_curs, 221 off_t offset, 222 off_t len) 223{ 224unsigned char*in; 225unsigned long avail; 226 227while(len) { 228 in =use_pack(p, w_curs, offset, &avail); 229if(avail > len) 230 avail = (unsigned long)len; 231sha1write(f, in, avail); 232 offset += avail; 233 len -= avail; 234} 235} 236 237/* Return 0 if we will bust the pack-size limit */ 238static unsigned longwrite_no_reuse_object(struct sha1file *f,struct object_entry *entry, 239unsigned long limit,int usable_delta) 240{ 241unsigned long size, datalen; 242unsigned char header[10], dheader[10]; 243unsigned hdrlen; 244enum object_type type; 245void*buf; 246struct git_istream *st = NULL; 247 248if(!usable_delta) { 249if(entry->type == OBJ_BLOB && 250 entry->size > big_file_threshold && 251(st =open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL) 252 buf = NULL; 253else{ 254 buf =read_sha1_file(entry->idx.sha1, &type, &size); 255if(!buf) 256die(_("unable to read%s"),sha1_to_hex(entry->idx.sha1)); 257} 258/* 259 * make sure no cached delta data remains from a 260 * previous attempt before a pack split occurred. 261 */ 262free(entry->delta_data); 263 entry->delta_data = NULL; 264 entry->z_delta_size =0; 265}else if(entry->delta_data) { 266 size = entry->delta_size; 267 buf = entry->delta_data; 268 entry->delta_data = NULL; 269 type = (allow_ofs_delta && entry->delta->idx.offset) ? 270 OBJ_OFS_DELTA : OBJ_REF_DELTA; 271}else{ 272 buf =get_delta(entry); 273 size = entry->delta_size; 274 type = (allow_ofs_delta && entry->delta->idx.offset) ? 275 OBJ_OFS_DELTA : OBJ_REF_DELTA; 276} 277 278if(st)/* large blob case, just assume we don't compress well */ 279 datalen = size; 280else if(entry->z_delta_size) 281 datalen = entry->z_delta_size; 282else 283 datalen =do_compress(&buf, size); 284 285/* 286 * The object header is a byte of 'type' followed by zero or 287 * more bytes of length. 288 */ 289 hdrlen =encode_in_pack_object_header(type, size, header); 290 291if(type == OBJ_OFS_DELTA) { 292/* 293 * Deltas with relative base contain an additional 294 * encoding of the relative offset for the delta 295 * base from this object's position in the pack. 296 */ 297 off_t ofs = entry->idx.offset - entry->delta->idx.offset; 298unsigned pos =sizeof(dheader) -1; 299 dheader[pos] = ofs &127; 300while(ofs >>=7) 301 dheader[--pos] =128| (--ofs &127); 302if(limit && hdrlen +sizeof(dheader) - pos + datalen +20>= limit) { 303if(st) 304close_istream(st); 305free(buf); 306return0; 307} 308sha1write(f, header, hdrlen); 309sha1write(f, dheader + pos,sizeof(dheader) - pos); 310 hdrlen +=sizeof(dheader) - pos; 311}else if(type == OBJ_REF_DELTA) { 312/* 313 * Deltas with a base reference contain 314 * an additional 20 bytes for the base sha1. 315 */ 316if(limit && hdrlen +20+ datalen +20>= limit) { 317if(st) 318close_istream(st); 319free(buf); 320return0; 321} 322sha1write(f, header, hdrlen); 323sha1write(f, entry->delta->idx.sha1,20); 324 hdrlen +=20; 325}else{ 326if(limit && hdrlen + datalen +20>= limit) { 327if(st) 328close_istream(st); 329free(buf); 330return0; 331} 332sha1write(f, header, hdrlen); 333} 334if(st) { 335 datalen =write_large_blob_data(st, f, entry->idx.sha1); 336close_istream(st); 337}else{ 338sha1write(f, buf, datalen); 339free(buf); 340} 341 342return hdrlen + datalen; 343} 344 345/* Return 0 if we will bust the pack-size limit */ 346static off_t write_reuse_object(struct sha1file *f,struct object_entry *entry, 347unsigned long limit,int usable_delta) 348{ 349struct packed_git *p = entry->in_pack; 350struct pack_window *w_curs = NULL; 351struct revindex_entry *revidx; 352 off_t offset; 353enum object_type type = entry->type; 354 off_t datalen; 355unsigned char header[10], dheader[10]; 356unsigned hdrlen; 357 358if(entry->delta) 359 type = (allow_ofs_delta && entry->delta->idx.offset) ? 360 OBJ_OFS_DELTA : OBJ_REF_DELTA; 361 hdrlen =encode_in_pack_object_header(type, entry->size, header); 362 363 offset = entry->in_pack_offset; 364 revidx =find_pack_revindex(p, offset); 365 datalen = revidx[1].offset - offset; 366if(!pack_to_stdout && p->index_version >1&& 367check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 368error("bad packed object CRC for%s",sha1_to_hex(entry->idx.sha1)); 369unuse_pack(&w_curs); 370returnwrite_no_reuse_object(f, entry, limit, usable_delta); 371} 372 373 offset += entry->in_pack_header_size; 374 datalen -= entry->in_pack_header_size; 375 376if(!pack_to_stdout && p->index_version ==1&& 377check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) { 378error("corrupt packed object for%s",sha1_to_hex(entry->idx.sha1)); 379unuse_pack(&w_curs); 380returnwrite_no_reuse_object(f, entry, limit, usable_delta); 381} 382 383if(type == OBJ_OFS_DELTA) { 384 off_t ofs = entry->idx.offset - entry->delta->idx.offset; 385unsigned pos =sizeof(dheader) -1; 386 dheader[pos] = ofs &127; 387while(ofs >>=7) 388 dheader[--pos] =128| (--ofs &127); 389if(limit && hdrlen +sizeof(dheader) - pos + datalen +20>= limit) { 390unuse_pack(&w_curs); 391return0; 392} 393sha1write(f, header, hdrlen); 394sha1write(f, dheader + pos,sizeof(dheader) - pos); 395 hdrlen +=sizeof(dheader) - pos; 396 reused_delta++; 397}else if(type == OBJ_REF_DELTA) { 398if(limit && hdrlen +20+ datalen +20>= limit) { 399unuse_pack(&w_curs); 400return0; 401} 402sha1write(f, header, hdrlen); 403sha1write(f, entry->delta->idx.sha1,20); 404 hdrlen +=20; 405 reused_delta++; 406}else{ 407if(limit && hdrlen + datalen +20>= limit) { 408unuse_pack(&w_curs); 409return0; 410} 411sha1write(f, header, hdrlen); 412} 413copy_pack_data(f, p, &w_curs, offset, datalen); 414unuse_pack(&w_curs); 415 reused++; 416return hdrlen + datalen; 417} 418 419/* Return 0 if we will bust the pack-size limit */ 420static off_t write_object(struct sha1file *f, 421struct object_entry *entry, 422 off_t write_offset) 423{ 424unsigned long limit; 425 off_t len; 426int usable_delta, to_reuse; 427 428if(!pack_to_stdout) 429crc32_begin(f); 430 431/* apply size limit if limited packsize and not first object */ 432if(!pack_size_limit || !nr_written) 433 limit =0; 434else if(pack_size_limit <= write_offset) 435/* 436 * the earlier object did not fit the limit; avoid 437 * mistaking this with unlimited (i.e. limit = 0). 438 */ 439 limit =1; 440else 441 limit = pack_size_limit - write_offset; 442 443if(!entry->delta) 444 usable_delta =0;/* no delta */ 445else if(!pack_size_limit) 446 usable_delta =1;/* unlimited packfile */ 447else if(entry->delta->idx.offset == (off_t)-1) 448 usable_delta =0;/* base was written to another pack */ 449else if(entry->delta->idx.offset) 450 usable_delta =1;/* base already exists in this pack */ 451else 452 usable_delta =0;/* base could end up in another pack */ 453 454if(!reuse_object) 455 to_reuse =0;/* explicit */ 456else if(!entry->in_pack) 457 to_reuse =0;/* can't reuse what we don't have */ 458else if(entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA) 459/* check_object() decided it for us ... */ 460 to_reuse = usable_delta; 461/* ... but pack split may override that */ 462else if(entry->type != entry->in_pack_type) 463 to_reuse =0;/* pack has delta which is unusable */ 464else if(entry->delta) 465 to_reuse =0;/* we want to pack afresh */ 466else 467 to_reuse =1;/* we have it in-pack undeltified, 468 * and we do not need to deltify it. 469 */ 470 471if(!to_reuse) 472 len =write_no_reuse_object(f, entry, limit, usable_delta); 473else 474 len =write_reuse_object(f, entry, limit, usable_delta); 475if(!len) 476return0; 477 478if(usable_delta) 479 written_delta++; 480 written++; 481if(!pack_to_stdout) 482 entry->idx.crc32 =crc32_end(f); 483return len; 484} 485 486enum write_one_status { 487 WRITE_ONE_SKIP = -1,/* already written */ 488 WRITE_ONE_BREAK =0,/* writing this will bust the limit; not written */ 489 WRITE_ONE_WRITTEN =1,/* normal */ 490 WRITE_ONE_RECURSIVE =2/* already scheduled to be written */ 491}; 492 493static enum write_one_status write_one(struct sha1file *f, 494struct object_entry *e, 495 off_t *offset) 496{ 497 off_t size; 498int recursing; 499 500/* 501 * we set offset to 1 (which is an impossible value) to mark 502 * the fact that this object is involved in "write its base 503 * first before writing a deltified object" recursion. 504 */ 505 recursing = (e->idx.offset ==1); 506if(recursing) { 507warning("recursive delta detected for object%s", 508sha1_to_hex(e->idx.sha1)); 509return WRITE_ONE_RECURSIVE; 510}else if(e->idx.offset || e->preferred_base) { 511/* offset is non zero if object is written already. */ 512return WRITE_ONE_SKIP; 513} 514 515/* if we are deltified, write out base object first. */ 516if(e->delta) { 517 e->idx.offset =1;/* now recurse */ 518switch(write_one(f, e->delta, offset)) { 519case WRITE_ONE_RECURSIVE: 520/* we cannot depend on this one */ 521 e->delta = NULL; 522break; 523default: 524break; 525case WRITE_ONE_BREAK: 526 e->idx.offset = recursing; 527return WRITE_ONE_BREAK; 528} 529} 530 531 e->idx.offset = *offset; 532 size =write_object(f, e, *offset); 533if(!size) { 534 e->idx.offset = recursing; 535return WRITE_ONE_BREAK; 536} 537 written_list[nr_written++] = &e->idx; 538 539/* make sure off_t is sufficiently large not to wrap */ 540if(signed_add_overflows(*offset, size)) 541die("pack too large for current definition of off_t"); 542*offset += size; 543return WRITE_ONE_WRITTEN; 544} 545 546static intmark_tagged(const char*path,const struct object_id *oid,int flag, 547void*cb_data) 548{ 549unsigned char peeled[20]; 550struct object_entry *entry =packlist_find(&to_pack, oid->hash, NULL); 551 552if(entry) 553 entry->tagged =1; 554if(!peel_ref(path, peeled)) { 555 entry =packlist_find(&to_pack, peeled, NULL); 556if(entry) 557 entry->tagged =1; 558} 559return0; 560} 561 562staticinlinevoidadd_to_write_order(struct object_entry **wo, 563unsigned int*endp, 564struct object_entry *e) 565{ 566if(e->filled) 567return; 568 wo[(*endp)++] = e; 569 e->filled =1; 570} 571 572static voidadd_descendants_to_write_order(struct object_entry **wo, 573unsigned int*endp, 574struct object_entry *e) 575{ 576int add_to_order =1; 577while(e) { 578if(add_to_order) { 579struct object_entry *s; 580/* add this node... */ 581add_to_write_order(wo, endp, e); 582/* all its siblings... */ 583for(s = e->delta_sibling; s; s = s->delta_sibling) { 584add_to_write_order(wo, endp, s); 585} 586} 587/* drop down a level to add left subtree nodes if possible */ 588if(e->delta_child) { 589 add_to_order =1; 590 e = e->delta_child; 591}else{ 592 add_to_order =0; 593/* our sibling might have some children, it is next */ 594if(e->delta_sibling) { 595 e = e->delta_sibling; 596continue; 597} 598/* go back to our parent node */ 599 e = e->delta; 600while(e && !e->delta_sibling) { 601/* we're on the right side of a subtree, keep 602 * going up until we can go right again */ 603 e = e->delta; 604} 605if(!e) { 606/* done- we hit our original root node */ 607return; 608} 609/* pass it off to sibling at this level */ 610 e = e->delta_sibling; 611} 612}; 613} 614 615static voidadd_family_to_write_order(struct object_entry **wo, 616unsigned int*endp, 617struct object_entry *e) 618{ 619struct object_entry *root; 620 621for(root = e; root->delta; root = root->delta) 622;/* nothing */ 623add_descendants_to_write_order(wo, endp, root); 624} 625 626static struct object_entry **compute_write_order(void) 627{ 628unsigned int i, wo_end, last_untagged; 629 630struct object_entry **wo; 631struct object_entry *objects = to_pack.objects; 632 633for(i =0; i < to_pack.nr_objects; i++) { 634 objects[i].tagged =0; 635 objects[i].filled =0; 636 objects[i].delta_child = NULL; 637 objects[i].delta_sibling = NULL; 638} 639 640/* 641 * Fully connect delta_child/delta_sibling network. 642 * Make sure delta_sibling is sorted in the original 643 * recency order. 644 */ 645for(i = to_pack.nr_objects; i >0;) { 646struct object_entry *e = &objects[--i]; 647if(!e->delta) 648continue; 649/* Mark me as the first child */ 650 e->delta_sibling = e->delta->delta_child; 651 e->delta->delta_child = e; 652} 653 654/* 655 * Mark objects that are at the tip of tags. 656 */ 657for_each_tag_ref(mark_tagged, NULL); 658 659/* 660 * Give the objects in the original recency order until 661 * we see a tagged tip. 662 */ 663ALLOC_ARRAY(wo, to_pack.nr_objects); 664for(i = wo_end =0; i < to_pack.nr_objects; i++) { 665if(objects[i].tagged) 666break; 667add_to_write_order(wo, &wo_end, &objects[i]); 668} 669 last_untagged = i; 670 671/* 672 * Then fill all the tagged tips. 673 */ 674for(; i < to_pack.nr_objects; i++) { 675if(objects[i].tagged) 676add_to_write_order(wo, &wo_end, &objects[i]); 677} 678 679/* 680 * And then all remaining commits and tags. 681 */ 682for(i = last_untagged; i < to_pack.nr_objects; i++) { 683if(objects[i].type != OBJ_COMMIT && 684 objects[i].type != OBJ_TAG) 685continue; 686add_to_write_order(wo, &wo_end, &objects[i]); 687} 688 689/* 690 * And then all the trees. 691 */ 692for(i = last_untagged; i < to_pack.nr_objects; i++) { 693if(objects[i].type != OBJ_TREE) 694continue; 695add_to_write_order(wo, &wo_end, &objects[i]); 696} 697 698/* 699 * Finally all the rest in really tight order 700 */ 701for(i = last_untagged; i < to_pack.nr_objects; i++) { 702if(!objects[i].filled) 703add_family_to_write_order(wo, &wo_end, &objects[i]); 704} 705 706if(wo_end != to_pack.nr_objects) 707die("ordered%uobjects, expected %"PRIu32, wo_end, to_pack.nr_objects); 708 709return wo; 710} 711 712static off_t write_reused_pack(struct sha1file *f) 713{ 714unsigned char buffer[8192]; 715 off_t to_write, total; 716int fd; 717 718if(!is_pack_valid(reuse_packfile)) 719die("packfile is invalid:%s", reuse_packfile->pack_name); 720 721 fd =git_open(reuse_packfile->pack_name); 722if(fd <0) 723die_errno("unable to open packfile for reuse:%s", 724 reuse_packfile->pack_name); 725 726if(lseek(fd,sizeof(struct pack_header), SEEK_SET) == -1) 727die_errno("unable to seek in reused packfile"); 728 729if(reuse_packfile_offset <0) 730 reuse_packfile_offset = reuse_packfile->pack_size -20; 731 732 total = to_write = reuse_packfile_offset -sizeof(struct pack_header); 733 734while(to_write) { 735int read_pack =xread(fd, buffer,sizeof(buffer)); 736 737if(read_pack <=0) 738die_errno("unable to read from reused packfile"); 739 740if(read_pack > to_write) 741 read_pack = to_write; 742 743sha1write(f, buffer, read_pack); 744 to_write -= read_pack; 745 746/* 747 * We don't know the actual number of objects written, 748 * only how many bytes written, how many bytes total, and 749 * how many objects total. So we can fake it by pretending all 750 * objects we are writing are the same size. This gives us a 751 * smooth progress meter, and at the end it matches the true 752 * answer. 753 */ 754 written = reuse_packfile_objects * 755(((double)(total - to_write)) / total); 756display_progress(progress_state, written); 757} 758 759close(fd); 760 written = reuse_packfile_objects; 761display_progress(progress_state, written); 762return reuse_packfile_offset -sizeof(struct pack_header); 763} 764 765static const char no_split_warning[] =N_( 766"disabling bitmap writing, packs are split due to pack.packSizeLimit" 767); 768 769static voidwrite_pack_file(void) 770{ 771uint32_t i =0, j; 772struct sha1file *f; 773 off_t offset; 774uint32_t nr_remaining = nr_result; 775time_t last_mtime =0; 776struct object_entry **write_order; 777 778if(progress > pack_to_stdout) 779 progress_state =start_progress(_("Writing objects"), nr_result); 780ALLOC_ARRAY(written_list, to_pack.nr_objects); 781 write_order =compute_write_order(); 782 783do{ 784unsigned char sha1[20]; 785char*pack_tmp_name = NULL; 786 787if(pack_to_stdout) 788 f =sha1fd_throughput(1,"<stdout>", progress_state); 789else 790 f =create_tmp_packfile(&pack_tmp_name); 791 792 offset =write_pack_header(f, nr_remaining); 793 794if(reuse_packfile) { 795 off_t packfile_size; 796assert(pack_to_stdout); 797 798 packfile_size =write_reused_pack(f); 799 offset += packfile_size; 800} 801 802 nr_written =0; 803for(; i < to_pack.nr_objects; i++) { 804struct object_entry *e = write_order[i]; 805if(write_one(f, e, &offset) == WRITE_ONE_BREAK) 806break; 807display_progress(progress_state, written); 808} 809 810/* 811 * Did we write the wrong # entries in the header? 812 * If so, rewrite it like in fast-import 813 */ 814if(pack_to_stdout) { 815sha1close(f, sha1, CSUM_CLOSE); 816}else if(nr_written == nr_remaining) { 817sha1close(f, sha1, CSUM_FSYNC); 818}else{ 819int fd =sha1close(f, sha1,0); 820fixup_pack_header_footer(fd, sha1, pack_tmp_name, 821 nr_written, sha1, offset); 822close(fd); 823if(write_bitmap_index) { 824warning(_(no_split_warning)); 825 write_bitmap_index =0; 826} 827} 828 829if(!pack_to_stdout) { 830struct stat st; 831struct strbuf tmpname = STRBUF_INIT; 832 833/* 834 * Packs are runtime accessed in their mtime 835 * order since newer packs are more likely to contain 836 * younger objects. So if we are creating multiple 837 * packs then we should modify the mtime of later ones 838 * to preserve this property. 839 */ 840if(stat(pack_tmp_name, &st) <0) { 841warning_errno("failed to stat%s", pack_tmp_name); 842}else if(!last_mtime) { 843 last_mtime = st.st_mtime; 844}else{ 845struct utimbuf utb; 846 utb.actime = st.st_atime; 847 utb.modtime = --last_mtime; 848if(utime(pack_tmp_name, &utb) <0) 849warning_errno("failed utime() on%s", pack_tmp_name); 850} 851 852strbuf_addf(&tmpname,"%s-", base_name); 853 854if(write_bitmap_index) { 855bitmap_writer_set_checksum(sha1); 856bitmap_writer_build_type_index(written_list, nr_written); 857} 858 859finish_tmp_packfile(&tmpname, pack_tmp_name, 860 written_list, nr_written, 861&pack_idx_opts, sha1); 862 863if(write_bitmap_index) { 864strbuf_addf(&tmpname,"%s.bitmap",sha1_to_hex(sha1)); 865 866stop_progress(&progress_state); 867 868bitmap_writer_show_progress(progress); 869bitmap_writer_reuse_bitmaps(&to_pack); 870bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 871bitmap_writer_build(&to_pack); 872bitmap_writer_finish(written_list, nr_written, 873 tmpname.buf, write_bitmap_options); 874 write_bitmap_index =0; 875} 876 877strbuf_release(&tmpname); 878free(pack_tmp_name); 879puts(sha1_to_hex(sha1)); 880} 881 882/* mark written objects as written to previous pack */ 883for(j =0; j < nr_written; j++) { 884 written_list[j]->offset = (off_t)-1; 885} 886 nr_remaining -= nr_written; 887}while(nr_remaining && i < to_pack.nr_objects); 888 889free(written_list); 890free(write_order); 891stop_progress(&progress_state); 892if(written != nr_result) 893die("wrote %"PRIu32" objects while expecting %"PRIu32, 894 written, nr_result); 895} 896 897static intno_try_delta(const char*path) 898{ 899static struct attr_check *check; 900 901if(!check) 902 check =attr_check_initl("delta", NULL); 903if(git_check_attr(path, check)) 904return0; 905if(ATTR_FALSE(check->items[0].value)) 906return1; 907return0; 908} 909 910/* 911 * When adding an object, check whether we have already added it 912 * to our packing list. If so, we can skip. However, if we are 913 * being asked to excludei t, but the previous mention was to include 914 * it, make sure to adjust its flags and tweak our numbers accordingly. 915 * 916 * As an optimization, we pass out the index position where we would have 917 * found the item, since that saves us from having to look it up again a 918 * few lines later when we want to add the new entry. 919 */ 920static inthave_duplicate_entry(const unsigned char*sha1, 921int exclude, 922uint32_t*index_pos) 923{ 924struct object_entry *entry; 925 926 entry =packlist_find(&to_pack, sha1, index_pos); 927if(!entry) 928return0; 929 930if(exclude) { 931if(!entry->preferred_base) 932 nr_result--; 933 entry->preferred_base =1; 934} 935 936return1; 937} 938 939static intwant_found_object(int exclude,struct packed_git *p) 940{ 941if(exclude) 942return1; 943if(incremental) 944return0; 945 946/* 947 * When asked to do --local (do not include an object that appears in a 948 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 949 * an object that appears in a pack marked with .keep), finding a pack 950 * that matches the criteria is sufficient for us to decide to omit it. 951 * However, even if this pack does not satisfy the criteria, we need to 952 * make sure no copy of this object appears in _any_ pack that makes us 953 * to omit the object, so we need to check all the packs. 954 * 955 * We can however first check whether these options can possible matter; 956 * if they do not matter we know we want the object in generated pack. 957 * Otherwise, we signal "-1" at the end to tell the caller that we do 958 * not know either way, and it needs to check more packs. 959 */ 960if(!ignore_packed_keep && 961(!local || !have_non_local_packs)) 962return1; 963 964if(local && !p->pack_local) 965return0; 966if(ignore_packed_keep && p->pack_local && p->pack_keep) 967return0; 968 969/* we don't know yet; keep looking for more packs */ 970return-1; 971} 972 973/* 974 * Check whether we want the object in the pack (e.g., we do not want 975 * objects found in non-local stores if the "--local" option was used). 976 * 977 * If the caller already knows an existing pack it wants to take the object 978 * from, that is passed in *found_pack and *found_offset; otherwise this 979 * function finds if there is any pack that has the object and returns the pack 980 * and its offset in these variables. 981 */ 982static intwant_object_in_pack(const unsigned char*sha1, 983int exclude, 984struct packed_git **found_pack, 985 off_t *found_offset) 986{ 987struct mru_entry *entry; 988int want; 989 990if(!exclude && local &&has_loose_object_nonlocal(sha1)) 991return0; 992 993/* 994 * If we already know the pack object lives in, start checks from that 995 * pack - in the usual case when neither --local was given nor .keep files 996 * are present we will determine the answer right now. 997 */ 998if(*found_pack) { 999 want =want_found_object(exclude, *found_pack);1000if(want != -1)1001return want;1002}10031004for(entry = packed_git_mru->head; entry; entry = entry->next) {1005struct packed_git *p = entry->item;1006 off_t offset;10071008if(p == *found_pack)1009 offset = *found_offset;1010else1011 offset =find_pack_entry_one(sha1, p);10121013if(offset) {1014if(!*found_pack) {1015if(!is_pack_valid(p))1016continue;1017*found_offset = offset;1018*found_pack = p;1019}1020 want =want_found_object(exclude, p);1021if(!exclude && want >0)1022mru_mark(packed_git_mru, entry);1023if(want != -1)1024return want;1025}1026}10271028return1;1029}10301031static voidcreate_object_entry(const unsigned char*sha1,1032enum object_type type,1033uint32_t hash,1034int exclude,1035int no_try_delta,1036uint32_t index_pos,1037struct packed_git *found_pack,1038 off_t found_offset)1039{1040struct object_entry *entry;10411042 entry =packlist_alloc(&to_pack, sha1, index_pos);1043 entry->hash = hash;1044if(type)1045 entry->type = type;1046if(exclude)1047 entry->preferred_base =1;1048else1049 nr_result++;1050if(found_pack) {1051 entry->in_pack = found_pack;1052 entry->in_pack_offset = found_offset;1053}10541055 entry->no_try_delta = no_try_delta;1056}10571058static const char no_closure_warning[] =N_(1059"disabling bitmap writing, as some objects are not being packed"1060);10611062static intadd_object_entry(const unsigned char*sha1,enum object_type type,1063const char*name,int exclude)1064{1065struct packed_git *found_pack = NULL;1066 off_t found_offset =0;1067uint32_t index_pos;10681069if(have_duplicate_entry(sha1, exclude, &index_pos))1070return0;10711072if(!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {1073/* The pack is missing an object, so it will not have closure */1074if(write_bitmap_index) {1075warning(_(no_closure_warning));1076 write_bitmap_index =0;1077}1078return0;1079}10801081create_object_entry(sha1, type,pack_name_hash(name),1082 exclude, name &&no_try_delta(name),1083 index_pos, found_pack, found_offset);10841085display_progress(progress_state, nr_result);1086return1;1087}10881089static intadd_object_entry_from_bitmap(const unsigned char*sha1,1090enum object_type type,1091int flags,uint32_t name_hash,1092struct packed_git *pack, off_t offset)1093{1094uint32_t index_pos;10951096if(have_duplicate_entry(sha1,0, &index_pos))1097return0;10981099if(!want_object_in_pack(sha1,0, &pack, &offset))1100return0;11011102create_object_entry(sha1, type, name_hash,0,0, index_pos, pack, offset);11031104display_progress(progress_state, nr_result);1105return1;1106}11071108struct pbase_tree_cache {1109unsigned char sha1[20];1110int ref;1111int temporary;1112void*tree_data;1113unsigned long tree_size;1114};11151116static struct pbase_tree_cache *(pbase_tree_cache[256]);1117static intpbase_tree_cache_ix(const unsigned char*sha1)1118{1119return sha1[0] %ARRAY_SIZE(pbase_tree_cache);1120}1121static intpbase_tree_cache_ix_incr(int ix)1122{1123return(ix+1) %ARRAY_SIZE(pbase_tree_cache);1124}11251126static struct pbase_tree {1127struct pbase_tree *next;1128/* This is a phony "cache" entry; we are not1129 * going to evict it or find it through _get()1130 * mechanism -- this is for the toplevel node that1131 * would almost always change with any commit.1132 */1133struct pbase_tree_cache pcache;1134} *pbase_tree;11351136static struct pbase_tree_cache *pbase_tree_get(const unsigned char*sha1)1137{1138struct pbase_tree_cache *ent, *nent;1139void*data;1140unsigned long size;1141enum object_type type;1142int neigh;1143int my_ix =pbase_tree_cache_ix(sha1);1144int available_ix = -1;11451146/* pbase-tree-cache acts as a limited hashtable.1147 * your object will be found at your index or within a few1148 * slots after that slot if it is cached.1149 */1150for(neigh =0; neigh <8; neigh++) {1151 ent = pbase_tree_cache[my_ix];1152if(ent && !hashcmp(ent->sha1, sha1)) {1153 ent->ref++;1154return ent;1155}1156else if(((available_ix <0) && (!ent || !ent->ref)) ||1157((0<= available_ix) &&1158(!ent && pbase_tree_cache[available_ix])))1159 available_ix = my_ix;1160if(!ent)1161break;1162 my_ix =pbase_tree_cache_ix_incr(my_ix);1163}11641165/* Did not find one. Either we got a bogus request or1166 * we need to read and perhaps cache.1167 */1168 data =read_sha1_file(sha1, &type, &size);1169if(!data)1170return NULL;1171if(type != OBJ_TREE) {1172free(data);1173return NULL;1174}11751176/* We need to either cache or return a throwaway copy */11771178if(available_ix <0)1179 ent = NULL;1180else{1181 ent = pbase_tree_cache[available_ix];1182 my_ix = available_ix;1183}11841185if(!ent) {1186 nent =xmalloc(sizeof(*nent));1187 nent->temporary = (available_ix <0);1188}1189else{1190/* evict and reuse */1191free(ent->tree_data);1192 nent = ent;1193}1194hashcpy(nent->sha1, sha1);1195 nent->tree_data = data;1196 nent->tree_size = size;1197 nent->ref =1;1198if(!nent->temporary)1199 pbase_tree_cache[my_ix] = nent;1200return nent;1201}12021203static voidpbase_tree_put(struct pbase_tree_cache *cache)1204{1205if(!cache->temporary) {1206 cache->ref--;1207return;1208}1209free(cache->tree_data);1210free(cache);1211}12121213static intname_cmp_len(const char*name)1214{1215int i;1216for(i =0; name[i] && name[i] !='\n'&& name[i] !='/'; i++)1217;1218return i;1219}12201221static voidadd_pbase_object(struct tree_desc *tree,1222const char*name,1223int cmplen,1224const char*fullname)1225{1226struct name_entry entry;1227int cmp;12281229while(tree_entry(tree,&entry)) {1230if(S_ISGITLINK(entry.mode))1231continue;1232 cmp =tree_entry_len(&entry) != cmplen ?1:1233memcmp(name, entry.path, cmplen);1234if(cmp >0)1235continue;1236if(cmp <0)1237return;1238if(name[cmplen] !='/') {1239add_object_entry(entry.oid->hash,1240object_type(entry.mode),1241 fullname,1);1242return;1243}1244if(S_ISDIR(entry.mode)) {1245struct tree_desc sub;1246struct pbase_tree_cache *tree;1247const char*down = name+cmplen+1;1248int downlen =name_cmp_len(down);12491250 tree =pbase_tree_get(entry.oid->hash);1251if(!tree)1252return;1253init_tree_desc(&sub, tree->tree_data, tree->tree_size);12541255add_pbase_object(&sub, down, downlen, fullname);1256pbase_tree_put(tree);1257}1258}1259}12601261static unsigned*done_pbase_paths;1262static int done_pbase_paths_num;1263static int done_pbase_paths_alloc;1264static intdone_pbase_path_pos(unsigned hash)1265{1266int lo =0;1267int hi = done_pbase_paths_num;1268while(lo < hi) {1269int mi = (hi + lo) /2;1270if(done_pbase_paths[mi] == hash)1271return mi;1272if(done_pbase_paths[mi] < hash)1273 hi = mi;1274else1275 lo = mi +1;1276}1277return-lo-1;1278}12791280static intcheck_pbase_path(unsigned hash)1281{1282int pos = (!done_pbase_paths) ? -1:done_pbase_path_pos(hash);1283if(0<= pos)1284return1;1285 pos = -pos -1;1286ALLOC_GROW(done_pbase_paths,1287 done_pbase_paths_num +1,1288 done_pbase_paths_alloc);1289 done_pbase_paths_num++;1290if(pos < done_pbase_paths_num)1291memmove(done_pbase_paths + pos +1,1292 done_pbase_paths + pos,1293(done_pbase_paths_num - pos -1) *sizeof(unsigned));1294 done_pbase_paths[pos] = hash;1295return0;1296}12971298static voidadd_preferred_base_object(const char*name)1299{1300struct pbase_tree *it;1301int cmplen;1302unsigned hash =pack_name_hash(name);13031304if(!num_preferred_base ||check_pbase_path(hash))1305return;13061307 cmplen =name_cmp_len(name);1308for(it = pbase_tree; it; it = it->next) {1309if(cmplen ==0) {1310add_object_entry(it->pcache.sha1, OBJ_TREE, NULL,1);1311}1312else{1313struct tree_desc tree;1314init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1315add_pbase_object(&tree, name, cmplen, name);1316}1317}1318}13191320static voidadd_preferred_base(unsigned char*sha1)1321{1322struct pbase_tree *it;1323void*data;1324unsigned long size;1325unsigned char tree_sha1[20];13261327if(window <= num_preferred_base++)1328return;13291330 data =read_object_with_reference(sha1, tree_type, &size, tree_sha1);1331if(!data)1332return;13331334for(it = pbase_tree; it; it = it->next) {1335if(!hashcmp(it->pcache.sha1, tree_sha1)) {1336free(data);1337return;1338}1339}13401341 it =xcalloc(1,sizeof(*it));1342 it->next = pbase_tree;1343 pbase_tree = it;13441345hashcpy(it->pcache.sha1, tree_sha1);1346 it->pcache.tree_data = data;1347 it->pcache.tree_size = size;1348}13491350static voidcleanup_preferred_base(void)1351{1352struct pbase_tree *it;1353unsigned i;13541355 it = pbase_tree;1356 pbase_tree = NULL;1357while(it) {1358struct pbase_tree *this= it;1359 it =this->next;1360free(this->pcache.tree_data);1361free(this);1362}13631364for(i =0; i <ARRAY_SIZE(pbase_tree_cache); i++) {1365if(!pbase_tree_cache[i])1366continue;1367free(pbase_tree_cache[i]->tree_data);1368free(pbase_tree_cache[i]);1369 pbase_tree_cache[i] = NULL;1370}13711372free(done_pbase_paths);1373 done_pbase_paths = NULL;1374 done_pbase_paths_num = done_pbase_paths_alloc =0;1375}13761377static voidcheck_object(struct object_entry *entry)1378{1379if(entry->in_pack) {1380struct packed_git *p = entry->in_pack;1381struct pack_window *w_curs = NULL;1382const unsigned char*base_ref = NULL;1383struct object_entry *base_entry;1384unsigned long used, used_0;1385unsigned long avail;1386 off_t ofs;1387unsigned char*buf, c;13881389 buf =use_pack(p, &w_curs, entry->in_pack_offset, &avail);13901391/*1392 * We want in_pack_type even if we do not reuse delta1393 * since non-delta representations could still be reused.1394 */1395 used =unpack_object_header_buffer(buf, avail,1396&entry->in_pack_type,1397&entry->size);1398if(used ==0)1399goto give_up;14001401/*1402 * Determine if this is a delta and if so whether we can1403 * reuse it or not. Otherwise let's find out as cheaply as1404 * possible what the actual type and size for this object is.1405 */1406switch(entry->in_pack_type) {1407default:1408/* Not a delta hence we've already got all we need. */1409 entry->type = entry->in_pack_type;1410 entry->in_pack_header_size = used;1411if(entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)1412goto give_up;1413unuse_pack(&w_curs);1414return;1415case OBJ_REF_DELTA:1416if(reuse_delta && !entry->preferred_base)1417 base_ref =use_pack(p, &w_curs,1418 entry->in_pack_offset + used, NULL);1419 entry->in_pack_header_size = used +20;1420break;1421case OBJ_OFS_DELTA:1422 buf =use_pack(p, &w_curs,1423 entry->in_pack_offset + used, NULL);1424 used_0 =0;1425 c = buf[used_0++];1426 ofs = c &127;1427while(c &128) {1428 ofs +=1;1429if(!ofs ||MSB(ofs,7)) {1430error("delta base offset overflow in pack for%s",1431sha1_to_hex(entry->idx.sha1));1432goto give_up;1433}1434 c = buf[used_0++];1435 ofs = (ofs <<7) + (c &127);1436}1437 ofs = entry->in_pack_offset - ofs;1438if(ofs <=0|| ofs >= entry->in_pack_offset) {1439error("delta base offset out of bound for%s",1440sha1_to_hex(entry->idx.sha1));1441goto give_up;1442}1443if(reuse_delta && !entry->preferred_base) {1444struct revindex_entry *revidx;1445 revidx =find_pack_revindex(p, ofs);1446if(!revidx)1447goto give_up;1448 base_ref =nth_packed_object_sha1(p, revidx->nr);1449}1450 entry->in_pack_header_size = used + used_0;1451break;1452}14531454if(base_ref && (base_entry =packlist_find(&to_pack, base_ref, NULL))) {1455/*1456 * If base_ref was set above that means we wish to1457 * reuse delta data, and we even found that base1458 * in the list of objects we want to pack. Goodie!1459 *1460 * Depth value does not matter - find_deltas() will1461 * never consider reused delta as the base object to1462 * deltify other objects against, in order to avoid1463 * circular deltas.1464 */1465 entry->type = entry->in_pack_type;1466 entry->delta = base_entry;1467 entry->delta_size = entry->size;1468 entry->delta_sibling = base_entry->delta_child;1469 base_entry->delta_child = entry;1470unuse_pack(&w_curs);1471return;1472}14731474if(entry->type) {1475/*1476 * This must be a delta and we already know what the1477 * final object type is. Let's extract the actual1478 * object size from the delta header.1479 */1480 entry->size =get_size_from_delta(p, &w_curs,1481 entry->in_pack_offset + entry->in_pack_header_size);1482if(entry->size ==0)1483goto give_up;1484unuse_pack(&w_curs);1485return;1486}14871488/*1489 * No choice but to fall back to the recursive delta walk1490 * with sha1_object_info() to find about the object type1491 * at this point...1492 */1493 give_up:1494unuse_pack(&w_curs);1495}14961497 entry->type =sha1_object_info(entry->idx.sha1, &entry->size);1498/*1499 * The error condition is checked in prepare_pack(). This is1500 * to permit a missing preferred base object to be ignored1501 * as a preferred base. Doing so can result in a larger1502 * pack file, but the transfer will still take place.1503 */1504}15051506static intpack_offset_sort(const void*_a,const void*_b)1507{1508const struct object_entry *a = *(struct object_entry **)_a;1509const struct object_entry *b = *(struct object_entry **)_b;15101511/* avoid filesystem trashing with loose objects */1512if(!a->in_pack && !b->in_pack)1513returnhashcmp(a->idx.sha1, b->idx.sha1);15141515if(a->in_pack < b->in_pack)1516return-1;1517if(a->in_pack > b->in_pack)1518return1;1519return a->in_pack_offset < b->in_pack_offset ? -1:1520(a->in_pack_offset > b->in_pack_offset);1521}15221523/*1524 * Drop an on-disk delta we were planning to reuse. Naively, this would1525 * just involve blanking out the "delta" field, but we have to deal1526 * with some extra book-keeping:1527 *1528 * 1. Removing ourselves from the delta_sibling linked list.1529 *1530 * 2. Updating our size/type to the non-delta representation. These were1531 * either not recorded initially (size) or overwritten with the delta type1532 * (type) when check_object() decided to reuse the delta.1533 *1534 * 3. Resetting our delta depth, as we are now a base object.1535 */1536static voiddrop_reused_delta(struct object_entry *entry)1537{1538struct object_entry **p = &entry->delta->delta_child;1539struct object_info oi = OBJECT_INFO_INIT;15401541while(*p) {1542if(*p == entry)1543*p = (*p)->delta_sibling;1544else1545 p = &(*p)->delta_sibling;1546}1547 entry->delta = NULL;1548 entry->depth =0;15491550 oi.sizep = &entry->size;1551 oi.typep = &entry->type;1552if(packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) <0) {1553/*1554 * We failed to get the info from this pack for some reason;1555 * fall back to sha1_object_info, which may find another copy.1556 * And if that fails, the error will be recorded in entry->type1557 * and dealt with in prepare_pack().1558 */1559 entry->type =sha1_object_info(entry->idx.sha1, &entry->size);1560}1561}15621563/*1564 * Follow the chain of deltas from this entry onward, throwing away any links1565 * that cause us to hit a cycle (as determined by the DFS state flags in1566 * the entries).1567 *1568 * We also detect too-long reused chains that would violate our --depth1569 * limit.1570 */1571static voidbreak_delta_chains(struct object_entry *entry)1572{1573/*1574 * The actual depth of each object we will write is stored as an int,1575 * as it cannot exceed our int "depth" limit. But before we break1576 * changes based no that limit, we may potentially go as deep as the1577 * number of objects, which is elsewhere bounded to a uint32_t.1578 */1579uint32_t total_depth;1580struct object_entry *cur, *next;15811582for(cur = entry, total_depth =0;1583 cur;1584 cur = cur->delta, total_depth++) {1585if(cur->dfs_state == DFS_DONE) {1586/*1587 * We've already seen this object and know it isn't1588 * part of a cycle. We do need to append its depth1589 * to our count.1590 */1591 total_depth += cur->depth;1592break;1593}15941595/*1596 * We break cycles before looping, so an ACTIVE state (or any1597 * other cruft which made its way into the state variable)1598 * is a bug.1599 */1600if(cur->dfs_state != DFS_NONE)1601die("BUG: confusing delta dfs state in first pass:%d",1602 cur->dfs_state);16031604/*1605 * Now we know this is the first time we've seen the object. If1606 * it's not a delta, we're done traversing, but we'll mark it1607 * done to save time on future traversals.1608 */1609if(!cur->delta) {1610 cur->dfs_state = DFS_DONE;1611break;1612}16131614/*1615 * Mark ourselves as active and see if the next step causes1616 * us to cycle to another active object. It's important to do1617 * this _before_ we loop, because it impacts where we make the1618 * cut, and thus how our total_depth counter works.1619 * E.g., We may see a partial loop like:1620 *1621 * A -> B -> C -> D -> B1622 *1623 * Cutting B->C breaks the cycle. But now the depth of A is1624 * only 1, and our total_depth counter is at 3. The size of the1625 * error is always one less than the size of the cycle we1626 * broke. Commits C and D were "lost" from A's chain.1627 *1628 * If we instead cut D->B, then the depth of A is correct at 3.1629 * We keep all commits in the chain that we examined.1630 */1631 cur->dfs_state = DFS_ACTIVE;1632if(cur->delta->dfs_state == DFS_ACTIVE) {1633drop_reused_delta(cur);1634 cur->dfs_state = DFS_DONE;1635break;1636}1637}16381639/*1640 * And now that we've gone all the way to the bottom of the chain, we1641 * need to clear the active flags and set the depth fields as1642 * appropriate. Unlike the loop above, which can quit when it drops a1643 * delta, we need to keep going to look for more depth cuts. So we need1644 * an extra "next" pointer to keep going after we reset cur->delta.1645 */1646for(cur = entry; cur; cur = next) {1647 next = cur->delta;16481649/*1650 * We should have a chain of zero or more ACTIVE states down to1651 * a final DONE. We can quit after the DONE, because either it1652 * has no bases, or we've already handled them in a previous1653 * call.1654 */1655if(cur->dfs_state == DFS_DONE)1656break;1657else if(cur->dfs_state != DFS_ACTIVE)1658die("BUG: confusing delta dfs state in second pass:%d",1659 cur->dfs_state);16601661/*1662 * If the total_depth is more than depth, then we need to snip1663 * the chain into two or more smaller chains that don't exceed1664 * the maximum depth. Most of the resulting chains will contain1665 * (depth + 1) entries (i.e., depth deltas plus one base), and1666 * the last chain (i.e., the one containing entry) will contain1667 * whatever entries are left over, namely1668 * (total_depth % (depth + 1)) of them.1669 *1670 * Since we are iterating towards decreasing depth, we need to1671 * decrement total_depth as we go, and we need to write to the1672 * entry what its final depth will be after all of the1673 * snipping. Since we're snipping into chains of length (depth1674 * + 1) entries, the final depth of an entry will be its1675 * original depth modulo (depth + 1). Any time we encounter an1676 * entry whose final depth is supposed to be zero, we snip it1677 * from its delta base, thereby making it so.1678 */1679 cur->depth = (total_depth--) % (depth +1);1680if(!cur->depth)1681drop_reused_delta(cur);16821683 cur->dfs_state = DFS_DONE;1684}1685}16861687static voidget_object_details(void)1688{1689uint32_t i;1690struct object_entry **sorted_by_offset;16911692 sorted_by_offset =xcalloc(to_pack.nr_objects,sizeof(struct object_entry *));1693for(i =0; i < to_pack.nr_objects; i++)1694 sorted_by_offset[i] = to_pack.objects + i;1695QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);16961697for(i =0; i < to_pack.nr_objects; i++) {1698struct object_entry *entry = sorted_by_offset[i];1699check_object(entry);1700if(big_file_threshold < entry->size)1701 entry->no_try_delta =1;1702}17031704/*1705 * This must happen in a second pass, since we rely on the delta1706 * information for the whole list being completed.1707 */1708for(i =0; i < to_pack.nr_objects; i++)1709break_delta_chains(&to_pack.objects[i]);17101711free(sorted_by_offset);1712}17131714/*1715 * We search for deltas in a list sorted by type, by filename hash, and then1716 * by size, so that we see progressively smaller and smaller files.1717 * That's because we prefer deltas to be from the bigger file1718 * to the smaller -- deletes are potentially cheaper, but perhaps1719 * more importantly, the bigger file is likely the more recent1720 * one. The deepest deltas are therefore the oldest objects which are1721 * less susceptible to be accessed often.1722 */1723static inttype_size_sort(const void*_a,const void*_b)1724{1725const struct object_entry *a = *(struct object_entry **)_a;1726const struct object_entry *b = *(struct object_entry **)_b;17271728if(a->type > b->type)1729return-1;1730if(a->type < b->type)1731return1;1732if(a->hash > b->hash)1733return-1;1734if(a->hash < b->hash)1735return1;1736if(a->preferred_base > b->preferred_base)1737return-1;1738if(a->preferred_base < b->preferred_base)1739return1;1740if(a->size > b->size)1741return-1;1742if(a->size < b->size)1743return1;1744return a < b ? -1: (a > b);/* newest first */1745}17461747struct unpacked {1748struct object_entry *entry;1749void*data;1750struct delta_index *index;1751unsigned depth;1752};17531754static intdelta_cacheable(unsigned long src_size,unsigned long trg_size,1755unsigned long delta_size)1756{1757if(max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1758return0;17591760if(delta_size < cache_max_small_delta_size)1761return1;17621763/* cache delta, if objects are large enough compared to delta size */1764if((src_size >>20) + (trg_size >>21) > (delta_size >>10))1765return1;17661767return0;1768}17691770#ifndef NO_PTHREADS17711772static pthread_mutex_t read_mutex;1773#define read_lock() pthread_mutex_lock(&read_mutex)1774#define read_unlock() pthread_mutex_unlock(&read_mutex)17751776static pthread_mutex_t cache_mutex;1777#define cache_lock() pthread_mutex_lock(&cache_mutex)1778#define cache_unlock() pthread_mutex_unlock(&cache_mutex)17791780static pthread_mutex_t progress_mutex;1781#define progress_lock() pthread_mutex_lock(&progress_mutex)1782#define progress_unlock() pthread_mutex_unlock(&progress_mutex)17831784#else17851786#define read_lock() (void)01787#define read_unlock() (void)01788#define cache_lock() (void)01789#define cache_unlock() (void)01790#define progress_lock() (void)01791#define progress_unlock() (void)017921793#endif17941795static inttry_delta(struct unpacked *trg,struct unpacked *src,1796unsigned max_depth,unsigned long*mem_usage)1797{1798struct object_entry *trg_entry = trg->entry;1799struct object_entry *src_entry = src->entry;1800unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1801unsigned ref_depth;1802enum object_type type;1803void*delta_buf;18041805/* Don't bother doing diffs between different types */1806if(trg_entry->type != src_entry->type)1807return-1;18081809/*1810 * We do not bother to try a delta that we discarded on an1811 * earlier try, but only when reusing delta data. Note that1812 * src_entry that is marked as the preferred_base should always1813 * be considered, as even if we produce a suboptimal delta against1814 * it, we will still save the transfer cost, as we already know1815 * the other side has it and we won't send src_entry at all.1816 */1817if(reuse_delta && trg_entry->in_pack &&1818 trg_entry->in_pack == src_entry->in_pack &&1819!src_entry->preferred_base &&1820 trg_entry->in_pack_type != OBJ_REF_DELTA &&1821 trg_entry->in_pack_type != OBJ_OFS_DELTA)1822return0;18231824/* Let's not bust the allowed depth. */1825if(src->depth >= max_depth)1826return0;18271828/* Now some size filtering heuristics. */1829 trg_size = trg_entry->size;1830if(!trg_entry->delta) {1831 max_size = trg_size/2-20;1832 ref_depth =1;1833}else{1834 max_size = trg_entry->delta_size;1835 ref_depth = trg->depth;1836}1837 max_size = (uint64_t)max_size * (max_depth - src->depth) /1838(max_depth - ref_depth +1);1839if(max_size ==0)1840return0;1841 src_size = src_entry->size;1842 sizediff = src_size < trg_size ? trg_size - src_size :0;1843if(sizediff >= max_size)1844return0;1845if(trg_size < src_size /32)1846return0;18471848/* Load data if not already done */1849if(!trg->data) {1850read_lock();1851 trg->data =read_sha1_file(trg_entry->idx.sha1, &type, &sz);1852read_unlock();1853if(!trg->data)1854die("object%scannot be read",1855sha1_to_hex(trg_entry->idx.sha1));1856if(sz != trg_size)1857die("object%sinconsistent object length (%lu vs%lu)",1858sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);1859*mem_usage += sz;1860}1861if(!src->data) {1862read_lock();1863 src->data =read_sha1_file(src_entry->idx.sha1, &type, &sz);1864read_unlock();1865if(!src->data) {1866if(src_entry->preferred_base) {1867static int warned =0;1868if(!warned++)1869warning("object%scannot be read",1870sha1_to_hex(src_entry->idx.sha1));1871/*1872 * Those objects are not included in the1873 * resulting pack. Be resilient and ignore1874 * them if they can't be read, in case the1875 * pack could be created nevertheless.1876 */1877return0;1878}1879die("object%scannot be read",1880sha1_to_hex(src_entry->idx.sha1));1881}1882if(sz != src_size)1883die("object%sinconsistent object length (%lu vs%lu)",1884sha1_to_hex(src_entry->idx.sha1), sz, src_size);1885*mem_usage += sz;1886}1887if(!src->index) {1888 src->index =create_delta_index(src->data, src_size);1889if(!src->index) {1890static int warned =0;1891if(!warned++)1892warning("suboptimal pack - out of memory");1893return0;1894}1895*mem_usage +=sizeof_delta_index(src->index);1896}18971898 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);1899if(!delta_buf)1900return0;19011902if(trg_entry->delta) {1903/* Prefer only shallower same-sized deltas. */1904if(delta_size == trg_entry->delta_size &&1905 src->depth +1>= trg->depth) {1906free(delta_buf);1907return0;1908}1909}19101911/*1912 * Handle memory allocation outside of the cache1913 * accounting lock. Compiler will optimize the strangeness1914 * away when NO_PTHREADS is defined.1915 */1916free(trg_entry->delta_data);1917cache_lock();1918if(trg_entry->delta_data) {1919 delta_cache_size -= trg_entry->delta_size;1920 trg_entry->delta_data = NULL;1921}1922if(delta_cacheable(src_size, trg_size, delta_size)) {1923 delta_cache_size += delta_size;1924cache_unlock();1925 trg_entry->delta_data =xrealloc(delta_buf, delta_size);1926}else{1927cache_unlock();1928free(delta_buf);1929}19301931 trg_entry->delta = src_entry;1932 trg_entry->delta_size = delta_size;1933 trg->depth = src->depth +1;19341935return1;1936}19371938static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)1939{1940struct object_entry *child = me->delta_child;1941unsigned int m = n;1942while(child) {1943unsigned int c =check_delta_limit(child, n +1);1944if(m < c)1945 m = c;1946 child = child->delta_sibling;1947}1948return m;1949}19501951static unsigned longfree_unpacked(struct unpacked *n)1952{1953unsigned long freed_mem =sizeof_delta_index(n->index);1954free_delta_index(n->index);1955 n->index = NULL;1956if(n->data) {1957 freed_mem += n->entry->size;1958free(n->data);1959 n->data = NULL;1960}1961 n->entry = NULL;1962 n->depth =0;1963return freed_mem;1964}19651966static voidfind_deltas(struct object_entry **list,unsigned*list_size,1967int window,int depth,unsigned*processed)1968{1969uint32_t i, idx =0, count =0;1970struct unpacked *array;1971unsigned long mem_usage =0;19721973 array =xcalloc(window,sizeof(struct unpacked));19741975for(;;) {1976struct object_entry *entry;1977struct unpacked *n = array + idx;1978int j, max_depth, best_base = -1;19791980progress_lock();1981if(!*list_size) {1982progress_unlock();1983break;1984}1985 entry = *list++;1986(*list_size)--;1987if(!entry->preferred_base) {1988(*processed)++;1989display_progress(progress_state, *processed);1990}1991progress_unlock();19921993 mem_usage -=free_unpacked(n);1994 n->entry = entry;19951996while(window_memory_limit &&1997 mem_usage > window_memory_limit &&1998 count >1) {1999uint32_t tail = (idx + window - count) % window;2000 mem_usage -=free_unpacked(array + tail);2001 count--;2002}20032004/* We do not compute delta to *create* objects we are not2005 * going to pack.2006 */2007if(entry->preferred_base)2008goto next;20092010/*2011 * If the current object is at pack edge, take the depth the2012 * objects that depend on the current object into account2013 * otherwise they would become too deep.2014 */2015 max_depth = depth;2016if(entry->delta_child) {2017 max_depth -=check_delta_limit(entry,0);2018if(max_depth <=0)2019goto next;2020}20212022 j = window;2023while(--j >0) {2024int ret;2025uint32_t other_idx = idx + j;2026struct unpacked *m;2027if(other_idx >= window)2028 other_idx -= window;2029 m = array + other_idx;2030if(!m->entry)2031break;2032 ret =try_delta(n, m, max_depth, &mem_usage);2033if(ret <0)2034break;2035else if(ret >0)2036 best_base = other_idx;2037}20382039/*2040 * If we decided to cache the delta data, then it is best2041 * to compress it right away. First because we have to do2042 * it anyway, and doing it here while we're threaded will2043 * save a lot of time in the non threaded write phase,2044 * as well as allow for caching more deltas within2045 * the same cache size limit.2046 * ...2047 * But only if not writing to stdout, since in that case2048 * the network is most likely throttling writes anyway,2049 * and therefore it is best to go to the write phase ASAP2050 * instead, as we can afford spending more time compressing2051 * between writes at that moment.2052 */2053if(entry->delta_data && !pack_to_stdout) {2054 entry->z_delta_size =do_compress(&entry->delta_data,2055 entry->delta_size);2056cache_lock();2057 delta_cache_size -= entry->delta_size;2058 delta_cache_size += entry->z_delta_size;2059cache_unlock();2060}20612062/* if we made n a delta, and if n is already at max2063 * depth, leaving it in the window is pointless. we2064 * should evict it first.2065 */2066if(entry->delta && max_depth <= n->depth)2067continue;20682069/*2070 * Move the best delta base up in the window, after the2071 * currently deltified object, to keep it longer. It will2072 * be the first base object to be attempted next.2073 */2074if(entry->delta) {2075struct unpacked swap = array[best_base];2076int dist = (window + idx - best_base) % window;2077int dst = best_base;2078while(dist--) {2079int src = (dst +1) % window;2080 array[dst] = array[src];2081 dst = src;2082}2083 array[dst] = swap;2084}20852086 next:2087 idx++;2088if(count +1< window)2089 count++;2090if(idx >= window)2091 idx =0;2092}20932094for(i =0; i < window; ++i) {2095free_delta_index(array[i].index);2096free(array[i].data);2097}2098free(array);2099}21002101#ifndef NO_PTHREADS21022103static voidtry_to_free_from_threads(size_t size)2104{2105read_lock();2106release_pack_memory(size);2107read_unlock();2108}21092110static try_to_free_t old_try_to_free_routine;21112112/*2113 * The main thread waits on the condition that (at least) one of the workers2114 * has stopped working (which is indicated in the .working member of2115 * struct thread_params).2116 * When a work thread has completed its work, it sets .working to 0 and2117 * signals the main thread and waits on the condition that .data_ready2118 * becomes 1.2119 */21202121struct thread_params {2122 pthread_t thread;2123struct object_entry **list;2124unsigned list_size;2125unsigned remaining;2126int window;2127int depth;2128int working;2129int data_ready;2130 pthread_mutex_t mutex;2131 pthread_cond_t cond;2132unsigned*processed;2133};21342135static pthread_cond_t progress_cond;21362137/*2138 * Mutex and conditional variable can't be statically-initialized on Windows.2139 */2140static voidinit_threaded_search(void)2141{2142init_recursive_mutex(&read_mutex);2143pthread_mutex_init(&cache_mutex, NULL);2144pthread_mutex_init(&progress_mutex, NULL);2145pthread_cond_init(&progress_cond, NULL);2146 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2147}21482149static voidcleanup_threaded_search(void)2150{2151set_try_to_free_routine(old_try_to_free_routine);2152pthread_cond_destroy(&progress_cond);2153pthread_mutex_destroy(&read_mutex);2154pthread_mutex_destroy(&cache_mutex);2155pthread_mutex_destroy(&progress_mutex);2156}21572158static void*threaded_find_deltas(void*arg)2159{2160struct thread_params *me = arg;21612162while(me->remaining) {2163find_deltas(me->list, &me->remaining,2164 me->window, me->depth, me->processed);21652166progress_lock();2167 me->working =0;2168pthread_cond_signal(&progress_cond);2169progress_unlock();21702171/*2172 * We must not set ->data_ready before we wait on the2173 * condition because the main thread may have set it to 12174 * before we get here. In order to be sure that new2175 * work is available if we see 1 in ->data_ready, it2176 * was initialized to 0 before this thread was spawned2177 * and we reset it to 0 right away.2178 */2179pthread_mutex_lock(&me->mutex);2180while(!me->data_ready)2181pthread_cond_wait(&me->cond, &me->mutex);2182 me->data_ready =0;2183pthread_mutex_unlock(&me->mutex);2184}2185/* leave ->working 1 so that this doesn't get more work assigned */2186return NULL;2187}21882189static voidll_find_deltas(struct object_entry **list,unsigned list_size,2190int window,int depth,unsigned*processed)2191{2192struct thread_params *p;2193int i, ret, active_threads =0;21942195init_threaded_search();21962197if(delta_search_threads <=1) {2198find_deltas(list, &list_size, window, depth, processed);2199cleanup_threaded_search();2200return;2201}2202if(progress > pack_to_stdout)2203fprintf(stderr,"Delta compression using up to%dthreads.\n",2204 delta_search_threads);2205 p =xcalloc(delta_search_threads,sizeof(*p));22062207/* Partition the work amongst work threads. */2208for(i =0; i < delta_search_threads; i++) {2209unsigned sub_size = list_size / (delta_search_threads - i);22102211/* don't use too small segments or no deltas will be found */2212if(sub_size <2*window && i+1< delta_search_threads)2213 sub_size =0;22142215 p[i].window = window;2216 p[i].depth = depth;2217 p[i].processed = processed;2218 p[i].working =1;2219 p[i].data_ready =0;22202221/* try to split chunks on "path" boundaries */2222while(sub_size && sub_size < list_size &&2223 list[sub_size]->hash &&2224 list[sub_size]->hash == list[sub_size-1]->hash)2225 sub_size++;22262227 p[i].list = list;2228 p[i].list_size = sub_size;2229 p[i].remaining = sub_size;22302231 list += sub_size;2232 list_size -= sub_size;2233}22342235/* Start work threads. */2236for(i =0; i < delta_search_threads; i++) {2237if(!p[i].list_size)2238continue;2239pthread_mutex_init(&p[i].mutex, NULL);2240pthread_cond_init(&p[i].cond, NULL);2241 ret =pthread_create(&p[i].thread, NULL,2242 threaded_find_deltas, &p[i]);2243if(ret)2244die("unable to create thread:%s",strerror(ret));2245 active_threads++;2246}22472248/*2249 * Now let's wait for work completion. Each time a thread is done2250 * with its work, we steal half of the remaining work from the2251 * thread with the largest number of unprocessed objects and give2252 * it to that newly idle thread. This ensure good load balancing2253 * until the remaining object list segments are simply too short2254 * to be worth splitting anymore.2255 */2256while(active_threads) {2257struct thread_params *target = NULL;2258struct thread_params *victim = NULL;2259unsigned sub_size =0;22602261progress_lock();2262for(;;) {2263for(i =0; !target && i < delta_search_threads; i++)2264if(!p[i].working)2265 target = &p[i];2266if(target)2267break;2268pthread_cond_wait(&progress_cond, &progress_mutex);2269}22702271for(i =0; i < delta_search_threads; i++)2272if(p[i].remaining >2*window &&2273(!victim || victim->remaining < p[i].remaining))2274 victim = &p[i];2275if(victim) {2276 sub_size = victim->remaining /2;2277 list = victim->list + victim->list_size - sub_size;2278while(sub_size && list[0]->hash &&2279 list[0]->hash == list[-1]->hash) {2280 list++;2281 sub_size--;2282}2283if(!sub_size) {2284/*2285 * It is possible for some "paths" to have2286 * so many objects that no hash boundary2287 * might be found. Let's just steal the2288 * exact half in that case.2289 */2290 sub_size = victim->remaining /2;2291 list -= sub_size;2292}2293 target->list = list;2294 victim->list_size -= sub_size;2295 victim->remaining -= sub_size;2296}2297 target->list_size = sub_size;2298 target->remaining = sub_size;2299 target->working =1;2300progress_unlock();23012302pthread_mutex_lock(&target->mutex);2303 target->data_ready =1;2304pthread_cond_signal(&target->cond);2305pthread_mutex_unlock(&target->mutex);23062307if(!sub_size) {2308pthread_join(target->thread, NULL);2309pthread_cond_destroy(&target->cond);2310pthread_mutex_destroy(&target->mutex);2311 active_threads--;2312}2313}2314cleanup_threaded_search();2315free(p);2316}23172318#else2319#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2320#endif23212322static voidadd_tag_chain(const struct object_id *oid)2323{2324struct tag *tag;23252326/*2327 * We catch duplicates already in add_object_entry(), but we'd2328 * prefer to do this extra check to avoid having to parse the2329 * tag at all if we already know that it's being packed (e.g., if2330 * it was included via bitmaps, we would not have parsed it2331 * previously).2332 */2333if(packlist_find(&to_pack, oid->hash, NULL))2334return;23352336 tag =lookup_tag(oid->hash);2337while(1) {2338if(!tag ||parse_tag(tag) || !tag->tagged)2339die("unable to pack objects reachable from tag%s",2340oid_to_hex(oid));23412342add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL,0);23432344if(tag->tagged->type != OBJ_TAG)2345return;23462347 tag = (struct tag *)tag->tagged;2348}2349}23502351static intadd_ref_tag(const char*path,const struct object_id *oid,int flag,void*cb_data)2352{2353struct object_id peeled;23542355if(starts_with(path,"refs/tags/") &&/* is a tag? */2356!peel_ref(path, peeled.hash) &&/* peelable? */2357packlist_find(&to_pack, peeled.hash, NULL))/* object packed? */2358add_tag_chain(oid);2359return0;2360}23612362static voidprepare_pack(int window,int depth)2363{2364struct object_entry **delta_list;2365uint32_t i, nr_deltas;2366unsigned n;23672368get_object_details();23692370/*2371 * If we're locally repacking then we need to be doubly careful2372 * from now on in order to make sure no stealth corruption gets2373 * propagated to the new pack. Clients receiving streamed packs2374 * should validate everything they get anyway so no need to incur2375 * the additional cost here in that case.2376 */2377if(!pack_to_stdout)2378 do_check_packed_object_crc =1;23792380if(!to_pack.nr_objects || !window || !depth)2381return;23822383ALLOC_ARRAY(delta_list, to_pack.nr_objects);2384 nr_deltas = n =0;23852386for(i =0; i < to_pack.nr_objects; i++) {2387struct object_entry *entry = to_pack.objects + i;23882389if(entry->delta)2390/* This happens if we decided to reuse existing2391 * delta from a pack. "reuse_delta &&" is implied.2392 */2393continue;23942395if(entry->size <50)2396continue;23972398if(entry->no_try_delta)2399continue;24002401if(!entry->preferred_base) {2402 nr_deltas++;2403if(entry->type <0)2404die("unable to get type of object%s",2405sha1_to_hex(entry->idx.sha1));2406}else{2407if(entry->type <0) {2408/*2409 * This object is not found, but we2410 * don't have to include it anyway.2411 */2412continue;2413}2414}24152416 delta_list[n++] = entry;2417}24182419if(nr_deltas && n >1) {2420unsigned nr_done =0;2421if(progress)2422 progress_state =start_progress(_("Compressing objects"),2423 nr_deltas);2424QSORT(delta_list, n, type_size_sort);2425ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2426stop_progress(&progress_state);2427if(nr_done != nr_deltas)2428die("inconsistency with delta count");2429}2430free(delta_list);2431}24322433static intgit_pack_config(const char*k,const char*v,void*cb)2434{2435if(!strcmp(k,"pack.window")) {2436 window =git_config_int(k, v);2437return0;2438}2439if(!strcmp(k,"pack.windowmemory")) {2440 window_memory_limit =git_config_ulong(k, v);2441return0;2442}2443if(!strcmp(k,"pack.depth")) {2444 depth =git_config_int(k, v);2445return0;2446}2447if(!strcmp(k,"pack.deltacachesize")) {2448 max_delta_cache_size =git_config_int(k, v);2449return0;2450}2451if(!strcmp(k,"pack.deltacachelimit")) {2452 cache_max_small_delta_size =git_config_int(k, v);2453return0;2454}2455if(!strcmp(k,"pack.writebitmaphashcache")) {2456if(git_config_bool(k, v))2457 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2458else2459 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2460}2461if(!strcmp(k,"pack.usebitmaps")) {2462 use_bitmap_index_default =git_config_bool(k, v);2463return0;2464}2465if(!strcmp(k,"pack.threads")) {2466 delta_search_threads =git_config_int(k, v);2467if(delta_search_threads <0)2468die("invalid number of threads specified (%d)",2469 delta_search_threads);2470#ifdef NO_PTHREADS2471if(delta_search_threads !=1)2472warning("no threads support, ignoring%s", k);2473#endif2474return0;2475}2476if(!strcmp(k,"pack.indexversion")) {2477 pack_idx_opts.version =git_config_int(k, v);2478if(pack_idx_opts.version >2)2479die("bad pack.indexversion=%"PRIu32,2480 pack_idx_opts.version);2481return0;2482}2483returngit_default_config(k, v, cb);2484}24852486static voidread_object_list_from_stdin(void)2487{2488char line[40+1+ PATH_MAX +2];2489unsigned char sha1[20];24902491for(;;) {2492if(!fgets(line,sizeof(line), stdin)) {2493if(feof(stdin))2494break;2495if(!ferror(stdin))2496die("fgets returned NULL, not EOF, not error!");2497if(errno != EINTR)2498die_errno("fgets");2499clearerr(stdin);2500continue;2501}2502if(line[0] =='-') {2503if(get_sha1_hex(line+1, sha1))2504die("expected edge sha1, got garbage:\n%s",2505 line);2506add_preferred_base(sha1);2507continue;2508}2509if(get_sha1_hex(line, sha1))2510die("expected sha1, got garbage:\n%s", line);25112512add_preferred_base_object(line+41);2513add_object_entry(sha1,0, line+41,0);2514}2515}25162517#define OBJECT_ADDED (1u<<20)25182519static voidshow_commit(struct commit *commit,void*data)2520{2521add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL,0);2522 commit->object.flags |= OBJECT_ADDED;25232524if(write_bitmap_index)2525index_commit_for_bitmap(commit);2526}25272528static voidshow_object(struct object *obj,const char*name,void*data)2529{2530add_preferred_base_object(name);2531add_object_entry(obj->oid.hash, obj->type, name,0);2532 obj->flags |= OBJECT_ADDED;2533}25342535static voidshow_edge(struct commit *commit)2536{2537add_preferred_base(commit->object.oid.hash);2538}25392540struct in_pack_object {2541 off_t offset;2542struct object *object;2543};25442545struct in_pack {2546int alloc;2547int nr;2548struct in_pack_object *array;2549};25502551static voidmark_in_pack_object(struct object *object,struct packed_git *p,struct in_pack *in_pack)2552{2553 in_pack->array[in_pack->nr].offset =find_pack_entry_one(object->oid.hash, p);2554 in_pack->array[in_pack->nr].object = object;2555 in_pack->nr++;2556}25572558/*2559 * Compare the objects in the offset order, in order to emulate the2560 * "git rev-list --objects" output that produced the pack originally.2561 */2562static intofscmp(const void*a_,const void*b_)2563{2564struct in_pack_object *a = (struct in_pack_object *)a_;2565struct in_pack_object *b = (struct in_pack_object *)b_;25662567if(a->offset < b->offset)2568return-1;2569else if(a->offset > b->offset)2570return1;2571else2572returnoidcmp(&a->object->oid, &b->object->oid);2573}25742575static voidadd_objects_in_unpacked_packs(struct rev_info *revs)2576{2577struct packed_git *p;2578struct in_pack in_pack;2579uint32_t i;25802581memset(&in_pack,0,sizeof(in_pack));25822583for(p = packed_git; p; p = p->next) {2584const unsigned char*sha1;2585struct object *o;25862587if(!p->pack_local || p->pack_keep)2588continue;2589if(open_pack_index(p))2590die("cannot open pack index");25912592ALLOC_GROW(in_pack.array,2593 in_pack.nr + p->num_objects,2594 in_pack.alloc);25952596for(i =0; i < p->num_objects; i++) {2597 sha1 =nth_packed_object_sha1(p, i);2598 o =lookup_unknown_object(sha1);2599if(!(o->flags & OBJECT_ADDED))2600mark_in_pack_object(o, p, &in_pack);2601 o->flags |= OBJECT_ADDED;2602}2603}26042605if(in_pack.nr) {2606QSORT(in_pack.array, in_pack.nr, ofscmp);2607for(i =0; i < in_pack.nr; i++) {2608struct object *o = in_pack.array[i].object;2609add_object_entry(o->oid.hash, o->type,"",0);2610}2611}2612free(in_pack.array);2613}26142615static intadd_loose_object(const struct object_id *oid,const char*path,2616void*data)2617{2618enum object_type type =sha1_object_info(oid->hash, NULL);26192620if(type <0) {2621warning("loose object at%scould not be examined", path);2622return0;2623}26242625add_object_entry(oid->hash, type,"",0);2626return0;2627}26282629/*2630 * We actually don't even have to worry about reachability here.2631 * add_object_entry will weed out duplicates, so we just add every2632 * loose object we find.2633 */2634static voidadd_unreachable_loose_objects(void)2635{2636for_each_loose_file_in_objdir(get_object_directory(),2637 add_loose_object,2638 NULL, NULL, NULL);2639}26402641static inthas_sha1_pack_kept_or_nonlocal(const unsigned char*sha1)2642{2643static struct packed_git *last_found = (void*)1;2644struct packed_git *p;26452646 p = (last_found != (void*)1) ? last_found : packed_git;26472648while(p) {2649if((!p->pack_local || p->pack_keep) &&2650find_pack_entry_one(sha1, p)) {2651 last_found = p;2652return1;2653}2654if(p == last_found)2655 p = packed_git;2656else2657 p = p->next;2658if(p == last_found)2659 p = p->next;2660}2661return0;2662}26632664/*2665 * Store a list of sha1s that are should not be discarded2666 * because they are either written too recently, or are2667 * reachable from another object that was.2668 *2669 * This is filled by get_object_list.2670 */2671static struct sha1_array recent_objects;26722673static intloosened_object_can_be_discarded(const unsigned char*sha1,2674unsigned long mtime)2675{2676if(!unpack_unreachable_expiration)2677return0;2678if(mtime > unpack_unreachable_expiration)2679return0;2680if(sha1_array_lookup(&recent_objects, sha1) >=0)2681return0;2682return1;2683}26842685static voidloosen_unused_packed_objects(struct rev_info *revs)2686{2687struct packed_git *p;2688uint32_t i;2689const unsigned char*sha1;26902691for(p = packed_git; p; p = p->next) {2692if(!p->pack_local || p->pack_keep)2693continue;26942695if(open_pack_index(p))2696die("cannot open pack index");26972698for(i =0; i < p->num_objects; i++) {2699 sha1 =nth_packed_object_sha1(p, i);2700if(!packlist_find(&to_pack, sha1, NULL) &&2701!has_sha1_pack_kept_or_nonlocal(sha1) &&2702!loosened_object_can_be_discarded(sha1, p->mtime))2703if(force_object_loose(sha1, p->mtime))2704die("unable to force loose object");2705}2706}2707}27082709/*2710 * This tracks any options which pack-reuse code expects to be on, or which a2711 * reader of the pack might not understand, and which would therefore prevent2712 * blind reuse of what we have on disk.2713 */2714static intpack_options_allow_reuse(void)2715{2716return pack_to_stdout && allow_ofs_delta;2717}27182719static intget_object_list_from_bitmap(struct rev_info *revs)2720{2721if(prepare_bitmap_walk(revs) <0)2722return-1;27232724if(pack_options_allow_reuse() &&2725!reuse_partial_packfile_from_bitmap(2726&reuse_packfile,2727&reuse_packfile_objects,2728&reuse_packfile_offset)) {2729assert(reuse_packfile_objects);2730 nr_result += reuse_packfile_objects;2731display_progress(progress_state, nr_result);2732}27332734traverse_bitmap_commit_list(&add_object_entry_from_bitmap);2735return0;2736}27372738static voidrecord_recent_object(struct object *obj,2739const char*name,2740void*data)2741{2742sha1_array_append(&recent_objects, obj->oid.hash);2743}27442745static voidrecord_recent_commit(struct commit *commit,void*data)2746{2747sha1_array_append(&recent_objects, commit->object.oid.hash);2748}27492750static voidget_object_list(int ac,const char**av)2751{2752struct rev_info revs;2753char line[1000];2754int flags =0;27552756init_revisions(&revs, NULL);2757 save_commit_buffer =0;2758setup_revisions(ac, av, &revs, NULL);27592760/* make sure shallows are read */2761is_repository_shallow();27622763while(fgets(line,sizeof(line), stdin) != NULL) {2764int len =strlen(line);2765if(len && line[len -1] =='\n')2766 line[--len] =0;2767if(!len)2768break;2769if(*line =='-') {2770if(!strcmp(line,"--not")) {2771 flags ^= UNINTERESTING;2772 write_bitmap_index =0;2773continue;2774}2775if(starts_with(line,"--shallow ")) {2776unsigned char sha1[20];2777if(get_sha1_hex(line +10, sha1))2778die("not an SHA-1 '%s'", line +10);2779register_shallow(sha1);2780 use_bitmap_index =0;2781continue;2782}2783die("not a rev '%s'", line);2784}2785if(handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))2786die("bad revision '%s'", line);2787}27882789if(use_bitmap_index && !get_object_list_from_bitmap(&revs))2790return;27912792if(prepare_revision_walk(&revs))2793die("revision walk setup failed");2794mark_edges_uninteresting(&revs, show_edge);2795traverse_commit_list(&revs, show_commit, show_object, NULL);27962797if(unpack_unreachable_expiration) {2798 revs.ignore_missing_links =1;2799if(add_unseen_recent_objects_to_traversal(&revs,2800 unpack_unreachable_expiration))2801die("unable to add recent objects");2802if(prepare_revision_walk(&revs))2803die("revision walk setup failed");2804traverse_commit_list(&revs, record_recent_commit,2805 record_recent_object, NULL);2806}28072808if(keep_unreachable)2809add_objects_in_unpacked_packs(&revs);2810if(pack_loose_unreachable)2811add_unreachable_loose_objects();2812if(unpack_unreachable)2813loosen_unused_packed_objects(&revs);28142815sha1_array_clear(&recent_objects);2816}28172818static intoption_parse_index_version(const struct option *opt,2819const char*arg,int unset)2820{2821char*c;2822const char*val = arg;2823 pack_idx_opts.version =strtoul(val, &c,10);2824if(pack_idx_opts.version >2)2825die(_("unsupported index version%s"), val);2826if(*c ==','&& c[1])2827 pack_idx_opts.off32_limit =strtoul(c+1, &c,0);2828if(*c || pack_idx_opts.off32_limit &0x80000000)2829die(_("bad index version '%s'"), val);2830return0;2831}28322833static intoption_parse_unpack_unreachable(const struct option *opt,2834const char*arg,int unset)2835{2836if(unset) {2837 unpack_unreachable =0;2838 unpack_unreachable_expiration =0;2839}2840else{2841 unpack_unreachable =1;2842if(arg)2843 unpack_unreachable_expiration =approxidate(arg);2844}2845return0;2846}28472848intcmd_pack_objects(int argc,const char**argv,const char*prefix)2849{2850int use_internal_rev_list =0;2851int thin =0;2852int shallow =0;2853int all_progress_implied =0;2854struct argv_array rp = ARGV_ARRAY_INIT;2855int rev_list_unpacked =0, rev_list_all =0, rev_list_reflog =0;2856int rev_list_index =0;2857struct option pack_objects_options[] = {2858OPT_SET_INT('q',"quiet", &progress,2859N_("do not show progress meter"),0),2860OPT_SET_INT(0,"progress", &progress,2861N_("show progress meter"),1),2862OPT_SET_INT(0,"all-progress", &progress,2863N_("show progress meter during object writing phase"),2),2864OPT_BOOL(0,"all-progress-implied",2865&all_progress_implied,2866N_("similar to --all-progress when progress meter is shown")),2867{ OPTION_CALLBACK,0,"index-version", NULL,N_("version[,offset]"),2868N_("write the pack index file in the specified idx format version"),28690, option_parse_index_version },2870OPT_MAGNITUDE(0,"max-pack-size", &pack_size_limit,2871N_("maximum size of each output pack file")),2872OPT_BOOL(0,"local", &local,2873N_("ignore borrowed objects from alternate object store")),2874OPT_BOOL(0,"incremental", &incremental,2875N_("ignore packed objects")),2876OPT_INTEGER(0,"window", &window,2877N_("limit pack window by objects")),2878OPT_MAGNITUDE(0,"window-memory", &window_memory_limit,2879N_("limit pack window by memory in addition to object limit")),2880OPT_INTEGER(0,"depth", &depth,2881N_("maximum length of delta chain allowed in the resulting pack")),2882OPT_BOOL(0,"reuse-delta", &reuse_delta,2883N_("reuse existing deltas")),2884OPT_BOOL(0,"reuse-object", &reuse_object,2885N_("reuse existing objects")),2886OPT_BOOL(0,"delta-base-offset", &allow_ofs_delta,2887N_("use OFS_DELTA objects")),2888OPT_INTEGER(0,"threads", &delta_search_threads,2889N_("use threads when searching for best delta matches")),2890OPT_BOOL(0,"non-empty", &non_empty,2891N_("do not create an empty pack output")),2892OPT_BOOL(0,"revs", &use_internal_rev_list,2893N_("read revision arguments from standard input")),2894{ OPTION_SET_INT,0,"unpacked", &rev_list_unpacked, NULL,2895N_("limit the objects to those that are not yet packed"),2896 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2897{ OPTION_SET_INT,0,"all", &rev_list_all, NULL,2898N_("include objects reachable from any reference"),2899 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2900{ OPTION_SET_INT,0,"reflog", &rev_list_reflog, NULL,2901N_("include objects referred by reflog entries"),2902 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2903{ OPTION_SET_INT,0,"indexed-objects", &rev_list_index, NULL,2904N_("include objects referred to by the index"),2905 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2906OPT_BOOL(0,"stdout", &pack_to_stdout,2907N_("output pack to stdout")),2908OPT_BOOL(0,"include-tag", &include_tag,2909N_("include tag objects that refer to objects to be packed")),2910OPT_BOOL(0,"keep-unreachable", &keep_unreachable,2911N_("keep unreachable objects")),2912OPT_BOOL(0,"pack-loose-unreachable", &pack_loose_unreachable,2913N_("pack loose unreachable objects")),2914{ OPTION_CALLBACK,0,"unpack-unreachable", NULL,N_("time"),2915N_("unpack unreachable objects newer than <time>"),2916 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },2917OPT_BOOL(0,"thin", &thin,2918N_("create thin packs")),2919OPT_BOOL(0,"shallow", &shallow,2920N_("create packs suitable for shallow fetches")),2921OPT_BOOL(0,"honor-pack-keep", &ignore_packed_keep,2922N_("ignore packs that have companion .keep file")),2923OPT_INTEGER(0,"compression", &pack_compression_level,2924N_("pack compression level")),2925OPT_SET_INT(0,"keep-true-parents", &grafts_replace_parents,2926N_("do not hide commits by grafts"),0),2927OPT_BOOL(0,"use-bitmap-index", &use_bitmap_index,2928N_("use a bitmap index if available to speed up counting objects")),2929OPT_BOOL(0,"write-bitmap-index", &write_bitmap_index,2930N_("write a bitmap index together with the pack index")),2931OPT_END(),2932};29332934 check_replace_refs =0;29352936reset_pack_idx_option(&pack_idx_opts);2937git_config(git_pack_config, NULL);29382939 progress =isatty(2);2940 argc =parse_options(argc, argv, prefix, pack_objects_options,2941 pack_usage,0);29422943if(argc) {2944 base_name = argv[0];2945 argc--;2946}2947if(pack_to_stdout != !base_name || argc)2948usage_with_options(pack_usage, pack_objects_options);29492950argv_array_push(&rp,"pack-objects");2951if(thin) {2952 use_internal_rev_list =1;2953argv_array_push(&rp, shallow2954?"--objects-edge-aggressive"2955:"--objects-edge");2956}else2957argv_array_push(&rp,"--objects");29582959if(rev_list_all) {2960 use_internal_rev_list =1;2961argv_array_push(&rp,"--all");2962}2963if(rev_list_reflog) {2964 use_internal_rev_list =1;2965argv_array_push(&rp,"--reflog");2966}2967if(rev_list_index) {2968 use_internal_rev_list =1;2969argv_array_push(&rp,"--indexed-objects");2970}2971if(rev_list_unpacked) {2972 use_internal_rev_list =1;2973argv_array_push(&rp,"--unpacked");2974}29752976if(!reuse_object)2977 reuse_delta =0;2978if(pack_compression_level == -1)2979 pack_compression_level = Z_DEFAULT_COMPRESSION;2980else if(pack_compression_level <0|| pack_compression_level > Z_BEST_COMPRESSION)2981die("bad pack compression level%d", pack_compression_level);29822983if(!delta_search_threads)/* --threads=0 means autodetect */2984 delta_search_threads =online_cpus();29852986#ifdef NO_PTHREADS2987if(delta_search_threads !=1)2988warning("no threads support, ignoring --threads");2989#endif2990if(!pack_to_stdout && !pack_size_limit)2991 pack_size_limit = pack_size_limit_cfg;2992if(pack_to_stdout && pack_size_limit)2993die("--max-pack-size cannot be used to build a pack for transfer.");2994if(pack_size_limit && pack_size_limit <1024*1024) {2995warning("minimum pack size limit is 1 MiB");2996 pack_size_limit =1024*1024;2997}29982999if(!pack_to_stdout && thin)3000die("--thin cannot be used to build an indexable pack.");30013002if(keep_unreachable && unpack_unreachable)3003die("--keep-unreachable and --unpack-unreachable are incompatible.");3004if(!rev_list_all || !rev_list_reflog || !rev_list_index)3005 unpack_unreachable_expiration =0;30063007/*3008 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3009 *3010 * - to produce good pack (with bitmap index not-yet-packed objects are3011 * packed in suboptimal order).3012 *3013 * - to use more robust pack-generation codepath (avoiding possible3014 * bugs in bitmap code and possible bitmap index corruption).3015 */3016if(!pack_to_stdout)3017 use_bitmap_index_default =0;30183019if(use_bitmap_index <0)3020 use_bitmap_index = use_bitmap_index_default;30213022/* "hard" reasons not to use bitmaps; these just won't work at all */3023if(!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) ||is_repository_shallow())3024 use_bitmap_index =0;30253026if(pack_to_stdout || !rev_list_all)3027 write_bitmap_index =0;30283029if(progress && all_progress_implied)3030 progress =2;30313032prepare_packed_git();3033if(ignore_packed_keep) {3034struct packed_git *p;3035for(p = packed_git; p; p = p->next)3036if(p->pack_local && p->pack_keep)3037break;3038if(!p)/* no keep-able packs found */3039 ignore_packed_keep =0;3040}3041if(local) {3042/*3043 * unlike ignore_packed_keep above, we do not want to3044 * unset "local" based on looking at packs, as it3045 * also covers non-local objects3046 */3047struct packed_git *p;3048for(p = packed_git; p; p = p->next) {3049if(!p->pack_local) {3050 have_non_local_packs =1;3051break;3052}3053}3054}30553056if(progress)3057 progress_state =start_progress(_("Counting objects"),0);3058if(!use_internal_rev_list)3059read_object_list_from_stdin();3060else{3061get_object_list(rp.argc, rp.argv);3062argv_array_clear(&rp);3063}3064cleanup_preferred_base();3065if(include_tag && nr_result)3066for_each_ref(add_ref_tag, NULL);3067stop_progress(&progress_state);30683069if(non_empty && !nr_result)3070return0;3071if(nr_result)3072prepare_pack(window, depth);3073write_pack_file();3074if(progress)3075fprintf(stderr,"Total %"PRIu32" (delta %"PRIu32"),"3076" reused %"PRIu32" (delta %"PRIu32")\n",3077 written, written_delta, reused, reused_delta);3078return0;3079}