1#include"builtin.h" 2#include"cache.h" 3#include"config.h" 4#include"attr.h" 5#include"object.h" 6#include"blob.h" 7#include"commit.h" 8#include"tag.h" 9#include"tree.h" 10#include"delta.h" 11#include"pack.h" 12#include"pack-revindex.h" 13#include"csum-file.h" 14#include"tree-walk.h" 15#include"diff.h" 16#include"revision.h" 17#include"list-objects.h" 18#include"pack-objects.h" 19#include"progress.h" 20#include"refs.h" 21#include"streaming.h" 22#include"thread-utils.h" 23#include"pack-bitmap.h" 24#include"reachable.h" 25#include"sha1-array.h" 26#include"argv-array.h" 27#include"mru.h" 28 29static const char*pack_usage[] = { 30N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 31N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 32 NULL 33}; 34 35/* 36 * Objects we are going to pack are collected in the `to_pack` structure. 37 * It contains an array (dynamically expanded) of the object data, and a map 38 * that can resolve SHA1s to their position in the array. 39 */ 40static struct packing_data to_pack; 41 42static struct pack_idx_entry **written_list; 43static uint32_t nr_result, nr_written; 44 45static int non_empty; 46static int reuse_delta =1, reuse_object =1; 47static int keep_unreachable, unpack_unreachable, include_tag; 48static timestamp_t unpack_unreachable_expiration; 49static int pack_loose_unreachable; 50static int local; 51static int have_non_local_packs; 52static int incremental; 53static int ignore_packed_keep; 54static int allow_ofs_delta; 55static struct pack_idx_option pack_idx_opts; 56static const char*base_name; 57static int progress =1; 58static int window =10; 59static unsigned long pack_size_limit; 60static int depth =50; 61static int delta_search_threads; 62static int pack_to_stdout; 63static int num_preferred_base; 64static struct progress *progress_state; 65 66static struct packed_git *reuse_packfile; 67static uint32_t reuse_packfile_objects; 68static off_t reuse_packfile_offset; 69 70static int use_bitmap_index_default =1; 71static int use_bitmap_index = -1; 72static int write_bitmap_index; 73static uint16_t write_bitmap_options; 74 75static unsigned long delta_cache_size =0; 76static unsigned long max_delta_cache_size =256*1024*1024; 77static unsigned long cache_max_small_delta_size =1000; 78 79static unsigned long window_memory_limit =0; 80 81/* 82 * stats 83 */ 84static uint32_t written, written_delta; 85static uint32_t reused, reused_delta; 86 87/* 88 * Indexed commits 89 */ 90static struct commit **indexed_commits; 91static unsigned int indexed_commits_nr; 92static unsigned int indexed_commits_alloc; 93 94static voidindex_commit_for_bitmap(struct commit *commit) 95{ 96if(indexed_commits_nr >= indexed_commits_alloc) { 97 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 98REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 99} 100 101 indexed_commits[indexed_commits_nr++] = commit; 102} 103 104static void*get_delta(struct object_entry *entry) 105{ 106unsigned long size, base_size, delta_size; 107void*buf, *base_buf, *delta_buf; 108enum object_type type; 109 110 buf =read_sha1_file(entry->idx.oid.hash, &type, &size); 111if(!buf) 112die("unable to read%s",oid_to_hex(&entry->idx.oid)); 113 base_buf =read_sha1_file(entry->delta->idx.oid.hash, &type, 114&base_size); 115if(!base_buf) 116die("unable to read%s", 117oid_to_hex(&entry->delta->idx.oid)); 118 delta_buf =diff_delta(base_buf, base_size, 119 buf, size, &delta_size,0); 120if(!delta_buf || delta_size != entry->delta_size) 121die("delta 
size changed"); 122free(buf); 123free(base_buf); 124return delta_buf; 125} 126 127static unsigned longdo_compress(void**pptr,unsigned long size) 128{ 129 git_zstream stream; 130void*in, *out; 131unsigned long maxsize; 132 133git_deflate_init(&stream, pack_compression_level); 134 maxsize =git_deflate_bound(&stream, size); 135 136 in = *pptr; 137 out =xmalloc(maxsize); 138*pptr = out; 139 140 stream.next_in = in; 141 stream.avail_in = size; 142 stream.next_out = out; 143 stream.avail_out = maxsize; 144while(git_deflate(&stream, Z_FINISH) == Z_OK) 145;/* nothing */ 146git_deflate_end(&stream); 147 148free(in); 149return stream.total_out; 150} 151 152static unsigned longwrite_large_blob_data(struct git_istream *st,struct sha1file *f, 153const unsigned char*sha1) 154{ 155 git_zstream stream; 156unsigned char ibuf[1024*16]; 157unsigned char obuf[1024*16]; 158unsigned long olen =0; 159 160git_deflate_init(&stream, pack_compression_level); 161 162for(;;) { 163 ssize_t readlen; 164int zret = Z_OK; 165 readlen =read_istream(st, ibuf,sizeof(ibuf)); 166if(readlen == -1) 167die(_("unable to read%s"),sha1_to_hex(sha1)); 168 169 stream.next_in = ibuf; 170 stream.avail_in = readlen; 171while((stream.avail_in || readlen ==0) && 172(zret == Z_OK || zret == Z_BUF_ERROR)) { 173 stream.next_out = obuf; 174 stream.avail_out =sizeof(obuf); 175 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 176sha1write(f, obuf, stream.next_out - obuf); 177 olen += stream.next_out - obuf; 178} 179if(stream.avail_in) 180die(_("deflate error (%d)"), zret); 181if(readlen ==0) { 182if(zret != Z_STREAM_END) 183die(_("deflate error (%d)"), zret); 184break; 185} 186} 187git_deflate_end(&stream); 188return olen; 189} 190 191/* 192 * we are going to reuse the existing object data as is. make 193 * sure it is not corrupt. 

/*
 * we are going to reuse the existing object data as is.  make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		sha1write(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;

	if (!usable_delta) {
		if (entry->type == OBJ_BLOB &&
		    entry->size > big_file_threshold &&
		    (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_sha1_file(entry->idx.oid.hash, &type,
					     &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = entry->delta_size;
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = entry->delta_size;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
		 */
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * an additional 20 bytes for the base sha1.
		 */
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.oid.hash, 20);
		hdrlen += 20;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, entry->idx.oid.hash);
		close_istream(st);
	} else {
		sha1write(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = entry->in_pack;
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = entry->type;
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;

	if (entry->delta)
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry->size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
		error("corrupt packed object for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.oid.hash, 20);
		hdrlen += 20;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);
	reused++;
	return hdrlen + datalen;
}
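
/*
 * Illustration of the OFS_DELTA offset encoding used above: the
 * distance to the base is emitted base-128, most significant byte
 * first, with the high bit marking continuation bytes and each
 * continuation byte biased by one (the "--ofs").  E.g. ofs = 130
 * encodes as { 0x80, 0x02 }: decoding reads 0x80 -> 0, adds the
 * bias (1), then 0x02 -> (1 << 7) + 2 = 130.  The "+ 20" in the
 * limit checks reserves room for the trailing pack SHA-1.
 */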

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!entry->delta)
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (entry->delta->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (entry->delta->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!entry->in_pack)
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
		/* check_object() decided it for us ... */
		to_reuse = usable_delta;
		/* ... but pack split may override that */
	else if (entry->type != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (entry->delta)
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first. */
	if (e->delta) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, e->delta, offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			e->delta = NULL;
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	unsigned char peeled[20];
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, peeled)) {
		entry = packlist_find(&to_pack, peeled, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = e->delta_sibling; s; s = s->delta_sibling) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (e->delta_child) {
			add_to_order = 1;
			e = e->delta_child;
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (e->delta_sibling) {
				e = e->delta_sibling;
				continue;
			}
			/* go back to our parent node */
			e = e->delta;
			while (e && !e->delta_sibling) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = e->delta;
			}
			if (!e) {
				/* done - we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = e->delta_sibling;
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; root->delta; root = root->delta)
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}
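
/*
 * Example of the traversal above: for a delta family
 *
 *                 root
 *                /    \
 *            child1  child2
 *               |
 *          grandchild
 *
 * add_family_to_write_order() first walks up from any member to
 * "root", then emits root, child1, child2, grandchild - i.e. every
 * base strictly before the deltas that depend on it.
 */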

static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		objects[i].delta_child = NULL;
		objects[i].delta_sibling = NULL;
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!e->delta)
			continue;
		/* Mark me as the first child */
		e->delta_sibling = e->delta->delta_child;
		e->delta->delta_child = e;
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_COMMIT &&
		    objects[i].type != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}
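
/*
 * write_reused_pack() below copies a verbatim byte range out of an
 * existing packfile: everything between its header and the trailing
 * checksum, up to reuse_packfile_offset.  The pack-bitmap reuse
 * checks have already decided this prefix can be shipped as-is; note
 * the assert in write_pack_file() that only the --stdout path ever
 * reaches it.
 */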

static off_t write_reused_pack(struct sha1file *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - 20;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		sha1write(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct sha1file *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		unsigned char sha1[20];
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = sha1fd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			sha1close(f, sha1, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			sha1close(f, sha1, CSUM_FSYNC);
		} else {
			int fd = sha1close(f, sha1, 0);
			fixup_pack_header_footer(fd, sha1, pack_tmp_name,
						 nr_written, sha1, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(sha1);
				bitmap_writer_build_type_index(written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, sha1);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(sha1_to_hex(sha1));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const unsigned char *sha1,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, sha1, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}
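
/*
 * want_found_object() below returns a tri-state: 1 means "definitely
 * include", 0 means "definitely omit", and -1 means "this pack alone
 * cannot decide; the caller must keep scanning the other packs".
 */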

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * make us omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter we know we want the object in the generated pack.
	 * Otherwise, we signal "-1" at the end to tell the caller that we do
	 * not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (ignore_packed_keep && p->pack_local && p->pack_keep)
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const unsigned char *sha1,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	struct mru_entry *entry;
	int want;

	if (!exclude && local && has_loose_object_nonlocal(sha1))
		return 0;

	/*
	 * If we already know the pack object lives in, start checks from that
	 * pack - in the usual case when neither --local was given nor .keep files
	 * are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	for (entry = packed_git_mru->head; entry; entry = entry->next) {
		struct packed_git *p = entry->item;
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(sha1, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				mru_mark(packed_git_mru, entry);
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const unsigned char *sha1,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, sha1, index_pos);
	entry->hash = hash;
	if (type)
		entry->type = type;
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		entry->in_pack = found_pack;
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const unsigned char *sha1, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(sha1, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);

	display_progress(progress_state, nr_result);
	return 1;
}
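
/*
 * Every path that adds an object follows the same three steps seen in
 * add_object_entry() above: have_duplicate_entry() to dedupe against
 * to_pack, want_object_in_pack() to apply --local/.keep policy
 * (walking packed_git_mru so recently hit packs are consulted first),
 * and finally create_object_entry().  The bitmap-driven variant below
 * repeats that sequence with the pack and offset already known.
 */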

static int add_object_entry_from_bitmap(const unsigned char *sha1,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, 0, &pack, &offset))
		return 0;

	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

	display_progress(progress_state, nr_result);
	return 1;
}

struct pbase_tree_cache {
	unsigned char sha1[20];
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
	return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;
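
/*
 * pbase_tree_get() below treats the 256-slot table as a small
 * open-addressed hash: the first byte of the SHA-1 picks the home
 * slot, and lookups linearly probe at most 8 consecutive slots.
 * Entries that cannot be cached (no free or evictable slot) are
 * returned with ->temporary set so pbase_tree_put() frees them.
 */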

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(sha1);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !hashcmp(ent->sha1, sha1)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_sha1_file(sha1, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	hashcpy(nent->sha1, sha1);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid->hash,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid->hash);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = (hi + lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}
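
/*
 * Note that done_pbase_paths is kept sorted in *descending* hash
 * order (a smaller element moves the upper bound down), and a miss
 * is encoded as -insertion_point - 1 so check_pbase_path() below can
 * both test membership and know where to splice the new hash in.
 */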

static int check_pbase_path(unsigned hash)
{
	int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		memmove(done_pbase_paths + pos + 1,
			done_pbase_paths + pos,
			(done_pbase_paths_num - pos - 1) * sizeof(unsigned));
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(unsigned char *sha1)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	unsigned char tree_sha1[20];

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!hashcmp(it->pcache.sha1, tree_sha1)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	hashcpy(it->pcache.sha1, tree_sha1);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *this = it;
		it = this->next;
		free(this->pcache.tree_data);
		free(this);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		FREE_AND_NULL(pbase_tree_cache[i]);
	}

	FREE_AND_NULL(done_pbase_paths);
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}
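
/*
 * Broadly, the preferred-base machinery above serves thin packs:
 * objects the receiving side is known to already have are added with
 * exclude=1, which marks them preferred_base.  They may serve as
 * delta bases but are never themselves written into the output pack.
 */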

static void check_object(struct object_entry *entry)
{
	if (entry->in_pack) {
		struct packed_git *p = entry->in_pack;
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &entry->in_pack_type,
						   &entry->size);
		if (used == 0)
			goto give_up;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			entry->type = entry->in_pack_type;
			entry->in_pack_header_size = used;
			if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + 20;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      oid_to_hex(&entry->idx.oid));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bound for %s",
				      oid_to_hex(&entry->idx.oid));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			entry->type = entry->in_pack_type;
			entry->delta = base_entry;
			entry->delta_size = entry->size;
			entry->delta_sibling = base_entry->delta_child;
			base_entry->delta_child = entry;
			unuse_pack(&w_curs);
			return;
		}

		if (entry->type) {
			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			entry->size = get_size_from_delta(p, &w_curs,
					entry->in_pack_offset + entry->in_pack_header_size);
			if (entry->size == 0)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out about the object type
		 * at this point...
		 */
		give_up:
		unuse_pack(&w_curs);
	}

	entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
	/*
	 * The error condition is checked in prepare_pack().  This is
	 * to permit a missing preferred base object to be ignored
	 * as a preferred base.  Doing so can result in a larger
	 * pack file, but the transfer will still take place.
	 */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	/* avoid filesystem thrashing with loose objects */
	if (!a->in_pack && !b->in_pack)
		return oidcmp(&a->idx.oid, &b->idx.oid);

	if (a->in_pack < b->in_pack)
		return -1;
	if (a->in_pack > b->in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
	       (a->in_pack_offset > b->in_pack_offset);
}
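
/*
 * Sorting by (pack, offset) before calling check_object() means each
 * pack gets read front to back in one sequential sweep; objects still
 * loose (in_pack == NULL) group together and are visited in object-id
 * order instead.
 */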

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	struct object_entry **p = &entry->delta->delta_child;
	struct object_info oi = OBJECT_INFO_INIT;

	while (*p) {
		if (*p == entry)
			*p = (*p)->delta_sibling;
		else
			p = &(*p)->delta_sibling;
	}
	entry->delta = NULL;
	entry->depth = 0;

	oi.sizep = &entry->size;
	oi.typep = &entry->type;
	if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
		/*
		 * We failed to get the info from this pack for some reason;
		 * fall back to sha1_object_info, which may find another copy.
		 * And if that fails, the error will be recorded in entry->type
		 * and dealt with in prepare_pack().
		 */
		entry->type = sha1_object_info(entry->idx.oid.hash,
					       &entry->size);
	}
}

/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{
	/*
	 * The actual depth of each object we will write is stored as an int,
	 * as it cannot exceed our int "depth" limit. But before we break
	 * chains based on that limit, we may potentially go as deep as the
	 * number of objects, which is elsewhere bounded to a uint32_t.
	 */
	uint32_t total_depth;
	struct object_entry *cur, *next;

	for (cur = entry, total_depth = 0;
	     cur;
	     cur = cur->delta, total_depth++) {
		if (cur->dfs_state == DFS_DONE) {
			/*
			 * We've already seen this object and know it isn't
			 * part of a cycle. We do need to append its depth
			 * to our count.
			 */
			total_depth += cur->depth;
			break;
		}

		/*
		 * We break cycles before looping, so an ACTIVE state (or any
		 * other cruft which made its way into the state variable)
		 * is a bug.
		 */
		if (cur->dfs_state != DFS_NONE)
			die("BUG: confusing delta dfs state in first pass: %d",
			    cur->dfs_state);

		/*
		 * Now we know this is the first time we've seen the object. If
		 * it's not a delta, we're done traversing, but we'll mark it
		 * done to save time on future traversals.
		 */
		if (!cur->delta) {
			cur->dfs_state = DFS_DONE;
			break;
		}

		/*
		 * Mark ourselves as active and see if the next step causes
		 * us to cycle to another active object. It's important to do
		 * this _before_ we loop, because it impacts where we make the
		 * cut, and thus how our total_depth counter works.
		 * E.g., We may see a partial loop like:
		 *
		 *   A -> B -> C -> D -> B
		 *
		 * Cutting B->C breaks the cycle. But now the depth of A is
		 * only 1, and our total_depth counter is at 3. The size of the
		 * error is always one less than the size of the cycle we
		 * broke.  Commits C and D were "lost" from A's chain.
		 *
		 * If we instead cut D->B, then the depth of A is correct at 3.
		 * We keep all commits in the chain that we examined.
		 */
		cur->dfs_state = DFS_ACTIVE;
		if (cur->delta->dfs_state == DFS_ACTIVE) {
			drop_reused_delta(cur);
			cur->dfs_state = DFS_DONE;
			break;
		}
	}

	/*
	 * And now that we've gone all the way to the bottom of the chain, we
	 * need to clear the active flags and set the depth fields as
	 * appropriate. Unlike the loop above, which can quit when it drops a
	 * delta, we need to keep going to look for more depth cuts. So we need
	 * an extra "next" pointer to keep going after we reset cur->delta.
	 */
	for (cur = entry; cur; cur = next) {
		next = cur->delta;

		/*
		 * We should have a chain of zero or more ACTIVE states down to
		 * a final DONE. We can quit after the DONE, because either it
		 * has no bases, or we've already handled them in a previous
		 * call.
		 */
		if (cur->dfs_state == DFS_DONE)
			break;
		else if (cur->dfs_state != DFS_ACTIVE)
			die("BUG: confusing delta dfs state in second pass: %d",
			    cur->dfs_state);

		/*
		 * If the total_depth is more than depth, then we need to snip
		 * the chain into two or more smaller chains that don't exceed
		 * the maximum depth. Most of the resulting chains will contain
		 * (depth + 1) entries (i.e., depth deltas plus one base), and
		 * the last chain (i.e., the one containing entry) will contain
		 * whatever entries are left over, namely
		 * (total_depth % (depth + 1)) of them.
		 *
		 * Since we are iterating towards decreasing depth, we need to
		 * decrement total_depth as we go, and we need to write to the
		 * entry what its final depth will be after all of the
		 * snipping. Since we're snipping into chains of length (depth
		 * + 1) entries, the final depth of an entry will be its
		 * original depth modulo (depth + 1). Any time we encounter an
		 * entry whose final depth is supposed to be zero, we snip it
		 * from its delta base, thereby making it so.
		 */
		cur->depth = (total_depth--) % (depth + 1);
		if (!cur->depth)
			drop_reused_delta(cur);

		cur->dfs_state = DFS_DONE;
	}
}
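
/*
 * Worked example of the snipping arithmetic above: with --depth=3 and
 * a reused chain E0 -> E1 -> E2 -> E3 -> E4 -> E5 (E5 the base), the
 * first pass computes total_depth = 5.  The second pass then assigns
 * depths 5%4=1, 4%4=0, 3%4=3, 2, 1; the zero at E1 snips E1 from E2,
 * leaving E0->E1 and E2->E3->E4->E5, both within the limit.
 */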

static void get_object_details(void)
{
	uint32_t i;
	struct object_entry **sorted_by_offset;

	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
	for (i = 0; i < to_pack.nr_objects; i++)
		sorted_by_offset[i] = to_pack.objects + i;
	QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = sorted_by_offset[i];
		check_object(entry);
		if (big_file_threshold < entry->size)
			entry->no_try_delta = 1;
	}

	/*
	 * This must happen in a second pass, since we rely on the delta
	 * information for the whole list being completed.
	 */
	for (i = 0; i < to_pack.nr_objects; i++)
		break_delta_chains(&to_pack.objects[i]);

	free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas are therefore the oldest objects which are
 * less susceptible to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	if (a->type > b->type)
		return -1;
	if (a->type < b->type)
		return 1;
	if (a->hash > b->hash)
		return -1;
	if (a->hash < b->hash)
		return 1;
	if (a->preferred_base > b->preferred_base)
		return -1;
	if (a->preferred_base < b->preferred_base)
		return 1;
	if (a->size > b->size)
		return -1;
	if (a->size < b->size)
		return 1;
	return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
	struct object_entry *entry;
	void *data;
	struct delta_index *index;
	unsigned depth;
};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{
	if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
		return 0;

	if (delta_size < cache_max_small_delta_size)
		return 1;

	/* cache delta, if objects are large enough compared to delta size */
	if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
		return 1;

	return 0;
}

#ifndef NO_PTHREADS

static pthread_mutex_t read_mutex;
#define read_lock()		pthread_mutex_lock(&read_mutex)
#define read_unlock()		pthread_mutex_unlock(&read_mutex)

static pthread_mutex_t cache_mutex;
#define cache_lock()		pthread_mutex_lock(&cache_mutex)
#define cache_unlock()		pthread_mutex_unlock(&cache_mutex)

static pthread_mutex_t progress_mutex;
#define progress_lock()		pthread_mutex_lock(&progress_mutex)
#define progress_unlock()	pthread_mutex_unlock(&progress_mutex)

#else

#define read_lock()		(void)0
#define read_unlock()		(void)0
#define cache_lock()		(void)0
#define cache_unlock()		(void)0
#define progress_lock()		(void)0
#define progress_unlock()	(void)0

#endif
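
/*
 * Locking discipline for the delta search: read_mutex serializes
 * object reads (and pack memory release), cache_mutex guards the
 * shared delta_cache_size accounting, and progress_mutex protects the
 * work-list pointers and progress counters shared with the main
 * thread.  With NO_PTHREADS all three collapse to no-ops.
 */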

static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (trg_entry->type != src_entry->type)
		return -1;

	/*
	 * We do not bother to try a delta that we discarded on an
	 * earlier try, but only when reusing delta data.  Note that
	 * src_entry that is marked as the preferred_base should always
	 * be considered, as even if we produce a suboptimal delta against
	 * it, we will still save the transfer cost, as we already know
	 * the other side has it and we won't send src_entry at all.
	 */
	if (reuse_delta && trg_entry->in_pack &&
	    trg_entry->in_pack == src_entry->in_pack &&
	    !src_entry->preferred_base &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics. */
	trg_size = trg_entry->size;
	if (!trg_entry->delta) {
		max_size = trg_size/2 - 20;
		ref_depth = 1;
	} else {
		max_size = trg_entry->delta_size;
		ref_depth = trg->depth;
	}
	max_size = (uint64_t)max_size * (max_depth - src->depth) /
						(max_depth - ref_depth + 1);
	if (max_size == 0)
		return 0;
	src_size = src_entry->size;
	sizediff = src_size < trg_size ? trg_size - src_size : 0;
	if (sizediff >= max_size)
		return 0;
	if (trg_size < src_size / 32)
		return 0;

	/* Load data if not already done */
	if (!trg->data) {
		read_lock();
		trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
					   &sz);
		read_unlock();
		if (!trg->data)
			die("object %s cannot be read",
			    oid_to_hex(&trg_entry->idx.oid));
		if (sz != trg_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    oid_to_hex(&trg_entry->idx.oid), sz,
			    trg_size);
		*mem_usage += sz;
	}
	if (!src->data) {
		read_lock();
		src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
					   &sz);
		read_unlock();
		if (!src->data) {
			if (src_entry->preferred_base) {
				static int warned = 0;
				if (!warned++)
					warning("object %s cannot be read",
						oid_to_hex(&src_entry->idx.oid));
				/*
				 * Those objects are not included in the
				 * resulting pack.  Be resilient and ignore
				 * them if they can't be read, in case the
				 * pack could be created nevertheless.
				 */
				return 0;
			}
			die("object %s cannot be read",
			    oid_to_hex(&src_entry->idx.oid));
		}
		if (sz != src_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    oid_to_hex(&src_entry->idx.oid), sz,
			    src_size);
		*mem_usage += sz;
	}
	if (!src->index) {
		src->index = create_delta_index(src->data, src_size);
		if (!src->index) {
			static int warned = 0;
			if (!warned++)
				warning("suboptimal pack - out of memory");
			return 0;
		}
		*mem_usage += sizeof_delta_index(src->index);
	}

	delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
	if (!delta_buf)
		return 0;

	if (trg_entry->delta) {
		/* Prefer only shallower same-sized deltas. */
		if (delta_size == trg_entry->delta_size &&
		    src->depth + 1 >= trg->depth) {
			free(delta_buf);
			return 0;
		}
	}

	/*
	 * Handle memory allocation outside of the cache
	 * accounting lock.  Compiler will optimize the strangeness
	 * away when NO_PTHREADS is defined.
	 */
	free(trg_entry->delta_data);
	cache_lock();
	if (trg_entry->delta_data) {
		delta_cache_size -= trg_entry->delta_size;
		trg_entry->delta_data = NULL;
	}
	if (delta_cacheable(src_size, trg_size, delta_size)) {
		delta_cache_size += delta_size;
		cache_unlock();
		trg_entry->delta_data = xrealloc(delta_buf, delta_size);
	} else {
		cache_unlock();
		free(delta_buf);
	}

	trg_entry->delta = src_entry;
	trg_entry->delta_size = delta_size;
	trg->depth = src->depth + 1;

	return 1;
}

static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
	struct object_entry *child = me->delta_child;
	unsigned int m = n;
	while (child) {
		unsigned int c = check_delta_limit(child, n + 1);
		if (m < c)
			m = c;
		child = child->delta_sibling;
	}
	return m;
}

static unsigned long free_unpacked(struct unpacked *n)
{
	unsigned long freed_mem = sizeof_delta_index(n->index);
	free_delta_index(n->index);
	n->index = NULL;
	if (n->data) {
		freed_mem += n->entry->size;
		FREE_AND_NULL(n->data);
	}
	n->entry = NULL;
	n->depth = 0;
	return freed_mem;
}
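
/*
 * find_deltas() below keeps the last `window` candidates in a ring
 * buffer of struct unpacked (idx is the next slot, count the number
 * of occupied ones).  Each new object is compared against every live
 * window entry via try_delta(), and window_memory_limit evicts the
 * oldest entries when the cached data and indexes grow too large.
 */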

static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{
	uint32_t i, idx = 0, count = 0;
	struct unpacked *array;
	unsigned long mem_usage = 0;

	array = xcalloc(window, sizeof(struct unpacked));

	for (;;) {
		struct object_entry *entry;
		struct unpacked *n = array + idx;
		int j, max_depth, best_base = -1;

		progress_lock();
		if (!*list_size) {
			progress_unlock();
			break;
		}
		entry = *list++;
		(*list_size)--;
		if (!entry->preferred_base) {
			(*processed)++;
			display_progress(progress_state, *processed);
		}
		progress_unlock();

		mem_usage -= free_unpacked(n);
		n->entry = entry;

		while (window_memory_limit &&
		       mem_usage > window_memory_limit &&
		       count > 1) {
			uint32_t tail = (idx + window - count) % window;
			mem_usage -= free_unpacked(array + tail);
			count--;
		}

		/* We do not compute delta to *create* objects we are not
		 * going to pack.
		 */
		if (entry->preferred_base)
			goto next;

		/*
		 * If the current object is at pack edge, take the depth the
		 * objects that depend on the current object into account
		 * otherwise they would become too deep.
		 */
		max_depth = depth;
		if (entry->delta_child) {
			max_depth -= check_delta_limit(entry, 0);
			if (max_depth <= 0)
				goto next;
		}

		j = window;
		while (--j > 0) {
			int ret;
			uint32_t other_idx = idx + j;
			struct unpacked *m;
			if (other_idx >= window)
				other_idx -= window;
			m = array + other_idx;
			if (!m->entry)
				break;
			ret = try_delta(n, m, max_depth, &mem_usage);
			if (ret < 0)
				break;
			else if (ret > 0)
				best_base = other_idx;
		}

		/*
		 * If we decided to cache the delta data, then it is best
		 * to compress it right away.  First because we have to do
		 * it anyway, and doing it here while we're threaded will
		 * save a lot of time in the non threaded write phase,
		 * as well as allow for caching more deltas within
		 * the same cache size limit.
		 * ...
		 * But only if not writing to stdout, since in that case
		 * the network is most likely throttling writes anyway,
		 * and therefore it is best to go to the write phase ASAP
		 * instead, as we can afford spending more time compressing
		 * between writes at that moment.
		 */
		if (entry->delta_data && !pack_to_stdout) {
			entry->z_delta_size = do_compress(&entry->delta_data,
							  entry->delta_size);
			cache_lock();
			delta_cache_size -= entry->delta_size;
			delta_cache_size += entry->z_delta_size;
			cache_unlock();
		}

		/* if we made n a delta, and if n is already at max
		 * depth, leaving it in the window is pointless.  we
		 * should evict it first.
		 */
		if (entry->delta && max_depth <= n->depth)
			continue;

		/*
		 * Move the best delta base up in the window, after the
		 * currently deltified object, to keep it longer.  It will
		 * be the first base object to be attempted next.
		 */
		if (entry->delta) {
			struct unpacked swap = array[best_base];
			int dist = (window + idx - best_base) % window;
			int dst = best_base;
			while (dist--) {
				int src = (dst + 1) % window;
				array[dst] = array[src];
				dst = src;
			}
			array[dst] = swap;
		}

		next:
		idx++;
		if (count + 1 < window)
			count++;
		if (idx >= window)
			idx = 0;
	}

	for (i = 0; i < window; ++i) {
		free_delta_index(array[i].index);
		free(array[i].data);
	}
	free(array);
}

#ifndef NO_PTHREADS

static void try_to_free_from_threads(size_t size)
{
	read_lock();
	release_pack_memory(size);
	read_unlock();
}

static try_to_free_t old_try_to_free_routine;

/*
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

static pthread_cond_t progress_cond;

/*
 * Mutex and conditional variable can't be statically-initialized on Windows.
 */
static void init_threaded_search(void)
{
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}
static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf(stderr, "Delta compression using up to %d threads.\n",
			delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2 * window && i + 1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size-1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2 * window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif

static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(oid);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die("unable to pack objects reachable from tag %s",
			    oid_to_hex(oid));

		add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") &&	/* is a tag? */
	    !peel_ref(path, peeled.hash) &&	/* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL))	/* object packed? */
		add_tag_chain(oid);
	return 0;
}
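/*
 * Decide which objects still need delta compression and run the
 * (possibly threaded) delta search over them.
 */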
static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (entry->delta)
			/*
			 * This happens if we decided to reuse an existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (entry->size < 50)
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (entry->type < 0)
				die("unable to get type of object %s",
				    oid_to_hex(&entry->idx.oid));
		} else {
			if (entry->type < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}
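/*
 * Parse the pack.* configuration variables; anything not handled here
 * falls through to git_default_config().
 */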
static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		max_delta_cache_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
		return 0;	/* added for consistency with the other branches */
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1) {
			warning("no threads support, ignoring %s", k);
			delta_search_threads = 0;
		}
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}

static void read_object_list_from_stdin(void)
{
	char line[40 + 1 + PATH_MAX + 2];
	unsigned char sha1[20];

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_sha1_hex(line+1, sha1))
				die("expected edge sha1, got garbage:\n %s",
				    line);
			add_preferred_base(sha1);
			continue;
		}
		if (get_sha1_hex(line, sha1))
			die("expected sha1, got garbage:\n %s", line);

		add_preferred_base_object(line+41);
		add_object_entry(sha1, 0, line+41, 0);
	}
}
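/*
 * Flag bit set on objects that the revision-walk callbacks below have
 * already added to to_pack; add_objects_in_unpacked_packs() checks it
 * to avoid adding the same object twice.
 */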
#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(obj->oid.hash, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(commit->object.oid.hash);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	int alloc;
	int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in offset order, so as to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = packed_git; p; p = p->next) {
		const unsigned char *sha1;
		struct object *o;

		if (!p->pack_local || p->pack_keep)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			o = lookup_unknown_object(sha1);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(o->oid.hash, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data)
{
	enum object_type type = sha1_object_info(oid->hash, NULL);

	if (type < 0) {
		warning("loose object at %s could not be examined", path);
		return 0;
	}

	add_object_entry(oid->hash, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}
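/*
 * Check whether the object lives in a kept or non-local pack.  The pack
 * that answered last time is remembered in "last_found" and tried
 * first, since consecutive lookups tend to hit the same pack.
 */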
static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found : packed_git;

	while (p) {
		if ((!p->pack_local || p->pack_keep) &&
		    find_pack_entry_one(sha1, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = packed_git;
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

/*
 * Store a list of sha1s that should not be discarded because they are
 * either written too recently, or are reachable from another object
 * that was.
 *
 * This is filled by get_object_list.
 */
static struct oid_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    timestamp_t mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (oid_array_lookup(&recent_objects, oid) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	struct object_id oid;

	for (p = packed_git; p; p = p->next) {
		if (!p->pack_local || p->pack_keep)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			if (!packlist_find(&to_pack, oid.hash, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(oid.hash) &&
			    !loosened_object_can_be_discarded(&oid, p->mtime))
				if (force_object_loose(oid.hash, p->mtime))
					die("unable to force loose object");
		}
	}
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout &&
	       allow_ofs_delta &&
	       !ignore_packed_keep &&
	       (!local || !have_non_local_packs) &&
	       !incremental;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (prepare_bitmap_walk(revs) < 0)
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	oid_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	oid_array_append(&recent_objects, &commit->object.oid);
}
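/*
 * Run an internal rev-list over the revision arguments read from stdin
 * and collect the resulting objects, either via bitmaps or a full
 * traversal, honoring the various *-unreachable options.
 */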
static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* make sure shallows are read */
	is_repository_shallow();

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				struct object_id oid;
				if (get_oid_hex(line + 10, &oid))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(&oid);
				use_bitmap_index = 0;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
			die("bad revision '%s'", line);
	}

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(&revs, show_edge);
	traverse_commit_list(&revs, show_commit, show_object, NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die("unable to add recent objects");
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	oid_array_clear(&recent_objects);
}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{
	char *c;
	const char *val = arg;
	pack_idx_opts.version = strtoul(val, &c, 10);
	if (pack_idx_opts.version > 2)
		die(_("unsupported index version %s"), val);
	if (*c == ',' && c[1])
		pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
	if (*c || pack_idx_opts.off32_limit & 0x80000000)
		die(_("bad index version '%s'"), val);
	return 0;
}

static int option_parse_unpack_unreachable(const struct option *opt,
					   const char *arg, int unset)
{
	if (unset) {
		unpack_unreachable = 0;
		unpack_unreachable_expiration = 0;
	} else {
		unpack_unreachable = 1;
		if (arg)
			unpack_unreachable_expiration = approxidate(arg);
	}
	return 0;
}
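/*
 * Entry point: parse command-line options and configuration, collect
 * the list of objects to pack, and write out the resulting packfile.
 */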
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();
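	/*
	 * When built without pthreads, warn if more than one thread was
	 * requested; the delta search then runs single-threaded, since
	 * ll_find_deltas falls back to plain find_deltas above.
	 */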
#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed
	 *   objects are packed in suboptimal order).
	 *
	 * - to use the more robust pack-generation codepath (avoiding
	 *   possible bugs in the bitmap code and possible bitmap index
	 *   corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}