1#include"builtin.h" 2#include"cache.h" 3#include"attr.h" 4#include"object.h" 5#include"blob.h" 6#include"commit.h" 7#include"tag.h" 8#include"tree.h" 9#include"delta.h" 10#include"pack.h" 11#include"pack-revindex.h" 12#include"csum-file.h" 13#include"tree-walk.h" 14#include"diff.h" 15#include"revision.h" 16#include"list-objects.h" 17#include"pack-objects.h" 18#include"progress.h" 19#include"refs.h" 20#include"streaming.h" 21#include"thread-utils.h" 22#include"pack-bitmap.h" 23#include"reachable.h" 24#include"sha1-array.h" 25#include"argv-array.h" 26#include"mru.h" 27 28static const char*pack_usage[] = { 29N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 30N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 31 NULL 32}; 33 34/* 35 * Objects we are going to pack are collected in the `to_pack` structure. 36 * It contains an array (dynamically expanded) of the object data, and a map 37 * that can resolve SHA1s to their position in the array. 38 */ 39static struct packing_data to_pack; 40 41static struct pack_idx_entry **written_list; 42static uint32_t nr_result, nr_written; 43 44static int non_empty; 45static int reuse_delta =1, reuse_object =1; 46static int keep_unreachable, unpack_unreachable, include_tag; 47static timestamp_t unpack_unreachable_expiration; 48static int pack_loose_unreachable; 49static int local; 50static int have_non_local_packs; 51static int incremental; 52static int ignore_packed_keep; 53static int allow_ofs_delta; 54static struct pack_idx_option pack_idx_opts; 55static const char*base_name; 56static int progress =1; 57static int window =10; 58static unsigned long pack_size_limit; 59static int depth =50; 60static int delta_search_threads; 61static int pack_to_stdout; 62static int num_preferred_base; 63static struct progress *progress_state; 64 65static struct packed_git *reuse_packfile; 66static uint32_t reuse_packfile_objects; 67static off_t reuse_packfile_offset; 68 69static int use_bitmap_index_default =1; 70static int use_bitmap_index = -1; 71static int write_bitmap_index; 72static uint16_t write_bitmap_options; 73 74static unsigned long delta_cache_size =0; 75static unsigned long max_delta_cache_size =256*1024*1024; 76static unsigned long cache_max_small_delta_size =1000; 77 78static unsigned long window_memory_limit =0; 79 80/* 81 * stats 82 */ 83static uint32_t written, written_delta; 84static uint32_t reused, reused_delta; 85 86/* 87 * Indexed commits 88 */ 89static struct commit **indexed_commits; 90static unsigned int indexed_commits_nr; 91static unsigned int indexed_commits_alloc; 92 93static voidindex_commit_for_bitmap(struct commit *commit) 94{ 95if(indexed_commits_nr >= indexed_commits_alloc) { 96 indexed_commits_alloc = (indexed_commits_alloc +32) *2; 97REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 98} 99 100 indexed_commits[indexed_commits_nr++] = commit; 101} 102 103static void*get_delta(struct object_entry *entry) 104{ 105unsigned long size, base_size, delta_size; 106void*buf, *base_buf, *delta_buf; 107enum object_type type; 108 109 buf =read_sha1_file(entry->idx.oid.hash, &type, &size); 110if(!buf) 111die("unable to read%s",oid_to_hex(&entry->idx.oid)); 112 base_buf =read_sha1_file(entry->delta->idx.oid.hash, &type, 113&base_size); 114if(!base_buf) 115die("unable to read%s", 116oid_to_hex(&entry->delta->idx.oid)); 117 delta_buf =diff_delta(base_buf, base_size, 118 buf, size, &delta_size,0); 119if(!delta_buf || delta_size != entry->delta_size) 120die("delta size changed"); 
121free(buf); 122free(base_buf); 123return delta_buf; 124} 125 126static unsigned longdo_compress(void**pptr,unsigned long size) 127{ 128 git_zstream stream; 129void*in, *out; 130unsigned long maxsize; 131 132git_deflate_init(&stream, pack_compression_level); 133 maxsize =git_deflate_bound(&stream, size); 134 135 in = *pptr; 136 out =xmalloc(maxsize); 137*pptr = out; 138 139 stream.next_in = in; 140 stream.avail_in = size; 141 stream.next_out = out; 142 stream.avail_out = maxsize; 143while(git_deflate(&stream, Z_FINISH) == Z_OK) 144;/* nothing */ 145git_deflate_end(&stream); 146 147free(in); 148return stream.total_out; 149} 150 151static unsigned longwrite_large_blob_data(struct git_istream *st,struct sha1file *f, 152const unsigned char*sha1) 153{ 154 git_zstream stream; 155unsigned char ibuf[1024*16]; 156unsigned char obuf[1024*16]; 157unsigned long olen =0; 158 159git_deflate_init(&stream, pack_compression_level); 160 161for(;;) { 162 ssize_t readlen; 163int zret = Z_OK; 164 readlen =read_istream(st, ibuf,sizeof(ibuf)); 165if(readlen == -1) 166die(_("unable to read%s"),sha1_to_hex(sha1)); 167 168 stream.next_in = ibuf; 169 stream.avail_in = readlen; 170while((stream.avail_in || readlen ==0) && 171(zret == Z_OK || zret == Z_BUF_ERROR)) { 172 stream.next_out = obuf; 173 stream.avail_out =sizeof(obuf); 174 zret =git_deflate(&stream, readlen ?0: Z_FINISH); 175sha1write(f, obuf, stream.next_out - obuf); 176 olen += stream.next_out - obuf; 177} 178if(stream.avail_in) 179die(_("deflate error (%d)"), zret); 180if(readlen ==0) { 181if(zret != Z_STREAM_END) 182die(_("deflate error (%d)"), zret); 183break; 184} 185} 186git_deflate_end(&stream); 187return olen; 188} 189 190/* 191 * we are going to reuse the existing object data as is. make 192 * sure it is not corrupt. 
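 * (check_pack_inflate() below re-inflates the bytes without keeping the
 * output, verifying that exactly `len` input bytes produce exactly
 * `expect` output bytes; a cheap sanity check, not a full hash verify.)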
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		sha1write(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
					   unsigned long limit, int usable_delta)
{
	unsigned long size, datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;
	enum object_type type;
	void *buf;
	struct git_istream *st = NULL;

	if (!usable_delta) {
		if (entry->type == OBJ_BLOB &&
		    entry->size > big_file_threshold &&
		    (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
			buf = NULL;
		else {
			buf = read_sha1_file(entry->idx.oid.hash, &type,
					     &size);
			if (!buf)
				die(_("unable to read %s"),
				    oid_to_hex(&entry->idx.oid));
		}
		/*
		 * make sure no cached delta data remains from a
		 * previous attempt before a pack split occurred.
		 */
		FREE_AND_NULL(entry->delta_data);
		entry->z_delta_size = 0;
	} else if (entry->delta_data) {
		size = entry->delta_size;
		buf = entry->delta_data;
		entry->delta_data = NULL;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	} else {
		buf = get_delta(entry);
		size = entry->delta_size;
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	}

	if (st)	/* large blob case, just assume we don't compress well */
		datalen = size;
	else if (entry->z_delta_size)
		datalen = entry->z_delta_size;
	else
		datalen = do_compress(&buf, size);

	/*
	 * The object header is a byte of 'type' followed by zero or
	 * more bytes of length.
	 */
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, size);

	if (type == OBJ_OFS_DELTA) {
		/*
		 * Deltas with relative base contain an additional
		 * encoding of the relative offset for the delta
		 * base from this object's position in the pack.
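		 * The offset is written as a big-endian base-128 varint
		 * with an off-by-one twist in each continuation byte.
		 * As a worked illustration: ofs = 1234 encodes to the two
		 * bytes 0x88 0x52, which decode as ((0x08 + 1) << 7) + 0x52
		 * == 1234; offsets 0..127 take a single byte.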
		 */
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
	} else if (type == OBJ_REF_DELTA) {
		/*
		 * Deltas with a base reference contain
		 * an additional 20 bytes for the base sha1.
		 */
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.oid.hash, 20);
		hdrlen += 20;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			if (st)
				close_istream(st);
			free(buf);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	if (st) {
		datalen = write_large_blob_data(st, f, entry->idx.oid.hash);
		close_istream(st);
	} else {
		sha1write(f, buf, datalen);
		free(buf);
	}

	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
				unsigned long limit, int usable_delta)
{
	struct packed_git *p = entry->in_pack;
	struct pack_window *w_curs = NULL;
	struct revindex_entry *revidx;
	off_t offset;
	enum object_type type = entry->type;
	off_t datalen;
	unsigned char header[MAX_PACK_OBJECT_HEADER],
		      dheader[MAX_PACK_OBJECT_HEADER];
	unsigned hdrlen;

	if (entry->delta)
		type = (allow_ofs_delta && entry->delta->idx.offset) ?
			OBJ_OFS_DELTA : OBJ_REF_DELTA;
	hdrlen = encode_in_pack_object_header(header, sizeof(header),
					      type, entry->size);

	offset = entry->in_pack_offset;
	revidx = find_pack_revindex(p, offset);
	datalen = revidx[1].offset - offset;
	if (!pack_to_stdout && p->index_version > 1 &&
	    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
		error("bad packed object CRC for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	offset += entry->in_pack_header_size;
	datalen -= entry->in_pack_header_size;

	if (!pack_to_stdout && p->index_version == 1 &&
	    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
		error("corrupt packed object for %s",
		      oid_to_hex(&entry->idx.oid));
		unuse_pack(&w_curs);
		return write_no_reuse_object(f, entry, limit, usable_delta);
	}

	if (type == OBJ_OFS_DELTA) {
		off_t ofs = entry->idx.offset - entry->delta->idx.offset;
		unsigned pos = sizeof(dheader) - 1;
		dheader[pos] = ofs & 127;
		while (ofs >>= 7)
			dheader[--pos] = 128 | (--ofs & 127);
		if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, dheader + pos, sizeof(dheader) - pos);
		hdrlen += sizeof(dheader) - pos;
		reused_delta++;
	} else if (type == OBJ_REF_DELTA) {
		if (limit && hdrlen + 20 + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
		sha1write(f, entry->delta->idx.oid.hash, 20);
		hdrlen += 20;
		reused_delta++;
	} else {
		if (limit && hdrlen + datalen + 20 >= limit) {
			unuse_pack(&w_curs);
			return 0;
		}
		sha1write(f, header, hdrlen);
	}
	copy_pack_data(f, p, &w_curs, offset, datalen);
	unuse_pack(&w_curs);

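	/*
	 * Account a verbatim reuse; "reused_delta" above counts the
	 * subset that were deltas.  Both feed the final statistics.
	 */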
	reused++;
	return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
			  struct object_entry *entry,
			  off_t write_offset)
{
	unsigned long limit;
	off_t len;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!entry->delta)
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (entry->delta->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (entry->delta->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!entry->in_pack)
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
		/* check_object() decided it for us ... */
		to_reuse = usable_delta;
		/* ... but pack split may override that */
	else if (entry->type != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (entry->delta)
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse)
		len = write_no_reuse_object(f, entry, limit, usable_delta);
	else
		len = write_reuse_object(f, entry, limit, usable_delta);
	if (!len)
		return 0;

	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return len;
}

enum write_one_status {
	WRITE_ONE_SKIP = -1, /* already written */
	WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
	WRITE_ONE_WRITTEN = 1, /* normal */
	WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
				       struct object_entry *e,
				       off_t *offset)
{
	off_t size;
	int recursing;

	/*
	 * we set offset to 1 (which is an impossible value) to mark
	 * the fact that this object is involved in "write its base
	 * first before writing a deltified object" recursion.
	 */
	recursing = (e->idx.offset == 1);
	if (recursing) {
		warning("recursive delta detected for object %s",
			oid_to_hex(&e->idx.oid));
		return WRITE_ONE_RECURSIVE;
	} else if (e->idx.offset || e->preferred_base) {
		/* offset is non zero if object is written already. */
		return WRITE_ONE_SKIP;
	}

	/* if we are deltified, write out base object first.
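	 * An OBJ_OFS_DELTA can only refer backwards in the pack, so its
	 * base must already sit at a smaller offset when the delta is
	 * written.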
	 */
	if (e->delta) {
		e->idx.offset = 1; /* now recurse */
		switch (write_one(f, e->delta, offset)) {
		case WRITE_ONE_RECURSIVE:
			/* we cannot depend on this one */
			e->delta = NULL;
			break;
		default:
			break;
		case WRITE_ONE_BREAK:
			e->idx.offset = recursing;
			return WRITE_ONE_BREAK;
		}
	}

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = recursing;
		return WRITE_ONE_BREAK;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
		       void *cb_data)
{
	unsigned char peeled[20];
	struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, peeled)) {
		entry = packlist_find(&to_pack, peeled, NULL);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = e->delta_sibling; s; s = s->delta_sibling) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (e->delta_child) {
			add_to_order = 1;
			e = e->delta_child;
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (e->delta_sibling) {
				e = e->delta_sibling;
				continue;
			}
			/* go back to our parent node */
			e = e->delta;
			while (e && !e->delta_sibling) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = e->delta;
			}
			if (!e) {
				/* done - we hit our original root node */
				return;
			}
			/* pass it off to sibling at this level */
			e = e->delta_sibling;
		}
	};
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; root->delta; root = root->delta)
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo;
	struct object_entry *objects = to_pack.objects;

	for (i = 0; i < to_pack.nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		objects[i].delta_child = NULL;
		objects[i].delta_sibling = NULL;
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = to_pack.nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!e->delta)
			continue;
		/* Mark me as the first child */
		e->delta_sibling = e->delta->delta_child;
		e->delta->delta_child = e;
	}

	/*
	 * Mark objects that are at the tip of tags.
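	 * (mark_tagged() flags both the tag object itself and, via
	 * peel_ref(), the object the tag points at.)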
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	ALLOC_ARRAY(wo, to_pack.nr_objects);
	for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < to_pack.nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_COMMIT &&
		    objects[i].type != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (objects[i].type != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < to_pack.nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != to_pack.nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

	return wo;
}

static off_t write_reused_pack(struct sha1file *f)
{
	unsigned char buffer[8192];
	off_t to_write, total;
	int fd;

	if (!is_pack_valid(reuse_packfile))
		die("packfile is invalid: %s", reuse_packfile->pack_name);

	fd = git_open(reuse_packfile->pack_name);
	if (fd < 0)
		die_errno("unable to open packfile for reuse: %s",
			  reuse_packfile->pack_name);

	if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
		die_errno("unable to seek in reused packfile");

	if (reuse_packfile_offset < 0)
		reuse_packfile_offset = reuse_packfile->pack_size - 20;

	total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

	while (to_write) {
		int read_pack = xread(fd, buffer, sizeof(buffer));

		if (read_pack <= 0)
			die_errno("unable to read from reused packfile");

		if (read_pack > to_write)
			read_pack = to_write;

		sha1write(f, buffer, read_pack);
		to_write -= read_pack;

		/*
		 * We don't know the actual number of objects written,
		 * only how many bytes written, how many bytes total, and
		 * how many objects total. So we can fake it by pretending all
		 * objects we are writing are the same size. This gives us a
		 * smooth progress meter, and at the end it matches the true
		 * answer.
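		 * (E.g. when half of the bytes have been copied, we report
		 * half of reuse_packfile_objects as written.)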
		 */
		written = reuse_packfile_objects *
				(((double)(total - to_write)) / total);
		display_progress(progress_state, written);
	}

	close(fd);
	written = reuse_packfile_objects;
	display_progress(progress_state, written);
	return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct sha1file *f;
	off_t offset;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress(_("Writing objects"), nr_result);
	ALLOC_ARRAY(written_list, to_pack.nr_objects);
	write_order = compute_write_order();

	do {
		unsigned char sha1[20];
		char *pack_tmp_name = NULL;

		if (pack_to_stdout)
			f = sha1fd_throughput(1, "<stdout>", progress_state);
		else
			f = create_tmp_packfile(&pack_tmp_name);

		offset = write_pack_header(f, nr_remaining);

		if (reuse_packfile) {
			off_t packfile_size;
			assert(pack_to_stdout);

			packfile_size = write_reused_pack(f);
			offset += packfile_size;
		}

		nr_written = 0;
		for (; i < to_pack.nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			sha1close(f, sha1, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			sha1close(f, sha1, CSUM_FSYNC);
		} else {
			int fd = sha1close(f, sha1, 0);
			fixup_pack_header_footer(fd, sha1, pack_tmp_name,
						 nr_written, sha1, offset);
			close(fd);
			if (write_bitmap_index) {
				warning(_(no_split_warning));
				write_bitmap_index = 0;
			}
		}

		if (!pack_to_stdout) {
			struct stat st;
			struct strbuf tmpname = STRBUF_INIT;

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
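			 * (Concretely, each later pack gets an mtime one
			 * second older than its predecessor, via
			 * --last_mtime below.)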
			 */
			if (stat(pack_tmp_name, &st) < 0) {
				warning_errno("failed to stat %s", pack_tmp_name);
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(pack_tmp_name, &utb) < 0)
					warning_errno("failed utime() on %s", pack_tmp_name);
			}

			strbuf_addf(&tmpname, "%s-", base_name);

			if (write_bitmap_index) {
				bitmap_writer_set_checksum(sha1);
				bitmap_writer_build_type_index(written_list, nr_written);
			}

			finish_tmp_packfile(&tmpname, pack_tmp_name,
					    written_list, nr_written,
					    &pack_idx_opts, sha1);

			if (write_bitmap_index) {
				strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));

				stop_progress(&progress_state);

				bitmap_writer_show_progress(progress);
				bitmap_writer_reuse_bitmaps(&to_pack);
				bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
				bitmap_writer_build(&to_pack);
				bitmap_writer_finish(written_list, nr_written,
						     tmpname.buf, write_bitmap_options);
				write_bitmap_index = 0;
			}

			strbuf_release(&tmpname);
			free(pack_tmp_name);
			puts(sha1_to_hex(sha1));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < to_pack.nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

static int no_try_delta(const char *path)
{
	static struct attr_check *check;

	if (!check)
		check = attr_check_initl("delta", NULL);
	if (git_check_attr(path, check))
		return 0;
	if (ATTR_FALSE(check->items[0].value))
		return 1;
	return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const unsigned char *sha1,
				int exclude,
				uint32_t *index_pos)
{
	struct object_entry *entry;

	entry = packlist_find(&to_pack, sha1, index_pos);
	if (!entry)
		return 0;

	if (exclude) {
		if (!entry->preferred_base)
			nr_result--;
		entry->preferred_base = 1;
	}

	return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
	if (exclude)
		return 1;
	if (incremental)
		return 0;

	/*
	 * When asked to do --local (do not include an object that appears in a
	 * pack we borrow from elsewhere) or --honor-pack-keep (do not include
	 * an object that appears in a pack marked with .keep), finding a pack
	 * that matches the criteria is sufficient for us to decide to omit it.
	 * However, even if this pack does not satisfy the criteria, we need to
	 * make sure no copy of this object appears in _any_ pack that would
	 * make us omit the object, so we need to check all the packs.
	 *
	 * We can however first check whether these options can possibly matter;
	 * if they do not matter, we know we want the object in the generated
	 * pack.  Otherwise, we signal "-1" at the end to tell the caller that
	 * we do not know either way, and it needs to check more packs.
	 */
	if (!ignore_packed_keep &&
	    (!local || !have_non_local_packs))
		return 1;

	if (local && !p->pack_local)
		return 0;
	if (ignore_packed_keep && p->pack_local && p->pack_keep)
		return 0;

	/* we don't know yet; keep looking for more packs */
	return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const unsigned char *sha1,
			       int exclude,
			       struct packed_git **found_pack,
			       off_t *found_offset)
{
	struct mru_entry *entry;
	int want;

	if (!exclude && local && has_loose_object_nonlocal(sha1))
		return 0;

	/*
	 * If we already know the pack the object lives in, start checks from
	 * that pack - in the usual case when neither --local was given nor
	 * .keep files are present we will determine the answer right now.
	 */
	if (*found_pack) {
		want = want_found_object(exclude, *found_pack);
		if (want != -1)
			return want;
	}

	for (entry = packed_git_mru->head; entry; entry = entry->next) {
		struct packed_git *p = entry->item;
		off_t offset;

		if (p == *found_pack)
			offset = *found_offset;
		else
			offset = find_pack_entry_one(sha1, p);

		if (offset) {
			if (!*found_pack) {
				if (!is_pack_valid(p))
					continue;
				*found_offset = offset;
				*found_pack = p;
			}
			want = want_found_object(exclude, p);
			if (!exclude && want > 0)
				mru_mark(packed_git_mru, entry);
			if (want != -1)
				return want;
		}
	}

	return 1;
}

static void create_object_entry(const unsigned char *sha1,
				enum object_type type,
				uint32_t hash,
				int exclude,
				int no_try_delta,
				uint32_t index_pos,
				struct packed_git *found_pack,
				off_t found_offset)
{
	struct object_entry *entry;

	entry = packlist_alloc(&to_pack, sha1, index_pos);
	entry->hash = hash;
	if (type)
		entry->type = type;
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		entry->in_pack = found_pack;
		entry->in_pack_offset = found_offset;
	}

	entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const unsigned char *sha1, enum object_type type,
			    const char *name, int exclude)
{
	struct packed_git *found_pack = NULL;
	off_t found_offset = 0;
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, exclude, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
		/* The pack is missing an object, so it will not have closure */
		if (write_bitmap_index) {
			warning(_(no_closure_warning));
			write_bitmap_index = 0;
		}
		return 0;
	}

	create_object_entry(sha1, type, pack_name_hash(name),
			    exclude, name && no_try_delta(name),
			    index_pos, found_pack, found_offset);

	display_progress(progress_state, nr_result);
	return 1;
}

static int add_object_entry_from_bitmap(const unsigned char *sha1,
					enum object_type type,
					int flags, uint32_t name_hash,
					struct packed_git *pack, off_t offset)
{
	uint32_t index_pos;

	if (have_duplicate_entry(sha1, 0, &index_pos))
		return 0;

	if (!want_object_in_pack(sha1, 0, &pack, &offset))
		return 0;

	create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

	display_progress(progress_state, nr_result);
	return 1;
}

struct pbase_tree_cache {
	unsigned char sha1[20];
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
	return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it or find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(sha1);
	int available_ix = -1;

	/* pbase-tree-cache acts as a limited hashtable.
	 * your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !hashcmp(ent->sha1, sha1)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_sha1_file(sha1, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	hashcpy(nent->sha1, sha1);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(&entry) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.oid->hash,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.oid->hash);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = (hi + lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	ALLOC_GROW(done_pbase_paths,
		   done_pbase_paths_num + 1,
		   done_pbase_paths_alloc);
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		memmove(done_pbase_paths + pos + 1,
			done_pbase_paths + pos,
			(done_pbase_paths_num - pos - 1) * sizeof(unsigned));
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = pack_name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(unsigned char *sha1)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	unsigned char tree_sha1[20];

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!hashcmp(it->pcache.sha1, tree_sha1)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	hashcpy(it->pcache.sha1, tree_sha1);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *this = it;
		it = this->next;
		free(this->pcache.tree_data);
		free(this);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		FREE_AND_NULL(pbase_tree_cache[i]);
	}

	FREE_AND_NULL(done_pbase_paths);
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

static void check_object(struct object_entry *entry)
{
	if (entry->in_pack) {
		struct packed_git *p = entry->in_pack;
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &entry->in_pack_type,
						   &entry->size);
		if (used == 0)
			goto give_up;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			entry->type = entry->in_pack_type;
			entry->in_pack_header_size = used;
			if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						    entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + 20;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      oid_to_hex(&entry->idx.oid));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bound for %s",
				      oid_to_hex(&entry->idx.oid));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			entry->type = entry->in_pack_type;
			entry->delta = base_entry;
			entry->delta_size = entry->size;
			entry->delta_sibling = base_entry->delta_child;
			base_entry->delta_child = entry;
			unuse_pack(&w_curs);
			return;
		}

		if (entry->type) {
			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			entry->size = get_size_from_delta(p, &w_curs,
					entry->in_pack_offset + entry->in_pack_header_size);
			if (entry->size == 0)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out about the object type
		 * at this point...
		 */
		give_up:
		unuse_pack(&w_curs);
	}

	entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
	/*
	 * The error condition is checked in prepare_pack().  This is
	 * to permit a missing preferred base object to be ignored
	 * as a preferred base.  Doing so can result in a larger
	 * pack file, but the transfer will still take place.
	 */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	/* avoid filesystem trashing with loose objects */
	if (!a->in_pack && !b->in_pack)
		return oidcmp(&a->idx.oid, &b->idx.oid);

	if (a->in_pack < b->in_pack)
		return -1;
	if (a->in_pack > b->in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
	       (a->in_pack_offset > b->in_pack_offset);
}

/*
 * Drop an on-disk delta we were planning to reuse. Naively, this would
 * just involve blanking out the "delta" field, but we have to deal
 * with some extra book-keeping:
 *
 *   1. Removing ourselves from the delta_sibling linked list.
 *
 *   2. Updating our size/type to the non-delta representation. These were
 *      either not recorded initially (size) or overwritten with the delta type
 *      (type) when check_object() decided to reuse the delta.
 *
 *   3. Resetting our delta depth, as we are now a base object.
 */
static void drop_reused_delta(struct object_entry *entry)
{
	struct object_entry **p = &entry->delta->delta_child;
	struct object_info oi = OBJECT_INFO_INIT;

	while (*p) {
		if (*p == entry)
			*p = (*p)->delta_sibling;
		else
			p = &(*p)->delta_sibling;
	}
	entry->delta = NULL;
	entry->depth = 0;

	oi.sizep = &entry->size;
	oi.typep = &entry->type;
	if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
		/*
		 * We failed to get the info from this pack for some reason;
		 * fall back to sha1_object_info, which may find another copy.
		 * And if that fails, the error will be recorded in entry->type
		 * and dealt with in prepare_pack().
		 */
		entry->type = sha1_object_info(entry->idx.oid.hash,
					       &entry->size);
	}
}

/*
 * Follow the chain of deltas from this entry onward, throwing away any links
 * that cause us to hit a cycle (as determined by the DFS state flags in
 * the entries).
 *
 * We also detect too-long reused chains that would violate our --depth
 * limit.
 */
static void break_delta_chains(struct object_entry *entry)
{
	/*
	 * The actual depth of each object we will write is stored as an int,
	 * as it cannot exceed our int "depth" limit. But before we break
	 * anything based on that limit, we may potentially go as deep as the
	 * number of objects, which is elsewhere bounded to a uint32_t.
	 */
	uint32_t total_depth;
	struct object_entry *cur, *next;

	for (cur = entry, total_depth = 0;
	     cur;
	     cur = cur->delta, total_depth++) {
		if (cur->dfs_state == DFS_DONE) {
			/*
			 * We've already seen this object and know it isn't
			 * part of a cycle. We do need to append its depth
			 * to our count.
			 */
			total_depth += cur->depth;
			break;
		}

		/*
		 * We break cycles before looping, so an ACTIVE state (or any
		 * other cruft which made its way into the state variable)
		 * is a bug.
		 */
		if (cur->dfs_state != DFS_NONE)
			die("BUG: confusing delta dfs state in first pass: %d",
			    cur->dfs_state);

		/*
		 * Now we know this is the first time we've seen the object. If
		 * it's not a delta, we're done traversing, but we'll mark it
		 * done to save time on future traversals.
		 */
		if (!cur->delta) {
			cur->dfs_state = DFS_DONE;
			break;
		}

		/*
		 * Mark ourselves as active and see if the next step causes
		 * us to cycle to another active object. It's important to do
		 * this _before_ we loop, because it impacts where we make the
		 * cut, and thus how our total_depth counter works.
		 * E.g., We may see a partial loop like:
		 *
		 *   A -> B -> C -> D -> B
		 *
		 * Cutting B->C breaks the cycle. But now the depth of A is
		 * only 1, and our total_depth counter is at 3. The size of the
		 * error is always one less than the size of the cycle we
		 * broke. Commits C and D were "lost" from A's chain.
		 *
		 * If we instead cut D->B, then the depth of A is correct at 3.
		 * We keep all commits in the chain that we examined.
		 */
		cur->dfs_state = DFS_ACTIVE;
		if (cur->delta->dfs_state == DFS_ACTIVE) {
			drop_reused_delta(cur);
			cur->dfs_state = DFS_DONE;
			break;
		}
	}

	/*
	 * And now that we've gone all the way to the bottom of the chain, we
	 * need to clear the active flags and set the depth fields as
	 * appropriate. Unlike the loop above, which can quit when it drops a
	 * delta, we need to keep going to look for more depth cuts. So we need
	 * an extra "next" pointer to keep going after we reset cur->delta.
	 */
	for (cur = entry; cur; cur = next) {
		next = cur->delta;

		/*
		 * We should have a chain of zero or more ACTIVE states down to
		 * a final DONE. We can quit after the DONE, because either it
		 * has no bases, or we've already handled them in a previous
		 * call.
		 */
		if (cur->dfs_state == DFS_DONE)
			break;
		else if (cur->dfs_state != DFS_ACTIVE)
			die("BUG: confusing delta dfs state in second pass: %d",
			    cur->dfs_state);

		/*
		 * If the total_depth is more than depth, then we need to snip
		 * the chain into two or more smaller chains that don't exceed
		 * the maximum depth. Most of the resulting chains will contain
		 * (depth + 1) entries (i.e., depth deltas plus one base), and
		 * the last chain (i.e., the one containing entry) will contain
		 * whatever entries are left over, namely
		 * (total_depth % (depth + 1)) of them.
		 *
		 * Since we are iterating towards decreasing depth, we need to
		 * decrement total_depth as we go, and we need to write to the
		 * entry what its final depth will be after all of the
		 * snipping. Since we're snipping into chains of length (depth
		 * + 1) entries, the final depth of an entry will be its
		 * original depth modulo (depth + 1). Any time we encounter an
		 * entry whose final depth is supposed to be zero, we snip it
		 * from its delta base, thereby making it so.
		 */
		cur->depth = (total_depth--) % (depth + 1);
		if (!cur->depth)
			drop_reused_delta(cur);

		cur->dfs_state = DFS_DONE;
	}
}

static void get_object_details(void)
{
	uint32_t i;
	struct object_entry **sorted_by_offset;

	sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
	for (i = 0; i < to_pack.nr_objects; i++)
		sorted_by_offset[i] = to_pack.objects + i;
	QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = sorted_by_offset[i];
		check_object(entry);
		if (big_file_threshold < entry->size)
			entry->no_try_delta = 1;
	}

	/*
	 * This must happen in a second pass, since we rely on the delta
	 * information for the whole list being completed.
	 */
	for (i = 0; i < to_pack.nr_objects; i++)
		break_delta_chains(&to_pack.objects[i]);

	free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and then
 * by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas are therefore the oldest objects, which are
 * less likely to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	if (a->type > b->type)
		return -1;
	if (a->type < b->type)
		return 1;
	if (a->hash > b->hash)
		return -1;
	if (a->hash < b->hash)
		return 1;
	if (a->preferred_base > b->preferred_base)
		return -1;
	if (a->preferred_base < b->preferred_base)
		return 1;
	if (a->size > b->size)
		return -1;
	if (a->size < b->size)
		return 1;
	return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
	struct object_entry *entry;
	void *data;
	struct delta_index *index;
	unsigned depth;
};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{
	if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
		return 0;

	if (delta_size < cache_max_small_delta_size)
		return 1;

	/* cache delta, if objects are large enough compared to delta size */
	if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
		return 1;

	return 0;
}

#ifndef NO_PTHREADS

static pthread_mutex_t read_mutex;
#define read_lock()		pthread_mutex_lock(&read_mutex)
#define read_unlock()		pthread_mutex_unlock(&read_mutex)

static pthread_mutex_t cache_mutex;
#define cache_lock()		pthread_mutex_lock(&cache_mutex)
#define cache_unlock()		pthread_mutex_unlock(&cache_mutex)

static pthread_mutex_t progress_mutex;
#define progress_lock()		pthread_mutex_lock(&progress_mutex)
#define progress_unlock()	pthread_mutex_unlock(&progress_mutex)

#else

#define read_lock()		(void)0
#define read_unlock()		(void)0
#define cache_lock()		(void)0
#define cache_unlock()		(void)0
#define progress_lock()		(void)0
#define progress_unlock()	(void)0

#endif

static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (trg_entry->type != src_entry->type)
		return -1;

	/*
	 * We do not bother to try a delta that we discarded on an
	 * earlier try, but only when reusing delta data.  Note that
	 * src_entry that is marked as the preferred_base should always
	 * be considered, as even if we produce a suboptimal delta against
	 * it, we will still save the transfer cost, as we already know
	 * the other side has it and we won't send src_entry at all.
	 */
	if (reuse_delta && trg_entry->in_pack &&
	    trg_entry->in_pack == src_entry->in_pack &&
	    !src_entry->preferred_base &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics.
*/1837 trg_size = trg_entry->size;1838if(!trg_entry->delta) {1839 max_size = trg_size/2-20;1840 ref_depth =1;1841}else{1842 max_size = trg_entry->delta_size;1843 ref_depth = trg->depth;1844}1845 max_size = (uint64_t)max_size * (max_depth - src->depth) /1846(max_depth - ref_depth +1);1847if(max_size ==0)1848return0;1849 src_size = src_entry->size;1850 sizediff = src_size < trg_size ? trg_size - src_size :0;1851if(sizediff >= max_size)1852return0;1853if(trg_size < src_size /32)1854return0;18551856/* Load data if not already done */1857if(!trg->data) {1858read_lock();1859 trg->data =read_sha1_file(trg_entry->idx.oid.hash, &type,1860&sz);1861read_unlock();1862if(!trg->data)1863die("object%scannot be read",1864oid_to_hex(&trg_entry->idx.oid));1865if(sz != trg_size)1866die("object%sinconsistent object length (%lu vs%lu)",1867oid_to_hex(&trg_entry->idx.oid), sz,1868 trg_size);1869*mem_usage += sz;1870}1871if(!src->data) {1872read_lock();1873 src->data =read_sha1_file(src_entry->idx.oid.hash, &type,1874&sz);1875read_unlock();1876if(!src->data) {1877if(src_entry->preferred_base) {1878static int warned =0;1879if(!warned++)1880warning("object%scannot be read",1881oid_to_hex(&src_entry->idx.oid));1882/*1883 * Those objects are not included in the1884 * resulting pack. Be resilient and ignore1885 * them if they can't be read, in case the1886 * pack could be created nevertheless.1887 */1888return0;1889}1890die("object%scannot be read",1891oid_to_hex(&src_entry->idx.oid));1892}1893if(sz != src_size)1894die("object%sinconsistent object length (%lu vs%lu)",1895oid_to_hex(&src_entry->idx.oid), sz,1896 src_size);1897*mem_usage += sz;1898}1899if(!src->index) {1900 src->index =create_delta_index(src->data, src_size);1901if(!src->index) {1902static int warned =0;1903if(!warned++)1904warning("suboptimal pack - out of memory");1905return0;1906}1907*mem_usage +=sizeof_delta_index(src->index);1908}19091910 delta_buf =create_delta(src->index, trg->data, trg_size, &delta_size, max_size);1911if(!delta_buf)1912return0;19131914if(trg_entry->delta) {1915/* Prefer only shallower same-sized deltas. */1916if(delta_size == trg_entry->delta_size &&1917 src->depth +1>= trg->depth) {1918free(delta_buf);1919return0;1920}1921}19221923/*1924 * Handle memory allocation outside of the cache1925 * accounting lock. 
Compiler will optimize the strangeness1926 * away when NO_PTHREADS is defined.1927 */1928free(trg_entry->delta_data);1929cache_lock();1930if(trg_entry->delta_data) {1931 delta_cache_size -= trg_entry->delta_size;1932 trg_entry->delta_data = NULL;1933}1934if(delta_cacheable(src_size, trg_size, delta_size)) {1935 delta_cache_size += delta_size;1936cache_unlock();1937 trg_entry->delta_data =xrealloc(delta_buf, delta_size);1938}else{1939cache_unlock();1940free(delta_buf);1941}19421943 trg_entry->delta = src_entry;1944 trg_entry->delta_size = delta_size;1945 trg->depth = src->depth +1;19461947return1;1948}19491950static unsigned intcheck_delta_limit(struct object_entry *me,unsigned int n)1951{1952struct object_entry *child = me->delta_child;1953unsigned int m = n;1954while(child) {1955unsigned int c =check_delta_limit(child, n +1);1956if(m < c)1957 m = c;1958 child = child->delta_sibling;1959}1960return m;1961}19621963static unsigned longfree_unpacked(struct unpacked *n)1964{1965unsigned long freed_mem =sizeof_delta_index(n->index);1966free_delta_index(n->index);1967 n->index = NULL;1968if(n->data) {1969 freed_mem += n->entry->size;1970FREE_AND_NULL(n->data);1971}1972 n->entry = NULL;1973 n->depth =0;1974return freed_mem;1975}19761977static voidfind_deltas(struct object_entry **list,unsigned*list_size,1978int window,int depth,unsigned*processed)1979{1980uint32_t i, idx =0, count =0;1981struct unpacked *array;1982unsigned long mem_usage =0;19831984 array =xcalloc(window,sizeof(struct unpacked));19851986for(;;) {1987struct object_entry *entry;1988struct unpacked *n = array + idx;1989int j, max_depth, best_base = -1;19901991progress_lock();1992if(!*list_size) {1993progress_unlock();1994break;1995}1996 entry = *list++;1997(*list_size)--;1998if(!entry->preferred_base) {1999(*processed)++;2000display_progress(progress_state, *processed);2001}2002progress_unlock();20032004 mem_usage -=free_unpacked(n);2005 n->entry = entry;20062007while(window_memory_limit &&2008 mem_usage > window_memory_limit &&2009 count >1) {2010uint32_t tail = (idx + window - count) % window;2011 mem_usage -=free_unpacked(array + tail);2012 count--;2013}20142015/* We do not compute delta to *create* objects we are not2016 * going to pack.2017 */2018if(entry->preferred_base)2019goto next;20202021/*2022 * If the current object is at pack edge, take the depth the2023 * objects that depend on the current object into account2024 * otherwise they would become too deep.2025 */2026 max_depth = depth;2027if(entry->delta_child) {2028 max_depth -=check_delta_limit(entry,0);2029if(max_depth <=0)2030goto next;2031}20322033 j = window;2034while(--j >0) {2035int ret;2036uint32_t other_idx = idx + j;2037struct unpacked *m;2038if(other_idx >= window)2039 other_idx -= window;2040 m = array + other_idx;2041if(!m->entry)2042break;2043 ret =try_delta(n, m, max_depth, &mem_usage);2044if(ret <0)2045break;2046else if(ret >0)2047 best_base = other_idx;2048}20492050/*2051 * If we decided to cache the delta data, then it is best2052 * to compress it right away. 
First because we have to do2053 * it anyway, and doing it here while we're threaded will2054 * save a lot of time in the non threaded write phase,2055 * as well as allow for caching more deltas within2056 * the same cache size limit.2057 * ...2058 * But only if not writing to stdout, since in that case2059 * the network is most likely throttling writes anyway,2060 * and therefore it is best to go to the write phase ASAP2061 * instead, as we can afford spending more time compressing2062 * between writes at that moment.2063 */2064if(entry->delta_data && !pack_to_stdout) {2065 entry->z_delta_size =do_compress(&entry->delta_data,2066 entry->delta_size);2067cache_lock();2068 delta_cache_size -= entry->delta_size;2069 delta_cache_size += entry->z_delta_size;2070cache_unlock();2071}20722073/* if we made n a delta, and if n is already at max2074 * depth, leaving it in the window is pointless. we2075 * should evict it first.2076 */2077if(entry->delta && max_depth <= n->depth)2078continue;20792080/*2081 * Move the best delta base up in the window, after the2082 * currently deltified object, to keep it longer. It will2083 * be the first base object to be attempted next.2084 */2085if(entry->delta) {2086struct unpacked swap = array[best_base];2087int dist = (window + idx - best_base) % window;2088int dst = best_base;2089while(dist--) {2090int src = (dst +1) % window;2091 array[dst] = array[src];2092 dst = src;2093}2094 array[dst] = swap;2095}20962097 next:2098 idx++;2099if(count +1< window)2100 count++;2101if(idx >= window)2102 idx =0;2103}21042105for(i =0; i < window; ++i) {2106free_delta_index(array[i].index);2107free(array[i].data);2108}2109free(array);2110}21112112#ifndef NO_PTHREADS21132114static voidtry_to_free_from_threads(size_t size)2115{2116read_lock();2117release_pack_memory(size);2118read_unlock();2119}21202121static try_to_free_t old_try_to_free_routine;21222123/*2124 * The main thread waits on the condition that (at least) one of the workers2125 * has stopped working (which is indicated in the .working member of2126 * struct thread_params).2127 * When a work thread has completed its work, it sets .working to 0 and2128 * signals the main thread and waits on the condition that .data_ready2129 * becomes 1.2130 */21312132struct thread_params {2133 pthread_t thread;2134struct object_entry **list;2135unsigned list_size;2136unsigned remaining;2137int window;2138int depth;2139int working;2140int data_ready;2141 pthread_mutex_t mutex;2142 pthread_cond_t cond;2143unsigned*processed;2144};21452146static pthread_cond_t progress_cond;21472148/*2149 * Mutex and conditional variable can't be statically-initialized on Windows.2150 */2151static voidinit_threaded_search(void)2152{2153init_recursive_mutex(&read_mutex);2154pthread_mutex_init(&cache_mutex, NULL);2155pthread_mutex_init(&progress_mutex, NULL);2156pthread_cond_init(&progress_cond, NULL);2157 old_try_to_free_routine =set_try_to_free_routine(try_to_free_from_threads);2158}21592160static voidcleanup_threaded_search(void)2161{2162set_try_to_free_routine(old_try_to_free_routine);2163pthread_cond_destroy(&progress_cond);2164pthread_mutex_destroy(&read_mutex);2165pthread_mutex_destroy(&cache_mutex);2166pthread_mutex_destroy(&progress_mutex);2167}21682169static void*threaded_find_deltas(void*arg)2170{2171struct thread_params *me = arg;21722173while(me->remaining) {2174find_deltas(me->list, &me->remaining,2175 me->window, me->depth, me->processed);21762177progress_lock();2178 me->working 
#ifndef NO_PTHREADS

static void try_to_free_from_threads(size_t size)
{
	read_lock();
	release_pack_memory(size);
	read_unlock();
}

static try_to_free_t old_try_to_free_routine;

/*
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 * When a work thread has completed its work, it sets .working to 0,
 * signals the main thread, and waits on the condition that .data_ready
 * becomes 1.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

static pthread_cond_t progress_cond;

/*
 * Mutex and condition variable can't be statically initialized on Windows.
 */
static void init_threaded_search(void)
{
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}

static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	while (me->remaining) {
		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here.  In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);
	}
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}
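/*
 * Hand-off protocol (illustrative summary, not part of the original
 * source), for one worker W and the main thread M:
 *
 *   W: find_deltas() drains me->remaining
 *   W: working = 0; signal progress_cond; block until data_ready == 1
 *   M: wakes under progress_mutex, picks W as target, steals half of the
 *      largest remaining segment into W->list, sets working = 1
 *   M: lock W->mutex; data_ready = 1; signal W->cond; unlock
 *   W: data_ready = 0; loop back into find_deltas()
 *
 * A final hand-off with an empty segment makes the worker fall out of
 * its loop (leaving working == 1), after which M joins the thread.
 */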
static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf(stderr, "Delta compression using up to %d threads.\n",
			delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2 * window && i + 1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size-1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2 * window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif

static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(oid);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die("unable to pack objects reachable from tag %s",
			    oid_to_hex(oid));

		add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") && /* is a tag? */
	    !peel_ref(path, peeled.hash)    && /* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
		add_tag_chain(oid);
	return 0;
}
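/*
 * Example (illustrative): given refs/tags/v1.0 -> tag X -> tag Y ->
 * commit C, peel_ref() resolves the ref to C.  If C made it into the
 * pack, add_tag_chain(X) adds X, follows X->tagged to Y and adds it,
 * then stops because Y's tagged object is no longer a tag (C itself
 * was already added by the object traversal).
 */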
static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (entry->delta)
			/* This happens if we decided to reuse existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (entry->size < 50)
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (entry->type < 0)
				die("unable to get type of object %s",
				    oid_to_hex(&entry->idx.oid));
		} else {
			if (entry->type < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}

static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		/* unsigned long: parse with ulong so sizes over INT_MAX work */
		max_delta_cache_size = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1) {
			warning("no threads support, ignoring %s", k);
			delta_search_threads = 0;
		}
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}
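/*
 * A config snippet exercising the keys handled above might look like
 * this (values are illustrative, not recommendations):
 *
 *   [pack]
 *       window = 20
 *       windowMemory = 256m
 *       depth = 50
 *       deltaCacheSize = 512m
 *       deltaCacheLimit = 1000
 *       threads = 0          ; 0 means autodetect, see cmd_pack_objects()
 *       indexVersion = 2
 */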
static void read_object_list_from_stdin(void)
{
	char line[40 + 1 + PATH_MAX + 2];
	unsigned char sha1[20];

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_sha1_hex(line+1, sha1))
				die("expected edge sha1, got garbage:\n%s",
				    line);
			add_preferred_base(sha1);
			continue;
		}
		if (get_sha1_hex(line, sha1))
			die("expected sha1, got garbage:\n%s", line);

		add_preferred_base_object(line+41);
		add_object_entry(sha1, 0, line+41, 0);
	}
}
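/*
 * Example stdin for the loop above (illustrative): one record per line,
 * a 40-hex object name optionally followed by a space and a path hint
 * (used for delta grouping); a leading '-' marks an edge object that
 * may serve as a preferred delta base but is not itself packed:
 *
 *   <40-hex sha1> Documentation/Makefile
 *   -<40-hex sha1>
 */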
#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(obj->oid.hash, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(commit->object.oid.hash);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	int alloc;
	int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in offset order, to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = packed_git; p; p = p->next) {
		const unsigned char *sha1;
		struct object *o;

		if (!p->pack_local || p->pack_keep)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			o = lookup_unknown_object(sha1);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(o->oid.hash, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data)
{
	enum object_type type = sha1_object_info(oid->hash, NULL);

	if (type < 0) {
		warning("loose object at %s could not be examined", path);
		return 0;
	}

	add_object_entry(oid->hash, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}

static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found : packed_git;

	while (p) {
		if ((!p->pack_local || p->pack_keep) &&
		    find_pack_entry_one(sha1, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = packed_git;
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

/*
 * Store a list of sha1s that should not be discarded because they
 * were either written too recently, or are reachable from another
 * object that was.
 *
 * This is filled by get_object_list.
 */
static struct oid_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    timestamp_t mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (oid_array_lookup(&recent_objects, oid) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	struct object_id oid;

	for (p = packed_git; p; p = p->next) {
		if (!p->pack_local || p->pack_keep)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			if (!packlist_find(&to_pack, oid.hash, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(oid.hash) &&
			    !loosened_object_can_be_discarded(&oid, p->mtime))
				if (force_object_loose(oid.hash, p->mtime))
					die("unable to force loose object");
		}
	}
}
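/*
 * Illustration (not part of the original source): with
 * --unpack-unreachable=2.weeks.ago, a packed-but-unwanted object from a
 * three-week-old pack that is neither in a kept/non-local pack nor in
 * recent_objects is simply dropped, while the same object from a
 * one-week-old pack is written back out as a loose object.
 */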
/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout &&
	       allow_ofs_delta &&
	       !ignore_packed_keep &&
	       (!local || !have_non_local_packs) &&
	       !incremental;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (prepare_bitmap_walk(revs) < 0)
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	oid_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	oid_array_append(&recent_objects, &commit->object.oid);
}

static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* make sure shallows are read */
	is_repository_shallow();

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				struct object_id oid;
				if (get_oid_hex(line + 10, &oid))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(&oid);
				use_bitmap_index = 0;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags,
					REVARG_CANNOT_BE_FILENAME))
			die("bad revision '%s'", line);
	}

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(&revs, show_edge);
	traverse_commit_list(&revs, show_commit, show_object, NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die("unable to add recent objects");
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	oid_array_clear(&recent_objects);
}
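/*
 * Example stdin consumed by the loop above when --revs is in effect
 * (illustrative):
 *
 *   --shallow <40-hex sha1>
 *   v2.0
 *   --not
 *   v1.0
 *
 * "--shallow" lines register shallow boundaries and disable bitmap use;
 * "--not" toggles the UNINTERESTING flag for the revs that follow (and
 * disables bitmap writing); anything else goes to handle_revision_arg().
 */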
best delta matches")),2907OPT_BOOL(0,"non-empty", &non_empty,2908N_("do not create an empty pack output")),2909OPT_BOOL(0,"revs", &use_internal_rev_list,2910N_("read revision arguments from standard input")),2911{ OPTION_SET_INT,0,"unpacked", &rev_list_unpacked, NULL,2912N_("limit the objects to those that are not yet packed"),2913 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2914{ OPTION_SET_INT,0,"all", &rev_list_all, NULL,2915N_("include objects reachable from any reference"),2916 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2917{ OPTION_SET_INT,0,"reflog", &rev_list_reflog, NULL,2918N_("include objects referred by reflog entries"),2919 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2920{ OPTION_SET_INT,0,"indexed-objects", &rev_list_index, NULL,2921N_("include objects referred to by the index"),2922 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL,1},2923OPT_BOOL(0,"stdout", &pack_to_stdout,2924N_("output pack to stdout")),2925OPT_BOOL(0,"include-tag", &include_tag,2926N_("include tag objects that refer to objects to be packed")),2927OPT_BOOL(0,"keep-unreachable", &keep_unreachable,2928N_("keep unreachable objects")),2929OPT_BOOL(0,"pack-loose-unreachable", &pack_loose_unreachable,2930N_("pack loose unreachable objects")),2931{ OPTION_CALLBACK,0,"unpack-unreachable", NULL,N_("time"),2932N_("unpack unreachable objects newer than <time>"),2933 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },2934OPT_BOOL(0,"thin", &thin,2935N_("create thin packs")),2936OPT_BOOL(0,"shallow", &shallow,2937N_("create packs suitable for shallow fetches")),2938OPT_BOOL(0,"honor-pack-keep", &ignore_packed_keep,2939N_("ignore packs that have companion .keep file")),2940OPT_INTEGER(0,"compression", &pack_compression_level,2941N_("pack compression level")),2942OPT_SET_INT(0,"keep-true-parents", &grafts_replace_parents,2943N_("do not hide commits by grafts"),0),2944OPT_BOOL(0,"use-bitmap-index", &use_bitmap_index,2945N_("use a bitmap index if available to speed up counting objects")),2946OPT_BOOL(0,"write-bitmap-index", &write_bitmap_index,2947N_("write a bitmap index together with the pack index")),2948OPT_END(),2949};29502951 check_replace_refs =0;29522953reset_pack_idx_option(&pack_idx_opts);2954git_config(git_pack_config, NULL);29552956 progress =isatty(2);2957 argc =parse_options(argc, argv, prefix, pack_objects_options,2958 pack_usage,0);29592960if(argc) {2961 base_name = argv[0];2962 argc--;2963}2964if(pack_to_stdout != !base_name || argc)2965usage_with_options(pack_usage, pack_objects_options);29662967argv_array_push(&rp,"pack-objects");2968if(thin) {2969 use_internal_rev_list =1;2970argv_array_push(&rp, shallow2971?"--objects-edge-aggressive"2972:"--objects-edge");2973}else2974argv_array_push(&rp,"--objects");29752976if(rev_list_all) {2977 use_internal_rev_list =1;2978argv_array_push(&rp,"--all");2979}2980if(rev_list_reflog) {2981 use_internal_rev_list =1;2982argv_array_push(&rp,"--reflog");2983}2984if(rev_list_index) {2985 use_internal_rev_list =1;2986argv_array_push(&rp,"--indexed-objects");2987}2988if(rev_list_unpacked) {2989 use_internal_rev_list =1;2990argv_array_push(&rp,"--unpacked");2991}29922993if(!reuse_object)2994 reuse_delta =0;2995if(pack_compression_level == -1)2996 pack_compression_level = Z_DEFAULT_COMPRESSION;2997else if(pack_compression_level <0|| pack_compression_level > Z_BEST_COMPRESSION)2998die("bad pack compression level%d", pack_compression_level);29993000if(!delta_search_threads)/* --threads=0 means autodetect */3001 delta_search_threads =online_cpus();30023003#ifdef 
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have a companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed objects
	 *   are packed in a suboptimal order).
	 *
	 * - to use a more robust pack-generation codepath (avoiding possible
	 *   bugs in bitmap code and possible bitmap index corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr,
			"Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}
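/*
 * Typical invocations (illustrative):
 *
 *   git rev-list --objects HEAD | git pack-objects my-pack
 *   git pack-objects --revs --stdout --thin <revs >out.pack
 *
 * The first writes my-pack-<sha1>.pack/.idx from an explicit object
 * list; the second reads rev arguments from stdin and streams a thin
 * pack, which is only valid for transfer (see the --thin/--stdout
 * checks above).
 */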