1#include "builtin.h" 2#include "cache.h" 3#include "repository.h" 4#include "config.h" 5#include "attr.h" 6#include "object.h" 7#include "blob.h" 8#include "commit.h" 9#include "tag.h" 10#include "tree.h" 11#include "delta.h" 12#include "pack.h" 13#include "pack-revindex.h" 14#include "csum-file.h" 15#include "tree-walk.h" 16#include "diff.h" 17#include "revision.h" 18#include "list-objects.h" 19#include "list-objects-filter.h" 20#include "list-objects-filter-options.h" 21#include "pack-objects.h" 22#include "progress.h" 23#include "refs.h" 24#include "streaming.h" 25#include "thread-utils.h" 26#include "pack-bitmap.h" 27#include "reachable.h" 28#include "sha1-array.h" 29#include "argv-array.h" 30#include "list.h" 31#include "packfile.h" 32#include "object-store.h" 33#include "dir.h" 34 35#define IN_PACK(obj) oe_in_pack(&to_pack, obj) 36#define SIZE(obj) oe_size(&to_pack, obj) 37#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size) 38#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj) 39#define DELTA(obj) oe_delta(&to_pack, obj) 40#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) 41#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) 42#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) 43#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) 44#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) 45#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) 46 47static const char *pack_usage[] = { 48 N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 49 N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 50 NULL 51}; 52 53/* 54 * Objects we are going to pack are collected in the `to_pack` structure. 55 * It contains an array (dynamically expanded) of the object data, and a map 56 * that can resolve SHA1s to their position in the array. 
57 */ 58static struct packing_data to_pack; 59 60static struct pack_idx_entry **written_list; 61static uint32_t nr_result, nr_written, nr_seen; 62 63static int non_empty; 64static int reuse_delta = 1, reuse_object = 1; 65static int keep_unreachable, unpack_unreachable, include_tag; 66static timestamp_t unpack_unreachable_expiration; 67static int pack_loose_unreachable; 68static int local; 69static int have_non_local_packs; 70static int incremental; 71static int ignore_packed_keep_on_disk; 72static int ignore_packed_keep_in_core; 73static int allow_ofs_delta; 74static struct pack_idx_option pack_idx_opts; 75static const char *base_name; 76static int progress = 1; 77static int window = 10; 78static unsigned long pack_size_limit; 79static int depth = 50; 80static int delta_search_threads; 81static int pack_to_stdout; 82static int num_preferred_base; 83static struct progress *progress_state; 84 85static struct packed_git *reuse_packfile; 86static uint32_t reuse_packfile_objects; 87static off_t reuse_packfile_offset; 88 89static int use_bitmap_index_default = 1; 90static int use_bitmap_index = -1; 91static int write_bitmap_index; 92static uint16_t write_bitmap_options; 93 94static int exclude_promisor_objects; 95 96static unsigned long delta_cache_size = 0; 97static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; 98static unsigned long cache_max_small_delta_size = 1000; 99 100static unsigned long window_memory_limit = 0; 101 102static struct list_objects_filter_options filter_options; 103 104enum missing_action { 105 MA_ERROR = 0, /* fail if any missing objects are encountered */ 106 MA_ALLOW_ANY, /* silently allow ALL missing objects */ 107 MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ 108}; 109static enum missing_action arg_missing_action; 110static show_object_fn fn_show_object; 111 112/* 113 * stats 114 */ 115static uint32_t written, written_delta; 116static uint32_t reused, reused_delta; 117 118/* 119 * Indexed commits 120 */ 121static struct commit **indexed_commits; 122static unsigned int indexed_commits_nr; 123static unsigned int indexed_commits_alloc; 124 125static void index_commit_for_bitmap(struct commit *commit) 126{ 127 if (indexed_commits_nr >= indexed_commits_alloc) { 128 indexed_commits_alloc = (indexed_commits_alloc + 32) * 2; 129 REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 130 } 131 132 indexed_commits[indexed_commits_nr++] = commit; 133} 134 135static void *get_delta(struct object_entry *entry) 136{ 137 unsigned long size, base_size, delta_size; 138 void *buf, *base_buf, *delta_buf; 139 enum object_type type; 140 141 buf = read_object_file(&entry->idx.oid, &type, &size); 142 if (!buf) 143 die("unable to read %s", oid_to_hex(&entry->idx.oid)); 144 base_buf = read_object_file(&DELTA(entry)->idx.oid, &type, 145 &base_size); 146 if (!base_buf) 147 die("unable to read %s", 148 oid_to_hex(&DELTA(entry)->idx.oid)); 149 delta_buf = diff_delta(base_buf, base_size, 150 buf, size, &delta_size, 0); 151 /* 152 * We succesfully computed this delta once but dropped it for 153 * memory reasons. Something is very wrong if this time we 154 * recompute and create a different delta. 
155 */ 156 if (!delta_buf || delta_size != DELTA_SIZE(entry)) 157 BUG("delta size changed"); 158 free(buf); 159 free(base_buf); 160 return delta_buf; 161} 162 163static unsigned long do_compress(void **pptr, unsigned long size) 164{ 165 git_zstream stream; 166 void *in, *out; 167 unsigned long maxsize; 168 169 git_deflate_init(&stream, pack_compression_level); 170 maxsize = git_deflate_bound(&stream, size); 171 172 in = *pptr; 173 out = xmalloc(maxsize); 174 *pptr = out; 175 176 stream.next_in = in; 177 stream.avail_in = size; 178 stream.next_out = out; 179 stream.avail_out = maxsize; 180 while (git_deflate(&stream, Z_FINISH) == Z_OK) 181 ; /* nothing */ 182 git_deflate_end(&stream); 183 184 free(in); 185 return stream.total_out; 186} 187 188static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, 189 const struct object_id *oid) 190{ 191 git_zstream stream; 192 unsigned char ibuf[1024 * 16]; 193 unsigned char obuf[1024 * 16]; 194 unsigned long olen = 0; 195 196 git_deflate_init(&stream, pack_compression_level); 197 198 for (;;) { 199 ssize_t readlen; 200 int zret = Z_OK; 201 readlen = read_istream(st, ibuf, sizeof(ibuf)); 202 if (readlen == -1) 203 die(_("unable to read %s"), oid_to_hex(oid)); 204 205 stream.next_in = ibuf; 206 stream.avail_in = readlen; 207 while ((stream.avail_in || readlen == 0) && 208 (zret == Z_OK || zret == Z_BUF_ERROR)) { 209 stream.next_out = obuf; 210 stream.avail_out = sizeof(obuf); 211 zret = git_deflate(&stream, readlen ? 0 : Z_FINISH); 212 hashwrite(f, obuf, stream.next_out - obuf); 213 olen += stream.next_out - obuf; 214 } 215 if (stream.avail_in) 216 die(_("deflate error (%d)"), zret); 217 if (readlen == 0) { 218 if (zret != Z_STREAM_END) 219 die(_("deflate error (%d)"), zret); 220 break; 221 } 222 } 223 git_deflate_end(&stream); 224 return olen; 225} 226 227/* 228 * we are going to reuse the existing object data as is. make 229 * sure it is not corrupt. 230 */ 231static int check_pack_inflate(struct packed_git *p, 232 struct pack_window **w_curs, 233 off_t offset, 234 off_t len, 235 unsigned long expect) 236{ 237 git_zstream stream; 238 unsigned char fakebuf[4096], *in; 239 int st; 240 241 memset(&stream, 0, sizeof(stream)); 242 git_inflate_init(&stream); 243 do { 244 in = use_pack(p, w_curs, offset, &stream.avail_in); 245 stream.next_in = in; 246 stream.next_out = fakebuf; 247 stream.avail_out = sizeof(fakebuf); 248 st = git_inflate(&stream, Z_FINISH); 249 offset += stream.next_in - in; 250 } while (st == Z_OK || st == Z_BUF_ERROR); 251 git_inflate_end(&stream); 252 return (st == Z_STREAM_END && 253 stream.total_out == expect && 254 stream.total_in == len) ? 
0 : -1; 255} 256 257static void copy_pack_data(struct hashfile *f, 258 struct packed_git *p, 259 struct pack_window **w_curs, 260 off_t offset, 261 off_t len) 262{ 263 unsigned char *in; 264 unsigned long avail; 265 266 while (len) { 267 in = use_pack(p, w_curs, offset, &avail); 268 if (avail > len) 269 avail = (unsigned long)len; 270 hashwrite(f, in, avail); 271 offset += avail; 272 len -= avail; 273 } 274} 275 276/* Return 0 if we will bust the pack-size limit */ 277static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, 278 unsigned long limit, int usable_delta) 279{ 280 unsigned long size, datalen; 281 unsigned char header[MAX_PACK_OBJECT_HEADER], 282 dheader[MAX_PACK_OBJECT_HEADER]; 283 unsigned hdrlen; 284 enum object_type type; 285 void *buf; 286 struct git_istream *st = NULL; 287 const unsigned hashsz = the_hash_algo->rawsz; 288 289 if (!usable_delta) { 290 if (oe_type(entry) == OBJ_BLOB && 291 oe_size_greater_than(&to_pack, entry, big_file_threshold) && 292 (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) 293 buf = NULL; 294 else { 295 buf = read_object_file(&entry->idx.oid, &type, &size); 296 if (!buf) 297 die(_("unable to read %s"), 298 oid_to_hex(&entry->idx.oid)); 299 } 300 /* 301 * make sure no cached delta data remains from a 302 * previous attempt before a pack split occurred. 303 */ 304 FREE_AND_NULL(entry->delta_data); 305 entry->z_delta_size = 0; 306 } else if (entry->delta_data) { 307 size = DELTA_SIZE(entry); 308 buf = entry->delta_data; 309 entry->delta_data = NULL; 310 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 311 OBJ_OFS_DELTA : OBJ_REF_DELTA; 312 } else { 313 buf = get_delta(entry); 314 size = DELTA_SIZE(entry); 315 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 316 OBJ_OFS_DELTA : OBJ_REF_DELTA; 317 } 318 319 if (st) /* large blob case, just assume we don't compress well */ 320 datalen = size; 321 else if (entry->z_delta_size) 322 datalen = entry->z_delta_size; 323 else 324 datalen = do_compress(&buf, size); 325 326 /* 327 * The object header is a byte of 'type' followed by zero or 328 * more bytes of length. 329 */ 330 hdrlen = encode_in_pack_object_header(header, sizeof(header), 331 type, size); 332 333 if (type == OBJ_OFS_DELTA) { 334 /* 335 * Deltas with relative base contain an additional 336 * encoding of the relative offset for the delta 337 * base from this object's position in the pack. 338 */ 339 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; 340 unsigned pos = sizeof(dheader) - 1; 341 dheader[pos] = ofs & 127; 342 while (ofs >>= 7) 343 dheader[--pos] = 128 | (--ofs & 127); 344 if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) { 345 if (st) 346 close_istream(st); 347 free(buf); 348 return 0; 349 } 350 hashwrite(f, header, hdrlen); 351 hashwrite(f, dheader + pos, sizeof(dheader) - pos); 352 hdrlen += sizeof(dheader) - pos; 353 } else if (type == OBJ_REF_DELTA) { 354 /* 355 * Deltas with a base reference contain 356 * additional bytes for the base object ID. 
357 */ 358 if (limit && hdrlen + hashsz + datalen + hashsz >= limit) { 359 if (st) 360 close_istream(st); 361 free(buf); 362 return 0; 363 } 364 hashwrite(f, header, hdrlen); 365 hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz); 366 hdrlen += hashsz; 367 } else { 368 if (limit && hdrlen + datalen + hashsz >= limit) { 369 if (st) 370 close_istream(st); 371 free(buf); 372 return 0; 373 } 374 hashwrite(f, header, hdrlen); 375 } 376 if (st) { 377 datalen = write_large_blob_data(st, f, &entry->idx.oid); 378 close_istream(st); 379 } else { 380 hashwrite(f, buf, datalen); 381 free(buf); 382 } 383 384 return hdrlen + datalen; 385} 386 387/* Return 0 if we will bust the pack-size limit */ 388static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, 389 unsigned long limit, int usable_delta) 390{ 391 struct packed_git *p = IN_PACK(entry); 392 struct pack_window *w_curs = NULL; 393 struct revindex_entry *revidx; 394 off_t offset; 395 enum object_type type = oe_type(entry); 396 off_t datalen; 397 unsigned char header[MAX_PACK_OBJECT_HEADER], 398 dheader[MAX_PACK_OBJECT_HEADER]; 399 unsigned hdrlen; 400 const unsigned hashsz = the_hash_algo->rawsz; 401 unsigned long entry_size = SIZE(entry); 402 403 if (DELTA(entry)) 404 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 405 OBJ_OFS_DELTA : OBJ_REF_DELTA; 406 hdrlen = encode_in_pack_object_header(header, sizeof(header), 407 type, entry_size); 408 409 offset = entry->in_pack_offset; 410 revidx = find_pack_revindex(p, offset); 411 datalen = revidx[1].offset - offset; 412 if (!pack_to_stdout && p->index_version > 1 && 413 check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 414 error("bad packed object CRC for %s", 415 oid_to_hex(&entry->idx.oid)); 416 unuse_pack(&w_curs); 417 return write_no_reuse_object(f, entry, limit, usable_delta); 418 } 419 420 offset += entry->in_pack_header_size; 421 datalen -= entry->in_pack_header_size; 422 423 if (!pack_to_stdout && p->index_version == 1 && 424 check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) { 425 error("corrupt packed object for %s", 426 oid_to_hex(&entry->idx.oid)); 427 unuse_pack(&w_curs); 428 return write_no_reuse_object(f, entry, limit, usable_delta); 429 } 430 431 if (type == OBJ_OFS_DELTA) { 432 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; 433 unsigned pos = sizeof(dheader) - 1; 434 dheader[pos] = ofs & 127; 435 while (ofs >>= 7) 436 dheader[--pos] = 128 | (--ofs & 127); 437 if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) { 438 unuse_pack(&w_curs); 439 return 0; 440 } 441 hashwrite(f, header, hdrlen); 442 hashwrite(f, dheader + pos, sizeof(dheader) - pos); 443 hdrlen += sizeof(dheader) - pos; 444 reused_delta++; 445 } else if (type == OBJ_REF_DELTA) { 446 if (limit && hdrlen + hashsz + datalen + hashsz >= limit) { 447 unuse_pack(&w_curs); 448 return 0; 449 } 450 hashwrite(f, header, hdrlen); 451 hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz); 452 hdrlen += hashsz; 453 reused_delta++; 454 } else { 455 if (limit && hdrlen + datalen + hashsz >= limit) { 456 unuse_pack(&w_curs); 457 return 0; 458 } 459 hashwrite(f, header, hdrlen); 460 } 461 copy_pack_data(f, p, &w_curs, offset, datalen); 462 unuse_pack(&w_curs); 463 reused++; 464 return hdrlen + datalen; 465} 466 467/* Return 0 if we will bust the pack-size limit */ 468static off_t write_object(struct hashfile *f, 469 struct object_entry *entry, 470 off_t write_offset) 471{ 472 unsigned long limit; 473 off_t len; 474 int usable_delta, to_reuse; 475 476 if (!pack_to_stdout) 
477 crc32_begin(f); 478 479 /* apply size limit if limited packsize and not first object */ 480 if (!pack_size_limit || !nr_written) 481 limit = 0; 482 else if (pack_size_limit <= write_offset) 483 /* 484 * the earlier object did not fit the limit; avoid 485 * mistaking this with unlimited (i.e. limit = 0). 486 */ 487 limit = 1; 488 else 489 limit = pack_size_limit - write_offset; 490 491 if (!DELTA(entry)) 492 usable_delta = 0; /* no delta */ 493 else if (!pack_size_limit) 494 usable_delta = 1; /* unlimited packfile */ 495 else if (DELTA(entry)->idx.offset == (off_t)-1) 496 usable_delta = 0; /* base was written to another pack */ 497 else if (DELTA(entry)->idx.offset) 498 usable_delta = 1; /* base already exists in this pack */ 499 else 500 usable_delta = 0; /* base could end up in another pack */ 501 502 if (!reuse_object) 503 to_reuse = 0; /* explicit */ 504 else if (!IN_PACK(entry)) 505 to_reuse = 0; /* can't reuse what we don't have */ 506 else if (oe_type(entry) == OBJ_REF_DELTA || 507 oe_type(entry) == OBJ_OFS_DELTA) 508 /* check_object() decided it for us ... */ 509 to_reuse = usable_delta; 510 /* ... but pack split may override that */ 511 else if (oe_type(entry) != entry->in_pack_type) 512 to_reuse = 0; /* pack has delta which is unusable */ 513 else if (DELTA(entry)) 514 to_reuse = 0; /* we want to pack afresh */ 515 else 516 to_reuse = 1; /* we have it in-pack undeltified, 517 * and we do not need to deltify it. 518 */ 519 520 if (!to_reuse) 521 len = write_no_reuse_object(f, entry, limit, usable_delta); 522 else 523 len = write_reuse_object(f, entry, limit, usable_delta); 524 if (!len) 525 return 0; 526 527 if (usable_delta) 528 written_delta++; 529 written++; 530 if (!pack_to_stdout) 531 entry->idx.crc32 = crc32_end(f); 532 return len; 533} 534 535enum write_one_status { 536 WRITE_ONE_SKIP = -1, /* already written */ 537 WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */ 538 WRITE_ONE_WRITTEN = 1, /* normal */ 539 WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */ 540}; 541 542static enum write_one_status write_one(struct hashfile *f, 543 struct object_entry *e, 544 off_t *offset) 545{ 546 off_t size; 547 int recursing; 548 549 /* 550 * we set offset to 1 (which is an impossible value) to mark 551 * the fact that this object is involved in "write its base 552 * first before writing a deltified object" recursion. 553 */ 554 recursing = (e->idx.offset == 1); 555 if (recursing) { 556 warning("recursive delta detected for object %s", 557 oid_to_hex(&e->idx.oid)); 558 return WRITE_ONE_RECURSIVE; 559 } else if (e->idx.offset || e->preferred_base) { 560 /* offset is non zero if object is written already. */ 561 return WRITE_ONE_SKIP; 562 } 563 564 /* if we are deltified, write out base object first. 
*/ 565 if (DELTA(e)) { 566 e->idx.offset = 1; /* now recurse */ 567 switch (write_one(f, DELTA(e), offset)) { 568 case WRITE_ONE_RECURSIVE: 569 /* we cannot depend on this one */ 570 SET_DELTA(e, NULL); 571 break; 572 default: 573 break; 574 case WRITE_ONE_BREAK: 575 e->idx.offset = recursing; 576 return WRITE_ONE_BREAK; 577 } 578 } 579 580 e->idx.offset = *offset; 581 size = write_object(f, e, *offset); 582 if (!size) { 583 e->idx.offset = recursing; 584 return WRITE_ONE_BREAK; 585 } 586 written_list[nr_written++] = &e->idx; 587 588 /* make sure off_t is sufficiently large not to wrap */ 589 if (signed_add_overflows(*offset, size)) 590 die("pack too large for current definition of off_t"); 591 *offset += size; 592 return WRITE_ONE_WRITTEN; 593} 594 595static int mark_tagged(const char *path, const struct object_id *oid, int flag, 596 void *cb_data) 597{ 598 struct object_id peeled; 599 struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL); 600 601 if (entry) 602 entry->tagged = 1; 603 if (!peel_ref(path, &peeled)) { 604 entry = packlist_find(&to_pack, peeled.hash, NULL); 605 if (entry) 606 entry->tagged = 1; 607 } 608 return 0; 609} 610 611static inline void add_to_write_order(struct object_entry **wo, 612 unsigned int *endp, 613 struct object_entry *e) 614{ 615 if (e->filled) 616 return; 617 wo[(*endp)++] = e; 618 e->filled = 1; 619} 620 621static void add_descendants_to_write_order(struct object_entry **wo, 622 unsigned int *endp, 623 struct object_entry *e) 624{ 625 int add_to_order = 1; 626 while (e) { 627 if (add_to_order) { 628 struct object_entry *s; 629 /* add this node... */ 630 add_to_write_order(wo, endp, e); 631 /* all its siblings... */ 632 for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) { 633 add_to_write_order(wo, endp, s); 634 } 635 } 636 /* drop down a level to add left subtree nodes if possible */ 637 if (DELTA_CHILD(e)) { 638 add_to_order = 1; 639 e = DELTA_CHILD(e); 640 } else { 641 add_to_order = 0; 642 /* our sibling might have some children, it is next */ 643 if (DELTA_SIBLING(e)) { 644 e = DELTA_SIBLING(e); 645 continue; 646 } 647 /* go back to our parent node */ 648 e = DELTA(e); 649 while (e && !DELTA_SIBLING(e)) { 650 /* we're on the right side of a subtree, keep 651 * going up until we can go right again */ 652 e = DELTA(e); 653 } 654 if (!e) { 655 /* done- we hit our original root node */ 656 return; 657 } 658 /* pass it off to sibling at this level */ 659 e = DELTA_SIBLING(e); 660 } 661 }; 662} 663 664static void add_family_to_write_order(struct object_entry **wo, 665 unsigned int *endp, 666 struct object_entry *e) 667{ 668 struct object_entry *root; 669 670 for (root = e; DELTA(root); root = DELTA(root)) 671 ; /* nothing */ 672 add_descendants_to_write_order(wo, endp, root); 673} 674 675static struct object_entry **compute_write_order(void) 676{ 677 unsigned int i, wo_end, last_untagged; 678 679 struct object_entry **wo; 680 struct object_entry *objects = to_pack.objects; 681 682 for (i = 0; i < to_pack.nr_objects; i++) { 683 objects[i].tagged = 0; 684 objects[i].filled = 0; 685 SET_DELTA_CHILD(&objects[i], NULL); 686 SET_DELTA_SIBLING(&objects[i], NULL); 687 } 688 689 /* 690 * Fully connect delta_child/delta_sibling network. 691 * Make sure delta_sibling is sorted in the original 692 * recency order. 
693 */ 694 for (i = to_pack.nr_objects; i > 0;) { 695 struct object_entry *e = &objects[--i]; 696 if (!DELTA(e)) 697 continue; 698 /* Mark me as the first child */ 699 e->delta_sibling_idx = DELTA(e)->delta_child_idx; 700 SET_DELTA_CHILD(DELTA(e), e); 701 } 702 703 /* 704 * Mark objects that are at the tip of tags. 705 */ 706 for_each_tag_ref(mark_tagged, NULL); 707 708 /* 709 * Give the objects in the original recency order until 710 * we see a tagged tip. 711 */ 712 ALLOC_ARRAY(wo, to_pack.nr_objects); 713 for (i = wo_end = 0; i < to_pack.nr_objects; i++) { 714 if (objects[i].tagged) 715 break; 716 add_to_write_order(wo, &wo_end, &objects[i]); 717 } 718 last_untagged = i; 719 720 /* 721 * Then fill all the tagged tips. 722 */ 723 for (; i < to_pack.nr_objects; i++) { 724 if (objects[i].tagged) 725 add_to_write_order(wo, &wo_end, &objects[i]); 726 } 727 728 /* 729 * And then all remaining commits and tags. 730 */ 731 for (i = last_untagged; i < to_pack.nr_objects; i++) { 732 if (oe_type(&objects[i]) != OBJ_COMMIT && 733 oe_type(&objects[i]) != OBJ_TAG) 734 continue; 735 add_to_write_order(wo, &wo_end, &objects[i]); 736 } 737 738 /* 739 * And then all the trees. 740 */ 741 for (i = last_untagged; i < to_pack.nr_objects; i++) { 742 if (oe_type(&objects[i]) != OBJ_TREE) 743 continue; 744 add_to_write_order(wo, &wo_end, &objects[i]); 745 } 746 747 /* 748 * Finally all the rest in really tight order 749 */ 750 for (i = last_untagged; i < to_pack.nr_objects; i++) { 751 if (!objects[i].filled) 752 add_family_to_write_order(wo, &wo_end, &objects[i]); 753 } 754 755 if (wo_end != to_pack.nr_objects) 756 die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects); 757 758 return wo; 759} 760 761static off_t write_reused_pack(struct hashfile *f) 762{ 763 unsigned char buffer[8192]; 764 off_t to_write, total; 765 int fd; 766 767 if (!is_pack_valid(reuse_packfile)) 768 die("packfile is invalid: %s", reuse_packfile->pack_name); 769 770 fd = git_open(reuse_packfile->pack_name); 771 if (fd < 0) 772 die_errno("unable to open packfile for reuse: %s", 773 reuse_packfile->pack_name); 774 775 if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1) 776 die_errno("unable to seek in reused packfile"); 777 778 if (reuse_packfile_offset < 0) 779 reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz; 780 781 total = to_write = reuse_packfile_offset - sizeof(struct pack_header); 782 783 while (to_write) { 784 int read_pack = xread(fd, buffer, sizeof(buffer)); 785 786 if (read_pack <= 0) 787 die_errno("unable to read from reused packfile"); 788 789 if (read_pack > to_write) 790 read_pack = to_write; 791 792 hashwrite(f, buffer, read_pack); 793 to_write -= read_pack; 794 795 /* 796 * We don't know the actual number of objects written, 797 * only how many bytes written, how many bytes total, and 798 * how many objects total. So we can fake it by pretending all 799 * objects we are writing are the same size. This gives us a 800 * smooth progress meter, and at the end it matches the true 801 * answer. 
802 */ 803 written = reuse_packfile_objects * 804 (((double)(total - to_write)) / total); 805 display_progress(progress_state, written); 806 } 807 808 close(fd); 809 written = reuse_packfile_objects; 810 display_progress(progress_state, written); 811 return reuse_packfile_offset - sizeof(struct pack_header); 812} 813 814static const char no_split_warning[] = N_( 815"disabling bitmap writing, packs are split due to pack.packSizeLimit" 816); 817 818static void write_pack_file(void) 819{ 820 uint32_t i = 0, j; 821 struct hashfile *f; 822 off_t offset; 823 uint32_t nr_remaining = nr_result; 824 time_t last_mtime = 0; 825 struct object_entry **write_order; 826 827 if (progress > pack_to_stdout) 828 progress_state = start_progress(_("Writing objects"), nr_result); 829 ALLOC_ARRAY(written_list, to_pack.nr_objects); 830 write_order = compute_write_order(); 831 832 do { 833 struct object_id oid; 834 char *pack_tmp_name = NULL; 835 836 if (pack_to_stdout) 837 f = hashfd_throughput(1, "<stdout>", progress_state); 838 else 839 f = create_tmp_packfile(&pack_tmp_name); 840 841 offset = write_pack_header(f, nr_remaining); 842 843 if (reuse_packfile) { 844 off_t packfile_size; 845 assert(pack_to_stdout); 846 847 packfile_size = write_reused_pack(f); 848 offset += packfile_size; 849 } 850 851 nr_written = 0; 852 for (; i < to_pack.nr_objects; i++) { 853 struct object_entry *e = write_order[i]; 854 if (write_one(f, e, &offset) == WRITE_ONE_BREAK) 855 break; 856 display_progress(progress_state, written); 857 } 858 859 /* 860 * Did we write the wrong # entries in the header? 861 * If so, rewrite it like in fast-import 862 */ 863 if (pack_to_stdout) { 864 finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); 865 } else if (nr_written == nr_remaining) { 866 finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); 867 } else { 868 int fd = finalize_hashfile(f, oid.hash, 0); 869 fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, 870 nr_written, oid.hash, offset); 871 close(fd); 872 if (write_bitmap_index) { 873 warning(_(no_split_warning)); 874 write_bitmap_index = 0; 875 } 876 } 877 878 if (!pack_to_stdout) { 879 struct stat st; 880 struct strbuf tmpname = STRBUF_INIT; 881 882 /* 883 * Packs are runtime accessed in their mtime 884 * order since newer packs are more likely to contain 885 * younger objects. So if we are creating multiple 886 * packs then we should modify the mtime of later ones 887 * to preserve this property. 
888 */ 889 if (stat(pack_tmp_name, &st) < 0) { 890 warning_errno("failed to stat %s", pack_tmp_name); 891 } else if (!last_mtime) { 892 last_mtime = st.st_mtime; 893 } else { 894 struct utimbuf utb; 895 utb.actime = st.st_atime; 896 utb.modtime = --last_mtime; 897 if (utime(pack_tmp_name, &utb) < 0) 898 warning_errno("failed utime() on %s", pack_tmp_name); 899 } 900 901 strbuf_addf(&tmpname, "%s-", base_name); 902 903 if (write_bitmap_index) { 904 bitmap_writer_set_checksum(oid.hash); 905 bitmap_writer_build_type_index( 906 &to_pack, written_list, nr_written); 907 } 908 909 finish_tmp_packfile(&tmpname, pack_tmp_name, 910 written_list, nr_written, 911 &pack_idx_opts, oid.hash); 912 913 if (write_bitmap_index) { 914 strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid)); 915 916 stop_progress(&progress_state); 917 918 bitmap_writer_show_progress(progress); 919 bitmap_writer_reuse_bitmaps(&to_pack); 920 bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 921 bitmap_writer_build(&to_pack); 922 bitmap_writer_finish(written_list, nr_written, 923 tmpname.buf, write_bitmap_options); 924 write_bitmap_index = 0; 925 } 926 927 strbuf_release(&tmpname); 928 free(pack_tmp_name); 929 puts(oid_to_hex(&oid)); 930 } 931 932 /* mark written objects as written to previous pack */ 933 for (j = 0; j < nr_written; j++) { 934 written_list[j]->offset = (off_t)-1; 935 } 936 nr_remaining -= nr_written; 937 } while (nr_remaining && i < to_pack.nr_objects); 938 939 free(written_list); 940 free(write_order); 941 stop_progress(&progress_state); 942 if (written != nr_result) 943 die("wrote %"PRIu32" objects while expecting %"PRIu32, 944 written, nr_result); 945} 946 947static int no_try_delta(const char *path) 948{ 949 static struct attr_check *check; 950 951 if (!check) 952 check = attr_check_initl("delta", NULL); 953 if (git_check_attr(path, check)) 954 return 0; 955 if (ATTR_FALSE(check->items[0].value)) 956 return 1; 957 return 0; 958} 959 960/* 961 * When adding an object, check whether we have already added it 962 * to our packing list. If so, we can skip. However, if we are 963 * being asked to excludei t, but the previous mention was to include 964 * it, make sure to adjust its flags and tweak our numbers accordingly. 965 * 966 * As an optimization, we pass out the index position where we would have 967 * found the item, since that saves us from having to look it up again a 968 * few lines later when we want to add the new entry. 
969 */ 970static int have_duplicate_entry(const struct object_id *oid, 971 int exclude, 972 uint32_t *index_pos) 973{ 974 struct object_entry *entry; 975 976 entry = packlist_find(&to_pack, oid->hash, index_pos); 977 if (!entry) 978 return 0; 979 980 if (exclude) { 981 if (!entry->preferred_base) 982 nr_result--; 983 entry->preferred_base = 1; 984 } 985 986 return 1; 987} 988 989static int want_found_object(int exclude, struct packed_git *p) 990{ 991 if (exclude) 992 return 1; 993 if (incremental) 994 return 0; 995 996 /* 997 * When asked to do --local (do not include an object that appears in a 998 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 999 * an object that appears in a pack marked with .keep), finding a pack1000 * that matches the criteria is sufficient for us to decide to omit it.1001 * However, even if this pack does not satisfy the criteria, we need to1002 * make sure no copy of this object appears in _any_ pack that makes us1003 * to omit the object, so we need to check all the packs.1004 *1005 * We can however first check whether these options can possible matter;1006 * if they do not matter we know we want the object in generated pack.1007 * Otherwise, we signal "-1" at the end to tell the caller that we do1008 * not know either way, and it needs to check more packs.1009 */1010 if (!ignore_packed_keep_on_disk &&1011 !ignore_packed_keep_in_core &&1012 (!local || !have_non_local_packs))1013 return 1;10141015 if (local && !p->pack_local)1016 return 0;1017 if (p->pack_local &&1018 ((ignore_packed_keep_on_disk && p->pack_keep) ||1019 (ignore_packed_keep_in_core && p->pack_keep_in_core)))1020 return 0;10211022 /* we don't know yet; keep looking for more packs */1023 return -1;1024}10251026/*1027 * Check whether we want the object in the pack (e.g., we do not want1028 * objects found in non-local stores if the "--local" option was used).1029 *1030 * If the caller already knows an existing pack it wants to take the object1031 * from, that is passed in *found_pack and *found_offset; otherwise this1032 * function finds if there is any pack that has the object and returns the pack1033 * and its offset in these variables.1034 */1035static int want_object_in_pack(const struct object_id *oid,1036 int exclude,1037 struct packed_git **found_pack,1038 off_t *found_offset)1039{1040 int want;1041 struct list_head *pos;10421043 if (!exclude && local && has_loose_object_nonlocal(oid))1044 return 0;10451046 /*1047 * If we already know the pack object lives in, start checks from that1048 * pack - in the usual case when neither --local was given nor .keep files1049 * are present we will determine the answer right now.1050 */1051 if (*found_pack) {1052 want = want_found_object(exclude, *found_pack);1053 if (want != -1)1054 return want;1055 }1056 list_for_each(pos, get_packed_git_mru(the_repository)) {1057 struct packed_git *p = list_entry(pos, struct packed_git, mru);1058 off_t offset;10591060 if (p == *found_pack)1061 offset = *found_offset;1062 else1063 offset = find_pack_entry_one(oid->hash, p);10641065 if (offset) {1066 if (!*found_pack) {1067 if (!is_pack_valid(p))1068 continue;1069 *found_offset = offset;1070 *found_pack = p;1071 }1072 want = want_found_object(exclude, p);1073 if (!exclude && want > 0)1074 list_move(&p->mru,1075 get_packed_git_mru(the_repository));1076 if (want != -1)1077 return want;1078 }1079 }10801081 return 1;1082}10831084static void create_object_entry(const struct object_id *oid,1085 enum object_type type,1086 uint32_t hash,1087 int exclude,1088 
int no_try_delta,1089 uint32_t index_pos,1090 struct packed_git *found_pack,1091 off_t found_offset)1092{1093 struct object_entry *entry;10941095 entry = packlist_alloc(&to_pack, oid->hash, index_pos);1096 entry->hash = hash;1097 oe_set_type(entry, type);1098 if (exclude)1099 entry->preferred_base = 1;1100 else1101 nr_result++;1102 if (found_pack) {1103 oe_set_in_pack(&to_pack, entry, found_pack);1104 entry->in_pack_offset = found_offset;1105 }11061107 entry->no_try_delta = no_try_delta;1108}11091110static const char no_closure_warning[] = N_(1111"disabling bitmap writing, as some objects are not being packed"1112);11131114static int add_object_entry(const struct object_id *oid, enum object_type type,1115 const char *name, int exclude)1116{1117 struct packed_git *found_pack = NULL;1118 off_t found_offset = 0;1119 uint32_t index_pos;11201121 display_progress(progress_state, ++nr_seen);11221123 if (have_duplicate_entry(oid, exclude, &index_pos))1124 return 0;11251126 if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {1127 /* The pack is missing an object, so it will not have closure */1128 if (write_bitmap_index) {1129 warning(_(no_closure_warning));1130 write_bitmap_index = 0;1131 }1132 return 0;1133 }11341135 create_object_entry(oid, type, pack_name_hash(name),1136 exclude, name && no_try_delta(name),1137 index_pos, found_pack, found_offset);1138 return 1;1139}11401141static int add_object_entry_from_bitmap(const struct object_id *oid,1142 enum object_type type,1143 int flags, uint32_t name_hash,1144 struct packed_git *pack, off_t offset)1145{1146 uint32_t index_pos;11471148 display_progress(progress_state, ++nr_seen);11491150 if (have_duplicate_entry(oid, 0, &index_pos))1151 return 0;11521153 if (!want_object_in_pack(oid, 0, &pack, &offset))1154 return 0;11551156 create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);1157 return 1;1158}11591160struct pbase_tree_cache {1161 struct object_id oid;1162 int ref;1163 int temporary;1164 void *tree_data;1165 unsigned long tree_size;1166};11671168static struct pbase_tree_cache *(pbase_tree_cache[256]);1169static int pbase_tree_cache_ix(const struct object_id *oid)1170{1171 return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);1172}1173static int pbase_tree_cache_ix_incr(int ix)1174{1175 return (ix+1) % ARRAY_SIZE(pbase_tree_cache);1176}11771178static struct pbase_tree {1179 struct pbase_tree *next;1180 /* This is a phony "cache" entry; we are not1181 * going to evict it or find it through _get()1182 * mechanism -- this is for the toplevel node that1183 * would almost always change with any commit.1184 */1185 struct pbase_tree_cache pcache;1186} *pbase_tree;11871188static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)1189{1190 struct pbase_tree_cache *ent, *nent;1191 void *data;1192 unsigned long size;1193 enum object_type type;1194 int neigh;1195 int my_ix = pbase_tree_cache_ix(oid);1196 int available_ix = -1;11971198 /* pbase-tree-cache acts as a limited hashtable.1199 * your object will be found at your index or within a few1200 * slots after that slot if it is cached.1201 */1202 for (neigh = 0; neigh < 8; neigh++) {1203 ent = pbase_tree_cache[my_ix];1204 if (ent && !oidcmp(&ent->oid, oid)) {1205 ent->ref++;1206 return ent;1207 }1208 else if (((available_ix < 0) && (!ent || !ent->ref)) ||1209 ((0 <= available_ix) &&1210 (!ent && pbase_tree_cache[available_ix])))1211 available_ix = my_ix;1212 if (!ent)1213 break;1214 my_ix = pbase_tree_cache_ix_incr(my_ix);1215 }12161217 /* Did not find one. 
Either we got a bogus request or1218 * we need to read and perhaps cache.1219 */1220 data = read_object_file(oid, &type, &size);1221 if (!data)1222 return NULL;1223 if (type != OBJ_TREE) {1224 free(data);1225 return NULL;1226 }12271228 /* We need to either cache or return a throwaway copy */12291230 if (available_ix < 0)1231 ent = NULL;1232 else {1233 ent = pbase_tree_cache[available_ix];1234 my_ix = available_ix;1235 }12361237 if (!ent) {1238 nent = xmalloc(sizeof(*nent));1239 nent->temporary = (available_ix < 0);1240 }1241 else {1242 /* evict and reuse */1243 free(ent->tree_data);1244 nent = ent;1245 }1246 oidcpy(&nent->oid, oid);1247 nent->tree_data = data;1248 nent->tree_size = size;1249 nent->ref = 1;1250 if (!nent->temporary)1251 pbase_tree_cache[my_ix] = nent;1252 return nent;1253}12541255static void pbase_tree_put(struct pbase_tree_cache *cache)1256{1257 if (!cache->temporary) {1258 cache->ref--;1259 return;1260 }1261 free(cache->tree_data);1262 free(cache);1263}12641265static int name_cmp_len(const char *name)1266{1267 int i;1268 for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)1269 ;1270 return i;1271}12721273static void add_pbase_object(struct tree_desc *tree,1274 const char *name,1275 int cmplen,1276 const char *fullname)1277{1278 struct name_entry entry;1279 int cmp;12801281 while (tree_entry(tree,&entry)) {1282 if (S_ISGITLINK(entry.mode))1283 continue;1284 cmp = tree_entry_len(&entry) != cmplen ? 1 :1285 memcmp(name, entry.path, cmplen);1286 if (cmp > 0)1287 continue;1288 if (cmp < 0)1289 return;1290 if (name[cmplen] != '/') {1291 add_object_entry(entry.oid,1292 object_type(entry.mode),1293 fullname, 1);1294 return;1295 }1296 if (S_ISDIR(entry.mode)) {1297 struct tree_desc sub;1298 struct pbase_tree_cache *tree;1299 const char *down = name+cmplen+1;1300 int downlen = name_cmp_len(down);13011302 tree = pbase_tree_get(entry.oid);1303 if (!tree)1304 return;1305 init_tree_desc(&sub, tree->tree_data, tree->tree_size);13061307 add_pbase_object(&sub, down, downlen, fullname);1308 pbase_tree_put(tree);1309 }1310 }1311}13121313static unsigned *done_pbase_paths;1314static int done_pbase_paths_num;1315static int done_pbase_paths_alloc;1316static int done_pbase_path_pos(unsigned hash)1317{1318 int lo = 0;1319 int hi = done_pbase_paths_num;1320 while (lo < hi) {1321 int mi = lo + (hi - lo) / 2;1322 if (done_pbase_paths[mi] == hash)1323 return mi;1324 if (done_pbase_paths[mi] < hash)1325 hi = mi;1326 else1327 lo = mi + 1;1328 }1329 return -lo-1;1330}13311332static int check_pbase_path(unsigned hash)1333{1334 int pos = done_pbase_path_pos(hash);1335 if (0 <= pos)1336 return 1;1337 pos = -pos - 1;1338 ALLOC_GROW(done_pbase_paths,1339 done_pbase_paths_num + 1,1340 done_pbase_paths_alloc);1341 done_pbase_paths_num++;1342 if (pos < done_pbase_paths_num)1343 MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,1344 done_pbase_paths_num - pos - 1);1345 done_pbase_paths[pos] = hash;1346 return 0;1347}13481349static void add_preferred_base_object(const char *name)1350{1351 struct pbase_tree *it;1352 int cmplen;1353 unsigned hash = pack_name_hash(name);13541355 if (!num_preferred_base || check_pbase_path(hash))1356 return;13571358 cmplen = name_cmp_len(name);1359 for (it = pbase_tree; it; it = it->next) {1360 if (cmplen == 0) {1361 add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);1362 }1363 else {1364 struct tree_desc tree;1365 init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1366 add_pbase_object(&tree, name, cmplen, name);1367 }1368 }1369}13701371static 
void add_preferred_base(struct object_id *oid)1372{1373 struct pbase_tree *it;1374 void *data;1375 unsigned long size;1376 struct object_id tree_oid;13771378 if (window <= num_preferred_base++)1379 return;13801381 data = read_object_with_reference(oid, tree_type, &size, &tree_oid);1382 if (!data)1383 return;13841385 for (it = pbase_tree; it; it = it->next) {1386 if (!oidcmp(&it->pcache.oid, &tree_oid)) {1387 free(data);1388 return;1389 }1390 }13911392 it = xcalloc(1, sizeof(*it));1393 it->next = pbase_tree;1394 pbase_tree = it;13951396 oidcpy(&it->pcache.oid, &tree_oid);1397 it->pcache.tree_data = data;1398 it->pcache.tree_size = size;1399}14001401static void cleanup_preferred_base(void)1402{1403 struct pbase_tree *it;1404 unsigned i;14051406 it = pbase_tree;1407 pbase_tree = NULL;1408 while (it) {1409 struct pbase_tree *tmp = it;1410 it = tmp->next;1411 free(tmp->pcache.tree_data);1412 free(tmp);1413 }14141415 for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {1416 if (!pbase_tree_cache[i])1417 continue;1418 free(pbase_tree_cache[i]->tree_data);1419 FREE_AND_NULL(pbase_tree_cache[i]);1420 }14211422 FREE_AND_NULL(done_pbase_paths);1423 done_pbase_paths_num = done_pbase_paths_alloc = 0;1424}14251426static void check_object(struct object_entry *entry)1427{1428 unsigned long canonical_size;14291430 if (IN_PACK(entry)) {1431 struct packed_git *p = IN_PACK(entry);1432 struct pack_window *w_curs = NULL;1433 const unsigned char *base_ref = NULL;1434 struct object_entry *base_entry;1435 unsigned long used, used_0;1436 unsigned long avail;1437 off_t ofs;1438 unsigned char *buf, c;1439 enum object_type type;1440 unsigned long in_pack_size;14411442 buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);14431444 /*1445 * We want in_pack_type even if we do not reuse delta1446 * since non-delta representations could still be reused.1447 */1448 used = unpack_object_header_buffer(buf, avail,1449 &type,1450 &in_pack_size);1451 if (used == 0)1452 goto give_up;14531454 if (type < 0)1455 BUG("invalid type %d", type);1456 entry->in_pack_type = type;14571458 /*1459 * Determine if this is a delta and if so whether we can1460 * reuse it or not. Otherwise let's find out as cheaply as1461 * possible what the actual type and size for this object is.1462 */1463 switch (entry->in_pack_type) {1464 default:1465 /* Not a delta hence we've already got all we need. 
*/1466 oe_set_type(entry, entry->in_pack_type);1467 SET_SIZE(entry, in_pack_size);1468 entry->in_pack_header_size = used;1469 if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)1470 goto give_up;1471 unuse_pack(&w_curs);1472 return;1473 case OBJ_REF_DELTA:1474 if (reuse_delta && !entry->preferred_base)1475 base_ref = use_pack(p, &w_curs,1476 entry->in_pack_offset + used, NULL);1477 entry->in_pack_header_size = used + the_hash_algo->rawsz;1478 break;1479 case OBJ_OFS_DELTA:1480 buf = use_pack(p, &w_curs,1481 entry->in_pack_offset + used, NULL);1482 used_0 = 0;1483 c = buf[used_0++];1484 ofs = c & 127;1485 while (c & 128) {1486 ofs += 1;1487 if (!ofs || MSB(ofs, 7)) {1488 error("delta base offset overflow in pack for %s",1489 oid_to_hex(&entry->idx.oid));1490 goto give_up;1491 }1492 c = buf[used_0++];1493 ofs = (ofs << 7) + (c & 127);1494 }1495 ofs = entry->in_pack_offset - ofs;1496 if (ofs <= 0 || ofs >= entry->in_pack_offset) {1497 error("delta base offset out of bound for %s",1498 oid_to_hex(&entry->idx.oid));1499 goto give_up;1500 }1501 if (reuse_delta && !entry->preferred_base) {1502 struct revindex_entry *revidx;1503 revidx = find_pack_revindex(p, ofs);1504 if (!revidx)1505 goto give_up;1506 base_ref = nth_packed_object_sha1(p, revidx->nr);1507 }1508 entry->in_pack_header_size = used + used_0;1509 break;1510 }15111512 if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {1513 /*1514 * If base_ref was set above that means we wish to1515 * reuse delta data, and we even found that base1516 * in the list of objects we want to pack. Goodie!1517 *1518 * Depth value does not matter - find_deltas() will1519 * never consider reused delta as the base object to1520 * deltify other objects against, in order to avoid1521 * circular deltas.1522 */1523 oe_set_type(entry, entry->in_pack_type);1524 SET_SIZE(entry, in_pack_size); /* delta size */1525 SET_DELTA(entry, base_entry);1526 SET_DELTA_SIZE(entry, in_pack_size);1527 entry->delta_sibling_idx = base_entry->delta_child_idx;1528 SET_DELTA_CHILD(base_entry, entry);1529 unuse_pack(&w_curs);1530 return;1531 }15321533 if (oe_type(entry)) {1534 off_t delta_pos;15351536 /*1537 * This must be a delta and we already know what the1538 * final object type is. Let's extract the actual1539 * object size from the delta header.1540 */1541 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;1542 canonical_size = get_size_from_delta(p, &w_curs, delta_pos);1543 if (canonical_size == 0)1544 goto give_up;1545 SET_SIZE(entry, canonical_size);1546 unuse_pack(&w_curs);1547 return;1548 }15491550 /*1551 * No choice but to fall back to the recursive delta walk1552 * with sha1_object_info() to find about the object type1553 * at this point...1554 */1555 give_up:1556 unuse_pack(&w_curs);1557 }15581559 oe_set_type(entry,1560 oid_object_info(the_repository, &entry->idx.oid, &canonical_size));1561 if (entry->type_valid) {1562 SET_SIZE(entry, canonical_size);1563 } else {1564 /*1565 * Bad object type is checked in prepare_pack(). This is1566 * to permit a missing preferred base object to be ignored1567 * as a preferred base. 
Doing so can result in a larger1568 * pack file, but the transfer will still take place.1569 */1570 }1571}15721573static int pack_offset_sort(const void *_a, const void *_b)1574{1575 const struct object_entry *a = *(struct object_entry **)_a;1576 const struct object_entry *b = *(struct object_entry **)_b;1577 const struct packed_git *a_in_pack = IN_PACK(a);1578 const struct packed_git *b_in_pack = IN_PACK(b);15791580 /* avoid filesystem trashing with loose objects */1581 if (!a_in_pack && !b_in_pack)1582 return oidcmp(&a->idx.oid, &b->idx.oid);15831584 if (a_in_pack < b_in_pack)1585 return -1;1586 if (a_in_pack > b_in_pack)1587 return 1;1588 return a->in_pack_offset < b->in_pack_offset ? -1 :1589 (a->in_pack_offset > b->in_pack_offset);1590}15911592/*1593 * Drop an on-disk delta we were planning to reuse. Naively, this would1594 * just involve blanking out the "delta" field, but we have to deal1595 * with some extra book-keeping:1596 *1597 * 1. Removing ourselves from the delta_sibling linked list.1598 *1599 * 2. Updating our size/type to the non-delta representation. These were1600 * either not recorded initially (size) or overwritten with the delta type1601 * (type) when check_object() decided to reuse the delta.1602 *1603 * 3. Resetting our delta depth, as we are now a base object.1604 */1605static void drop_reused_delta(struct object_entry *entry)1606{1607 unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;1608 struct object_info oi = OBJECT_INFO_INIT;1609 enum object_type type;1610 unsigned long size;16111612 while (*idx) {1613 struct object_entry *oe = &to_pack.objects[*idx - 1];16141615 if (oe == entry)1616 *idx = oe->delta_sibling_idx;1617 else1618 idx = &oe->delta_sibling_idx;1619 }1620 SET_DELTA(entry, NULL);1621 entry->depth = 0;16221623 oi.sizep = &size;1624 oi.typep = &type;1625 if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {1626 /*1627 * We failed to get the info from this pack for some reason;1628 * fall back to sha1_object_info, which may find another copy.1629 * And if that fails, the error will be recorded in oe_type(entry)1630 * and dealt with in prepare_pack().1631 */1632 oe_set_type(entry,1633 oid_object_info(the_repository, &entry->idx.oid, &size));1634 } else {1635 oe_set_type(entry, type);1636 }1637 SET_SIZE(entry, size);1638}16391640/*1641 * Follow the chain of deltas from this entry onward, throwing away any links1642 * that cause us to hit a cycle (as determined by the DFS state flags in1643 * the entries).1644 *1645 * We also detect too-long reused chains that would violate our --depth1646 * limit.1647 */1648static void break_delta_chains(struct object_entry *entry)1649{1650 /*1651 * The actual depth of each object we will write is stored as an int,1652 * as it cannot exceed our int "depth" limit. But before we break1653 * changes based no that limit, we may potentially go as deep as the1654 * number of objects, which is elsewhere bounded to a uint32_t.1655 */1656 uint32_t total_depth;1657 struct object_entry *cur, *next;16581659 for (cur = entry, total_depth = 0;1660 cur;1661 cur = DELTA(cur), total_depth++) {1662 if (cur->dfs_state == DFS_DONE) {1663 /*1664 * We've already seen this object and know it isn't1665 * part of a cycle. 
We do need to append its depth1666 * to our count.1667 */1668 total_depth += cur->depth;1669 break;1670 }16711672 /*1673 * We break cycles before looping, so an ACTIVE state (or any1674 * other cruft which made its way into the state variable)1675 * is a bug.1676 */1677 if (cur->dfs_state != DFS_NONE)1678 BUG("confusing delta dfs state in first pass: %d",1679 cur->dfs_state);16801681 /*1682 * Now we know this is the first time we've seen the object. If1683 * it's not a delta, we're done traversing, but we'll mark it1684 * done to save time on future traversals.1685 */1686 if (!DELTA(cur)) {1687 cur->dfs_state = DFS_DONE;1688 break;1689 }16901691 /*1692 * Mark ourselves as active and see if the next step causes1693 * us to cycle to another active object. It's important to do1694 * this _before_ we loop, because it impacts where we make the1695 * cut, and thus how our total_depth counter works.1696 * E.g., We may see a partial loop like:1697 *1698 * A -> B -> C -> D -> B1699 *1700 * Cutting B->C breaks the cycle. But now the depth of A is1701 * only 1, and our total_depth counter is at 3. The size of the1702 * error is always one less than the size of the cycle we1703 * broke. Commits C and D were "lost" from A's chain.1704 *1705 * If we instead cut D->B, then the depth of A is correct at 3.1706 * We keep all commits in the chain that we examined.1707 */1708 cur->dfs_state = DFS_ACTIVE;1709 if (DELTA(cur)->dfs_state == DFS_ACTIVE) {1710 drop_reused_delta(cur);1711 cur->dfs_state = DFS_DONE;1712 break;1713 }1714 }17151716 /*1717 * And now that we've gone all the way to the bottom of the chain, we1718 * need to clear the active flags and set the depth fields as1719 * appropriate. Unlike the loop above, which can quit when it drops a1720 * delta, we need to keep going to look for more depth cuts. So we need1721 * an extra "next" pointer to keep going after we reset cur->delta.1722 */1723 for (cur = entry; cur; cur = next) {1724 next = DELTA(cur);17251726 /*1727 * We should have a chain of zero or more ACTIVE states down to1728 * a final DONE. We can quit after the DONE, because either it1729 * has no bases, or we've already handled them in a previous1730 * call.1731 */1732 if (cur->dfs_state == DFS_DONE)1733 break;1734 else if (cur->dfs_state != DFS_ACTIVE)1735 BUG("confusing delta dfs state in second pass: %d",1736 cur->dfs_state);17371738 /*1739 * If the total_depth is more than depth, then we need to snip1740 * the chain into two or more smaller chains that don't exceed1741 * the maximum depth. Most of the resulting chains will contain1742 * (depth + 1) entries (i.e., depth deltas plus one base), and1743 * the last chain (i.e., the one containing entry) will contain1744 * whatever entries are left over, namely1745 * (total_depth % (depth + 1)) of them.1746 *1747 * Since we are iterating towards decreasing depth, we need to1748 * decrement total_depth as we go, and we need to write to the1749 * entry what its final depth will be after all of the1750 * snipping. Since we're snipping into chains of length (depth1751 * + 1) entries, the final depth of an entry will be its1752 * original depth modulo (depth + 1). 
Any time we encounter an1753 * entry whose final depth is supposed to be zero, we snip it1754 * from its delta base, thereby making it so.1755 */1756 cur->depth = (total_depth--) % (depth + 1);1757 if (!cur->depth)1758 drop_reused_delta(cur);17591760 cur->dfs_state = DFS_DONE;1761 }1762}17631764static void get_object_details(void)1765{1766 uint32_t i;1767 struct object_entry **sorted_by_offset;17681769 if (progress)1770 progress_state = start_progress(_("Counting objects"),1771 to_pack.nr_objects);17721773 sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));1774 for (i = 0; i < to_pack.nr_objects; i++)1775 sorted_by_offset[i] = to_pack.objects + i;1776 QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17771778 for (i = 0; i < to_pack.nr_objects; i++) {1779 struct object_entry *entry = sorted_by_offset[i];1780 check_object(entry);1781 if (entry->type_valid &&1782 oe_size_greater_than(&to_pack, entry, big_file_threshold))1783 entry->no_try_delta = 1;1784 display_progress(progress_state, i + 1);1785 }1786 stop_progress(&progress_state);17871788 /*1789 * This must happen in a second pass, since we rely on the delta1790 * information for the whole list being completed.1791 */1792 for (i = 0; i < to_pack.nr_objects; i++)1793 break_delta_chains(&to_pack.objects[i]);17941795 free(sorted_by_offset);1796}17971798/*1799 * We search for deltas in a list sorted by type, by filename hash, and then1800 * by size, so that we see progressively smaller and smaller files.1801 * That's because we prefer deltas to be from the bigger file1802 * to the smaller -- deletes are potentially cheaper, but perhaps1803 * more importantly, the bigger file is likely the more recent1804 * one. The deepest deltas are therefore the oldest objects which are1805 * less susceptible to be accessed often.1806 */1807static int type_size_sort(const void *_a, const void *_b)1808{1809 const struct object_entry *a = *(struct object_entry **)_a;1810 const struct object_entry *b = *(struct object_entry **)_b;1811 enum object_type a_type = oe_type(a);1812 enum object_type b_type = oe_type(b);1813 unsigned long a_size = SIZE(a);1814 unsigned long b_size = SIZE(b);18151816 if (a_type > b_type)1817 return -1;1818 if (a_type < b_type)1819 return 1;1820 if (a->hash > b->hash)1821 return -1;1822 if (a->hash < b->hash)1823 return 1;1824 if (a->preferred_base > b->preferred_base)1825 return -1;1826 if (a->preferred_base < b->preferred_base)1827 return 1;1828 if (a_size > b_size)1829 return -1;1830 if (a_size < b_size)1831 return 1;1832 return a < b ? 
-1 : (a > b); /* newest first */1833}18341835struct unpacked {1836 struct object_entry *entry;1837 void *data;1838 struct delta_index *index;1839 unsigned depth;1840};18411842static int delta_cacheable(unsigned long src_size, unsigned long trg_size,1843 unsigned long delta_size)1844{1845 if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1846 return 0;18471848 if (delta_size < cache_max_small_delta_size)1849 return 1;18501851 /* cache delta, if objects are large enough compared to delta size */1852 if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))1853 return 1;18541855 return 0;1856}18571858#ifndef NO_PTHREADS18591860static pthread_mutex_t read_mutex;1861#define read_lock() pthread_mutex_lock(&read_mutex)1862#define read_unlock() pthread_mutex_unlock(&read_mutex)18631864static pthread_mutex_t cache_mutex;1865#define cache_lock() pthread_mutex_lock(&cache_mutex)1866#define cache_unlock() pthread_mutex_unlock(&cache_mutex)18671868static pthread_mutex_t progress_mutex;1869#define progress_lock() pthread_mutex_lock(&progress_mutex)1870#define progress_unlock() pthread_mutex_unlock(&progress_mutex)18711872#else18731874#define read_lock() (void)01875#define read_unlock() (void)01876#define cache_lock() (void)01877#define cache_unlock() (void)01878#define progress_lock() (void)01879#define progress_unlock() (void)018801881#endif18821883/*1884 * Return the size of the object without doing any delta1885 * reconstruction (so non-deltas are true object sizes, but deltas1886 * return the size of the delta data).1887 */1888unsigned long oe_get_size_slow(struct packing_data *pack,1889 const struct object_entry *e)1890{1891 struct packed_git *p;1892 struct pack_window *w_curs;1893 unsigned char *buf;1894 enum object_type type;1895 unsigned long used, avail, size;18961897 if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {1898 read_lock();1899 if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)1900 die(_("unable to get size of %s"),1901 oid_to_hex(&e->idx.oid));1902 read_unlock();1903 return size;1904 }19051906 p = oe_in_pack(pack, e);1907 if (!p)1908 BUG("when e->type is a delta, it must belong to a pack");19091910 read_lock();1911 w_curs = NULL;1912 buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);1913 used = unpack_object_header_buffer(buf, avail, &type, &size);1914 if (used == 0)1915 die(_("unable to parse object header of %s"),1916 oid_to_hex(&e->idx.oid));19171918 unuse_pack(&w_curs);1919 read_unlock();1920 return size;1921}19221923static int try_delta(struct unpacked *trg, struct unpacked *src,1924 unsigned max_depth, unsigned long *mem_usage)1925{1926 struct object_entry *trg_entry = trg->entry;1927 struct object_entry *src_entry = src->entry;1928 unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1929 unsigned ref_depth;1930 enum object_type type;1931 void *delta_buf;19321933 /* Don't bother doing diffs between different types */1934 if (oe_type(trg_entry) != oe_type(src_entry))1935 return -1;19361937 /*1938 * We do not bother to try a delta that we discarded on an1939 * earlier try, but only when reusing delta data. 
Note that1940 * src_entry that is marked as the preferred_base should always1941 * be considered, as even if we produce a suboptimal delta against1942 * it, we will still save the transfer cost, as we already know1943 * the other side has it and we won't send src_entry at all.1944 */1945 if (reuse_delta && IN_PACK(trg_entry) &&1946 IN_PACK(trg_entry) == IN_PACK(src_entry) &&1947 !src_entry->preferred_base &&1948 trg_entry->in_pack_type != OBJ_REF_DELTA &&1949 trg_entry->in_pack_type != OBJ_OFS_DELTA)1950 return 0;19511952 /* Let's not bust the allowed depth. */1953 if (src->depth >= max_depth)1954 return 0;19551956 /* Now some size filtering heuristics. */1957 trg_size = SIZE(trg_entry);1958 if (!DELTA(trg_entry)) {1959 max_size = trg_size/2 - the_hash_algo->rawsz;1960 ref_depth = 1;1961 } else {1962 max_size = DELTA_SIZE(trg_entry);1963 ref_depth = trg->depth;1964 }1965 max_size = (uint64_t)max_size * (max_depth - src->depth) /1966 (max_depth - ref_depth + 1);1967 if (max_size == 0)1968 return 0;1969 src_size = SIZE(src_entry);1970 sizediff = src_size < trg_size ? trg_size - src_size : 0;1971 if (sizediff >= max_size)1972 return 0;1973 if (trg_size < src_size / 32)1974 return 0;19751976 /* Load data if not already done */1977 if (!trg->data) {1978 read_lock();1979 trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);1980 read_unlock();1981 if (!trg->data)1982 die("object %s cannot be read",1983 oid_to_hex(&trg_entry->idx.oid));1984 if (sz != trg_size)1985 die("object %s inconsistent object length (%lu vs %lu)",1986 oid_to_hex(&trg_entry->idx.oid), sz,1987 trg_size);1988 *mem_usage += sz;1989 }1990 if (!src->data) {1991 read_lock();1992 src->data = read_object_file(&src_entry->idx.oid, &type, &sz);1993 read_unlock();1994 if (!src->data) {1995 if (src_entry->preferred_base) {1996 static int warned = 0;1997 if (!warned++)1998 warning("object %s cannot be read",1999 oid_to_hex(&src_entry->idx.oid));2000 /*2001 * Those objects are not included in the2002 * resulting pack. Be resilient and ignore2003 * them if they can't be read, in case the2004 * pack could be created nevertheless.2005 */2006 return 0;2007 }2008 die("object %s cannot be read",2009 oid_to_hex(&src_entry->idx.oid));2010 }2011 if (sz != src_size)2012 die("object %s inconsistent object length (%lu vs %lu)",2013 oid_to_hex(&src_entry->idx.oid), sz,2014 src_size);2015 *mem_usage += sz;2016 }2017 if (!src->index) {2018 src->index = create_delta_index(src->data, src_size);2019 if (!src->index) {2020 static int warned = 0;2021 if (!warned++)2022 warning("suboptimal pack - out of memory");2023 return 0;2024 }2025 *mem_usage += sizeof_delta_index(src->index);2026 }20272028 delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);2029 if (!delta_buf)2030 return 0;2031 if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {2032 free(delta_buf);2033 return 0;2034 }20352036 if (DELTA(trg_entry)) {2037 /* Prefer only shallower same-sized deltas. */2038 if (delta_size == DELTA_SIZE(trg_entry) &&2039 src->depth + 1 >= trg->depth) {2040 free(delta_buf);2041 return 0;2042 }2043 }20442045 /*2046 * Handle memory allocation outside of the cache2047 * accounting lock. 
static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (oe_type(trg_entry) != oe_type(src_entry))
		return -1;

	/*
	 * We do not bother to try a delta that we discarded on an
	 * earlier try, but only when reusing delta data.  Note that
	 * src_entry that is marked as the preferred_base should always
	 * be considered, as even if we produce a suboptimal delta against
	 * it, we will still save the transfer cost, as we already know
	 * the other side has it and we won't send src_entry at all.
	 */
	if (reuse_delta && IN_PACK(trg_entry) &&
	    IN_PACK(trg_entry) == IN_PACK(src_entry) &&
	    !src_entry->preferred_base &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics. */
	trg_size = SIZE(trg_entry);
	if (!DELTA(trg_entry)) {
		max_size = trg_size/2 - the_hash_algo->rawsz;
		ref_depth = 1;
	} else {
		max_size = DELTA_SIZE(trg_entry);
		ref_depth = trg->depth;
	}
	max_size = (uint64_t)max_size * (max_depth - src->depth) /
						(max_depth - ref_depth + 1);
	if (max_size == 0)
		return 0;
	src_size = SIZE(src_entry);
	sizediff = src_size < trg_size ? trg_size - src_size : 0;
	if (sizediff >= max_size)
		return 0;
	if (trg_size < src_size / 32)
		return 0;

	/* Load data if not already done */
	if (!trg->data) {
		read_lock();
		trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
		read_unlock();
		if (!trg->data)
			die("object %s cannot be read",
			    oid_to_hex(&trg_entry->idx.oid));
		if (sz != trg_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    oid_to_hex(&trg_entry->idx.oid), sz,
			    trg_size);
		*mem_usage += sz;
	}
	if (!src->data) {
		read_lock();
		src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
		read_unlock();
		if (!src->data) {
			if (src_entry->preferred_base) {
				static int warned = 0;
				if (!warned++)
					warning("object %s cannot be read",
						oid_to_hex(&src_entry->idx.oid));
				/*
				 * Those objects are not included in the
				 * resulting pack.  Be resilient and ignore
				 * them if they can't be read, in case the
				 * pack could be created nevertheless.
				 */
				return 0;
			}
			die("object %s cannot be read",
			    oid_to_hex(&src_entry->idx.oid));
		}
		if (sz != src_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    oid_to_hex(&src_entry->idx.oid), sz,
			    src_size);
		*mem_usage += sz;
	}
	if (!src->index) {
		src->index = create_delta_index(src->data, src_size);
		if (!src->index) {
			static int warned = 0;
			if (!warned++)
				warning("suboptimal pack - out of memory");
			return 0;
		}
		*mem_usage += sizeof_delta_index(src->index);
	}

	delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
	if (!delta_buf)
		return 0;
	if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
		free(delta_buf);
		return 0;
	}

	if (DELTA(trg_entry)) {
		/* Prefer only shallower same-sized deltas. */
		if (delta_size == DELTA_SIZE(trg_entry) &&
		    src->depth + 1 >= trg->depth) {
			free(delta_buf);
			return 0;
		}
	}

	/*
	 * Handle memory allocation outside of the cache
	 * accounting lock.  Compiler will optimize the strangeness
	 * away when NO_PTHREADS is defined.
	 */
	free(trg_entry->delta_data);
	cache_lock();
	if (trg_entry->delta_data) {
		delta_cache_size -= DELTA_SIZE(trg_entry);
		trg_entry->delta_data = NULL;
	}
	if (delta_cacheable(src_size, trg_size, delta_size)) {
		delta_cache_size += delta_size;
		cache_unlock();
		trg_entry->delta_data = xrealloc(delta_buf, delta_size);
	} else {
		cache_unlock();
		free(delta_buf);
	}

	SET_DELTA(trg_entry, src_entry);
	SET_DELTA_SIZE(trg_entry, delta_size);
	trg->depth = src->depth + 1;

	return 1;
}

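/*
 * Return values of try_delta() above, as consumed by find_deltas()
 * below: 1 means a delta against src was recorded on trg, 0 means
 * this pair was skipped but other window entries may still be tried,
 * and -1 means the object types differ, which makes the caller stop
 * scanning the rest of the window.
 */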
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
	struct object_entry *child = DELTA_CHILD(me);
	unsigned int m = n;
	while (child) {
		unsigned int c = check_delta_limit(child, n + 1);
		if (m < c)
			m = c;
		child = DELTA_SIBLING(child);
	}
	return m;
}

static unsigned long free_unpacked(struct unpacked *n)
{
	unsigned long freed_mem = sizeof_delta_index(n->index);
	free_delta_index(n->index);
	n->index = NULL;
	if (n->data) {
		freed_mem += SIZE(n->entry);
		FREE_AND_NULL(n->data);
	}
	n->entry = NULL;
	n->depth = 0;
	return freed_mem;
}

static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{
	uint32_t i, idx = 0, count = 0;
	struct unpacked *array;
	unsigned long mem_usage = 0;

	array = xcalloc(window, sizeof(struct unpacked));

	for (;;) {
		struct object_entry *entry;
		struct unpacked *n = array + idx;
		int j, max_depth, best_base = -1;

		progress_lock();
		if (!*list_size) {
			progress_unlock();
			break;
		}
		entry = *list++;
		(*list_size)--;
		if (!entry->preferred_base) {
			(*processed)++;
			display_progress(progress_state, *processed);
		}
		progress_unlock();

		mem_usage -= free_unpacked(n);
		n->entry = entry;

		while (window_memory_limit &&
		       mem_usage > window_memory_limit &&
		       count > 1) {
			uint32_t tail = (idx + window - count) % window;
			mem_usage -= free_unpacked(array + tail);
			count--;
		}

		/* We do not compute delta to *create* objects we are not
		 * going to pack.
		 */
		if (entry->preferred_base)
			goto next;

		/*
		 * If the current object is at pack edge, take the depth of
		 * the objects that depend on the current object into account;
		 * otherwise they would become too deep.
		 */
		max_depth = depth;
		if (DELTA_CHILD(entry)) {
			max_depth -= check_delta_limit(entry, 0);
			if (max_depth <= 0)
				goto next;
		}

		j = window;
		while (--j > 0) {
			int ret;
			uint32_t other_idx = idx + j;
			struct unpacked *m;
			if (other_idx >= window)
				other_idx -= window;
			m = array + other_idx;
			if (!m->entry)
				break;
			ret = try_delta(n, m, max_depth, &mem_usage);
			if (ret < 0)
				break;
			else if (ret > 0)
				best_base = other_idx;
		}

		/*
		 * If we decided to cache the delta data, then it is best
		 * to compress it right away.  First because we have to do
		 * it anyway, and doing it here while we're threaded will
		 * save a lot of time in the non-threaded write phase,
		 * as well as allow for caching more deltas within
		 * the same cache size limit.
		 * ...
		 * But only if not writing to stdout, since in that case
		 * the network is most likely throttling writes anyway,
		 * and therefore it is best to go to the write phase ASAP
		 * instead, as we can afford spending more time compressing
		 * between writes at that moment.
		 */
		if (entry->delta_data && !pack_to_stdout) {
			unsigned long size;

			size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
			if (size < (1U << OE_Z_DELTA_BITS)) {
				entry->z_delta_size = size;
				cache_lock();
				delta_cache_size -= DELTA_SIZE(entry);
				delta_cache_size += entry->z_delta_size;
				cache_unlock();
			} else {
				FREE_AND_NULL(entry->delta_data);
				entry->z_delta_size = 0;
			}
		}

		/* if we made n a delta, and if n is already at max
		 * depth, leaving it in the window is pointless.  we
		 * should evict it first.
		 */
		if (DELTA(entry) && max_depth <= n->depth)
			continue;

		/*
		 * Move the best delta base up in the window, after the
		 * currently deltified object, to keep it longer.  It will
		 * be the first base object to be attempted next.
		 */
		if (DELTA(entry)) {
			struct unpacked swap = array[best_base];
			int dist = (window + idx - best_base) % window;
			int dst = best_base;
			while (dist--) {
				int src = (dst + 1) % window;
				array[dst] = array[src];
				dst = src;
			}
			array[dst] = swap;
		}

		next:
		idx++;
		if (count + 1 < window)
			count++;
		if (idx >= window)
			idx = 0;
	}

	for (i = 0; i < window; ++i) {
		free_delta_index(array[i].index);
		free(array[i].data);
	}
	free(array);
}

#ifndef NO_PTHREADS

static void try_to_free_from_threads(size_t size)
{
	read_lock();
	release_pack_memory(size);
	read_unlock();
}

static try_to_free_t old_try_to_free_routine;

/*
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

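/*
 * A rough timeline of the handshake described above, with the main
 * thread on the left and one worker on the right:
 *
 *   worker: find_deltas() returns; working = 0; signal progress_cond
 *   main:   wakes up, steals work from the busiest thread
 *   main:   data_ready = 1; signal the worker's cond
 *   worker: wakes up, resets data_ready to 0, loops while ->remaining
 */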
static pthread_cond_t progress_cond;

/*
 * Mutex and condition variable can't be statically-initialized on Windows.
 */
static void init_threaded_search(void)
{
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}

static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	progress_lock();
	while (me->remaining) {
		progress_unlock();

		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here.  In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);

		progress_lock();
	}
	progress_unlock();
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}

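/*
 * A quick illustration of the partitioning in ll_find_deltas() below
 * (figures invented for the example): with list_size = 1000 and 4
 * threads, the loop hands out 1000/4 = 250, then 750/3 = 250,
 * 500/2 = 250 and finally 250/1 = 250 objects, each boundary then
 * being nudged forward so that objects sharing a path hash stay in
 * the same chunk.
 */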
static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf_ln(stderr, "Delta compression using up to %d threads",
			   delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2*window && i+1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size-1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2*window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif

static void add_tag_chain(const struct object_id *oid)
{
	struct tag *tag;

	/*
	 * We catch duplicates already in add_object_entry(), but we'd
	 * prefer to do this extra check to avoid having to parse the
	 * tag at all if we already know that it's being packed (e.g., if
	 * it was included via bitmaps, we would not have parsed it
	 * previously).
	 */
	if (packlist_find(&to_pack, oid->hash, NULL))
		return;

	tag = lookup_tag(oid);
	while (1) {
		if (!tag || parse_tag(tag) || !tag->tagged)
			die("unable to pack objects reachable from tag %s",
			    oid_to_hex(oid));

		add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);

		if (tag->tagged->type != OBJ_TAG)
			return;

		tag = (struct tag *)tag->tagged;
	}
}

static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
	struct object_id peeled;

	if (starts_with(path, "refs/tags/") && /* is a tag? */
	    !peel_ref(path, &peeled) && /* peelable? */
	    packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */
		add_tag_chain(oid);
	return 0;
}

static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!to_pack.nr_objects || !window || !depth)
		return;

	ALLOC_ARRAY(delta_list, to_pack.nr_objects);
	nr_deltas = n = 0;

	for (i = 0; i < to_pack.nr_objects; i++) {
		struct object_entry *entry = to_pack.objects + i;

		if (DELTA(entry))
			/* This happens if we decided to reuse an existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (!entry->type_valid ||
		    oe_size_less_than(&to_pack, entry, 50))
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (oe_type(entry) < 0)
				die("unable to get type of object %s",
				    oid_to_hex(&entry->idx.oid));
		} else {
			if (oe_type(entry) < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress(_("Compressing objects"),
							nr_deltas);
		QSORT(delta_list, n, type_size_sort);
		ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}

static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		max_delta_cache_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.writebitmaphashcache")) {
		if (git_config_bool(k, v))
			write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
		else
			write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
	}
	if (!strcmp(k, "pack.usebitmaps")) {
		use_bitmap_index_default = git_config_bool(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1) {
			warning("no threads support, ignoring %s", k);
			delta_search_threads = 0;
		}
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	return git_default_config(k, v, cb);
}

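/*
 * The object list read by the function below is one entry per line,
 * in the same form that "git rev-list --objects" would produce, e.g.
 * (object names shortened for the illustration):
 *
 *   1234abcd... Makefile
 *   -5678ef01...
 *
 * A leading '-' marks an edge object: it becomes a preferred base
 * and is not itself added to the pack.
 */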
static void read_object_list_from_stdin(void)
{
	char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];
	struct object_id oid;
	const char *p;

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("BUG: fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_oid_hex(line+1, &oid))
				die("expected edge object ID, got garbage:\n %s",
				    line);
			add_preferred_base(&oid);
			continue;
		}
		if (parse_oid_hex(line, &oid, &p))
			die("expected object ID, got garbage:\n %s", line);

		add_preferred_base_object(p + 1);
		add_object_entry(&oid, OBJ_NONE, p + 1, 0);
	}
}

/* Remember to update object flag allocation in object.h */
#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;

	if (write_bitmap_index)
		index_commit_for_bitmap(commit);
}

static void show_object(struct object *obj, const char *name, void *data)
{
	add_preferred_base_object(name);
	add_object_entry(&obj->oid, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;
}

static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
{
	assert(arg_missing_action == MA_ALLOW_ANY);

	/*
	 * Quietly ignore ALL missing objects.  This avoids problems with
	 * staging them now and getting an odd error later.
	 */
	if (!has_object_file(&obj->oid))
		return;

	show_object(obj, name, data);
}

static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
{
	assert(arg_missing_action == MA_ALLOW_PROMISOR);

	/*
	 * Quietly ignore EXPECTED missing objects.  This avoids problems with
	 * staging them now and getting an odd error later.
	 */
	if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
		return;

	show_object(obj, name, data);
}

static int option_parse_missing_action(const struct option *opt,
				       const char *arg, int unset)
{
	assert(arg);
	assert(!unset);

	if (!strcmp(arg, "error")) {
		arg_missing_action = MA_ERROR;
		fn_show_object = show_object;
		return 0;
	}

	if (!strcmp(arg, "allow-any")) {
		arg_missing_action = MA_ALLOW_ANY;
		fetch_if_missing = 0;
		fn_show_object = show_object__ma_allow_any;
		return 0;
	}

	if (!strcmp(arg, "allow-promisor")) {
		arg_missing_action = MA_ALLOW_PROMISOR;
		fetch_if_missing = 0;
		fn_show_object = show_object__ma_allow_promisor;
		return 0;
	}

	die(_("invalid value for --missing"));
	return 0;
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(&commit->object.oid);
}

struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	unsigned int alloc;
	unsigned int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

/*
 * Compare the objects in the offset order, in order to emulate the
 * "git rev-list --objects" output that produced the pack originally.
 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return oidcmp(&a->object->oid, &b->object->oid);
}

static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = get_packed_git(the_repository); p; p = p->next) {
		struct object_id oid;
		struct object *o;

		if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			o = lookup_unknown_object(oid.hash);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		QSORT(in_pack.array, in_pack.nr, ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(&o->oid, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

static int add_loose_object(const struct object_id *oid, const char *path,
			    void *data)
{
	enum object_type type = oid_object_info(the_repository, oid, NULL);

	if (type < 0) {
		warning("loose object at %s could not be examined", path);
		return 0;
	}

	add_object_entry(oid, type, "", 0);
	return 0;
}

/*
 * We actually don't even have to worry about reachability here.
 * add_object_entry will weed out duplicates, so we just add every
 * loose object we find.
 */
static void add_unreachable_loose_objects(void)
{
	for_each_loose_file_in_objdir(get_object_directory(),
				      add_loose_object,
				      NULL, NULL, NULL);
}

static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found :
					get_packed_git(the_repository);

	while (p) {
		if ((!p->pack_local || p->pack_keep ||
		     p->pack_keep_in_core) &&
		    find_pack_entry_one(oid->hash, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = get_packed_git(the_repository);
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

/*
 * Store a list of sha1s that should not be discarded, either because
 * they were written too recently, or because they are reachable from
 * another object that was.
 *
 * This is filled by get_object_list.
 */
static struct oid_array recent_objects;

static int loosened_object_can_be_discarded(const struct object_id *oid,
					    timestamp_t mtime)
{
	if (!unpack_unreachable_expiration)
		return 0;
	if (mtime > unpack_unreachable_expiration)
		return 0;
	if (oid_array_lookup(&recent_objects, oid) >= 0)
		return 0;
	return 1;
}

static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	struct object_id oid;

	for (p = get_packed_git(the_repository); p; p = p->next) {
		if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			nth_packed_object_oid(&oid, p, i);
			if (!packlist_find(&to_pack, oid.hash, NULL) &&
			    !has_sha1_pack_kept_or_nonlocal(&oid) &&
			    !loosened_object_can_be_discarded(&oid, p->mtime))
				if (force_object_loose(&oid, p->mtime))
					die("unable to force loose object");
		}
	}
}

/*
 * This tracks any options which pack-reuse code expects to be on, or which a
 * reader of the pack might not understand, and which would therefore prevent
 * blind reuse of what we have on disk.
 */
static int pack_options_allow_reuse(void)
{
	return pack_to_stdout &&
	       allow_ofs_delta &&
	       !ignore_packed_keep_on_disk &&
	       !ignore_packed_keep_in_core &&
	       (!local || !have_non_local_packs) &&
	       !incremental;
}

static int get_object_list_from_bitmap(struct rev_info *revs)
{
	if (prepare_bitmap_walk(revs) < 0)
		return -1;

	if (pack_options_allow_reuse() &&
	    !reuse_partial_packfile_from_bitmap(
			&reuse_packfile,
			&reuse_packfile_objects,
			&reuse_packfile_offset)) {
		assert(reuse_packfile_objects);
		nr_result += reuse_packfile_objects;
		display_progress(progress_state, nr_result);
	}

	traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
	return 0;
}

static void record_recent_object(struct object *obj,
				 const char *name,
				 void *data)
{
	oid_array_append(&recent_objects, &obj->oid);
}

static void record_recent_commit(struct commit *commit, void *data)
{
	oid_array_append(&recent_objects, &commit->object.oid);
}

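/*
 * get_object_list() below reads additional revision arguments from
 * stdin, one per line, terminated by an empty line; for instance
 * (made-up refs):
 *
 *   v2.18.0
 *   --not
 *   v2.17.0
 *
 * "--not" toggles the UNINTERESTING flag for the lines that follow,
 * and "--shallow <oid>" registers a shallow boundary.
 */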
static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	/* make sure shallows are read */
	is_repository_shallow();

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				write_bitmap_index = 0;
				continue;
			}
			if (starts_with(line, "--shallow ")) {
				struct object_id oid;
				if (get_oid_hex(line + 10, &oid))
					die("not an SHA-1 '%s'", line + 10);
				register_shallow(&oid);
				use_bitmap_index = 0;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
			die("bad revision '%s'", line);
	}

	if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
		return;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(&revs, show_edge);

	if (!fn_show_object)
		fn_show_object = show_object;
	traverse_commit_list_filtered(&filter_options, &revs,
				      show_commit, fn_show_object, NULL,
				      NULL);

	if (unpack_unreachable_expiration) {
		revs.ignore_missing_links = 1;
		if (add_unseen_recent_objects_to_traversal(&revs,
				unpack_unreachable_expiration))
			die("unable to add recent objects");
		if (prepare_revision_walk(&revs))
			die("revision walk setup failed");
		traverse_commit_list(&revs, record_recent_commit,
				     record_recent_object, NULL);
	}

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (pack_loose_unreachable)
		add_unreachable_loose_objects();
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);

	oid_array_clear(&recent_objects);
}

static void add_extra_kept_packs(const struct string_list *names)
{
	struct packed_git *p;

	if (!names->nr)
		return;

	for (p = get_packed_git(the_repository); p; p = p->next) {
		const char *name = basename(p->pack_name);
		int i;

		if (!p->pack_local)
			continue;

		for (i = 0; i < names->nr; i++)
			if (!fspathcmp(name, names->items[i].string))
				break;

		if (i < names->nr) {
			p->pack_keep_in_core = 1;
			ignore_packed_keep_in_core = 1;
			continue;
		}
	}
}

static int option_parse_index_version(const struct option *opt,
				      const char *arg, int unset)
{
	char *c;
	const char *val = arg;
	pack_idx_opts.version = strtoul(val, &c, 10);
	if (pack_idx_opts.version > 2)
		die(_("unsupported index version %s"), val);
	if (*c == ',' && c[1])
		pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
	if (*c || pack_idx_opts.off32_limit & 0x80000000)
		die(_("bad index version '%s'"), val);
	return 0;
}

static int option_parse_unpack_unreachable(const struct option *opt,
					   const char *arg, int unset)
{
	if (unset) {
		unpack_unreachable = 0;
		unpack_unreachable_expiration = 0;
	}
	else {
		unpack_unreachable = 1;
		if (arg)
			unpack_unreachable_expiration = approxidate(arg);
	}
	return 0;
}

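/*
 * Typical invocations handled below (names are only examples):
 *
 *   git rev-list --objects HEAD | git pack-objects my-pack
 *   git pack-objects --revs --stdout <revision-args >out.pack
 *
 * The first form writes my-pack-<hash>.pack and my-pack-<hash>.idx;
 * the second streams a single pack to stdout.
 */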
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int shallow = 0;
	int all_progress_implied = 0;
	struct argv_array rp = ARGV_ARRAY_INIT;
	int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
	int rev_list_index = 0;
	struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
	struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
			      N_("limit the objects to those that are not yet packed"),
			      1, PARSE_OPT_NONEG),
		OPT_SET_INT_F(0, "all", &rev_list_all,
			      N_("include objects reachable from any reference"),
			      1, PARSE_OPT_NONEG),
		OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
			      N_("include objects referred by reflog entries"),
			      1, PARSE_OPT_NONEG),
		OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
			      N_("include objects referred to by the index"),
			      1, PARSE_OPT_NONEG),
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
			 N_("ignore packs that have companion .keep file")),
		OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
				N_("ignore this pack")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
		{ OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
		  N_("handling for missing objects"), PARSE_OPT_NONEG,
		  option_parse_missing_action },
		OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
			 N_("do not pack objects in promisor packfiles")),
		OPT_END(),
	};

	if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
		BUG("too many dfs states, increase OE_DFS_STATE_BITS");

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

	if (depth >= (1 << OE_DEPTH_BITS)) {
		warning(_("delta chain depth %d is too deep, forcing %d"),
			depth, (1 << OE_DEPTH_BITS) - 1);
		depth = (1 << OE_DEPTH_BITS) - 1;
	}
	if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
		warning(_("pack.deltaCacheLimit is too high, forcing %d"),
			(1U << OE_Z_DELTA_BITS) - 1);
		cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
	}

	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (exclude_promisor_objects) {
		use_internal_rev_list = 1;
		fetch_if_missing = 0;
		argv_array_push(&rp, "--exclude-promisor-objects");
	}
	if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
		use_internal_rev_list = 1;

	if (!reuse_object)
		reuse_delta = 0;
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible");
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	if (filter_options.choice) {
		if (!pack_to_stdout)
			die("cannot use --filter without --stdout");
		use_bitmap_index = 0;
	}

	/*
	 * "soft" reasons not to use bitmaps - for on-disk repack by default we want
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed objects
	 *   are packed in suboptimal order).
	 *
	 * - to use a more robust pack-generation codepath (avoiding possible
	 *   bugs in bitmap code and possible bitmap index corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

	add_extra_kept_packs(&keep_pack_list);
	if (ignore_packed_keep_on_disk) {
		struct packed_git *p;
		for (p = get_packed_git(the_repository); p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep_on_disk = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep_on_disk above, we do not
		 * want to unset "local" based on looking at packs, as
		 * it also covers non-local objects
		 */
		struct packed_git *p;
		for (p = get_packed_git(the_repository); p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

	prepare_packing_data(&to_pack);

	if (progress)
		progress_state = start_progress(_("Enumerating objects"), 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf_ln(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			   " reused %"PRIu32" (delta %"PRIu32")",
			   written, written_delta, reused, reused_delta);
	return 0;
}