1#include "builtin.h" 2#include "cache.h" 3#include "repository.h" 4#include "config.h" 5#include "attr.h" 6#include "object.h" 7#include "blob.h" 8#include "commit.h" 9#include "tag.h" 10#include "tree.h" 11#include "delta.h" 12#include "pack.h" 13#include "pack-revindex.h" 14#include "csum-file.h" 15#include "tree-walk.h" 16#include "diff.h" 17#include "revision.h" 18#include "list-objects.h" 19#include "list-objects-filter.h" 20#include "list-objects-filter-options.h" 21#include "pack-objects.h" 22#include "progress.h" 23#include "refs.h" 24#include "streaming.h" 25#include "thread-utils.h" 26#include "pack-bitmap.h" 27#include "reachable.h" 28#include "sha1-array.h" 29#include "argv-array.h" 30#include "list.h" 31#include "packfile.h" 32#include "object-store.h" 33#include "dir.h" 34 35#define IN_PACK(obj) oe_in_pack(&to_pack, obj) 36#define SIZE(obj) oe_size(&to_pack, obj) 37#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size) 38#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj) 39#define DELTA(obj) oe_delta(&to_pack, obj) 40#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) 41#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) 42#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) 43#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) 44#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) 45#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) 46 47static const char *pack_usage[] = { 48 N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 49 N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 50 NULL 51}; 52 53/* 54 * Objects we are going to pack are collected in the `to_pack` structure. 55 * It contains an array (dynamically expanded) of the object data, and a map 56 * that can resolve SHA1s to their position in the array. 57 */ 58static struct packing_data to_pack; 59 60static struct pack_idx_entry **written_list; 61static uint32_t nr_result, nr_written, nr_seen; 62 63static int non_empty; 64static int reuse_delta = 1, reuse_object = 1; 65static int keep_unreachable, unpack_unreachable, include_tag; 66static timestamp_t unpack_unreachable_expiration; 67static int pack_loose_unreachable; 68static int local; 69static int have_non_local_packs; 70static int incremental; 71static int ignore_packed_keep_on_disk; 72static int ignore_packed_keep_in_core; 73static int allow_ofs_delta; 74static struct pack_idx_option pack_idx_opts; 75static const char *base_name; 76static int progress = 1; 77static int window = 10; 78static unsigned long pack_size_limit; 79static int depth = 50; 80static int delta_search_threads; 81static int pack_to_stdout; 82static int num_preferred_base; 83static struct progress *progress_state; 84 85static struct packed_git *reuse_packfile; 86static uint32_t reuse_packfile_objects; 87static off_t reuse_packfile_offset; 88 89static int use_bitmap_index_default = 1; 90static int use_bitmap_index = -1; 91static int write_bitmap_index; 92static uint16_t write_bitmap_options; 93 94static int exclude_promisor_objects; 95 96static unsigned long delta_cache_size = 0; 97static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; 98static unsigned long cache_max_small_delta_size = 1000; 99 100static unsigned long window_memory_limit = 0; 101 102static struct list_objects_filter_options filter_options; 103 104enum missing_action { 105 MA_ERROR = 0, /* fail if any missing objects are encountered */ 106 MA_ALLOW_ANY, /* silently allow ALL missing objects */ 107 MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ 108}; 109static enum missing_action arg_missing_action; 110static show_object_fn fn_show_object; 111 112/* 113 * stats 114 */ 115static uint32_t written, written_delta; 116static uint32_t reused, reused_delta; 117 118/* 119 * Indexed commits 120 */ 121static struct commit **indexed_commits; 122static unsigned int indexed_commits_nr; 123static unsigned int indexed_commits_alloc; 124 125static void index_commit_for_bitmap(struct commit *commit) 126{ 127 if (indexed_commits_nr >= indexed_commits_alloc) { 128 indexed_commits_alloc = (indexed_commits_alloc + 32) * 2; 129 REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 130 } 131 132 indexed_commits[indexed_commits_nr++] = commit; 133} 134 135static void *get_delta(struct object_entry *entry) 136{ 137 unsigned long size, base_size, delta_size; 138 void *buf, *base_buf, *delta_buf; 139 enum object_type type; 140 141 buf = read_object_file(&entry->idx.oid, &type, &size); 142 if (!buf) 143 die("unable to read %s", oid_to_hex(&entry->idx.oid)); 144 base_buf = read_object_file(&DELTA(entry)->idx.oid, &type, 145 &base_size); 146 if (!base_buf) 147 die("unable to read %s", 148 oid_to_hex(&DELTA(entry)->idx.oid)); 149 delta_buf = diff_delta(base_buf, base_size, 150 buf, size, &delta_size, 0); 151 if (!delta_buf || delta_size != DELTA_SIZE(entry)) 152 die("delta size changed"); 153 free(buf); 154 free(base_buf); 155 return delta_buf; 156} 157 158static unsigned long do_compress(void **pptr, unsigned long size) 159{ 160 git_zstream stream; 161 void *in, *out; 162 unsigned long maxsize; 163 164 git_deflate_init(&stream, pack_compression_level); 165 maxsize = git_deflate_bound(&stream, size); 166 167 in = *pptr; 168 out = xmalloc(maxsize); 169 *pptr = out; 170 171 stream.next_in = in; 172 stream.avail_in = size; 173 stream.next_out = out; 174 stream.avail_out = maxsize; 175 while (git_deflate(&stream, Z_FINISH) == Z_OK) 176 ; /* nothing */ 177 git_deflate_end(&stream); 178 179 free(in); 180 return stream.total_out; 181} 182 183static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, 184 const struct object_id *oid) 185{ 186 git_zstream stream; 187 unsigned char ibuf[1024 * 16]; 188 unsigned char obuf[1024 * 16]; 189 unsigned long olen = 0; 190 191 git_deflate_init(&stream, pack_compression_level); 192 193 for (;;) { 194 ssize_t readlen; 195 int zret = Z_OK; 196 readlen = read_istream(st, ibuf, sizeof(ibuf)); 197 if (readlen == -1) 198 die(_("unable to read %s"), oid_to_hex(oid)); 199 200 stream.next_in = ibuf; 201 stream.avail_in = readlen; 202 while ((stream.avail_in || readlen == 0) && 203 (zret == Z_OK || zret == Z_BUF_ERROR)) { 204 stream.next_out = obuf; 205 stream.avail_out = sizeof(obuf); 206 zret = git_deflate(&stream, readlen ? 0 : Z_FINISH); 207 hashwrite(f, obuf, stream.next_out - obuf); 208 olen += stream.next_out - obuf; 209 } 210 if (stream.avail_in) 211 die(_("deflate error (%d)"), zret); 212 if (readlen == 0) { 213 if (zret != Z_STREAM_END) 214 die(_("deflate error (%d)"), zret); 215 break; 216 } 217 } 218 git_deflate_end(&stream); 219 return olen; 220} 221 222/* 223 * we are going to reuse the existing object data as is. make 224 * sure it is not corrupt. 225 */ 226static int check_pack_inflate(struct packed_git *p, 227 struct pack_window **w_curs, 228 off_t offset, 229 off_t len, 230 unsigned long expect) 231{ 232 git_zstream stream; 233 unsigned char fakebuf[4096], *in; 234 int st; 235 236 memset(&stream, 0, sizeof(stream)); 237 git_inflate_init(&stream); 238 do { 239 in = use_pack(p, w_curs, offset, &stream.avail_in); 240 stream.next_in = in; 241 stream.next_out = fakebuf; 242 stream.avail_out = sizeof(fakebuf); 243 st = git_inflate(&stream, Z_FINISH); 244 offset += stream.next_in - in; 245 } while (st == Z_OK || st == Z_BUF_ERROR); 246 git_inflate_end(&stream); 247 return (st == Z_STREAM_END && 248 stream.total_out == expect && 249 stream.total_in == len) ? 0 : -1; 250} 251 252static void copy_pack_data(struct hashfile *f, 253 struct packed_git *p, 254 struct pack_window **w_curs, 255 off_t offset, 256 off_t len) 257{ 258 unsigned char *in; 259 unsigned long avail; 260 261 while (len) { 262 in = use_pack(p, w_curs, offset, &avail); 263 if (avail > len) 264 avail = (unsigned long)len; 265 hashwrite(f, in, avail); 266 offset += avail; 267 len -= avail; 268 } 269} 270 271/* Return 0 if we will bust the pack-size limit */ 272static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, 273 unsigned long limit, int usable_delta) 274{ 275 unsigned long size, datalen; 276 unsigned char header[MAX_PACK_OBJECT_HEADER], 277 dheader[MAX_PACK_OBJECT_HEADER]; 278 unsigned hdrlen; 279 enum object_type type; 280 void *buf; 281 struct git_istream *st = NULL; 282 const unsigned hashsz = the_hash_algo->rawsz; 283 284 if (!usable_delta) { 285 if (oe_type(entry) == OBJ_BLOB && 286 oe_size_greater_than(&to_pack, entry, big_file_threshold) && 287 (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) 288 buf = NULL; 289 else { 290 buf = read_object_file(&entry->idx.oid, &type, &size); 291 if (!buf) 292 die(_("unable to read %s"), 293 oid_to_hex(&entry->idx.oid)); 294 } 295 /* 296 * make sure no cached delta data remains from a 297 * previous attempt before a pack split occurred. 298 */ 299 FREE_AND_NULL(entry->delta_data); 300 entry->z_delta_size = 0; 301 } else if (entry->delta_data) { 302 size = DELTA_SIZE(entry); 303 buf = entry->delta_data; 304 entry->delta_data = NULL; 305 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 306 OBJ_OFS_DELTA : OBJ_REF_DELTA; 307 } else { 308 buf = get_delta(entry); 309 size = DELTA_SIZE(entry); 310 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 311 OBJ_OFS_DELTA : OBJ_REF_DELTA; 312 } 313 314 if (st) /* large blob case, just assume we don't compress well */ 315 datalen = size; 316 else if (entry->z_delta_size) 317 datalen = entry->z_delta_size; 318 else 319 datalen = do_compress(&buf, size); 320 321 /* 322 * The object header is a byte of 'type' followed by zero or 323 * more bytes of length. 324 */ 325 hdrlen = encode_in_pack_object_header(header, sizeof(header), 326 type, size); 327 328 if (type == OBJ_OFS_DELTA) { 329 /* 330 * Deltas with relative base contain an additional 331 * encoding of the relative offset for the delta 332 * base from this object's position in the pack. 333 */ 334 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; 335 unsigned pos = sizeof(dheader) - 1; 336 dheader[pos] = ofs & 127; 337 while (ofs >>= 7) 338 dheader[--pos] = 128 | (--ofs & 127); 339 if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) { 340 if (st) 341 close_istream(st); 342 free(buf); 343 return 0; 344 } 345 hashwrite(f, header, hdrlen); 346 hashwrite(f, dheader + pos, sizeof(dheader) - pos); 347 hdrlen += sizeof(dheader) - pos; 348 } else if (type == OBJ_REF_DELTA) { 349 /* 350 * Deltas with a base reference contain 351 * additional bytes for the base object ID. 352 */ 353 if (limit && hdrlen + hashsz + datalen + hashsz >= limit) { 354 if (st) 355 close_istream(st); 356 free(buf); 357 return 0; 358 } 359 hashwrite(f, header, hdrlen); 360 hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz); 361 hdrlen += hashsz; 362 } else { 363 if (limit && hdrlen + datalen + hashsz >= limit) { 364 if (st) 365 close_istream(st); 366 free(buf); 367 return 0; 368 } 369 hashwrite(f, header, hdrlen); 370 } 371 if (st) { 372 datalen = write_large_blob_data(st, f, &entry->idx.oid); 373 close_istream(st); 374 } else { 375 hashwrite(f, buf, datalen); 376 free(buf); 377 } 378 379 return hdrlen + datalen; 380} 381 382/* Return 0 if we will bust the pack-size limit */ 383static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, 384 unsigned long limit, int usable_delta) 385{ 386 struct packed_git *p = IN_PACK(entry); 387 struct pack_window *w_curs = NULL; 388 struct revindex_entry *revidx; 389 off_t offset; 390 enum object_type type = oe_type(entry); 391 off_t datalen; 392 unsigned char header[MAX_PACK_OBJECT_HEADER], 393 dheader[MAX_PACK_OBJECT_HEADER]; 394 unsigned hdrlen; 395 const unsigned hashsz = the_hash_algo->rawsz; 396 unsigned long entry_size = SIZE(entry); 397 398 if (DELTA(entry)) 399 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 400 OBJ_OFS_DELTA : OBJ_REF_DELTA; 401 hdrlen = encode_in_pack_object_header(header, sizeof(header), 402 type, entry_size); 403 404 offset = entry->in_pack_offset; 405 revidx = find_pack_revindex(p, offset); 406 datalen = revidx[1].offset - offset; 407 if (!pack_to_stdout && p->index_version > 1 && 408 check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 409 error("bad packed object CRC for %s", 410 oid_to_hex(&entry->idx.oid)); 411 unuse_pack(&w_curs); 412 return write_no_reuse_object(f, entry, limit, usable_delta); 413 } 414 415 offset += entry->in_pack_header_size; 416 datalen -= entry->in_pack_header_size; 417 418 if (!pack_to_stdout && p->index_version == 1 && 419 check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) { 420 error("corrupt packed object for %s", 421 oid_to_hex(&entry->idx.oid)); 422 unuse_pack(&w_curs); 423 return write_no_reuse_object(f, entry, limit, usable_delta); 424 } 425 426 if (type == OBJ_OFS_DELTA) { 427 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; 428 unsigned pos = sizeof(dheader) - 1; 429 dheader[pos] = ofs & 127; 430 while (ofs >>= 7) 431 dheader[--pos] = 128 | (--ofs & 127); 432 if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) { 433 unuse_pack(&w_curs); 434 return 0; 435 } 436 hashwrite(f, header, hdrlen); 437 hashwrite(f, dheader + pos, sizeof(dheader) - pos); 438 hdrlen += sizeof(dheader) - pos; 439 reused_delta++; 440 } else if (type == OBJ_REF_DELTA) { 441 if (limit && hdrlen + hashsz + datalen + hashsz >= limit) { 442 unuse_pack(&w_curs); 443 return 0; 444 } 445 hashwrite(f, header, hdrlen); 446 hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz); 447 hdrlen += hashsz; 448 reused_delta++; 449 } else { 450 if (limit && hdrlen + datalen + hashsz >= limit) { 451 unuse_pack(&w_curs); 452 return 0; 453 } 454 hashwrite(f, header, hdrlen); 455 } 456 copy_pack_data(f, p, &w_curs, offset, datalen); 457 unuse_pack(&w_curs); 458 reused++; 459 return hdrlen + datalen; 460} 461 462/* Return 0 if we will bust the pack-size limit */ 463static off_t write_object(struct hashfile *f, 464 struct object_entry *entry, 465 off_t write_offset) 466{ 467 unsigned long limit; 468 off_t len; 469 int usable_delta, to_reuse; 470 471 if (!pack_to_stdout) 472 crc32_begin(f); 473 474 /* apply size limit if limited packsize and not first object */ 475 if (!pack_size_limit || !nr_written) 476 limit = 0; 477 else if (pack_size_limit <= write_offset) 478 /* 479 * the earlier object did not fit the limit; avoid 480 * mistaking this with unlimited (i.e. limit = 0). 481 */ 482 limit = 1; 483 else 484 limit = pack_size_limit - write_offset; 485 486 if (!DELTA(entry)) 487 usable_delta = 0; /* no delta */ 488 else if (!pack_size_limit) 489 usable_delta = 1; /* unlimited packfile */ 490 else if (DELTA(entry)->idx.offset == (off_t)-1) 491 usable_delta = 0; /* base was written to another pack */ 492 else if (DELTA(entry)->idx.offset) 493 usable_delta = 1; /* base already exists in this pack */ 494 else 495 usable_delta = 0; /* base could end up in another pack */ 496 497 if (!reuse_object) 498 to_reuse = 0; /* explicit */ 499 else if (!IN_PACK(entry)) 500 to_reuse = 0; /* can't reuse what we don't have */ 501 else if (oe_type(entry) == OBJ_REF_DELTA || 502 oe_type(entry) == OBJ_OFS_DELTA) 503 /* check_object() decided it for us ... */ 504 to_reuse = usable_delta; 505 /* ... but pack split may override that */ 506 else if (oe_type(entry) != entry->in_pack_type) 507 to_reuse = 0; /* pack has delta which is unusable */ 508 else if (DELTA(entry)) 509 to_reuse = 0; /* we want to pack afresh */ 510 else 511 to_reuse = 1; /* we have it in-pack undeltified, 512 * and we do not need to deltify it. 513 */ 514 515 if (!to_reuse) 516 len = write_no_reuse_object(f, entry, limit, usable_delta); 517 else 518 len = write_reuse_object(f, entry, limit, usable_delta); 519 if (!len) 520 return 0; 521 522 if (usable_delta) 523 written_delta++; 524 written++; 525 if (!pack_to_stdout) 526 entry->idx.crc32 = crc32_end(f); 527 return len; 528} 529 530enum write_one_status { 531 WRITE_ONE_SKIP = -1, /* already written */ 532 WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */ 533 WRITE_ONE_WRITTEN = 1, /* normal */ 534 WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */ 535}; 536 537static enum write_one_status write_one(struct hashfile *f, 538 struct object_entry *e, 539 off_t *offset) 540{ 541 off_t size; 542 int recursing; 543 544 /* 545 * we set offset to 1 (which is an impossible value) to mark 546 * the fact that this object is involved in "write its base 547 * first before writing a deltified object" recursion. 548 */ 549 recursing = (e->idx.offset == 1); 550 if (recursing) { 551 warning("recursive delta detected for object %s", 552 oid_to_hex(&e->idx.oid)); 553 return WRITE_ONE_RECURSIVE; 554 } else if (e->idx.offset || e->preferred_base) { 555 /* offset is non zero if object is written already. */ 556 return WRITE_ONE_SKIP; 557 } 558 559 /* if we are deltified, write out base object first. */ 560 if (DELTA(e)) { 561 e->idx.offset = 1; /* now recurse */ 562 switch (write_one(f, DELTA(e), offset)) { 563 case WRITE_ONE_RECURSIVE: 564 /* we cannot depend on this one */ 565 SET_DELTA(e, NULL); 566 break; 567 default: 568 break; 569 case WRITE_ONE_BREAK: 570 e->idx.offset = recursing; 571 return WRITE_ONE_BREAK; 572 } 573 } 574 575 e->idx.offset = *offset; 576 size = write_object(f, e, *offset); 577 if (!size) { 578 e->idx.offset = recursing; 579 return WRITE_ONE_BREAK; 580 } 581 written_list[nr_written++] = &e->idx; 582 583 /* make sure off_t is sufficiently large not to wrap */ 584 if (signed_add_overflows(*offset, size)) 585 die("pack too large for current definition of off_t"); 586 *offset += size; 587 return WRITE_ONE_WRITTEN; 588} 589 590static int mark_tagged(const char *path, const struct object_id *oid, int flag, 591 void *cb_data) 592{ 593 struct object_id peeled; 594 struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL); 595 596 if (entry) 597 entry->tagged = 1; 598 if (!peel_ref(path, &peeled)) { 599 entry = packlist_find(&to_pack, peeled.hash, NULL); 600 if (entry) 601 entry->tagged = 1; 602 } 603 return 0; 604} 605 606static inline void add_to_write_order(struct object_entry **wo, 607 unsigned int *endp, 608 struct object_entry *e) 609{ 610 if (e->filled) 611 return; 612 wo[(*endp)++] = e; 613 e->filled = 1; 614} 615 616static void add_descendants_to_write_order(struct object_entry **wo, 617 unsigned int *endp, 618 struct object_entry *e) 619{ 620 int add_to_order = 1; 621 while (e) { 622 if (add_to_order) { 623 struct object_entry *s; 624 /* add this node... */ 625 add_to_write_order(wo, endp, e); 626 /* all its siblings... */ 627 for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) { 628 add_to_write_order(wo, endp, s); 629 } 630 } 631 /* drop down a level to add left subtree nodes if possible */ 632 if (DELTA_CHILD(e)) { 633 add_to_order = 1; 634 e = DELTA_CHILD(e); 635 } else { 636 add_to_order = 0; 637 /* our sibling might have some children, it is next */ 638 if (DELTA_SIBLING(e)) { 639 e = DELTA_SIBLING(e); 640 continue; 641 } 642 /* go back to our parent node */ 643 e = DELTA(e); 644 while (e && !DELTA_SIBLING(e)) { 645 /* we're on the right side of a subtree, keep 646 * going up until we can go right again */ 647 e = DELTA(e); 648 } 649 if (!e) { 650 /* done- we hit our original root node */ 651 return; 652 } 653 /* pass it off to sibling at this level */ 654 e = DELTA_SIBLING(e); 655 } 656 }; 657} 658 659static void add_family_to_write_order(struct object_entry **wo, 660 unsigned int *endp, 661 struct object_entry *e) 662{ 663 struct object_entry *root; 664 665 for (root = e; DELTA(root); root = DELTA(root)) 666 ; /* nothing */ 667 add_descendants_to_write_order(wo, endp, root); 668} 669 670static struct object_entry **compute_write_order(void) 671{ 672 unsigned int i, wo_end, last_untagged; 673 674 struct object_entry **wo; 675 struct object_entry *objects = to_pack.objects; 676 677 for (i = 0; i < to_pack.nr_objects; i++) { 678 objects[i].tagged = 0; 679 objects[i].filled = 0; 680 SET_DELTA_CHILD(&objects[i], NULL); 681 SET_DELTA_SIBLING(&objects[i], NULL); 682 } 683 684 /* 685 * Fully connect delta_child/delta_sibling network. 686 * Make sure delta_sibling is sorted in the original 687 * recency order. 688 */ 689 for (i = to_pack.nr_objects; i > 0;) { 690 struct object_entry *e = &objects[--i]; 691 if (!DELTA(e)) 692 continue; 693 /* Mark me as the first child */ 694 e->delta_sibling_idx = DELTA(e)->delta_child_idx; 695 SET_DELTA_CHILD(DELTA(e), e); 696 } 697 698 /* 699 * Mark objects that are at the tip of tags. 700 */ 701 for_each_tag_ref(mark_tagged, NULL); 702 703 /* 704 * Give the objects in the original recency order until 705 * we see a tagged tip. 706 */ 707 ALLOC_ARRAY(wo, to_pack.nr_objects); 708 for (i = wo_end = 0; i < to_pack.nr_objects; i++) { 709 if (objects[i].tagged) 710 break; 711 add_to_write_order(wo, &wo_end, &objects[i]); 712 } 713 last_untagged = i; 714 715 /* 716 * Then fill all the tagged tips. 717 */ 718 for (; i < to_pack.nr_objects; i++) { 719 if (objects[i].tagged) 720 add_to_write_order(wo, &wo_end, &objects[i]); 721 } 722 723 /* 724 * And then all remaining commits and tags. 725 */ 726 for (i = last_untagged; i < to_pack.nr_objects; i++) { 727 if (oe_type(&objects[i]) != OBJ_COMMIT && 728 oe_type(&objects[i]) != OBJ_TAG) 729 continue; 730 add_to_write_order(wo, &wo_end, &objects[i]); 731 } 732 733 /* 734 * And then all the trees. 735 */ 736 for (i = last_untagged; i < to_pack.nr_objects; i++) { 737 if (oe_type(&objects[i]) != OBJ_TREE) 738 continue; 739 add_to_write_order(wo, &wo_end, &objects[i]); 740 } 741 742 /* 743 * Finally all the rest in really tight order 744 */ 745 for (i = last_untagged; i < to_pack.nr_objects; i++) { 746 if (!objects[i].filled) 747 add_family_to_write_order(wo, &wo_end, &objects[i]); 748 } 749 750 if (wo_end != to_pack.nr_objects) 751 die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects); 752 753 return wo; 754} 755 756static off_t write_reused_pack(struct hashfile *f) 757{ 758 unsigned char buffer[8192]; 759 off_t to_write, total; 760 int fd; 761 762 if (!is_pack_valid(reuse_packfile)) 763 die("packfile is invalid: %s", reuse_packfile->pack_name); 764 765 fd = git_open(reuse_packfile->pack_name); 766 if (fd < 0) 767 die_errno("unable to open packfile for reuse: %s", 768 reuse_packfile->pack_name); 769 770 if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1) 771 die_errno("unable to seek in reused packfile"); 772 773 if (reuse_packfile_offset < 0) 774 reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz; 775 776 total = to_write = reuse_packfile_offset - sizeof(struct pack_header); 777 778 while (to_write) { 779 int read_pack = xread(fd, buffer, sizeof(buffer)); 780 781 if (read_pack <= 0) 782 die_errno("unable to read from reused packfile"); 783 784 if (read_pack > to_write) 785 read_pack = to_write; 786 787 hashwrite(f, buffer, read_pack); 788 to_write -= read_pack; 789 790 /* 791 * We don't know the actual number of objects written, 792 * only how many bytes written, how many bytes total, and 793 * how many objects total. So we can fake it by pretending all 794 * objects we are writing are the same size. This gives us a 795 * smooth progress meter, and at the end it matches the true 796 * answer. 797 */ 798 written = reuse_packfile_objects * 799 (((double)(total - to_write)) / total); 800 display_progress(progress_state, written); 801 } 802 803 close(fd); 804 written = reuse_packfile_objects; 805 display_progress(progress_state, written); 806 return reuse_packfile_offset - sizeof(struct pack_header); 807} 808 809static const char no_split_warning[] = N_( 810"disabling bitmap writing, packs are split due to pack.packSizeLimit" 811); 812 813static void write_pack_file(void) 814{ 815 uint32_t i = 0, j; 816 struct hashfile *f; 817 off_t offset; 818 uint32_t nr_remaining = nr_result; 819 time_t last_mtime = 0; 820 struct object_entry **write_order; 821 822 if (progress > pack_to_stdout) 823 progress_state = start_progress(_("Writing objects"), nr_result); 824 ALLOC_ARRAY(written_list, to_pack.nr_objects); 825 write_order = compute_write_order(); 826 827 do { 828 struct object_id oid; 829 char *pack_tmp_name = NULL; 830 831 if (pack_to_stdout) 832 f = hashfd_throughput(1, "<stdout>", progress_state); 833 else 834 f = create_tmp_packfile(&pack_tmp_name); 835 836 offset = write_pack_header(f, nr_remaining); 837 838 if (reuse_packfile) { 839 off_t packfile_size; 840 assert(pack_to_stdout); 841 842 packfile_size = write_reused_pack(f); 843 offset += packfile_size; 844 } 845 846 nr_written = 0; 847 for (; i < to_pack.nr_objects; i++) { 848 struct object_entry *e = write_order[i]; 849 if (write_one(f, e, &offset) == WRITE_ONE_BREAK) 850 break; 851 display_progress(progress_state, written); 852 } 853 854 /* 855 * Did we write the wrong # entries in the header? 856 * If so, rewrite it like in fast-import 857 */ 858 if (pack_to_stdout) { 859 finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); 860 } else if (nr_written == nr_remaining) { 861 finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); 862 } else { 863 int fd = finalize_hashfile(f, oid.hash, 0); 864 fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, 865 nr_written, oid.hash, offset); 866 close(fd); 867 if (write_bitmap_index) { 868 warning(_(no_split_warning)); 869 write_bitmap_index = 0; 870 } 871 } 872 873 if (!pack_to_stdout) { 874 struct stat st; 875 struct strbuf tmpname = STRBUF_INIT; 876 877 /* 878 * Packs are runtime accessed in their mtime 879 * order since newer packs are more likely to contain 880 * younger objects. So if we are creating multiple 881 * packs then we should modify the mtime of later ones 882 * to preserve this property. 883 */ 884 if (stat(pack_tmp_name, &st) < 0) { 885 warning_errno("failed to stat %s", pack_tmp_name); 886 } else if (!last_mtime) { 887 last_mtime = st.st_mtime; 888 } else { 889 struct utimbuf utb; 890 utb.actime = st.st_atime; 891 utb.modtime = --last_mtime; 892 if (utime(pack_tmp_name, &utb) < 0) 893 warning_errno("failed utime() on %s", pack_tmp_name); 894 } 895 896 strbuf_addf(&tmpname, "%s-", base_name); 897 898 if (write_bitmap_index) { 899 bitmap_writer_set_checksum(oid.hash); 900 bitmap_writer_build_type_index( 901 &to_pack, written_list, nr_written); 902 } 903 904 finish_tmp_packfile(&tmpname, pack_tmp_name, 905 written_list, nr_written, 906 &pack_idx_opts, oid.hash); 907 908 if (write_bitmap_index) { 909 strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid)); 910 911 stop_progress(&progress_state); 912 913 bitmap_writer_show_progress(progress); 914 bitmap_writer_reuse_bitmaps(&to_pack); 915 bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 916 bitmap_writer_build(&to_pack); 917 bitmap_writer_finish(written_list, nr_written, 918 tmpname.buf, write_bitmap_options); 919 write_bitmap_index = 0; 920 } 921 922 strbuf_release(&tmpname); 923 free(pack_tmp_name); 924 puts(oid_to_hex(&oid)); 925 } 926 927 /* mark written objects as written to previous pack */ 928 for (j = 0; j < nr_written; j++) { 929 written_list[j]->offset = (off_t)-1; 930 } 931 nr_remaining -= nr_written; 932 } while (nr_remaining && i < to_pack.nr_objects); 933 934 free(written_list); 935 free(write_order); 936 stop_progress(&progress_state); 937 if (written != nr_result) 938 die("wrote %"PRIu32" objects while expecting %"PRIu32, 939 written, nr_result); 940} 941 942static int no_try_delta(const char *path) 943{ 944 static struct attr_check *check; 945 946 if (!check) 947 check = attr_check_initl("delta", NULL); 948 if (git_check_attr(path, check)) 949 return 0; 950 if (ATTR_FALSE(check->items[0].value)) 951 return 1; 952 return 0; 953} 954 955/* 956 * When adding an object, check whether we have already added it 957 * to our packing list. If so, we can skip. However, if we are 958 * being asked to excludei t, but the previous mention was to include 959 * it, make sure to adjust its flags and tweak our numbers accordingly. 960 * 961 * As an optimization, we pass out the index position where we would have 962 * found the item, since that saves us from having to look it up again a 963 * few lines later when we want to add the new entry. 964 */ 965static int have_duplicate_entry(const struct object_id *oid, 966 int exclude, 967 uint32_t *index_pos) 968{ 969 struct object_entry *entry; 970 971 entry = packlist_find(&to_pack, oid->hash, index_pos); 972 if (!entry) 973 return 0; 974 975 if (exclude) { 976 if (!entry->preferred_base) 977 nr_result--; 978 entry->preferred_base = 1; 979 } 980 981 return 1; 982} 983 984static int want_found_object(int exclude, struct packed_git *p) 985{ 986 if (exclude) 987 return 1; 988 if (incremental) 989 return 0; 990 991 /* 992 * When asked to do --local (do not include an object that appears in a 993 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 994 * an object that appears in a pack marked with .keep), finding a pack 995 * that matches the criteria is sufficient for us to decide to omit it. 996 * However, even if this pack does not satisfy the criteria, we need to 997 * make sure no copy of this object appears in _any_ pack that makes us 998 * to omit the object, so we need to check all the packs. 999 *1000 * We can however first check whether these options can possible matter;1001 * if they do not matter we know we want the object in generated pack.1002 * Otherwise, we signal "-1" at the end to tell the caller that we do1003 * not know either way, and it needs to check more packs.1004 */1005 if (!ignore_packed_keep_on_disk &&1006 !ignore_packed_keep_in_core &&1007 (!local || !have_non_local_packs))1008 return 1;10091010 if (local && !p->pack_local)1011 return 0;1012 if (p->pack_local &&1013 ((ignore_packed_keep_on_disk && p->pack_keep) ||1014 (ignore_packed_keep_in_core && p->pack_keep_in_core)))1015 return 0;10161017 /* we don't know yet; keep looking for more packs */1018 return -1;1019}10201021/*1022 * Check whether we want the object in the pack (e.g., we do not want1023 * objects found in non-local stores if the "--local" option was used).1024 *1025 * If the caller already knows an existing pack it wants to take the object1026 * from, that is passed in *found_pack and *found_offset; otherwise this1027 * function finds if there is any pack that has the object and returns the pack1028 * and its offset in these variables.1029 */1030static int want_object_in_pack(const struct object_id *oid,1031 int exclude,1032 struct packed_git **found_pack,1033 off_t *found_offset)1034{1035 int want;1036 struct list_head *pos;10371038 if (!exclude && local && has_loose_object_nonlocal(oid))1039 return 0;10401041 /*1042 * If we already know the pack object lives in, start checks from that1043 * pack - in the usual case when neither --local was given nor .keep files1044 * are present we will determine the answer right now.1045 */1046 if (*found_pack) {1047 want = want_found_object(exclude, *found_pack);1048 if (want != -1)1049 return want;1050 }1051 list_for_each(pos, get_packed_git_mru(the_repository)) {1052 struct packed_git *p = list_entry(pos, struct packed_git, mru);1053 off_t offset;10541055 if (p == *found_pack)1056 offset = *found_offset;1057 else1058 offset = find_pack_entry_one(oid->hash, p);10591060 if (offset) {1061 if (!*found_pack) {1062 if (!is_pack_valid(p))1063 continue;1064 *found_offset = offset;1065 *found_pack = p;1066 }1067 want = want_found_object(exclude, p);1068 if (!exclude && want > 0)1069 list_move(&p->mru,1070 get_packed_git_mru(the_repository));1071 if (want != -1)1072 return want;1073 }1074 }10751076 return 1;1077}10781079static void create_object_entry(const struct object_id *oid,1080 enum object_type type,1081 uint32_t hash,1082 int exclude,1083 int no_try_delta,1084 uint32_t index_pos,1085 struct packed_git *found_pack,1086 off_t found_offset)1087{1088 struct object_entry *entry;10891090 entry = packlist_alloc(&to_pack, oid->hash, index_pos);1091 entry->hash = hash;1092 oe_set_type(entry, type);1093 if (exclude)1094 entry->preferred_base = 1;1095 else1096 nr_result++;1097 if (found_pack) {1098 oe_set_in_pack(&to_pack, entry, found_pack);1099 entry->in_pack_offset = found_offset;1100 }11011102 entry->no_try_delta = no_try_delta;1103}11041105static const char no_closure_warning[] = N_(1106"disabling bitmap writing, as some objects are not being packed"1107);11081109static int add_object_entry(const struct object_id *oid, enum object_type type,1110 const char *name, int exclude)1111{1112 struct packed_git *found_pack = NULL;1113 off_t found_offset = 0;1114 uint32_t index_pos;11151116 display_progress(progress_state, ++nr_seen);11171118 if (have_duplicate_entry(oid, exclude, &index_pos))1119 return 0;11201121 if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {1122 /* The pack is missing an object, so it will not have closure */1123 if (write_bitmap_index) {1124 warning(_(no_closure_warning));1125 write_bitmap_index = 0;1126 }1127 return 0;1128 }11291130 create_object_entry(oid, type, pack_name_hash(name),1131 exclude, name && no_try_delta(name),1132 index_pos, found_pack, found_offset);1133 return 1;1134}11351136static int add_object_entry_from_bitmap(const struct object_id *oid,1137 enum object_type type,1138 int flags, uint32_t name_hash,1139 struct packed_git *pack, off_t offset)1140{1141 uint32_t index_pos;11421143 display_progress(progress_state, ++nr_seen);11441145 if (have_duplicate_entry(oid, 0, &index_pos))1146 return 0;11471148 if (!want_object_in_pack(oid, 0, &pack, &offset))1149 return 0;11501151 create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);1152 return 1;1153}11541155struct pbase_tree_cache {1156 struct object_id oid;1157 int ref;1158 int temporary;1159 void *tree_data;1160 unsigned long tree_size;1161};11621163static struct pbase_tree_cache *(pbase_tree_cache[256]);1164static int pbase_tree_cache_ix(const struct object_id *oid)1165{1166 return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);1167}1168static int pbase_tree_cache_ix_incr(int ix)1169{1170 return (ix+1) % ARRAY_SIZE(pbase_tree_cache);1171}11721173static struct pbase_tree {1174 struct pbase_tree *next;1175 /* This is a phony "cache" entry; we are not1176 * going to evict it or find it through _get()1177 * mechanism -- this is for the toplevel node that1178 * would almost always change with any commit.1179 */1180 struct pbase_tree_cache pcache;1181} *pbase_tree;11821183static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)1184{1185 struct pbase_tree_cache *ent, *nent;1186 void *data;1187 unsigned long size;1188 enum object_type type;1189 int neigh;1190 int my_ix = pbase_tree_cache_ix(oid);1191 int available_ix = -1;11921193 /* pbase-tree-cache acts as a limited hashtable.1194 * your object will be found at your index or within a few1195 * slots after that slot if it is cached.1196 */1197 for (neigh = 0; neigh < 8; neigh++) {1198 ent = pbase_tree_cache[my_ix];1199 if (ent && !oidcmp(&ent->oid, oid)) {1200 ent->ref++;1201 return ent;1202 }1203 else if (((available_ix < 0) && (!ent || !ent->ref)) ||1204 ((0 <= available_ix) &&1205 (!ent && pbase_tree_cache[available_ix])))1206 available_ix = my_ix;1207 if (!ent)1208 break;1209 my_ix = pbase_tree_cache_ix_incr(my_ix);1210 }12111212 /* Did not find one. Either we got a bogus request or1213 * we need to read and perhaps cache.1214 */1215 data = read_object_file(oid, &type, &size);1216 if (!data)1217 return NULL;1218 if (type != OBJ_TREE) {1219 free(data);1220 return NULL;1221 }12221223 /* We need to either cache or return a throwaway copy */12241225 if (available_ix < 0)1226 ent = NULL;1227 else {1228 ent = pbase_tree_cache[available_ix];1229 my_ix = available_ix;1230 }12311232 if (!ent) {1233 nent = xmalloc(sizeof(*nent));1234 nent->temporary = (available_ix < 0);1235 }1236 else {1237 /* evict and reuse */1238 free(ent->tree_data);1239 nent = ent;1240 }1241 oidcpy(&nent->oid, oid);1242 nent->tree_data = data;1243 nent->tree_size = size;1244 nent->ref = 1;1245 if (!nent->temporary)1246 pbase_tree_cache[my_ix] = nent;1247 return nent;1248}12491250static void pbase_tree_put(struct pbase_tree_cache *cache)1251{1252 if (!cache->temporary) {1253 cache->ref--;1254 return;1255 }1256 free(cache->tree_data);1257 free(cache);1258}12591260static int name_cmp_len(const char *name)1261{1262 int i;1263 for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)1264 ;1265 return i;1266}12671268static void add_pbase_object(struct tree_desc *tree,1269 const char *name,1270 int cmplen,1271 const char *fullname)1272{1273 struct name_entry entry;1274 int cmp;12751276 while (tree_entry(tree,&entry)) {1277 if (S_ISGITLINK(entry.mode))1278 continue;1279 cmp = tree_entry_len(&entry) != cmplen ? 1 :1280 memcmp(name, entry.path, cmplen);1281 if (cmp > 0)1282 continue;1283 if (cmp < 0)1284 return;1285 if (name[cmplen] != '/') {1286 add_object_entry(entry.oid,1287 object_type(entry.mode),1288 fullname, 1);1289 return;1290 }1291 if (S_ISDIR(entry.mode)) {1292 struct tree_desc sub;1293 struct pbase_tree_cache *tree;1294 const char *down = name+cmplen+1;1295 int downlen = name_cmp_len(down);12961297 tree = pbase_tree_get(entry.oid);1298 if (!tree)1299 return;1300 init_tree_desc(&sub, tree->tree_data, tree->tree_size);13011302 add_pbase_object(&sub, down, downlen, fullname);1303 pbase_tree_put(tree);1304 }1305 }1306}13071308static unsigned *done_pbase_paths;1309static int done_pbase_paths_num;1310static int done_pbase_paths_alloc;1311static int done_pbase_path_pos(unsigned hash)1312{1313 int lo = 0;1314 int hi = done_pbase_paths_num;1315 while (lo < hi) {1316 int mi = lo + (hi - lo) / 2;1317 if (done_pbase_paths[mi] == hash)1318 return mi;1319 if (done_pbase_paths[mi] < hash)1320 hi = mi;1321 else1322 lo = mi + 1;1323 }1324 return -lo-1;1325}13261327static int check_pbase_path(unsigned hash)1328{1329 int pos = done_pbase_path_pos(hash);1330 if (0 <= pos)1331 return 1;1332 pos = -pos - 1;1333 ALLOC_GROW(done_pbase_paths,1334 done_pbase_paths_num + 1,1335 done_pbase_paths_alloc);1336 done_pbase_paths_num++;1337 if (pos < done_pbase_paths_num)1338 MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,1339 done_pbase_paths_num - pos - 1);1340 done_pbase_paths[pos] = hash;1341 return 0;1342}13431344static void add_preferred_base_object(const char *name)1345{1346 struct pbase_tree *it;1347 int cmplen;1348 unsigned hash = pack_name_hash(name);13491350 if (!num_preferred_base || check_pbase_path(hash))1351 return;13521353 cmplen = name_cmp_len(name);1354 for (it = pbase_tree; it; it = it->next) {1355 if (cmplen == 0) {1356 add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);1357 }1358 else {1359 struct tree_desc tree;1360 init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1361 add_pbase_object(&tree, name, cmplen, name);1362 }1363 }1364}13651366static void add_preferred_base(struct object_id *oid)1367{1368 struct pbase_tree *it;1369 void *data;1370 unsigned long size;1371 struct object_id tree_oid;13721373 if (window <= num_preferred_base++)1374 return;13751376 data = read_object_with_reference(oid, tree_type, &size, &tree_oid);1377 if (!data)1378 return;13791380 for (it = pbase_tree; it; it = it->next) {1381 if (!oidcmp(&it->pcache.oid, &tree_oid)) {1382 free(data);1383 return;1384 }1385 }13861387 it = xcalloc(1, sizeof(*it));1388 it->next = pbase_tree;1389 pbase_tree = it;13901391 oidcpy(&it->pcache.oid, &tree_oid);1392 it->pcache.tree_data = data;1393 it->pcache.tree_size = size;1394}13951396static void cleanup_preferred_base(void)1397{1398 struct pbase_tree *it;1399 unsigned i;14001401 it = pbase_tree;1402 pbase_tree = NULL;1403 while (it) {1404 struct pbase_tree *tmp = it;1405 it = tmp->next;1406 free(tmp->pcache.tree_data);1407 free(tmp);1408 }14091410 for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {1411 if (!pbase_tree_cache[i])1412 continue;1413 free(pbase_tree_cache[i]->tree_data);1414 FREE_AND_NULL(pbase_tree_cache[i]);1415 }14161417 FREE_AND_NULL(done_pbase_paths);1418 done_pbase_paths_num = done_pbase_paths_alloc = 0;1419}14201421static void check_object(struct object_entry *entry)1422{1423 unsigned long canonical_size;14241425 if (IN_PACK(entry)) {1426 struct packed_git *p = IN_PACK(entry);1427 struct pack_window *w_curs = NULL;1428 const unsigned char *base_ref = NULL;1429 struct object_entry *base_entry;1430 unsigned long used, used_0;1431 unsigned long avail;1432 off_t ofs;1433 unsigned char *buf, c;1434 enum object_type type;1435 unsigned long in_pack_size;14361437 buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);14381439 /*1440 * We want in_pack_type even if we do not reuse delta1441 * since non-delta representations could still be reused.1442 */1443 used = unpack_object_header_buffer(buf, avail,1444 &type,1445 &in_pack_size);1446 if (used == 0)1447 goto give_up;14481449 if (type < 0)1450 BUG("invalid type %d", type);1451 entry->in_pack_type = type;14521453 /*1454 * Determine if this is a delta and if so whether we can1455 * reuse it or not. Otherwise let's find out as cheaply as1456 * possible what the actual type and size for this object is.1457 */1458 switch (entry->in_pack_type) {1459 default:1460 /* Not a delta hence we've already got all we need. */1461 oe_set_type(entry, entry->in_pack_type);1462 SET_SIZE(entry, in_pack_size);1463 entry->in_pack_header_size = used;1464 if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)1465 goto give_up;1466 unuse_pack(&w_curs);1467 return;1468 case OBJ_REF_DELTA:1469 if (reuse_delta && !entry->preferred_base)1470 base_ref = use_pack(p, &w_curs,1471 entry->in_pack_offset + used, NULL);1472 entry->in_pack_header_size = used + the_hash_algo->rawsz;1473 break;1474 case OBJ_OFS_DELTA:1475 buf = use_pack(p, &w_curs,1476 entry->in_pack_offset + used, NULL);1477 used_0 = 0;1478 c = buf[used_0++];1479 ofs = c & 127;1480 while (c & 128) {1481 ofs += 1;1482 if (!ofs || MSB(ofs, 7)) {1483 error("delta base offset overflow in pack for %s",1484 oid_to_hex(&entry->idx.oid));1485 goto give_up;1486 }1487 c = buf[used_0++];1488 ofs = (ofs << 7) + (c & 127);1489 }1490 ofs = entry->in_pack_offset - ofs;1491 if (ofs <= 0 || ofs >= entry->in_pack_offset) {1492 error("delta base offset out of bound for %s",1493 oid_to_hex(&entry->idx.oid));1494 goto give_up;1495 }1496 if (reuse_delta && !entry->preferred_base) {1497 struct revindex_entry *revidx;1498 revidx = find_pack_revindex(p, ofs);1499 if (!revidx)1500 goto give_up;1501 base_ref = nth_packed_object_sha1(p, revidx->nr);1502 }1503 entry->in_pack_header_size = used + used_0;1504 break;1505 }15061507 if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {1508 /*1509 * If base_ref was set above that means we wish to1510 * reuse delta data, and we even found that base1511 * in the list of objects we want to pack. Goodie!1512 *1513 * Depth value does not matter - find_deltas() will1514 * never consider reused delta as the base object to1515 * deltify other objects against, in order to avoid1516 * circular deltas.1517 */1518 oe_set_type(entry, entry->in_pack_type);1519 SET_SIZE(entry, in_pack_size); /* delta size */1520 SET_DELTA(entry, base_entry);1521 SET_DELTA_SIZE(entry, in_pack_size);1522 entry->delta_sibling_idx = base_entry->delta_child_idx;1523 SET_DELTA_CHILD(base_entry, entry);1524 unuse_pack(&w_curs);1525 return;1526 }15271528 if (oe_type(entry)) {1529 off_t delta_pos;15301531 /*1532 * This must be a delta and we already know what the1533 * final object type is. Let's extract the actual1534 * object size from the delta header.1535 */1536 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;1537 canonical_size = get_size_from_delta(p, &w_curs, delta_pos);1538 if (canonical_size == 0)1539 goto give_up;1540 SET_SIZE(entry, canonical_size);1541 unuse_pack(&w_curs);1542 return;1543 }15441545 /*1546 * No choice but to fall back to the recursive delta walk1547 * with sha1_object_info() to find about the object type1548 * at this point...1549 */1550 give_up:1551 unuse_pack(&w_curs);1552 }15531554 oe_set_type(entry,1555 oid_object_info(the_repository, &entry->idx.oid, &canonical_size));1556 if (entry->type_valid) {1557 SET_SIZE(entry, canonical_size);1558 } else {1559 /*1560 * Bad object type is checked in prepare_pack(). This is1561 * to permit a missing preferred base object to be ignored1562 * as a preferred base. Doing so can result in a larger1563 * pack file, but the transfer will still take place.1564 */1565 }1566}15671568static int pack_offset_sort(const void *_a, const void *_b)1569{1570 const struct object_entry *a = *(struct object_entry **)_a;1571 const struct object_entry *b = *(struct object_entry **)_b;1572 const struct packed_git *a_in_pack = IN_PACK(a);1573 const struct packed_git *b_in_pack = IN_PACK(b);15741575 /* avoid filesystem trashing with loose objects */1576 if (!a_in_pack && !b_in_pack)1577 return oidcmp(&a->idx.oid, &b->idx.oid);15781579 if (a_in_pack < b_in_pack)1580 return -1;1581 if (a_in_pack > b_in_pack)1582 return 1;1583 return a->in_pack_offset < b->in_pack_offset ? -1 :1584 (a->in_pack_offset > b->in_pack_offset);1585}15861587/*1588 * Drop an on-disk delta we were planning to reuse. Naively, this would1589 * just involve blanking out the "delta" field, but we have to deal1590 * with some extra book-keeping:1591 *1592 * 1. Removing ourselves from the delta_sibling linked list.1593 *1594 * 2. Updating our size/type to the non-delta representation. These were1595 * either not recorded initially (size) or overwritten with the delta type1596 * (type) when check_object() decided to reuse the delta.1597 *1598 * 3. Resetting our delta depth, as we are now a base object.1599 */1600static void drop_reused_delta(struct object_entry *entry)1601{1602 unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;1603 struct object_info oi = OBJECT_INFO_INIT;1604 enum object_type type;1605 unsigned long size;16061607 while (*idx) {1608 struct object_entry *oe = &to_pack.objects[*idx - 1];16091610 if (oe == entry)1611 *idx = oe->delta_sibling_idx;1612 else1613 idx = &oe->delta_sibling_idx;1614 }1615 SET_DELTA(entry, NULL);1616 entry->depth = 0;16171618 oi.sizep = &size;1619 oi.typep = &type;1620 if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {1621 /*1622 * We failed to get the info from this pack for some reason;1623 * fall back to sha1_object_info, which may find another copy.1624 * And if that fails, the error will be recorded in oe_type(entry)1625 * and dealt with in prepare_pack().1626 */1627 oe_set_type(entry,1628 oid_object_info(the_repository, &entry->idx.oid, &size));1629 } else {1630 oe_set_type(entry, type);1631 }1632 SET_SIZE(entry, size);1633}16341635/*1636 * Follow the chain of deltas from this entry onward, throwing away any links1637 * that cause us to hit a cycle (as determined by the DFS state flags in1638 * the entries).1639 *1640 * We also detect too-long reused chains that would violate our --depth1641 * limit.1642 */1643static void break_delta_chains(struct object_entry *entry)1644{1645 /*1646 * The actual depth of each object we will write is stored as an int,1647 * as it cannot exceed our int "depth" limit. But before we break1648 * changes based no that limit, we may potentially go as deep as the1649 * number of objects, which is elsewhere bounded to a uint32_t.1650 */1651 uint32_t total_depth;1652 struct object_entry *cur, *next;16531654 for (cur = entry, total_depth = 0;1655 cur;1656 cur = DELTA(cur), total_depth++) {1657 if (cur->dfs_state == DFS_DONE) {1658 /*1659 * We've already seen this object and know it isn't1660 * part of a cycle. We do need to append its depth1661 * to our count.1662 */1663 total_depth += cur->depth;1664 break;1665 }16661667 /*1668 * We break cycles before looping, so an ACTIVE state (or any1669 * other cruft which made its way into the state variable)1670 * is a bug.1671 */1672 if (cur->dfs_state != DFS_NONE)1673 BUG("confusing delta dfs state in first pass: %d",1674 cur->dfs_state);16751676 /*1677 * Now we know this is the first time we've seen the object. If1678 * it's not a delta, we're done traversing, but we'll mark it1679 * done to save time on future traversals.1680 */1681 if (!DELTA(cur)) {1682 cur->dfs_state = DFS_DONE;1683 break;1684 }16851686 /*1687 * Mark ourselves as active and see if the next step causes1688 * us to cycle to another active object. It's important to do1689 * this _before_ we loop, because it impacts where we make the1690 * cut, and thus how our total_depth counter works.1691 * E.g., We may see a partial loop like:1692 *1693 * A -> B -> C -> D -> B1694 *1695 * Cutting B->C breaks the cycle. But now the depth of A is1696 * only 1, and our total_depth counter is at 3. The size of the1697 * error is always one less than the size of the cycle we1698 * broke. Commits C and D were "lost" from A's chain.1699 *1700 * If we instead cut D->B, then the depth of A is correct at 3.1701 * We keep all commits in the chain that we examined.1702 */1703 cur->dfs_state = DFS_ACTIVE;1704 if (DELTA(cur)->dfs_state == DFS_ACTIVE) {1705 drop_reused_delta(cur);1706 cur->dfs_state = DFS_DONE;1707 break;1708 }1709 }17101711 /*1712 * And now that we've gone all the way to the bottom of the chain, we1713 * need to clear the active flags and set the depth fields as1714 * appropriate. Unlike the loop above, which can quit when it drops a1715 * delta, we need to keep going to look for more depth cuts. So we need1716 * an extra "next" pointer to keep going after we reset cur->delta.1717 */1718 for (cur = entry; cur; cur = next) {1719 next = DELTA(cur);17201721 /*1722 * We should have a chain of zero or more ACTIVE states down to1723 * a final DONE. We can quit after the DONE, because either it1724 * has no bases, or we've already handled them in a previous1725 * call.1726 */1727 if (cur->dfs_state == DFS_DONE)1728 break;1729 else if (cur->dfs_state != DFS_ACTIVE)1730 BUG("confusing delta dfs state in second pass: %d",1731 cur->dfs_state);17321733 /*1734 * If the total_depth is more than depth, then we need to snip1735 * the chain into two or more smaller chains that don't exceed1736 * the maximum depth. Most of the resulting chains will contain1737 * (depth + 1) entries (i.e., depth deltas plus one base), and1738 * the last chain (i.e., the one containing entry) will contain1739 * whatever entries are left over, namely1740 * (total_depth % (depth + 1)) of them.1741 *1742 * Since we are iterating towards decreasing depth, we need to1743 * decrement total_depth as we go, and we need to write to the1744 * entry what its final depth will be after all of the1745 * snipping. Since we're snipping into chains of length (depth1746 * + 1) entries, the final depth of an entry will be its1747 * original depth modulo (depth + 1). Any time we encounter an1748 * entry whose final depth is supposed to be zero, we snip it1749 * from its delta base, thereby making it so.1750 */1751 cur->depth = (total_depth--) % (depth + 1);1752 if (!cur->depth)1753 drop_reused_delta(cur);17541755 cur->dfs_state = DFS_DONE;1756 }1757}17581759static void get_object_details(void)1760{1761 uint32_t i;1762 struct object_entry **sorted_by_offset;17631764 if (progress)1765 progress_state = start_progress(_("Counting objects"),1766 to_pack.nr_objects);17671768 sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));1769 for (i = 0; i < to_pack.nr_objects; i++)1770 sorted_by_offset[i] = to_pack.objects + i;1771 QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17721773 for (i = 0; i < to_pack.nr_objects; i++) {1774 struct object_entry *entry = sorted_by_offset[i];1775 check_object(entry);1776 if (entry->type_valid &&1777 oe_size_greater_than(&to_pack, entry, big_file_threshold))1778 entry->no_try_delta = 1;1779 display_progress(progress_state, i + 1);1780 }1781 stop_progress(&progress_state);17821783 /*1784 * This must happen in a second pass, since we rely on the delta1785 * information for the whole list being completed.1786 */1787 for (i = 0; i < to_pack.nr_objects; i++)1788 break_delta_chains(&to_pack.objects[i]);17891790 free(sorted_by_offset);1791}17921793/*1794 * We search for deltas in a list sorted by type, by filename hash, and then1795 * by size, so that we see progressively smaller and smaller files.1796 * That's because we prefer deltas to be from the bigger file1797 * to the smaller -- deletes are potentially cheaper, but perhaps1798 * more importantly, the bigger file is likely the more recent1799 * one. The deepest deltas are therefore the oldest objects which are1800 * less susceptible to be accessed often.1801 */1802static int type_size_sort(const void *_a, const void *_b)1803{1804 const struct object_entry *a = *(struct object_entry **)_a;1805 const struct object_entry *b = *(struct object_entry **)_b;1806 enum object_type a_type = oe_type(a);1807 enum object_type b_type = oe_type(b);1808 unsigned long a_size = SIZE(a);1809 unsigned long b_size = SIZE(b);18101811 if (a_type > b_type)1812 return -1;1813 if (a_type < b_type)1814 return 1;1815 if (a->hash > b->hash)1816 return -1;1817 if (a->hash < b->hash)1818 return 1;1819 if (a->preferred_base > b->preferred_base)1820 return -1;1821 if (a->preferred_base < b->preferred_base)1822 return 1;1823 if (a_size > b_size)1824 return -1;1825 if (a_size < b_size)1826 return 1;1827 return a < b ? -1 : (a > b); /* newest first */1828}18291830struct unpacked {1831 struct object_entry *entry;1832 void *data;1833 struct delta_index *index;1834 unsigned depth;1835};18361837static int delta_cacheable(unsigned long src_size, unsigned long trg_size,1838 unsigned long delta_size)1839{1840 if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1841 return 0;18421843 if (delta_size < cache_max_small_delta_size)1844 return 1;18451846 /* cache delta, if objects are large enough compared to delta size */1847 if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))1848 return 1;18491850 return 0;1851}18521853#ifndef NO_PTHREADS18541855static pthread_mutex_t read_mutex;1856#define read_lock() pthread_mutex_lock(&read_mutex)1857#define read_unlock() pthread_mutex_unlock(&read_mutex)18581859static pthread_mutex_t cache_mutex;1860#define cache_lock() pthread_mutex_lock(&cache_mutex)1861#define cache_unlock() pthread_mutex_unlock(&cache_mutex)18621863static pthread_mutex_t progress_mutex;1864#define progress_lock() pthread_mutex_lock(&progress_mutex)1865#define progress_unlock() pthread_mutex_unlock(&progress_mutex)18661867#else18681869#define read_lock() (void)01870#define read_unlock() (void)01871#define cache_lock() (void)01872#define cache_unlock() (void)01873#define progress_lock() (void)01874#define progress_unlock() (void)018751876#endif18771878/*1879 * Return the size of the object without doing any delta1880 * reconstruction (so non-deltas are true object sizes, but deltas1881 * return the size of the delta data).1882 */1883unsigned long oe_get_size_slow(struct packing_data *pack,1884 const struct object_entry *e)1885{1886 struct packed_git *p;1887 struct pack_window *w_curs;1888 unsigned char *buf;1889 enum object_type type;1890 unsigned long used, avail, size;18911892 if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {1893 read_lock();1894 if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)1895 die(_("unable to get size of %s"),1896 oid_to_hex(&e->idx.oid));1897 read_unlock();1898 return size;1899 }19001901 p = oe_in_pack(pack, e);1902 if (!p)1903 BUG("when e->type is a delta, it must belong to a pack");19041905 read_lock();1906 w_curs = NULL;1907 buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);1908 used = unpack_object_header_buffer(buf, avail, &type, &size);1909 if (used == 0)1910 die(_("unable to parse object header of %s"),1911 oid_to_hex(&e->idx.oid));19121913 unuse_pack(&w_curs);1914 read_unlock();1915 return size;1916}19171918static int try_delta(struct unpacked *trg, struct unpacked *src,1919 unsigned max_depth, unsigned long *mem_usage)1920{1921 struct object_entry *trg_entry = trg->entry;1922 struct object_entry *src_entry = src->entry;1923 unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1924 unsigned ref_depth;1925 enum object_type type;1926 void *delta_buf;19271928 /* Don't bother doing diffs between different types */1929 if (oe_type(trg_entry) != oe_type(src_entry))1930 return -1;19311932 /*1933 * We do not bother to try a delta that we discarded on an1934 * earlier try, but only when reusing delta data. Note that1935 * src_entry that is marked as the preferred_base should always1936 * be considered, as even if we produce a suboptimal delta against1937 * it, we will still save the transfer cost, as we already know1938 * the other side has it and we won't send src_entry at all.1939 */1940 if (reuse_delta && IN_PACK(trg_entry) &&1941 IN_PACK(trg_entry) == IN_PACK(src_entry) &&1942 !src_entry->preferred_base &&1943 trg_entry->in_pack_type != OBJ_REF_DELTA &&1944 trg_entry->in_pack_type != OBJ_OFS_DELTA)1945 return 0;19461947 /* Let's not bust the allowed depth. */1948 if (src->depth >= max_depth)1949 return 0;19501951 /* Now some size filtering heuristics. */1952 trg_size = SIZE(trg_entry);1953 if (!DELTA(trg_entry)) {1954 max_size = trg_size/2 - the_hash_algo->rawsz;1955 ref_depth = 1;1956 } else {1957 max_size = DELTA_SIZE(trg_entry);1958 ref_depth = trg->depth;1959 }1960 max_size = (uint64_t)max_size * (max_depth - src->depth) /1961 (max_depth - ref_depth + 1);1962 if (max_size == 0)1963 return 0;1964 src_size = SIZE(src_entry);1965 sizediff = src_size < trg_size ? trg_size - src_size : 0;1966 if (sizediff >= max_size)1967 return 0;1968 if (trg_size < src_size / 32)1969 return 0;19701971 /* Load data if not already done */1972 if (!trg->data) {1973 read_lock();1974 trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);1975 read_unlock();1976 if (!trg->data)1977 die("object %s cannot be read",1978 oid_to_hex(&trg_entry->idx.oid));1979 if (sz != trg_size)1980 die("object %s inconsistent object length (%lu vs %lu)",1981 oid_to_hex(&trg_entry->idx.oid), sz,1982 trg_size);1983 *mem_usage += sz;1984 }1985 if (!src->data) {1986 read_lock();1987 src->data = read_object_file(&src_entry->idx.oid, &type, &sz);1988 read_unlock();1989 if (!src->data) {1990 if (src_entry->preferred_base) {1991 static int warned = 0;1992 if (!warned++)1993 warning("object %s cannot be read",1994 oid_to_hex(&src_entry->idx.oid));1995 /*1996 * Those objects are not included in the1997 * resulting pack. Be resilient and ignore1998 * them if they can't be read, in case the1999 * pack could be created nevertheless.2000 */2001 return 0;2002 }2003 die("object %s cannot be read",2004 oid_to_hex(&src_entry->idx.oid));2005 }2006 if (sz != src_size)2007 die("object %s inconsistent object length (%lu vs %lu)",2008 oid_to_hex(&src_entry->idx.oid), sz,2009 src_size);2010 *mem_usage += sz;2011 }2012 if (!src->index) {2013 src->index = create_delta_index(src->data, src_size);2014 if (!src->index) {2015 static int warned = 0;2016 if (!warned++)2017 warning("suboptimal pack - out of memory");2018 return 0;2019 }2020 *mem_usage += sizeof_delta_index(src->index);2021 }20222023 delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);2024 if (!delta_buf)2025 return 0;2026 if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {2027 free(delta_buf);2028 return 0;2029 }20302031 if (DELTA(trg_entry)) {2032 /* Prefer only shallower same-sized deltas. */2033 if (delta_size == DELTA_SIZE(trg_entry) &&2034 src->depth + 1 >= trg->depth) {2035 free(delta_buf);2036 return 0;2037 }2038 }20392040 /*2041 * Handle memory allocation outside of the cache2042 * accounting lock. Compiler will optimize the strangeness2043 * away when NO_PTHREADS is defined.2044 */2045 free(trg_entry->delta_data);2046 cache_lock();2047 if (trg_entry->delta_data) {2048 delta_cache_size -= DELTA_SIZE(trg_entry);2049 trg_entry->delta_data = NULL;2050 }2051 if (delta_cacheable(src_size, trg_size, delta_size)) {2052 delta_cache_size += delta_size;2053 cache_unlock();2054 trg_entry->delta_data = xrealloc(delta_buf, delta_size);2055 } else {2056 cache_unlock();2057 free(delta_buf);2058 }20592060 SET_DELTA(trg_entry, src_entry);2061 SET_DELTA_SIZE(trg_entry, delta_size);2062 trg->depth = src->depth + 1;20632064 return 1;2065}20662067static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)2068{2069 struct object_entry *child = DELTA_CHILD(me);2070 unsigned int m = n;2071 while (child) {2072 unsigned int c = check_delta_limit(child, n + 1);2073 if (m < c)2074 m = c;2075 child = DELTA_SIBLING(child);2076 }2077 return m;2078}20792080static unsigned long free_unpacked(struct unpacked *n)2081{2082 unsigned long freed_mem = sizeof_delta_index(n->index);2083 free_delta_index(n->index);2084 n->index = NULL;2085 if (n->data) {2086 freed_mem += SIZE(n->entry);2087 FREE_AND_NULL(n->data);2088 }2089 n->entry = NULL;2090 n->depth = 0;2091 return freed_mem;2092}20932094static void find_deltas(struct object_entry **list, unsigned *list_size,2095 int window, int depth, unsigned *processed)2096{2097 uint32_t i, idx = 0, count = 0;2098 struct unpacked *array;2099 unsigned long mem_usage = 0;21002101 array = xcalloc(window, sizeof(struct unpacked));21022103 for (;;) {2104 struct object_entry *entry;2105 struct unpacked *n = array + idx;2106 int j, max_depth, best_base = -1;21072108 progress_lock();2109 if (!*list_size) {2110 progress_unlock();2111 break;2112 }2113 entry = *list++;2114 (*list_size)--;2115 if (!entry->preferred_base) {2116 (*processed)++;2117 display_progress(progress_state, *processed);2118 }2119 progress_unlock();21202121 mem_usage -= free_unpacked(n);2122 n->entry = entry;21232124 while (window_memory_limit &&2125 mem_usage > window_memory_limit &&2126 count > 1) {2127 uint32_t tail = (idx + window - count) % window;2128 mem_usage -= free_unpacked(array + tail);2129 count--;2130 }21312132 /* We do not compute delta to *create* objects we are not2133 * going to pack.2134 */2135 if (entry->preferred_base)2136 goto next;21372138 /*2139 * If the current object is at pack edge, take the depth the2140 * objects that depend on the current object into account2141 * otherwise they would become too deep.2142 */2143 max_depth = depth;2144 if (DELTA_CHILD(entry)) {2145 max_depth -= check_delta_limit(entry, 0);2146 if (max_depth <= 0)2147 goto next;2148 }21492150 j = window;2151 while (--j > 0) {2152 int ret;2153 uint32_t other_idx = idx + j;2154 struct unpacked *m;2155 if (other_idx >= window)2156 other_idx -= window;2157 m = array + other_idx;2158 if (!m->entry)2159 break;2160 ret = try_delta(n, m, max_depth, &mem_usage);2161 if (ret < 0)2162 break;2163 else if (ret > 0)2164 best_base = other_idx;2165 }21662167 /*2168 * If we decided to cache the delta data, then it is best2169 * to compress it right away. First because we have to do2170 * it anyway, and doing it here while we're threaded will2171 * save a lot of time in the non threaded write phase,2172 * as well as allow for caching more deltas within2173 * the same cache size limit.2174 * ...2175 * But only if not writing to stdout, since in that case2176 * the network is most likely throttling writes anyway,2177 * and therefore it is best to go to the write phase ASAP2178 * instead, as we can afford spending more time compressing2179 * between writes at that moment.2180 */2181 if (entry->delta_data && !pack_to_stdout) {2182 unsigned long size;21832184 size = do_compress(&entry->delta_data, DELTA_SIZE(entry));2185 if (size < (1U << OE_Z_DELTA_BITS)) {2186 entry->z_delta_size = size;2187 cache_lock();2188 delta_cache_size -= DELTA_SIZE(entry);2189 delta_cache_size += entry->z_delta_size;2190 cache_unlock();2191 } else {2192 FREE_AND_NULL(entry->delta_data);2193 entry->z_delta_size = 0;2194 }2195 }21962197 /* if we made n a delta, and if n is already at max2198 * depth, leaving it in the window is pointless. we2199 * should evict it first.2200 */2201 if (DELTA(entry) && max_depth <= n->depth)2202 continue;22032204 /*2205 * Move the best delta base up in the window, after the2206 * currently deltified object, to keep it longer. It will2207 * be the first base object to be attempted next.2208 */2209 if (DELTA(entry)) {2210 struct unpacked swap = array[best_base];2211 int dist = (window + idx - best_base) % window;2212 int dst = best_base;2213 while (dist--) {2214 int src = (dst + 1) % window;2215 array[dst] = array[src];2216 dst = src;2217 }2218 array[dst] = swap;2219 }22202221 next:2222 idx++;2223 if (count + 1 < window)2224 count++;2225 if (idx >= window)2226 idx = 0;2227 }22282229 for (i = 0; i < window; ++i) {2230 free_delta_index(array[i].index);2231 free(array[i].data);2232 }2233 free(array);2234}22352236#ifndef NO_PTHREADS22372238static void try_to_free_from_threads(size_t size)2239{2240 read_lock();2241 release_pack_memory(size);2242 read_unlock();2243}22442245static try_to_free_t old_try_to_free_routine;22462247/*2248 * The main thread waits on the condition that (at least) one of the workers2249 * has stopped working (which is indicated in the .working member of2250 * struct thread_params).2251 * When a work thread has completed its work, it sets .working to 0 and2252 * signals the main thread and waits on the condition that .data_ready2253 * becomes 1.2254 */22552256struct thread_params {2257 pthread_t thread;2258 struct object_entry **list;2259 unsigned list_size;2260 unsigned remaining;2261 int window;2262 int depth;2263 int working;2264 int data_ready;2265 pthread_mutex_t mutex;2266 pthread_cond_t cond;2267 unsigned *processed;2268};22692270static pthread_cond_t progress_cond;22712272/*2273 * Mutex and conditional variable can't be statically-initialized on Windows.2274 */2275static void init_threaded_search(void)2276{2277 init_recursive_mutex(&read_mutex);2278 pthread_mutex_init(&cache_mutex, NULL);2279 pthread_mutex_init(&progress_mutex, NULL);2280 pthread_cond_init(&progress_cond, NULL);2281 old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);2282}22832284static void cleanup_threaded_search(void)2285{2286 set_try_to_free_routine(old_try_to_free_routine);2287 pthread_cond_destroy(&progress_cond);2288 pthread_mutex_destroy(&read_mutex);2289 pthread_mutex_destroy(&cache_mutex);2290 pthread_mutex_destroy(&progress_mutex);2291}22922293static void *threaded_find_deltas(void *arg)2294{2295 struct thread_params *me = arg;22962297 progress_lock();2298 while (me->remaining) {2299 progress_unlock();23002301 find_deltas(me->list, &me->remaining,2302 me->window, me->depth, me->processed);23032304 progress_lock();2305 me->working = 0;2306 pthread_cond_signal(&progress_cond);2307 progress_unlock();23082309 /*2310 * We must not set ->data_ready before we wait on the2311 * condition because the main thread may have set it to 12312 * before we get here. In order to be sure that new2313 * work is available if we see 1 in ->data_ready, it2314 * was initialized to 0 before this thread was spawned2315 * and we reset it to 0 right away.2316 */2317 pthread_mutex_lock(&me->mutex);2318 while (!me->data_ready)2319 pthread_cond_wait(&me->cond, &me->mutex);2320 me->data_ready = 0;2321 pthread_mutex_unlock(&me->mutex);23222323 progress_lock();2324 }2325 progress_unlock();2326 /* leave ->working 1 so that this doesn't get more work assigned */2327 return NULL;2328}23292330static void ll_find_deltas(struct object_entry **list, unsigned list_size,2331 int window, int depth, unsigned *processed)2332{2333 struct thread_params *p;2334 int i, ret, active_threads = 0;23352336 init_threaded_search();23372338 if (delta_search_threads <= 1) {2339 find_deltas(list, &list_size, window, depth, processed);2340 cleanup_threaded_search();2341 return;2342 }2343 if (progress > pack_to_stdout)2344 fprintf(stderr, "Delta compression using up to %d threads.\n",2345 delta_search_threads);2346 p = xcalloc(delta_search_threads, sizeof(*p));23472348 /* Partition the work amongst work threads. */2349 for (i = 0; i < delta_search_threads; i++) {2350 unsigned sub_size = list_size / (delta_search_threads - i);23512352 /* don't use too small segments or no deltas will be found */2353 if (sub_size < 2*window && i+1 < delta_search_threads)2354 sub_size = 0;23552356 p[i].window = window;2357 p[i].depth = depth;2358 p[i].processed = processed;2359 p[i].working = 1;2360 p[i].data_ready = 0;23612362 /* try to split chunks on "path" boundaries */2363 while (sub_size && sub_size < list_size &&2364 list[sub_size]->hash &&2365 list[sub_size]->hash == list[sub_size-1]->hash)2366 sub_size++;23672368 p[i].list = list;2369 p[i].list_size = sub_size;2370 p[i].remaining = sub_size;23712372 list += sub_size;2373 list_size -= sub_size;2374 }23752376 /* Start work threads. */2377 for (i = 0; i < delta_search_threads; i++) {2378 if (!p[i].list_size)2379 continue;2380 pthread_mutex_init(&p[i].mutex, NULL);2381 pthread_cond_init(&p[i].cond, NULL);2382 ret = pthread_create(&p[i].thread, NULL,2383 threaded_find_deltas, &p[i]);2384 if (ret)2385 die("unable to create thread: %s", strerror(ret));2386 active_threads++;2387 }23882389 /*2390 * Now let's wait for work completion. Each time a thread is done2391 * with its work, we steal half of the remaining work from the2392 * thread with the largest number of unprocessed objects and give2393 * it to that newly idle thread. This ensure good load balancing2394 * until the remaining object list segments are simply too short2395 * to be worth splitting anymore.2396 */2397 while (active_threads) {2398 struct thread_params *target = NULL;2399 struct thread_params *victim = NULL;2400 unsigned sub_size = 0;24012402 progress_lock();2403 for (;;) {2404 for (i = 0; !target && i < delta_search_threads; i++)2405 if (!p[i].working)2406 target = &p[i];2407 if (target)2408 break;2409 pthread_cond_wait(&progress_cond, &progress_mutex);2410 }24112412 for (i = 0; i < delta_search_threads; i++)2413 if (p[i].remaining > 2*window &&2414 (!victim || victim->remaining < p[i].remaining))2415 victim = &p[i];2416 if (victim) {2417 sub_size = victim->remaining / 2;2418 list = victim->list + victim->list_size - sub_size;2419 while (sub_size && list[0]->hash &&2420 list[0]->hash == list[-1]->hash) {2421 list++;2422 sub_size--;2423 }2424 if (!sub_size) {2425 /*2426 * It is possible for some "paths" to have2427 * so many objects that no hash boundary2428 * might be found. Let's just steal the2429 * exact half in that case.2430 */2431 sub_size = victim->remaining / 2;2432 list -= sub_size;2433 }2434 target->list = list;2435 victim->list_size -= sub_size;2436 victim->remaining -= sub_size;2437 }2438 target->list_size = sub_size;2439 target->remaining = sub_size;2440 target->working = 1;2441 progress_unlock();24422443 pthread_mutex_lock(&target->mutex);2444 target->data_ready = 1;2445 pthread_cond_signal(&target->cond);2446 pthread_mutex_unlock(&target->mutex);24472448 if (!sub_size) {2449 pthread_join(target->thread, NULL);2450 pthread_cond_destroy(&target->cond);2451 pthread_mutex_destroy(&target->mutex);2452 active_threads--;2453 }2454 }2455 cleanup_threaded_search();2456 free(p);2457}24582459#else2460#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2461#endif24622463static void add_tag_chain(const struct object_id *oid)2464{2465 struct tag *tag;24662467 /*2468 * We catch duplicates already in add_object_entry(), but we'd2469 * prefer to do this extra check to avoid having to parse the2470 * tag at all if we already know that it's being packed (e.g., if2471 * it was included via bitmaps, we would not have parsed it2472 * previously).2473 */2474 if (packlist_find(&to_pack, oid->hash, NULL))2475 return;24762477 tag = lookup_tag(oid);2478 while (1) {2479 if (!tag || parse_tag(tag) || !tag->tagged)2480 die("unable to pack objects reachable from tag %s",2481 oid_to_hex(oid));24822483 add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);24842485 if (tag->tagged->type != OBJ_TAG)2486 return;24872488 tag = (struct tag *)tag->tagged;2489 }2490}24912492static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)2493{2494 struct object_id peeled;24952496 if (starts_with(path, "refs/tags/") && /* is a tag? */2497 !peel_ref(path, &peeled) && /* peelable? */2498 packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */2499 add_tag_chain(oid);2500 return 0;2501}25022503static void prepare_pack(int window, int depth)2504{2505 struct object_entry **delta_list;2506 uint32_t i, nr_deltas;2507 unsigned n;25082509 get_object_details();25102511 /*2512 * If we're locally repacking then we need to be doubly careful2513 * from now on in order to make sure no stealth corruption gets2514 * propagated to the new pack. Clients receiving streamed packs2515 * should validate everything they get anyway so no need to incur2516 * the additional cost here in that case.2517 */2518 if (!pack_to_stdout)2519 do_check_packed_object_crc = 1;25202521 if (!to_pack.nr_objects || !window || !depth)2522 return;25232524 ALLOC_ARRAY(delta_list, to_pack.nr_objects);2525 nr_deltas = n = 0;25262527 for (i = 0; i < to_pack.nr_objects; i++) {2528 struct object_entry *entry = to_pack.objects + i;25292530 if (DELTA(entry))2531 /* This happens if we decided to reuse existing2532 * delta from a pack. "reuse_delta &&" is implied.2533 */2534 continue;25352536 if (!entry->type_valid ||2537 oe_size_less_than(&to_pack, entry, 50))2538 continue;25392540 if (entry->no_try_delta)2541 continue;25422543 if (!entry->preferred_base) {2544 nr_deltas++;2545 if (oe_type(entry) < 0)2546 die("unable to get type of object %s",2547 oid_to_hex(&entry->idx.oid));2548 } else {2549 if (oe_type(entry) < 0) {2550 /*2551 * This object is not found, but we2552 * don't have to include it anyway.2553 */2554 continue;2555 }2556 }25572558 delta_list[n++] = entry;2559 }25602561 if (nr_deltas && n > 1) {2562 unsigned nr_done = 0;2563 if (progress)2564 progress_state = start_progress(_("Compressing objects"),2565 nr_deltas);2566 QSORT(delta_list, n, type_size_sort);2567 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2568 stop_progress(&progress_state);2569 if (nr_done != nr_deltas)2570 die("inconsistency with delta count");2571 }2572 free(delta_list);2573}25742575static int git_pack_config(const char *k, const char *v, void *cb)2576{2577 if (!strcmp(k, "pack.window")) {2578 window = git_config_int(k, v);2579 return 0;2580 }2581 if (!strcmp(k, "pack.windowmemory")) {2582 window_memory_limit = git_config_ulong(k, v);2583 return 0;2584 }2585 if (!strcmp(k, "pack.depth")) {2586 depth = git_config_int(k, v);2587 return 0;2588 }2589 if (!strcmp(k, "pack.deltacachesize")) {2590 max_delta_cache_size = git_config_int(k, v);2591 return 0;2592 }2593 if (!strcmp(k, "pack.deltacachelimit")) {2594 cache_max_small_delta_size = git_config_int(k, v);2595 return 0;2596 }2597 if (!strcmp(k, "pack.writebitmaphashcache")) {2598 if (git_config_bool(k, v))2599 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2600 else2601 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2602 }2603 if (!strcmp(k, "pack.usebitmaps")) {2604 use_bitmap_index_default = git_config_bool(k, v);2605 return 0;2606 }2607 if (!strcmp(k, "pack.threads")) {2608 delta_search_threads = git_config_int(k, v);2609 if (delta_search_threads < 0)2610 die("invalid number of threads specified (%d)",2611 delta_search_threads);2612#ifdef NO_PTHREADS2613 if (delta_search_threads != 1) {2614 warning("no threads support, ignoring %s", k);2615 delta_search_threads = 0;2616 }2617#endif2618 return 0;2619 }2620 if (!strcmp(k, "pack.indexversion")) {2621 pack_idx_opts.version = git_config_int(k, v);2622 if (pack_idx_opts.version > 2)2623 die("bad pack.indexversion=%"PRIu32,2624 pack_idx_opts.version);2625 return 0;2626 }2627 return git_default_config(k, v, cb);2628}26292630static void read_object_list_from_stdin(void)2631{2632 char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];2633 struct object_id oid;2634 const char *p;26352636 for (;;) {2637 if (!fgets(line, sizeof(line), stdin)) {2638 if (feof(stdin))2639 break;2640 if (!ferror(stdin))2641 die("fgets returned NULL, not EOF, not error!");2642 if (errno != EINTR)2643 die_errno("fgets");2644 clearerr(stdin);2645 continue;2646 }2647 if (line[0] == '-') {2648 if (get_oid_hex(line+1, &oid))2649 die("expected edge object ID, got garbage:\n %s",2650 line);2651 add_preferred_base(&oid);2652 continue;2653 }2654 if (parse_oid_hex(line, &oid, &p))2655 die("expected object ID, got garbage:\n %s", line);26562657 add_preferred_base_object(p + 1);2658 add_object_entry(&oid, OBJ_NONE, p + 1, 0);2659 }2660}26612662/* Remember to update object flag allocation in object.h */2663#define OBJECT_ADDED (1u<<20)26642665static void show_commit(struct commit *commit, void *data)2666{2667 add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);2668 commit->object.flags |= OBJECT_ADDED;26692670 if (write_bitmap_index)2671 index_commit_for_bitmap(commit);2672}26732674static void show_object(struct object *obj, const char *name, void *data)2675{2676 add_preferred_base_object(name);2677 add_object_entry(&obj->oid, obj->type, name, 0);2678 obj->flags |= OBJECT_ADDED;2679}26802681static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)2682{2683 assert(arg_missing_action == MA_ALLOW_ANY);26842685 /*2686 * Quietly ignore ALL missing objects. This avoids problems with2687 * staging them now and getting an odd error later.2688 */2689 if (!has_object_file(&obj->oid))2690 return;26912692 show_object(obj, name, data);2693}26942695static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)2696{2697 assert(arg_missing_action == MA_ALLOW_PROMISOR);26982699 /*2700 * Quietly ignore EXPECTED missing objects. This avoids problems with2701 * staging them now and getting an odd error later.2702 */2703 if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))2704 return;27052706 show_object(obj, name, data);2707}27082709static int option_parse_missing_action(const struct option *opt,2710 const char *arg, int unset)2711{2712 assert(arg);2713 assert(!unset);27142715 if (!strcmp(arg, "error")) {2716 arg_missing_action = MA_ERROR;2717 fn_show_object = show_object;2718 return 0;2719 }27202721 if (!strcmp(arg, "allow-any")) {2722 arg_missing_action = MA_ALLOW_ANY;2723 fetch_if_missing = 0;2724 fn_show_object = show_object__ma_allow_any;2725 return 0;2726 }27272728 if (!strcmp(arg, "allow-promisor")) {2729 arg_missing_action = MA_ALLOW_PROMISOR;2730 fetch_if_missing = 0;2731 fn_show_object = show_object__ma_allow_promisor;2732 return 0;2733 }27342735 die(_("invalid value for --missing"));2736 return 0;2737}27382739static void show_edge(struct commit *commit)2740{2741 add_preferred_base(&commit->object.oid);2742}27432744struct in_pack_object {2745 off_t offset;2746 struct object *object;2747};27482749struct in_pack {2750 unsigned int alloc;2751 unsigned int nr;2752 struct in_pack_object *array;2753};27542755static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)2756{2757 in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);2758 in_pack->array[in_pack->nr].object = object;2759 in_pack->nr++;2760}27612762/*2763 * Compare the objects in the offset order, in order to emulate the2764 * "git rev-list --objects" output that produced the pack originally.2765 */2766static int ofscmp(const void *a_, const void *b_)2767{2768 struct in_pack_object *a = (struct in_pack_object *)a_;2769 struct in_pack_object *b = (struct in_pack_object *)b_;27702771 if (a->offset < b->offset)2772 return -1;2773 else if (a->offset > b->offset)2774 return 1;2775 else2776 return oidcmp(&a->object->oid, &b->object->oid);2777}27782779static void add_objects_in_unpacked_packs(struct rev_info *revs)2780{2781 struct packed_git *p;2782 struct in_pack in_pack;2783 uint32_t i;27842785 memset(&in_pack, 0, sizeof(in_pack));27862787 for (p = get_packed_git(the_repository); p; p = p->next) {2788 struct object_id oid;2789 struct object *o;27902791 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)2792 continue;2793 if (open_pack_index(p))2794 die("cannot open pack index");27952796 ALLOC_GROW(in_pack.array,2797 in_pack.nr + p->num_objects,2798 in_pack.alloc);27992800 for (i = 0; i < p->num_objects; i++) {2801 nth_packed_object_oid(&oid, p, i);2802 o = lookup_unknown_object(oid.hash);2803 if (!(o->flags & OBJECT_ADDED))2804 mark_in_pack_object(o, p, &in_pack);2805 o->flags |= OBJECT_ADDED;2806 }2807 }28082809 if (in_pack.nr) {2810 QSORT(in_pack.array, in_pack.nr, ofscmp);2811 for (i = 0; i < in_pack.nr; i++) {2812 struct object *o = in_pack.array[i].object;2813 add_object_entry(&o->oid, o->type, "", 0);2814 }2815 }2816 free(in_pack.array);2817}28182819static int add_loose_object(const struct object_id *oid, const char *path,2820 void *data)2821{2822 enum object_type type = oid_object_info(the_repository, oid, NULL);28232824 if (type < 0) {2825 warning("loose object at %s could not be examined", path);2826 return 0;2827 }28282829 add_object_entry(oid, type, "", 0);2830 return 0;2831}28322833/*2834 * We actually don't even have to worry about reachability here.2835 * add_object_entry will weed out duplicates, so we just add every2836 * loose object we find.2837 */2838static void add_unreachable_loose_objects(void)2839{2840 for_each_loose_file_in_objdir(get_object_directory(),2841 add_loose_object,2842 NULL, NULL, NULL);2843}28442845static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)2846{2847 static struct packed_git *last_found = (void *)1;2848 struct packed_git *p;28492850 p = (last_found != (void *)1) ? last_found :2851 get_packed_git(the_repository);28522853 while (p) {2854 if ((!p->pack_local || p->pack_keep ||2855 p->pack_keep_in_core) &&2856 find_pack_entry_one(oid->hash, p)) {2857 last_found = p;2858 return 1;2859 }2860 if (p == last_found)2861 p = get_packed_git(the_repository);2862 else2863 p = p->next;2864 if (p == last_found)2865 p = p->next;2866 }2867 return 0;2868}28692870/*2871 * Store a list of sha1s that are should not be discarded2872 * because they are either written too recently, or are2873 * reachable from another object that was.2874 *2875 * This is filled by get_object_list.2876 */2877static struct oid_array recent_objects;28782879static int loosened_object_can_be_discarded(const struct object_id *oid,2880 timestamp_t mtime)2881{2882 if (!unpack_unreachable_expiration)2883 return 0;2884 if (mtime > unpack_unreachable_expiration)2885 return 0;2886 if (oid_array_lookup(&recent_objects, oid) >= 0)2887 return 0;2888 return 1;2889}28902891static void loosen_unused_packed_objects(struct rev_info *revs)2892{2893 struct packed_git *p;2894 uint32_t i;2895 struct object_id oid;28962897 for (p = get_packed_git(the_repository); p; p = p->next) {2898 if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)2899 continue;29002901 if (open_pack_index(p))2902 die("cannot open pack index");29032904 for (i = 0; i < p->num_objects; i++) {2905 nth_packed_object_oid(&oid, p, i);2906 if (!packlist_find(&to_pack, oid.hash, NULL) &&2907 !has_sha1_pack_kept_or_nonlocal(&oid) &&2908 !loosened_object_can_be_discarded(&oid, p->mtime))2909 if (force_object_loose(&oid, p->mtime))2910 die("unable to force loose object");2911 }2912 }2913}29142915/*2916 * This tracks any options which pack-reuse code expects to be on, or which a2917 * reader of the pack might not understand, and which would therefore prevent2918 * blind reuse of what we have on disk.2919 */2920static int pack_options_allow_reuse(void)2921{2922 return pack_to_stdout &&2923 allow_ofs_delta &&2924 !ignore_packed_keep_on_disk &&2925 !ignore_packed_keep_in_core &&2926 (!local || !have_non_local_packs) &&2927 !incremental;2928}29292930static int get_object_list_from_bitmap(struct rev_info *revs)2931{2932 struct bitmap_index *bitmap_git;2933 if (!(bitmap_git = prepare_bitmap_walk(revs)))2934 return -1;29352936 if (pack_options_allow_reuse() &&2937 !reuse_partial_packfile_from_bitmap(2938 bitmap_git,2939 &reuse_packfile,2940 &reuse_packfile_objects,2941 &reuse_packfile_offset)) {2942 assert(reuse_packfile_objects);2943 nr_result += reuse_packfile_objects;2944 display_progress(progress_state, nr_result);2945 }29462947 traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);2948 return 0;2949}29502951static void record_recent_object(struct object *obj,2952 const char *name,2953 void *data)2954{2955 oid_array_append(&recent_objects, &obj->oid);2956}29572958static void record_recent_commit(struct commit *commit, void *data)2959{2960 oid_array_append(&recent_objects, &commit->object.oid);2961}29622963static void get_object_list(int ac, const char **av)2964{2965 struct rev_info revs;2966 char line[1000];2967 int flags = 0;29682969 init_revisions(&revs, NULL);2970 save_commit_buffer = 0;2971 setup_revisions(ac, av, &revs, NULL);29722973 /* make sure shallows are read */2974 is_repository_shallow();29752976 while (fgets(line, sizeof(line), stdin) != NULL) {2977 int len = strlen(line);2978 if (len && line[len - 1] == '\n')2979 line[--len] = 0;2980 if (!len)2981 break;2982 if (*line == '-') {2983 if (!strcmp(line, "--not")) {2984 flags ^= UNINTERESTING;2985 write_bitmap_index = 0;2986 continue;2987 }2988 if (starts_with(line, "--shallow ")) {2989 struct object_id oid;2990 if (get_oid_hex(line + 10, &oid))2991 die("not an SHA-1 '%s'", line + 10);2992 register_shallow(&oid);2993 use_bitmap_index = 0;2994 continue;2995 }2996 die("not a rev '%s'", line);2997 }2998 if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))2999 die("bad revision '%s'", line);3000 }30013002 if (use_bitmap_index && !get_object_list_from_bitmap(&revs))3003 return;30043005 if (prepare_revision_walk(&revs))3006 die("revision walk setup failed");3007 mark_edges_uninteresting(&revs, show_edge);30083009 if (!fn_show_object)3010 fn_show_object = show_object;3011 traverse_commit_list_filtered(&filter_options, &revs,3012 show_commit, fn_show_object, NULL,3013 NULL);30143015 if (unpack_unreachable_expiration) {3016 revs.ignore_missing_links = 1;3017 if (add_unseen_recent_objects_to_traversal(&revs,3018 unpack_unreachable_expiration))3019 die("unable to add recent objects");3020 if (prepare_revision_walk(&revs))3021 die("revision walk setup failed");3022 traverse_commit_list(&revs, record_recent_commit,3023 record_recent_object, NULL);3024 }30253026 if (keep_unreachable)3027 add_objects_in_unpacked_packs(&revs);3028 if (pack_loose_unreachable)3029 add_unreachable_loose_objects();3030 if (unpack_unreachable)3031 loosen_unused_packed_objects(&revs);30323033 oid_array_clear(&recent_objects);3034}30353036static void add_extra_kept_packs(const struct string_list *names)3037{3038 struct packed_git *p;30393040 if (!names->nr)3041 return;30423043 for (p = get_packed_git(the_repository); p; p = p->next) {3044 const char *name = basename(p->pack_name);3045 int i;30463047 if (!p->pack_local)3048 continue;30493050 for (i = 0; i < names->nr; i++)3051 if (!fspathcmp(name, names->items[i].string))3052 break;30533054 if (i < names->nr) {3055 p->pack_keep_in_core = 1;3056 ignore_packed_keep_in_core = 1;3057 continue;3058 }3059 }3060}30613062static int option_parse_index_version(const struct option *opt,3063 const char *arg, int unset)3064{3065 char *c;3066 const char *val = arg;3067 pack_idx_opts.version = strtoul(val, &c, 10);3068 if (pack_idx_opts.version > 2)3069 die(_("unsupported index version %s"), val);3070 if (*c == ',' && c[1])3071 pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);3072 if (*c || pack_idx_opts.off32_limit & 0x80000000)3073 die(_("bad index version '%s'"), val);3074 return 0;3075}30763077static int option_parse_unpack_unreachable(const struct option *opt,3078 const char *arg, int unset)3079{3080 if (unset) {3081 unpack_unreachable = 0;3082 unpack_unreachable_expiration = 0;3083 }3084 else {3085 unpack_unreachable = 1;3086 if (arg)3087 unpack_unreachable_expiration = approxidate(arg);3088 }3089 return 0;3090}30913092int cmd_pack_objects(int argc, const char **argv, const char *prefix)3093{3094 int use_internal_rev_list = 0;3095 int thin = 0;3096 int shallow = 0;3097 int all_progress_implied = 0;3098 struct argv_array rp = ARGV_ARRAY_INIT;3099 int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;3100 int rev_list_index = 0;3101 struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;3102 struct option pack_objects_options[] = {3103 OPT_SET_INT('q', "quiet", &progress,3104 N_("do not show progress meter"), 0),3105 OPT_SET_INT(0, "progress", &progress,3106 N_("show progress meter"), 1),3107 OPT_SET_INT(0, "all-progress", &progress,3108 N_("show progress meter during object writing phase"), 2),3109 OPT_BOOL(0, "all-progress-implied",3110 &all_progress_implied,3111 N_("similar to --all-progress when progress meter is shown")),3112 { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),3113 N_("write the pack index file in the specified idx format version"),3114 0, option_parse_index_version },3115 OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,3116 N_("maximum size of each output pack file")),3117 OPT_BOOL(0, "local", &local,3118 N_("ignore borrowed objects from alternate object store")),3119 OPT_BOOL(0, "incremental", &incremental,3120 N_("ignore packed objects")),3121 OPT_INTEGER(0, "window", &window,3122 N_("limit pack window by objects")),3123 OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,3124 N_("limit pack window by memory in addition to object limit")),3125 OPT_INTEGER(0, "depth", &depth,3126 N_("maximum length of delta chain allowed in the resulting pack")),3127 OPT_BOOL(0, "reuse-delta", &reuse_delta,3128 N_("reuse existing deltas")),3129 OPT_BOOL(0, "reuse-object", &reuse_object,3130 N_("reuse existing objects")),3131 OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,3132 N_("use OFS_DELTA objects")),3133 OPT_INTEGER(0, "threads", &delta_search_threads,3134 N_("use threads when searching for best delta matches")),3135 OPT_BOOL(0, "non-empty", &non_empty,3136 N_("do not create an empty pack output")),3137 OPT_BOOL(0, "revs", &use_internal_rev_list,3138 N_("read revision arguments from standard input")),3139 OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,3140 N_("limit the objects to those that are not yet packed"),3141 1, PARSE_OPT_NONEG),3142 OPT_SET_INT_F(0, "all", &rev_list_all,3143 N_("include objects reachable from any reference"),3144 1, PARSE_OPT_NONEG),3145 OPT_SET_INT_F(0, "reflog", &rev_list_reflog,3146 N_("include objects referred by reflog entries"),3147 1, PARSE_OPT_NONEG),3148 OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,3149 N_("include objects referred to by the index"),3150 1, PARSE_OPT_NONEG),3151 OPT_BOOL(0, "stdout", &pack_to_stdout,3152 N_("output pack to stdout")),3153 OPT_BOOL(0, "include-tag", &include_tag,3154 N_("include tag objects that refer to objects to be packed")),3155 OPT_BOOL(0, "keep-unreachable", &keep_unreachable,3156 N_("keep unreachable objects")),3157 OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,3158 N_("pack loose unreachable objects")),3159 { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),3160 N_("unpack unreachable objects newer than <time>"),3161 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },3162 OPT_BOOL(0, "thin", &thin,3163 N_("create thin packs")),3164 OPT_BOOL(0, "shallow", &shallow,3165 N_("create packs suitable for shallow fetches")),3166 OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,3167 N_("ignore packs that have companion .keep file")),3168 OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),3169 N_("ignore this pack")),3170 OPT_INTEGER(0, "compression", &pack_compression_level,3171 N_("pack compression level")),3172 OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,3173 N_("do not hide commits by grafts"), 0),3174 OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,3175 N_("use a bitmap index if available to speed up counting objects")),3176 OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,3177 N_("write a bitmap index together with the pack index")),3178 OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),3179 { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),3180 N_("handling for missing objects"), PARSE_OPT_NONEG,3181 option_parse_missing_action },3182 OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,3183 N_("do not pack objects in promisor packfiles")),3184 OPT_END(),3185 };31863187 if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))3188 BUG("too many dfs states, increase OE_DFS_STATE_BITS");31893190 check_replace_refs = 0;31913192 reset_pack_idx_option(&pack_idx_opts);3193 git_config(git_pack_config, NULL);31943195 progress = isatty(2);3196 argc = parse_options(argc, argv, prefix, pack_objects_options,3197 pack_usage, 0);31983199 if (argc) {3200 base_name = argv[0];3201 argc--;3202 }3203 if (pack_to_stdout != !base_name || argc)3204 usage_with_options(pack_usage, pack_objects_options);32053206 if (depth >= (1 << OE_DEPTH_BITS)) {3207 warning(_("delta chain depth %d is too deep, forcing %d"),3208 depth, (1 << OE_DEPTH_BITS) - 1);3209 depth = (1 << OE_DEPTH_BITS) - 1;3210 }3211 if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {3212 warning(_("pack.deltaCacheLimit is too high, forcing %d"),3213 (1U << OE_Z_DELTA_BITS) - 1);3214 cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;3215 }32163217 argv_array_push(&rp, "pack-objects");3218 if (thin) {3219 use_internal_rev_list = 1;3220 argv_array_push(&rp, shallow3221 ? "--objects-edge-aggressive"3222 : "--objects-edge");3223 } else3224 argv_array_push(&rp, "--objects");32253226 if (rev_list_all) {3227 use_internal_rev_list = 1;3228 argv_array_push(&rp, "--all");3229 }3230 if (rev_list_reflog) {3231 use_internal_rev_list = 1;3232 argv_array_push(&rp, "--reflog");3233 }3234 if (rev_list_index) {3235 use_internal_rev_list = 1;3236 argv_array_push(&rp, "--indexed-objects");3237 }3238 if (rev_list_unpacked) {3239 use_internal_rev_list = 1;3240 argv_array_push(&rp, "--unpacked");3241 }32423243 if (exclude_promisor_objects) {3244 use_internal_rev_list = 1;3245 fetch_if_missing = 0;3246 argv_array_push(&rp, "--exclude-promisor-objects");3247 }3248 if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)3249 use_internal_rev_list = 1;32503251 if (!reuse_object)3252 reuse_delta = 0;3253 if (pack_compression_level == -1)3254 pack_compression_level = Z_DEFAULT_COMPRESSION;3255 else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)3256 die("bad pack compression level %d", pack_compression_level);32573258 if (!delta_search_threads) /* --threads=0 means autodetect */3259 delta_search_threads = online_cpus();32603261#ifdef NO_PTHREADS3262 if (delta_search_threads != 1)3263 warning("no threads support, ignoring --threads");3264#endif3265 if (!pack_to_stdout && !pack_size_limit)3266 pack_size_limit = pack_size_limit_cfg;3267 if (pack_to_stdout && pack_size_limit)3268 die("--max-pack-size cannot be used to build a pack for transfer.");3269 if (pack_size_limit && pack_size_limit < 1024*1024) {3270 warning("minimum pack size limit is 1 MiB");3271 pack_size_limit = 1024*1024;3272 }32733274 if (!pack_to_stdout && thin)3275 die("--thin cannot be used to build an indexable pack.");32763277 if (keep_unreachable && unpack_unreachable)3278 die("--keep-unreachable and --unpack-unreachable are incompatible.");3279 if (!rev_list_all || !rev_list_reflog || !rev_list_index)3280 unpack_unreachable_expiration = 0;32813282 if (filter_options.choice) {3283 if (!pack_to_stdout)3284 die("cannot use --filter without --stdout.");3285 use_bitmap_index = 0;3286 }32873288 /*3289 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3290 *3291 * - to produce good pack (with bitmap index not-yet-packed objects are3292 * packed in suboptimal order).3293 *3294 * - to use more robust pack-generation codepath (avoiding possible3295 * bugs in bitmap code and possible bitmap index corruption).3296 */3297 if (!pack_to_stdout)3298 use_bitmap_index_default = 0;32993300 if (use_bitmap_index < 0)3301 use_bitmap_index = use_bitmap_index_default;33023303 /* "hard" reasons not to use bitmaps; these just won't work at all */3304 if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())3305 use_bitmap_index = 0;33063307 if (pack_to_stdout || !rev_list_all)3308 write_bitmap_index = 0;33093310 if (progress && all_progress_implied)3311 progress = 2;33123313 add_extra_kept_packs(&keep_pack_list);3314 if (ignore_packed_keep_on_disk) {3315 struct packed_git *p;3316 for (p = get_packed_git(the_repository); p; p = p->next)3317 if (p->pack_local && p->pack_keep)3318 break;3319 if (!p) /* no keep-able packs found */3320 ignore_packed_keep_on_disk = 0;3321 }3322 if (local) {3323 /*3324 * unlike ignore_packed_keep_on_disk above, we do not3325 * want to unset "local" based on looking at packs, as3326 * it also covers non-local objects3327 */3328 struct packed_git *p;3329 for (p = get_packed_git(the_repository); p; p = p->next) {3330 if (!p->pack_local) {3331 have_non_local_packs = 1;3332 break;3333 }3334 }3335 }33363337 prepare_packing_data(&to_pack);33383339 if (progress)3340 progress_state = start_progress(_("Enumerating objects"), 0);3341 if (!use_internal_rev_list)3342 read_object_list_from_stdin();3343 else {3344 get_object_list(rp.argc, rp.argv);3345 argv_array_clear(&rp);3346 }3347 cleanup_preferred_base();3348 if (include_tag && nr_result)3349 for_each_ref(add_ref_tag, NULL);3350 stop_progress(&progress_state);33513352 if (non_empty && !nr_result)3353 return 0;3354 if (nr_result)3355 prepare_pack(window, depth);3356 write_pack_file();3357 if (progress)3358 fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"3359 " reused %"PRIu32" (delta %"PRIu32")\n",3360 written, written_delta, reused, reused_delta);3361 return 0;3362}