1#include "builtin.h" 2#include "cache.h" 3#include "repository.h" 4#include "config.h" 5#include "attr.h" 6#include "object.h" 7#include "blob.h" 8#include "commit.h" 9#include "tag.h" 10#include "tree.h" 11#include "delta.h" 12#include "pack.h" 13#include "pack-revindex.h" 14#include "csum-file.h" 15#include "tree-walk.h" 16#include "diff.h" 17#include "revision.h" 18#include "list-objects.h" 19#include "list-objects-filter.h" 20#include "list-objects-filter-options.h" 21#include "pack-objects.h" 22#include "progress.h" 23#include "refs.h" 24#include "streaming.h" 25#include "thread-utils.h" 26#include "pack-bitmap.h" 27#include "reachable.h" 28#include "sha1-array.h" 29#include "argv-array.h" 30#include "list.h" 31#include "packfile.h" 32#include "object-store.h" 33 34#define IN_PACK(obj) oe_in_pack(&to_pack, obj) 35#define DELTA(obj) oe_delta(&to_pack, obj) 36#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) 37#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) 38#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) 39#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) 40#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) 41 42static const char *pack_usage[] = { 43 N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"), 44 N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"), 45 NULL 46}; 47 48/* 49 * Objects we are going to pack are collected in the `to_pack` structure. 50 * It contains an array (dynamically expanded) of the object data, and a map 51 * that can resolve SHA1s to their position in the array. 52 */ 53static struct packing_data to_pack; 54 55static struct pack_idx_entry **written_list; 56static uint32_t nr_result, nr_written; 57 58static int non_empty; 59static int reuse_delta = 1, reuse_object = 1; 60static int keep_unreachable, unpack_unreachable, include_tag; 61static timestamp_t unpack_unreachable_expiration; 62static int pack_loose_unreachable; 63static int local; 64static int have_non_local_packs; 65static int incremental; 66static int ignore_packed_keep; 67static int allow_ofs_delta; 68static struct pack_idx_option pack_idx_opts; 69static const char *base_name; 70static int progress = 1; 71static int window = 10; 72static unsigned long pack_size_limit; 73static int depth = 50; 74static int delta_search_threads; 75static int pack_to_stdout; 76static int num_preferred_base; 77static struct progress *progress_state; 78 79static struct packed_git *reuse_packfile; 80static uint32_t reuse_packfile_objects; 81static off_t reuse_packfile_offset; 82 83static int use_bitmap_index_default = 1; 84static int use_bitmap_index = -1; 85static int write_bitmap_index; 86static uint16_t write_bitmap_options; 87 88static int exclude_promisor_objects; 89 90static unsigned long delta_cache_size = 0; 91static unsigned long max_delta_cache_size = 256 * 1024 * 1024; 92static unsigned long cache_max_small_delta_size = 1000; 93 94static unsigned long window_memory_limit = 0; 95 96static struct list_objects_filter_options filter_options; 97 98enum missing_action { 99 MA_ERROR = 0, /* fail if any missing objects are encountered */ 100 MA_ALLOW_ANY, /* silently allow ALL missing objects */ 101 MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ 102}; 103static enum missing_action arg_missing_action; 104static show_object_fn fn_show_object; 105 106/* 107 * stats 108 */ 109static uint32_t written, written_delta; 110static uint32_t reused, reused_delta; 111 112/* 113 * Indexed commits 114 */ 115static struct commit **indexed_commits; 116static unsigned int indexed_commits_nr; 117static unsigned int indexed_commits_alloc; 118 119static void index_commit_for_bitmap(struct commit *commit) 120{ 121 if (indexed_commits_nr >= indexed_commits_alloc) { 122 indexed_commits_alloc = (indexed_commits_alloc + 32) * 2; 123 REALLOC_ARRAY(indexed_commits, indexed_commits_alloc); 124 } 125 126 indexed_commits[indexed_commits_nr++] = commit; 127} 128 129static void *get_delta(struct object_entry *entry) 130{ 131 unsigned long size, base_size, delta_size; 132 void *buf, *base_buf, *delta_buf; 133 enum object_type type; 134 135 buf = read_object_file(&entry->idx.oid, &type, &size); 136 if (!buf) 137 die("unable to read %s", oid_to_hex(&entry->idx.oid)); 138 base_buf = read_object_file(&DELTA(entry)->idx.oid, &type, 139 &base_size); 140 if (!base_buf) 141 die("unable to read %s", 142 oid_to_hex(&DELTA(entry)->idx.oid)); 143 delta_buf = diff_delta(base_buf, base_size, 144 buf, size, &delta_size, 0); 145 if (!delta_buf || delta_size != entry->delta_size) 146 die("delta size changed"); 147 free(buf); 148 free(base_buf); 149 return delta_buf; 150} 151 152static unsigned long do_compress(void **pptr, unsigned long size) 153{ 154 git_zstream stream; 155 void *in, *out; 156 unsigned long maxsize; 157 158 git_deflate_init(&stream, pack_compression_level); 159 maxsize = git_deflate_bound(&stream, size); 160 161 in = *pptr; 162 out = xmalloc(maxsize); 163 *pptr = out; 164 165 stream.next_in = in; 166 stream.avail_in = size; 167 stream.next_out = out; 168 stream.avail_out = maxsize; 169 while (git_deflate(&stream, Z_FINISH) == Z_OK) 170 ; /* nothing */ 171 git_deflate_end(&stream); 172 173 free(in); 174 return stream.total_out; 175} 176 177static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, 178 const struct object_id *oid) 179{ 180 git_zstream stream; 181 unsigned char ibuf[1024 * 16]; 182 unsigned char obuf[1024 * 16]; 183 unsigned long olen = 0; 184 185 git_deflate_init(&stream, pack_compression_level); 186 187 for (;;) { 188 ssize_t readlen; 189 int zret = Z_OK; 190 readlen = read_istream(st, ibuf, sizeof(ibuf)); 191 if (readlen == -1) 192 die(_("unable to read %s"), oid_to_hex(oid)); 193 194 stream.next_in = ibuf; 195 stream.avail_in = readlen; 196 while ((stream.avail_in || readlen == 0) && 197 (zret == Z_OK || zret == Z_BUF_ERROR)) { 198 stream.next_out = obuf; 199 stream.avail_out = sizeof(obuf); 200 zret = git_deflate(&stream, readlen ? 0 : Z_FINISH); 201 hashwrite(f, obuf, stream.next_out - obuf); 202 olen += stream.next_out - obuf; 203 } 204 if (stream.avail_in) 205 die(_("deflate error (%d)"), zret); 206 if (readlen == 0) { 207 if (zret != Z_STREAM_END) 208 die(_("deflate error (%d)"), zret); 209 break; 210 } 211 } 212 git_deflate_end(&stream); 213 return olen; 214} 215 216/* 217 * we are going to reuse the existing object data as is. make 218 * sure it is not corrupt. 219 */ 220static int check_pack_inflate(struct packed_git *p, 221 struct pack_window **w_curs, 222 off_t offset, 223 off_t len, 224 unsigned long expect) 225{ 226 git_zstream stream; 227 unsigned char fakebuf[4096], *in; 228 int st; 229 230 memset(&stream, 0, sizeof(stream)); 231 git_inflate_init(&stream); 232 do { 233 in = use_pack(p, w_curs, offset, &stream.avail_in); 234 stream.next_in = in; 235 stream.next_out = fakebuf; 236 stream.avail_out = sizeof(fakebuf); 237 st = git_inflate(&stream, Z_FINISH); 238 offset += stream.next_in - in; 239 } while (st == Z_OK || st == Z_BUF_ERROR); 240 git_inflate_end(&stream); 241 return (st == Z_STREAM_END && 242 stream.total_out == expect && 243 stream.total_in == len) ? 0 : -1; 244} 245 246static void copy_pack_data(struct hashfile *f, 247 struct packed_git *p, 248 struct pack_window **w_curs, 249 off_t offset, 250 off_t len) 251{ 252 unsigned char *in; 253 unsigned long avail; 254 255 while (len) { 256 in = use_pack(p, w_curs, offset, &avail); 257 if (avail > len) 258 avail = (unsigned long)len; 259 hashwrite(f, in, avail); 260 offset += avail; 261 len -= avail; 262 } 263} 264 265/* Return 0 if we will bust the pack-size limit */ 266static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, 267 unsigned long limit, int usable_delta) 268{ 269 unsigned long size, datalen; 270 unsigned char header[MAX_PACK_OBJECT_HEADER], 271 dheader[MAX_PACK_OBJECT_HEADER]; 272 unsigned hdrlen; 273 enum object_type type; 274 void *buf; 275 struct git_istream *st = NULL; 276 277 if (!usable_delta) { 278 if (oe_type(entry) == OBJ_BLOB && 279 entry->size > big_file_threshold && 280 (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) 281 buf = NULL; 282 else { 283 buf = read_object_file(&entry->idx.oid, &type, &size); 284 if (!buf) 285 die(_("unable to read %s"), 286 oid_to_hex(&entry->idx.oid)); 287 } 288 /* 289 * make sure no cached delta data remains from a 290 * previous attempt before a pack split occurred. 291 */ 292 FREE_AND_NULL(entry->delta_data); 293 entry->z_delta_size = 0; 294 } else if (entry->delta_data) { 295 size = entry->delta_size; 296 buf = entry->delta_data; 297 entry->delta_data = NULL; 298 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 299 OBJ_OFS_DELTA : OBJ_REF_DELTA; 300 } else { 301 buf = get_delta(entry); 302 size = entry->delta_size; 303 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 304 OBJ_OFS_DELTA : OBJ_REF_DELTA; 305 } 306 307 if (st) /* large blob case, just assume we don't compress well */ 308 datalen = size; 309 else if (entry->z_delta_size) 310 datalen = entry->z_delta_size; 311 else 312 datalen = do_compress(&buf, size); 313 314 /* 315 * The object header is a byte of 'type' followed by zero or 316 * more bytes of length. 317 */ 318 hdrlen = encode_in_pack_object_header(header, sizeof(header), 319 type, size); 320 321 if (type == OBJ_OFS_DELTA) { 322 /* 323 * Deltas with relative base contain an additional 324 * encoding of the relative offset for the delta 325 * base from this object's position in the pack. 326 */ 327 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; 328 unsigned pos = sizeof(dheader) - 1; 329 dheader[pos] = ofs & 127; 330 while (ofs >>= 7) 331 dheader[--pos] = 128 | (--ofs & 127); 332 if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) { 333 if (st) 334 close_istream(st); 335 free(buf); 336 return 0; 337 } 338 hashwrite(f, header, hdrlen); 339 hashwrite(f, dheader + pos, sizeof(dheader) - pos); 340 hdrlen += sizeof(dheader) - pos; 341 } else if (type == OBJ_REF_DELTA) { 342 /* 343 * Deltas with a base reference contain 344 * an additional 20 bytes for the base sha1. 345 */ 346 if (limit && hdrlen + 20 + datalen + 20 >= limit) { 347 if (st) 348 close_istream(st); 349 free(buf); 350 return 0; 351 } 352 hashwrite(f, header, hdrlen); 353 hashwrite(f, DELTA(entry)->idx.oid.hash, 20); 354 hdrlen += 20; 355 } else { 356 if (limit && hdrlen + datalen + 20 >= limit) { 357 if (st) 358 close_istream(st); 359 free(buf); 360 return 0; 361 } 362 hashwrite(f, header, hdrlen); 363 } 364 if (st) { 365 datalen = write_large_blob_data(st, f, &entry->idx.oid); 366 close_istream(st); 367 } else { 368 hashwrite(f, buf, datalen); 369 free(buf); 370 } 371 372 return hdrlen + datalen; 373} 374 375/* Return 0 if we will bust the pack-size limit */ 376static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, 377 unsigned long limit, int usable_delta) 378{ 379 struct packed_git *p = IN_PACK(entry); 380 struct pack_window *w_curs = NULL; 381 struct revindex_entry *revidx; 382 off_t offset; 383 enum object_type type = oe_type(entry); 384 off_t datalen; 385 unsigned char header[MAX_PACK_OBJECT_HEADER], 386 dheader[MAX_PACK_OBJECT_HEADER]; 387 unsigned hdrlen; 388 389 if (DELTA(entry)) 390 type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? 391 OBJ_OFS_DELTA : OBJ_REF_DELTA; 392 hdrlen = encode_in_pack_object_header(header, sizeof(header), 393 type, entry->size); 394 395 offset = entry->in_pack_offset; 396 revidx = find_pack_revindex(p, offset); 397 datalen = revidx[1].offset - offset; 398 if (!pack_to_stdout && p->index_version > 1 && 399 check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { 400 error("bad packed object CRC for %s", 401 oid_to_hex(&entry->idx.oid)); 402 unuse_pack(&w_curs); 403 return write_no_reuse_object(f, entry, limit, usable_delta); 404 } 405 406 offset += entry->in_pack_header_size; 407 datalen -= entry->in_pack_header_size; 408 409 if (!pack_to_stdout && p->index_version == 1 && 410 check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) { 411 error("corrupt packed object for %s", 412 oid_to_hex(&entry->idx.oid)); 413 unuse_pack(&w_curs); 414 return write_no_reuse_object(f, entry, limit, usable_delta); 415 } 416 417 if (type == OBJ_OFS_DELTA) { 418 off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; 419 unsigned pos = sizeof(dheader) - 1; 420 dheader[pos] = ofs & 127; 421 while (ofs >>= 7) 422 dheader[--pos] = 128 | (--ofs & 127); 423 if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) { 424 unuse_pack(&w_curs); 425 return 0; 426 } 427 hashwrite(f, header, hdrlen); 428 hashwrite(f, dheader + pos, sizeof(dheader) - pos); 429 hdrlen += sizeof(dheader) - pos; 430 reused_delta++; 431 } else if (type == OBJ_REF_DELTA) { 432 if (limit && hdrlen + 20 + datalen + 20 >= limit) { 433 unuse_pack(&w_curs); 434 return 0; 435 } 436 hashwrite(f, header, hdrlen); 437 hashwrite(f, DELTA(entry)->idx.oid.hash, 20); 438 hdrlen += 20; 439 reused_delta++; 440 } else { 441 if (limit && hdrlen + datalen + 20 >= limit) { 442 unuse_pack(&w_curs); 443 return 0; 444 } 445 hashwrite(f, header, hdrlen); 446 } 447 copy_pack_data(f, p, &w_curs, offset, datalen); 448 unuse_pack(&w_curs); 449 reused++; 450 return hdrlen + datalen; 451} 452 453/* Return 0 if we will bust the pack-size limit */ 454static off_t write_object(struct hashfile *f, 455 struct object_entry *entry, 456 off_t write_offset) 457{ 458 unsigned long limit; 459 off_t len; 460 int usable_delta, to_reuse; 461 462 if (!pack_to_stdout) 463 crc32_begin(f); 464 465 /* apply size limit if limited packsize and not first object */ 466 if (!pack_size_limit || !nr_written) 467 limit = 0; 468 else if (pack_size_limit <= write_offset) 469 /* 470 * the earlier object did not fit the limit; avoid 471 * mistaking this with unlimited (i.e. limit = 0). 472 */ 473 limit = 1; 474 else 475 limit = pack_size_limit - write_offset; 476 477 if (!DELTA(entry)) 478 usable_delta = 0; /* no delta */ 479 else if (!pack_size_limit) 480 usable_delta = 1; /* unlimited packfile */ 481 else if (DELTA(entry)->idx.offset == (off_t)-1) 482 usable_delta = 0; /* base was written to another pack */ 483 else if (DELTA(entry)->idx.offset) 484 usable_delta = 1; /* base already exists in this pack */ 485 else 486 usable_delta = 0; /* base could end up in another pack */ 487 488 if (!reuse_object) 489 to_reuse = 0; /* explicit */ 490 else if (!IN_PACK(entry)) 491 to_reuse = 0; /* can't reuse what we don't have */ 492 else if (oe_type(entry) == OBJ_REF_DELTA || 493 oe_type(entry) == OBJ_OFS_DELTA) 494 /* check_object() decided it for us ... */ 495 to_reuse = usable_delta; 496 /* ... but pack split may override that */ 497 else if (oe_type(entry) != entry->in_pack_type) 498 to_reuse = 0; /* pack has delta which is unusable */ 499 else if (DELTA(entry)) 500 to_reuse = 0; /* we want to pack afresh */ 501 else 502 to_reuse = 1; /* we have it in-pack undeltified, 503 * and we do not need to deltify it. 504 */ 505 506 if (!to_reuse) 507 len = write_no_reuse_object(f, entry, limit, usable_delta); 508 else 509 len = write_reuse_object(f, entry, limit, usable_delta); 510 if (!len) 511 return 0; 512 513 if (usable_delta) 514 written_delta++; 515 written++; 516 if (!pack_to_stdout) 517 entry->idx.crc32 = crc32_end(f); 518 return len; 519} 520 521enum write_one_status { 522 WRITE_ONE_SKIP = -1, /* already written */ 523 WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */ 524 WRITE_ONE_WRITTEN = 1, /* normal */ 525 WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */ 526}; 527 528static enum write_one_status write_one(struct hashfile *f, 529 struct object_entry *e, 530 off_t *offset) 531{ 532 off_t size; 533 int recursing; 534 535 /* 536 * we set offset to 1 (which is an impossible value) to mark 537 * the fact that this object is involved in "write its base 538 * first before writing a deltified object" recursion. 539 */ 540 recursing = (e->idx.offset == 1); 541 if (recursing) { 542 warning("recursive delta detected for object %s", 543 oid_to_hex(&e->idx.oid)); 544 return WRITE_ONE_RECURSIVE; 545 } else if (e->idx.offset || e->preferred_base) { 546 /* offset is non zero if object is written already. */ 547 return WRITE_ONE_SKIP; 548 } 549 550 /* if we are deltified, write out base object first. */ 551 if (DELTA(e)) { 552 e->idx.offset = 1; /* now recurse */ 553 switch (write_one(f, DELTA(e), offset)) { 554 case WRITE_ONE_RECURSIVE: 555 /* we cannot depend on this one */ 556 SET_DELTA(e, NULL); 557 break; 558 default: 559 break; 560 case WRITE_ONE_BREAK: 561 e->idx.offset = recursing; 562 return WRITE_ONE_BREAK; 563 } 564 } 565 566 e->idx.offset = *offset; 567 size = write_object(f, e, *offset); 568 if (!size) { 569 e->idx.offset = recursing; 570 return WRITE_ONE_BREAK; 571 } 572 written_list[nr_written++] = &e->idx; 573 574 /* make sure off_t is sufficiently large not to wrap */ 575 if (signed_add_overflows(*offset, size)) 576 die("pack too large for current definition of off_t"); 577 *offset += size; 578 return WRITE_ONE_WRITTEN; 579} 580 581static int mark_tagged(const char *path, const struct object_id *oid, int flag, 582 void *cb_data) 583{ 584 struct object_id peeled; 585 struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL); 586 587 if (entry) 588 entry->tagged = 1; 589 if (!peel_ref(path, &peeled)) { 590 entry = packlist_find(&to_pack, peeled.hash, NULL); 591 if (entry) 592 entry->tagged = 1; 593 } 594 return 0; 595} 596 597static inline void add_to_write_order(struct object_entry **wo, 598 unsigned int *endp, 599 struct object_entry *e) 600{ 601 if (e->filled) 602 return; 603 wo[(*endp)++] = e; 604 e->filled = 1; 605} 606 607static void add_descendants_to_write_order(struct object_entry **wo, 608 unsigned int *endp, 609 struct object_entry *e) 610{ 611 int add_to_order = 1; 612 while (e) { 613 if (add_to_order) { 614 struct object_entry *s; 615 /* add this node... */ 616 add_to_write_order(wo, endp, e); 617 /* all its siblings... */ 618 for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) { 619 add_to_write_order(wo, endp, s); 620 } 621 } 622 /* drop down a level to add left subtree nodes if possible */ 623 if (DELTA_CHILD(e)) { 624 add_to_order = 1; 625 e = DELTA_CHILD(e); 626 } else { 627 add_to_order = 0; 628 /* our sibling might have some children, it is next */ 629 if (DELTA_SIBLING(e)) { 630 e = DELTA_SIBLING(e); 631 continue; 632 } 633 /* go back to our parent node */ 634 e = DELTA(e); 635 while (e && !DELTA_SIBLING(e)) { 636 /* we're on the right side of a subtree, keep 637 * going up until we can go right again */ 638 e = DELTA(e); 639 } 640 if (!e) { 641 /* done- we hit our original root node */ 642 return; 643 } 644 /* pass it off to sibling at this level */ 645 e = DELTA_SIBLING(e); 646 } 647 }; 648} 649 650static void add_family_to_write_order(struct object_entry **wo, 651 unsigned int *endp, 652 struct object_entry *e) 653{ 654 struct object_entry *root; 655 656 for (root = e; DELTA(root); root = DELTA(root)) 657 ; /* nothing */ 658 add_descendants_to_write_order(wo, endp, root); 659} 660 661static struct object_entry **compute_write_order(void) 662{ 663 unsigned int i, wo_end, last_untagged; 664 665 struct object_entry **wo; 666 struct object_entry *objects = to_pack.objects; 667 668 for (i = 0; i < to_pack.nr_objects; i++) { 669 objects[i].tagged = 0; 670 objects[i].filled = 0; 671 SET_DELTA_CHILD(&objects[i], NULL); 672 SET_DELTA_SIBLING(&objects[i], NULL); 673 } 674 675 /* 676 * Fully connect delta_child/delta_sibling network. 677 * Make sure delta_sibling is sorted in the original 678 * recency order. 679 */ 680 for (i = to_pack.nr_objects; i > 0;) { 681 struct object_entry *e = &objects[--i]; 682 if (!DELTA(e)) 683 continue; 684 /* Mark me as the first child */ 685 e->delta_sibling_idx = DELTA(e)->delta_child_idx; 686 SET_DELTA_CHILD(DELTA(e), e); 687 } 688 689 /* 690 * Mark objects that are at the tip of tags. 691 */ 692 for_each_tag_ref(mark_tagged, NULL); 693 694 /* 695 * Give the objects in the original recency order until 696 * we see a tagged tip. 697 */ 698 ALLOC_ARRAY(wo, to_pack.nr_objects); 699 for (i = wo_end = 0; i < to_pack.nr_objects; i++) { 700 if (objects[i].tagged) 701 break; 702 add_to_write_order(wo, &wo_end, &objects[i]); 703 } 704 last_untagged = i; 705 706 /* 707 * Then fill all the tagged tips. 708 */ 709 for (; i < to_pack.nr_objects; i++) { 710 if (objects[i].tagged) 711 add_to_write_order(wo, &wo_end, &objects[i]); 712 } 713 714 /* 715 * And then all remaining commits and tags. 716 */ 717 for (i = last_untagged; i < to_pack.nr_objects; i++) { 718 if (oe_type(&objects[i]) != OBJ_COMMIT && 719 oe_type(&objects[i]) != OBJ_TAG) 720 continue; 721 add_to_write_order(wo, &wo_end, &objects[i]); 722 } 723 724 /* 725 * And then all the trees. 726 */ 727 for (i = last_untagged; i < to_pack.nr_objects; i++) { 728 if (oe_type(&objects[i]) != OBJ_TREE) 729 continue; 730 add_to_write_order(wo, &wo_end, &objects[i]); 731 } 732 733 /* 734 * Finally all the rest in really tight order 735 */ 736 for (i = last_untagged; i < to_pack.nr_objects; i++) { 737 if (!objects[i].filled) 738 add_family_to_write_order(wo, &wo_end, &objects[i]); 739 } 740 741 if (wo_end != to_pack.nr_objects) 742 die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects); 743 744 return wo; 745} 746 747static off_t write_reused_pack(struct hashfile *f) 748{ 749 unsigned char buffer[8192]; 750 off_t to_write, total; 751 int fd; 752 753 if (!is_pack_valid(reuse_packfile)) 754 die("packfile is invalid: %s", reuse_packfile->pack_name); 755 756 fd = git_open(reuse_packfile->pack_name); 757 if (fd < 0) 758 die_errno("unable to open packfile for reuse: %s", 759 reuse_packfile->pack_name); 760 761 if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1) 762 die_errno("unable to seek in reused packfile"); 763 764 if (reuse_packfile_offset < 0) 765 reuse_packfile_offset = reuse_packfile->pack_size - 20; 766 767 total = to_write = reuse_packfile_offset - sizeof(struct pack_header); 768 769 while (to_write) { 770 int read_pack = xread(fd, buffer, sizeof(buffer)); 771 772 if (read_pack <= 0) 773 die_errno("unable to read from reused packfile"); 774 775 if (read_pack > to_write) 776 read_pack = to_write; 777 778 hashwrite(f, buffer, read_pack); 779 to_write -= read_pack; 780 781 /* 782 * We don't know the actual number of objects written, 783 * only how many bytes written, how many bytes total, and 784 * how many objects total. So we can fake it by pretending all 785 * objects we are writing are the same size. This gives us a 786 * smooth progress meter, and at the end it matches the true 787 * answer. 788 */ 789 written = reuse_packfile_objects * 790 (((double)(total - to_write)) / total); 791 display_progress(progress_state, written); 792 } 793 794 close(fd); 795 written = reuse_packfile_objects; 796 display_progress(progress_state, written); 797 return reuse_packfile_offset - sizeof(struct pack_header); 798} 799 800static const char no_split_warning[] = N_( 801"disabling bitmap writing, packs are split due to pack.packSizeLimit" 802); 803 804static void write_pack_file(void) 805{ 806 uint32_t i = 0, j; 807 struct hashfile *f; 808 off_t offset; 809 uint32_t nr_remaining = nr_result; 810 time_t last_mtime = 0; 811 struct object_entry **write_order; 812 813 if (progress > pack_to_stdout) 814 progress_state = start_progress(_("Writing objects"), nr_result); 815 ALLOC_ARRAY(written_list, to_pack.nr_objects); 816 write_order = compute_write_order(); 817 818 do { 819 struct object_id oid; 820 char *pack_tmp_name = NULL; 821 822 if (pack_to_stdout) 823 f = hashfd_throughput(1, "<stdout>", progress_state); 824 else 825 f = create_tmp_packfile(&pack_tmp_name); 826 827 offset = write_pack_header(f, nr_remaining); 828 829 if (reuse_packfile) { 830 off_t packfile_size; 831 assert(pack_to_stdout); 832 833 packfile_size = write_reused_pack(f); 834 offset += packfile_size; 835 } 836 837 nr_written = 0; 838 for (; i < to_pack.nr_objects; i++) { 839 struct object_entry *e = write_order[i]; 840 if (write_one(f, e, &offset) == WRITE_ONE_BREAK) 841 break; 842 display_progress(progress_state, written); 843 } 844 845 /* 846 * Did we write the wrong # entries in the header? 847 * If so, rewrite it like in fast-import 848 */ 849 if (pack_to_stdout) { 850 hashclose(f, oid.hash, CSUM_CLOSE); 851 } else if (nr_written == nr_remaining) { 852 hashclose(f, oid.hash, CSUM_FSYNC); 853 } else { 854 int fd = hashclose(f, oid.hash, 0); 855 fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, 856 nr_written, oid.hash, offset); 857 close(fd); 858 if (write_bitmap_index) { 859 warning(_(no_split_warning)); 860 write_bitmap_index = 0; 861 } 862 } 863 864 if (!pack_to_stdout) { 865 struct stat st; 866 struct strbuf tmpname = STRBUF_INIT; 867 868 /* 869 * Packs are runtime accessed in their mtime 870 * order since newer packs are more likely to contain 871 * younger objects. So if we are creating multiple 872 * packs then we should modify the mtime of later ones 873 * to preserve this property. 874 */ 875 if (stat(pack_tmp_name, &st) < 0) { 876 warning_errno("failed to stat %s", pack_tmp_name); 877 } else if (!last_mtime) { 878 last_mtime = st.st_mtime; 879 } else { 880 struct utimbuf utb; 881 utb.actime = st.st_atime; 882 utb.modtime = --last_mtime; 883 if (utime(pack_tmp_name, &utb) < 0) 884 warning_errno("failed utime() on %s", pack_tmp_name); 885 } 886 887 strbuf_addf(&tmpname, "%s-", base_name); 888 889 if (write_bitmap_index) { 890 bitmap_writer_set_checksum(oid.hash); 891 bitmap_writer_build_type_index( 892 &to_pack, written_list, nr_written); 893 } 894 895 finish_tmp_packfile(&tmpname, pack_tmp_name, 896 written_list, nr_written, 897 &pack_idx_opts, oid.hash); 898 899 if (write_bitmap_index) { 900 strbuf_addf(&tmpname, "%s.bitmap", oid_to_hex(&oid)); 901 902 stop_progress(&progress_state); 903 904 bitmap_writer_show_progress(progress); 905 bitmap_writer_reuse_bitmaps(&to_pack); 906 bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1); 907 bitmap_writer_build(&to_pack); 908 bitmap_writer_finish(written_list, nr_written, 909 tmpname.buf, write_bitmap_options); 910 write_bitmap_index = 0; 911 } 912 913 strbuf_release(&tmpname); 914 free(pack_tmp_name); 915 puts(oid_to_hex(&oid)); 916 } 917 918 /* mark written objects as written to previous pack */ 919 for (j = 0; j < nr_written; j++) { 920 written_list[j]->offset = (off_t)-1; 921 } 922 nr_remaining -= nr_written; 923 } while (nr_remaining && i < to_pack.nr_objects); 924 925 free(written_list); 926 free(write_order); 927 stop_progress(&progress_state); 928 if (written != nr_result) 929 die("wrote %"PRIu32" objects while expecting %"PRIu32, 930 written, nr_result); 931} 932 933static int no_try_delta(const char *path) 934{ 935 static struct attr_check *check; 936 937 if (!check) 938 check = attr_check_initl("delta", NULL); 939 if (git_check_attr(path, check)) 940 return 0; 941 if (ATTR_FALSE(check->items[0].value)) 942 return 1; 943 return 0; 944} 945 946/* 947 * When adding an object, check whether we have already added it 948 * to our packing list. If so, we can skip. However, if we are 949 * being asked to excludei t, but the previous mention was to include 950 * it, make sure to adjust its flags and tweak our numbers accordingly. 951 * 952 * As an optimization, we pass out the index position where we would have 953 * found the item, since that saves us from having to look it up again a 954 * few lines later when we want to add the new entry. 955 */ 956static int have_duplicate_entry(const struct object_id *oid, 957 int exclude, 958 uint32_t *index_pos) 959{ 960 struct object_entry *entry; 961 962 entry = packlist_find(&to_pack, oid->hash, index_pos); 963 if (!entry) 964 return 0; 965 966 if (exclude) { 967 if (!entry->preferred_base) 968 nr_result--; 969 entry->preferred_base = 1; 970 } 971 972 return 1; 973} 974 975static int want_found_object(int exclude, struct packed_git *p) 976{ 977 if (exclude) 978 return 1; 979 if (incremental) 980 return 0; 981 982 /* 983 * When asked to do --local (do not include an object that appears in a 984 * pack we borrow from elsewhere) or --honor-pack-keep (do not include 985 * an object that appears in a pack marked with .keep), finding a pack 986 * that matches the criteria is sufficient for us to decide to omit it. 987 * However, even if this pack does not satisfy the criteria, we need to 988 * make sure no copy of this object appears in _any_ pack that makes us 989 * to omit the object, so we need to check all the packs. 990 * 991 * We can however first check whether these options can possible matter; 992 * if they do not matter we know we want the object in generated pack. 993 * Otherwise, we signal "-1" at the end to tell the caller that we do 994 * not know either way, and it needs to check more packs. 995 */ 996 if (!ignore_packed_keep && 997 (!local || !have_non_local_packs)) 998 return 1; 9991000 if (local && !p->pack_local)1001 return 0;1002 if (ignore_packed_keep && p->pack_local && p->pack_keep)1003 return 0;10041005 /* we don't know yet; keep looking for more packs */1006 return -1;1007}10081009/*1010 * Check whether we want the object in the pack (e.g., we do not want1011 * objects found in non-local stores if the "--local" option was used).1012 *1013 * If the caller already knows an existing pack it wants to take the object1014 * from, that is passed in *found_pack and *found_offset; otherwise this1015 * function finds if there is any pack that has the object and returns the pack1016 * and its offset in these variables.1017 */1018static int want_object_in_pack(const struct object_id *oid,1019 int exclude,1020 struct packed_git **found_pack,1021 off_t *found_offset)1022{1023 int want;1024 struct list_head *pos;10251026 if (!exclude && local && has_loose_object_nonlocal(oid->hash))1027 return 0;10281029 /*1030 * If we already know the pack object lives in, start checks from that1031 * pack - in the usual case when neither --local was given nor .keep files1032 * are present we will determine the answer right now.1033 */1034 if (*found_pack) {1035 want = want_found_object(exclude, *found_pack);1036 if (want != -1)1037 return want;1038 }1039 list_for_each(pos, get_packed_git_mru(the_repository)) {1040 struct packed_git *p = list_entry(pos, struct packed_git, mru);1041 off_t offset;10421043 if (p == *found_pack)1044 offset = *found_offset;1045 else1046 offset = find_pack_entry_one(oid->hash, p);10471048 if (offset) {1049 if (!*found_pack) {1050 if (!is_pack_valid(p))1051 continue;1052 *found_offset = offset;1053 *found_pack = p;1054 }1055 want = want_found_object(exclude, p);1056 if (!exclude && want > 0)1057 list_move(&p->mru,1058 get_packed_git_mru(the_repository));1059 if (want != -1)1060 return want;1061 }1062 }10631064 return 1;1065}10661067static void create_object_entry(const struct object_id *oid,1068 enum object_type type,1069 uint32_t hash,1070 int exclude,1071 int no_try_delta,1072 uint32_t index_pos,1073 struct packed_git *found_pack,1074 off_t found_offset)1075{1076 struct object_entry *entry;10771078 entry = packlist_alloc(&to_pack, oid->hash, index_pos);1079 entry->hash = hash;1080 oe_set_type(entry, type);1081 if (exclude)1082 entry->preferred_base = 1;1083 else1084 nr_result++;1085 if (found_pack) {1086 oe_set_in_pack(&to_pack, entry, found_pack);1087 entry->in_pack_offset = found_offset;1088 }10891090 entry->no_try_delta = no_try_delta;1091}10921093static const char no_closure_warning[] = N_(1094"disabling bitmap writing, as some objects are not being packed"1095);10961097static int add_object_entry(const struct object_id *oid, enum object_type type,1098 const char *name, int exclude)1099{1100 struct packed_git *found_pack = NULL;1101 off_t found_offset = 0;1102 uint32_t index_pos;11031104 if (have_duplicate_entry(oid, exclude, &index_pos))1105 return 0;11061107 if (!want_object_in_pack(oid, exclude, &found_pack, &found_offset)) {1108 /* The pack is missing an object, so it will not have closure */1109 if (write_bitmap_index) {1110 warning(_(no_closure_warning));1111 write_bitmap_index = 0;1112 }1113 return 0;1114 }11151116 create_object_entry(oid, type, pack_name_hash(name),1117 exclude, name && no_try_delta(name),1118 index_pos, found_pack, found_offset);11191120 display_progress(progress_state, nr_result);1121 return 1;1122}11231124static int add_object_entry_from_bitmap(const struct object_id *oid,1125 enum object_type type,1126 int flags, uint32_t name_hash,1127 struct packed_git *pack, off_t offset)1128{1129 uint32_t index_pos;11301131 if (have_duplicate_entry(oid, 0, &index_pos))1132 return 0;11331134 if (!want_object_in_pack(oid, 0, &pack, &offset))1135 return 0;11361137 create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);11381139 display_progress(progress_state, nr_result);1140 return 1;1141}11421143struct pbase_tree_cache {1144 struct object_id oid;1145 int ref;1146 int temporary;1147 void *tree_data;1148 unsigned long tree_size;1149};11501151static struct pbase_tree_cache *(pbase_tree_cache[256]);1152static int pbase_tree_cache_ix(const struct object_id *oid)1153{1154 return oid->hash[0] % ARRAY_SIZE(pbase_tree_cache);1155}1156static int pbase_tree_cache_ix_incr(int ix)1157{1158 return (ix+1) % ARRAY_SIZE(pbase_tree_cache);1159}11601161static struct pbase_tree {1162 struct pbase_tree *next;1163 /* This is a phony "cache" entry; we are not1164 * going to evict it or find it through _get()1165 * mechanism -- this is for the toplevel node that1166 * would almost always change with any commit.1167 */1168 struct pbase_tree_cache pcache;1169} *pbase_tree;11701171static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)1172{1173 struct pbase_tree_cache *ent, *nent;1174 void *data;1175 unsigned long size;1176 enum object_type type;1177 int neigh;1178 int my_ix = pbase_tree_cache_ix(oid);1179 int available_ix = -1;11801181 /* pbase-tree-cache acts as a limited hashtable.1182 * your object will be found at your index or within a few1183 * slots after that slot if it is cached.1184 */1185 for (neigh = 0; neigh < 8; neigh++) {1186 ent = pbase_tree_cache[my_ix];1187 if (ent && !oidcmp(&ent->oid, oid)) {1188 ent->ref++;1189 return ent;1190 }1191 else if (((available_ix < 0) && (!ent || !ent->ref)) ||1192 ((0 <= available_ix) &&1193 (!ent && pbase_tree_cache[available_ix])))1194 available_ix = my_ix;1195 if (!ent)1196 break;1197 my_ix = pbase_tree_cache_ix_incr(my_ix);1198 }11991200 /* Did not find one. Either we got a bogus request or1201 * we need to read and perhaps cache.1202 */1203 data = read_object_file(oid, &type, &size);1204 if (!data)1205 return NULL;1206 if (type != OBJ_TREE) {1207 free(data);1208 return NULL;1209 }12101211 /* We need to either cache or return a throwaway copy */12121213 if (available_ix < 0)1214 ent = NULL;1215 else {1216 ent = pbase_tree_cache[available_ix];1217 my_ix = available_ix;1218 }12191220 if (!ent) {1221 nent = xmalloc(sizeof(*nent));1222 nent->temporary = (available_ix < 0);1223 }1224 else {1225 /* evict and reuse */1226 free(ent->tree_data);1227 nent = ent;1228 }1229 oidcpy(&nent->oid, oid);1230 nent->tree_data = data;1231 nent->tree_size = size;1232 nent->ref = 1;1233 if (!nent->temporary)1234 pbase_tree_cache[my_ix] = nent;1235 return nent;1236}12371238static void pbase_tree_put(struct pbase_tree_cache *cache)1239{1240 if (!cache->temporary) {1241 cache->ref--;1242 return;1243 }1244 free(cache->tree_data);1245 free(cache);1246}12471248static int name_cmp_len(const char *name)1249{1250 int i;1251 for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)1252 ;1253 return i;1254}12551256static void add_pbase_object(struct tree_desc *tree,1257 const char *name,1258 int cmplen,1259 const char *fullname)1260{1261 struct name_entry entry;1262 int cmp;12631264 while (tree_entry(tree,&entry)) {1265 if (S_ISGITLINK(entry.mode))1266 continue;1267 cmp = tree_entry_len(&entry) != cmplen ? 1 :1268 memcmp(name, entry.path, cmplen);1269 if (cmp > 0)1270 continue;1271 if (cmp < 0)1272 return;1273 if (name[cmplen] != '/') {1274 add_object_entry(entry.oid,1275 object_type(entry.mode),1276 fullname, 1);1277 return;1278 }1279 if (S_ISDIR(entry.mode)) {1280 struct tree_desc sub;1281 struct pbase_tree_cache *tree;1282 const char *down = name+cmplen+1;1283 int downlen = name_cmp_len(down);12841285 tree = pbase_tree_get(entry.oid);1286 if (!tree)1287 return;1288 init_tree_desc(&sub, tree->tree_data, tree->tree_size);12891290 add_pbase_object(&sub, down, downlen, fullname);1291 pbase_tree_put(tree);1292 }1293 }1294}12951296static unsigned *done_pbase_paths;1297static int done_pbase_paths_num;1298static int done_pbase_paths_alloc;1299static int done_pbase_path_pos(unsigned hash)1300{1301 int lo = 0;1302 int hi = done_pbase_paths_num;1303 while (lo < hi) {1304 int mi = lo + (hi - lo) / 2;1305 if (done_pbase_paths[mi] == hash)1306 return mi;1307 if (done_pbase_paths[mi] < hash)1308 hi = mi;1309 else1310 lo = mi + 1;1311 }1312 return -lo-1;1313}13141315static int check_pbase_path(unsigned hash)1316{1317 int pos = done_pbase_path_pos(hash);1318 if (0 <= pos)1319 return 1;1320 pos = -pos - 1;1321 ALLOC_GROW(done_pbase_paths,1322 done_pbase_paths_num + 1,1323 done_pbase_paths_alloc);1324 done_pbase_paths_num++;1325 if (pos < done_pbase_paths_num)1326 MOVE_ARRAY(done_pbase_paths + pos + 1, done_pbase_paths + pos,1327 done_pbase_paths_num - pos - 1);1328 done_pbase_paths[pos] = hash;1329 return 0;1330}13311332static void add_preferred_base_object(const char *name)1333{1334 struct pbase_tree *it;1335 int cmplen;1336 unsigned hash = pack_name_hash(name);13371338 if (!num_preferred_base || check_pbase_path(hash))1339 return;13401341 cmplen = name_cmp_len(name);1342 for (it = pbase_tree; it; it = it->next) {1343 if (cmplen == 0) {1344 add_object_entry(&it->pcache.oid, OBJ_TREE, NULL, 1);1345 }1346 else {1347 struct tree_desc tree;1348 init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);1349 add_pbase_object(&tree, name, cmplen, name);1350 }1351 }1352}13531354static void add_preferred_base(struct object_id *oid)1355{1356 struct pbase_tree *it;1357 void *data;1358 unsigned long size;1359 struct object_id tree_oid;13601361 if (window <= num_preferred_base++)1362 return;13631364 data = read_object_with_reference(oid, tree_type, &size, &tree_oid);1365 if (!data)1366 return;13671368 for (it = pbase_tree; it; it = it->next) {1369 if (!oidcmp(&it->pcache.oid, &tree_oid)) {1370 free(data);1371 return;1372 }1373 }13741375 it = xcalloc(1, sizeof(*it));1376 it->next = pbase_tree;1377 pbase_tree = it;13781379 oidcpy(&it->pcache.oid, &tree_oid);1380 it->pcache.tree_data = data;1381 it->pcache.tree_size = size;1382}13831384static void cleanup_preferred_base(void)1385{1386 struct pbase_tree *it;1387 unsigned i;13881389 it = pbase_tree;1390 pbase_tree = NULL;1391 while (it) {1392 struct pbase_tree *tmp = it;1393 it = tmp->next;1394 free(tmp->pcache.tree_data);1395 free(tmp);1396 }13971398 for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {1399 if (!pbase_tree_cache[i])1400 continue;1401 free(pbase_tree_cache[i]->tree_data);1402 FREE_AND_NULL(pbase_tree_cache[i]);1403 }14041405 FREE_AND_NULL(done_pbase_paths);1406 done_pbase_paths_num = done_pbase_paths_alloc = 0;1407}14081409static void check_object(struct object_entry *entry)1410{1411 if (IN_PACK(entry)) {1412 struct packed_git *p = IN_PACK(entry);1413 struct pack_window *w_curs = NULL;1414 const unsigned char *base_ref = NULL;1415 struct object_entry *base_entry;1416 unsigned long used, used_0;1417 unsigned long avail;1418 off_t ofs;1419 unsigned char *buf, c;1420 enum object_type type;1421 unsigned long in_pack_size;14221423 buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);14241425 /*1426 * We want in_pack_type even if we do not reuse delta1427 * since non-delta representations could still be reused.1428 */1429 used = unpack_object_header_buffer(buf, avail,1430 &type,1431 &in_pack_size);1432 if (used == 0)1433 goto give_up;14341435 if (type < 0)1436 BUG("invalid type %d", type);1437 entry->in_pack_type = type;14381439 /*1440 * Determine if this is a delta and if so whether we can1441 * reuse it or not. Otherwise let's find out as cheaply as1442 * possible what the actual type and size for this object is.1443 */1444 switch (entry->in_pack_type) {1445 default:1446 /* Not a delta hence we've already got all we need. */1447 oe_set_type(entry, entry->in_pack_type);1448 entry->size = in_pack_size;1449 entry->in_pack_header_size = used;1450 if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)1451 goto give_up;1452 unuse_pack(&w_curs);1453 return;1454 case OBJ_REF_DELTA:1455 if (reuse_delta && !entry->preferred_base)1456 base_ref = use_pack(p, &w_curs,1457 entry->in_pack_offset + used, NULL);1458 entry->in_pack_header_size = used + 20;1459 break;1460 case OBJ_OFS_DELTA:1461 buf = use_pack(p, &w_curs,1462 entry->in_pack_offset + used, NULL);1463 used_0 = 0;1464 c = buf[used_0++];1465 ofs = c & 127;1466 while (c & 128) {1467 ofs += 1;1468 if (!ofs || MSB(ofs, 7)) {1469 error("delta base offset overflow in pack for %s",1470 oid_to_hex(&entry->idx.oid));1471 goto give_up;1472 }1473 c = buf[used_0++];1474 ofs = (ofs << 7) + (c & 127);1475 }1476 ofs = entry->in_pack_offset - ofs;1477 if (ofs <= 0 || ofs >= entry->in_pack_offset) {1478 error("delta base offset out of bound for %s",1479 oid_to_hex(&entry->idx.oid));1480 goto give_up;1481 }1482 if (reuse_delta && !entry->preferred_base) {1483 struct revindex_entry *revidx;1484 revidx = find_pack_revindex(p, ofs);1485 if (!revidx)1486 goto give_up;1487 base_ref = nth_packed_object_sha1(p, revidx->nr);1488 }1489 entry->in_pack_header_size = used + used_0;1490 break;1491 }14921493 if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {1494 /*1495 * If base_ref was set above that means we wish to1496 * reuse delta data, and we even found that base1497 * in the list of objects we want to pack. Goodie!1498 *1499 * Depth value does not matter - find_deltas() will1500 * never consider reused delta as the base object to1501 * deltify other objects against, in order to avoid1502 * circular deltas.1503 */1504 oe_set_type(entry, entry->in_pack_type);1505 entry->size = in_pack_size; /* delta size */1506 SET_DELTA(entry, base_entry);1507 entry->delta_size = entry->size;1508 entry->delta_sibling_idx = base_entry->delta_child_idx;1509 SET_DELTA_CHILD(base_entry, entry);1510 unuse_pack(&w_curs);1511 return;1512 }15131514 if (oe_type(entry)) {1515 off_t delta_pos;15161517 /*1518 * This must be a delta and we already know what the1519 * final object type is. Let's extract the actual1520 * object size from the delta header.1521 */1522 delta_pos = entry->in_pack_offset + entry->in_pack_header_size;1523 entry->size = get_size_from_delta(p, &w_curs, delta_pos);1524 if (entry->size == 0)1525 goto give_up;1526 unuse_pack(&w_curs);1527 return;1528 }15291530 /*1531 * No choice but to fall back to the recursive delta walk1532 * with sha1_object_info() to find about the object type1533 * at this point...1534 */1535 give_up:1536 unuse_pack(&w_curs);1537 }15381539 oe_set_type(entry, oid_object_info(&entry->idx.oid, &entry->size));1540 /*1541 * The error condition is checked in prepare_pack(). This is1542 * to permit a missing preferred base object to be ignored1543 * as a preferred base. Doing so can result in a larger1544 * pack file, but the transfer will still take place.1545 */1546}15471548static int pack_offset_sort(const void *_a, const void *_b)1549{1550 const struct object_entry *a = *(struct object_entry **)_a;1551 const struct object_entry *b = *(struct object_entry **)_b;1552 const struct packed_git *a_in_pack = IN_PACK(a);1553 const struct packed_git *b_in_pack = IN_PACK(b);15541555 /* avoid filesystem trashing with loose objects */1556 if (!a_in_pack && !b_in_pack)1557 return oidcmp(&a->idx.oid, &b->idx.oid);15581559 if (a_in_pack < b_in_pack)1560 return -1;1561 if (a_in_pack > b_in_pack)1562 return 1;1563 return a->in_pack_offset < b->in_pack_offset ? -1 :1564 (a->in_pack_offset > b->in_pack_offset);1565}15661567/*1568 * Drop an on-disk delta we were planning to reuse. Naively, this would1569 * just involve blanking out the "delta" field, but we have to deal1570 * with some extra book-keeping:1571 *1572 * 1. Removing ourselves from the delta_sibling linked list.1573 *1574 * 2. Updating our size/type to the non-delta representation. These were1575 * either not recorded initially (size) or overwritten with the delta type1576 * (type) when check_object() decided to reuse the delta.1577 *1578 * 3. Resetting our delta depth, as we are now a base object.1579 */1580static void drop_reused_delta(struct object_entry *entry)1581{1582 unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;1583 struct object_info oi = OBJECT_INFO_INIT;1584 enum object_type type;15851586 while (*idx) {1587 struct object_entry *oe = &to_pack.objects[*idx - 1];15881589 if (oe == entry)1590 *idx = oe->delta_sibling_idx;1591 else1592 idx = &oe->delta_sibling_idx;1593 }1594 SET_DELTA(entry, NULL);1595 entry->depth = 0;15961597 oi.sizep = &entry->size;1598 oi.typep = &type;1599 if (packed_object_info(IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {1600 /*1601 * We failed to get the info from this pack for some reason;1602 * fall back to sha1_object_info, which may find another copy.1603 * And if that fails, the error will be recorded in oe_type(entry)1604 * and dealt with in prepare_pack().1605 */1606 oe_set_type(entry, oid_object_info(&entry->idx.oid,1607 &entry->size));1608 } else {1609 oe_set_type(entry, type);1610 }1611}16121613/*1614 * Follow the chain of deltas from this entry onward, throwing away any links1615 * that cause us to hit a cycle (as determined by the DFS state flags in1616 * the entries).1617 *1618 * We also detect too-long reused chains that would violate our --depth1619 * limit.1620 */1621static void break_delta_chains(struct object_entry *entry)1622{1623 /*1624 * The actual depth of each object we will write is stored as an int,1625 * as it cannot exceed our int "depth" limit. But before we break1626 * changes based no that limit, we may potentially go as deep as the1627 * number of objects, which is elsewhere bounded to a uint32_t.1628 */1629 uint32_t total_depth;1630 struct object_entry *cur, *next;16311632 for (cur = entry, total_depth = 0;1633 cur;1634 cur = DELTA(cur), total_depth++) {1635 if (cur->dfs_state == DFS_DONE) {1636 /*1637 * We've already seen this object and know it isn't1638 * part of a cycle. We do need to append its depth1639 * to our count.1640 */1641 total_depth += cur->depth;1642 break;1643 }16441645 /*1646 * We break cycles before looping, so an ACTIVE state (or any1647 * other cruft which made its way into the state variable)1648 * is a bug.1649 */1650 if (cur->dfs_state != DFS_NONE)1651 die("BUG: confusing delta dfs state in first pass: %d",1652 cur->dfs_state);16531654 /*1655 * Now we know this is the first time we've seen the object. If1656 * it's not a delta, we're done traversing, but we'll mark it1657 * done to save time on future traversals.1658 */1659 if (!DELTA(cur)) {1660 cur->dfs_state = DFS_DONE;1661 break;1662 }16631664 /*1665 * Mark ourselves as active and see if the next step causes1666 * us to cycle to another active object. It's important to do1667 * this _before_ we loop, because it impacts where we make the1668 * cut, and thus how our total_depth counter works.1669 * E.g., We may see a partial loop like:1670 *1671 * A -> B -> C -> D -> B1672 *1673 * Cutting B->C breaks the cycle. But now the depth of A is1674 * only 1, and our total_depth counter is at 3. The size of the1675 * error is always one less than the size of the cycle we1676 * broke. Commits C and D were "lost" from A's chain.1677 *1678 * If we instead cut D->B, then the depth of A is correct at 3.1679 * We keep all commits in the chain that we examined.1680 */1681 cur->dfs_state = DFS_ACTIVE;1682 if (DELTA(cur)->dfs_state == DFS_ACTIVE) {1683 drop_reused_delta(cur);1684 cur->dfs_state = DFS_DONE;1685 break;1686 }1687 }16881689 /*1690 * And now that we've gone all the way to the bottom of the chain, we1691 * need to clear the active flags and set the depth fields as1692 * appropriate. Unlike the loop above, which can quit when it drops a1693 * delta, we need to keep going to look for more depth cuts. So we need1694 * an extra "next" pointer to keep going after we reset cur->delta.1695 */1696 for (cur = entry; cur; cur = next) {1697 next = DELTA(cur);16981699 /*1700 * We should have a chain of zero or more ACTIVE states down to1701 * a final DONE. We can quit after the DONE, because either it1702 * has no bases, or we've already handled them in a previous1703 * call.1704 */1705 if (cur->dfs_state == DFS_DONE)1706 break;1707 else if (cur->dfs_state != DFS_ACTIVE)1708 die("BUG: confusing delta dfs state in second pass: %d",1709 cur->dfs_state);17101711 /*1712 * If the total_depth is more than depth, then we need to snip1713 * the chain into two or more smaller chains that don't exceed1714 * the maximum depth. Most of the resulting chains will contain1715 * (depth + 1) entries (i.e., depth deltas plus one base), and1716 * the last chain (i.e., the one containing entry) will contain1717 * whatever entries are left over, namely1718 * (total_depth % (depth + 1)) of them.1719 *1720 * Since we are iterating towards decreasing depth, we need to1721 * decrement total_depth as we go, and we need to write to the1722 * entry what its final depth will be after all of the1723 * snipping. Since we're snipping into chains of length (depth1724 * + 1) entries, the final depth of an entry will be its1725 * original depth modulo (depth + 1). Any time we encounter an1726 * entry whose final depth is supposed to be zero, we snip it1727 * from its delta base, thereby making it so.1728 */1729 cur->depth = (total_depth--) % (depth + 1);1730 if (!cur->depth)1731 drop_reused_delta(cur);17321733 cur->dfs_state = DFS_DONE;1734 }1735}17361737static void get_object_details(void)1738{1739 uint32_t i;1740 struct object_entry **sorted_by_offset;17411742 sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));1743 for (i = 0; i < to_pack.nr_objects; i++)1744 sorted_by_offset[i] = to_pack.objects + i;1745 QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);17461747 for (i = 0; i < to_pack.nr_objects; i++) {1748 struct object_entry *entry = sorted_by_offset[i];1749 check_object(entry);1750 if (entry->type_valid && big_file_threshold < entry->size)1751 entry->no_try_delta = 1;1752 }17531754 /*1755 * This must happen in a second pass, since we rely on the delta1756 * information for the whole list being completed.1757 */1758 for (i = 0; i < to_pack.nr_objects; i++)1759 break_delta_chains(&to_pack.objects[i]);17601761 free(sorted_by_offset);1762}17631764/*1765 * We search for deltas in a list sorted by type, by filename hash, and then1766 * by size, so that we see progressively smaller and smaller files.1767 * That's because we prefer deltas to be from the bigger file1768 * to the smaller -- deletes are potentially cheaper, but perhaps1769 * more importantly, the bigger file is likely the more recent1770 * one. The deepest deltas are therefore the oldest objects which are1771 * less susceptible to be accessed often.1772 */1773static int type_size_sort(const void *_a, const void *_b)1774{1775 const struct object_entry *a = *(struct object_entry **)_a;1776 const struct object_entry *b = *(struct object_entry **)_b;1777 enum object_type a_type = oe_type(a);1778 enum object_type b_type = oe_type(b);17791780 if (a_type > b_type)1781 return -1;1782 if (a_type < b_type)1783 return 1;1784 if (a->hash > b->hash)1785 return -1;1786 if (a->hash < b->hash)1787 return 1;1788 if (a->preferred_base > b->preferred_base)1789 return -1;1790 if (a->preferred_base < b->preferred_base)1791 return 1;1792 if (a->size > b->size)1793 return -1;1794 if (a->size < b->size)1795 return 1;1796 return a < b ? -1 : (a > b); /* newest first */1797}17981799struct unpacked {1800 struct object_entry *entry;1801 void *data;1802 struct delta_index *index;1803 unsigned depth;1804};18051806static int delta_cacheable(unsigned long src_size, unsigned long trg_size,1807 unsigned long delta_size)1808{1809 if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)1810 return 0;18111812 if (delta_size < cache_max_small_delta_size)1813 return 1;18141815 /* cache delta, if objects are large enough compared to delta size */1816 if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))1817 return 1;18181819 return 0;1820}18211822#ifndef NO_PTHREADS18231824static pthread_mutex_t read_mutex;1825#define read_lock() pthread_mutex_lock(&read_mutex)1826#define read_unlock() pthread_mutex_unlock(&read_mutex)18271828static pthread_mutex_t cache_mutex;1829#define cache_lock() pthread_mutex_lock(&cache_mutex)1830#define cache_unlock() pthread_mutex_unlock(&cache_mutex)18311832static pthread_mutex_t progress_mutex;1833#define progress_lock() pthread_mutex_lock(&progress_mutex)1834#define progress_unlock() pthread_mutex_unlock(&progress_mutex)18351836#else18371838#define read_lock() (void)01839#define read_unlock() (void)01840#define cache_lock() (void)01841#define cache_unlock() (void)01842#define progress_lock() (void)01843#define progress_unlock() (void)018441845#endif18461847static int try_delta(struct unpacked *trg, struct unpacked *src,1848 unsigned max_depth, unsigned long *mem_usage)1849{1850 struct object_entry *trg_entry = trg->entry;1851 struct object_entry *src_entry = src->entry;1852 unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;1853 unsigned ref_depth;1854 enum object_type type;1855 void *delta_buf;18561857 /* Don't bother doing diffs between different types */1858 if (oe_type(trg_entry) != oe_type(src_entry))1859 return -1;18601861 /*1862 * We do not bother to try a delta that we discarded on an1863 * earlier try, but only when reusing delta data. Note that1864 * src_entry that is marked as the preferred_base should always1865 * be considered, as even if we produce a suboptimal delta against1866 * it, we will still save the transfer cost, as we already know1867 * the other side has it and we won't send src_entry at all.1868 */1869 if (reuse_delta && IN_PACK(trg_entry) &&1870 IN_PACK(trg_entry) == IN_PACK(src_entry) &&1871 !src_entry->preferred_base &&1872 trg_entry->in_pack_type != OBJ_REF_DELTA &&1873 trg_entry->in_pack_type != OBJ_OFS_DELTA)1874 return 0;18751876 /* Let's not bust the allowed depth. */1877 if (src->depth >= max_depth)1878 return 0;18791880 /* Now some size filtering heuristics. */1881 trg_size = trg_entry->size;1882 if (!DELTA(trg_entry)) {1883 max_size = trg_size/2 - 20;1884 ref_depth = 1;1885 } else {1886 max_size = trg_entry->delta_size;1887 ref_depth = trg->depth;1888 }1889 max_size = (uint64_t)max_size * (max_depth - src->depth) /1890 (max_depth - ref_depth + 1);1891 if (max_size == 0)1892 return 0;1893 src_size = src_entry->size;1894 sizediff = src_size < trg_size ? trg_size - src_size : 0;1895 if (sizediff >= max_size)1896 return 0;1897 if (trg_size < src_size / 32)1898 return 0;18991900 /* Load data if not already done */1901 if (!trg->data) {1902 read_lock();1903 trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);1904 read_unlock();1905 if (!trg->data)1906 die("object %s cannot be read",1907 oid_to_hex(&trg_entry->idx.oid));1908 if (sz != trg_size)1909 die("object %s inconsistent object length (%lu vs %lu)",1910 oid_to_hex(&trg_entry->idx.oid), sz,1911 trg_size);1912 *mem_usage += sz;1913 }1914 if (!src->data) {1915 read_lock();1916 src->data = read_object_file(&src_entry->idx.oid, &type, &sz);1917 read_unlock();1918 if (!src->data) {1919 if (src_entry->preferred_base) {1920 static int warned = 0;1921 if (!warned++)1922 warning("object %s cannot be read",1923 oid_to_hex(&src_entry->idx.oid));1924 /*1925 * Those objects are not included in the1926 * resulting pack. Be resilient and ignore1927 * them if they can't be read, in case the1928 * pack could be created nevertheless.1929 */1930 return 0;1931 }1932 die("object %s cannot be read",1933 oid_to_hex(&src_entry->idx.oid));1934 }1935 if (sz != src_size)1936 die("object %s inconsistent object length (%lu vs %lu)",1937 oid_to_hex(&src_entry->idx.oid), sz,1938 src_size);1939 *mem_usage += sz;1940 }1941 if (!src->index) {1942 src->index = create_delta_index(src->data, src_size);1943 if (!src->index) {1944 static int warned = 0;1945 if (!warned++)1946 warning("suboptimal pack - out of memory");1947 return 0;1948 }1949 *mem_usage += sizeof_delta_index(src->index);1950 }19511952 delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);1953 if (!delta_buf)1954 return 0;19551956 if (DELTA(trg_entry)) {1957 /* Prefer only shallower same-sized deltas. */1958 if (delta_size == trg_entry->delta_size &&1959 src->depth + 1 >= trg->depth) {1960 free(delta_buf);1961 return 0;1962 }1963 }19641965 /*1966 * Handle memory allocation outside of the cache1967 * accounting lock. Compiler will optimize the strangeness1968 * away when NO_PTHREADS is defined.1969 */1970 free(trg_entry->delta_data);1971 cache_lock();1972 if (trg_entry->delta_data) {1973 delta_cache_size -= trg_entry->delta_size;1974 trg_entry->delta_data = NULL;1975 }1976 if (delta_cacheable(src_size, trg_size, delta_size)) {1977 delta_cache_size += delta_size;1978 cache_unlock();1979 trg_entry->delta_data = xrealloc(delta_buf, delta_size);1980 } else {1981 cache_unlock();1982 free(delta_buf);1983 }19841985 SET_DELTA(trg_entry, src_entry);1986 trg_entry->delta_size = delta_size;1987 trg->depth = src->depth + 1;19881989 return 1;1990}19911992static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)1993{1994 struct object_entry *child = DELTA_CHILD(me);1995 unsigned int m = n;1996 while (child) {1997 unsigned int c = check_delta_limit(child, n + 1);1998 if (m < c)1999 m = c;2000 child = DELTA_SIBLING(child);2001 }2002 return m;2003}20042005static unsigned long free_unpacked(struct unpacked *n)2006{2007 unsigned long freed_mem = sizeof_delta_index(n->index);2008 free_delta_index(n->index);2009 n->index = NULL;2010 if (n->data) {2011 freed_mem += n->entry->size;2012 FREE_AND_NULL(n->data);2013 }2014 n->entry = NULL;2015 n->depth = 0;2016 return freed_mem;2017}20182019static void find_deltas(struct object_entry **list, unsigned *list_size,2020 int window, int depth, unsigned *processed)2021{2022 uint32_t i, idx = 0, count = 0;2023 struct unpacked *array;2024 unsigned long mem_usage = 0;20252026 array = xcalloc(window, sizeof(struct unpacked));20272028 for (;;) {2029 struct object_entry *entry;2030 struct unpacked *n = array + idx;2031 int j, max_depth, best_base = -1;20322033 progress_lock();2034 if (!*list_size) {2035 progress_unlock();2036 break;2037 }2038 entry = *list++;2039 (*list_size)--;2040 if (!entry->preferred_base) {2041 (*processed)++;2042 display_progress(progress_state, *processed);2043 }2044 progress_unlock();20452046 mem_usage -= free_unpacked(n);2047 n->entry = entry;20482049 while (window_memory_limit &&2050 mem_usage > window_memory_limit &&2051 count > 1) {2052 uint32_t tail = (idx + window - count) % window;2053 mem_usage -= free_unpacked(array + tail);2054 count--;2055 }20562057 /* We do not compute delta to *create* objects we are not2058 * going to pack.2059 */2060 if (entry->preferred_base)2061 goto next;20622063 /*2064 * If the current object is at pack edge, take the depth the2065 * objects that depend on the current object into account2066 * otherwise they would become too deep.2067 */2068 max_depth = depth;2069 if (DELTA_CHILD(entry)) {2070 max_depth -= check_delta_limit(entry, 0);2071 if (max_depth <= 0)2072 goto next;2073 }20742075 j = window;2076 while (--j > 0) {2077 int ret;2078 uint32_t other_idx = idx + j;2079 struct unpacked *m;2080 if (other_idx >= window)2081 other_idx -= window;2082 m = array + other_idx;2083 if (!m->entry)2084 break;2085 ret = try_delta(n, m, max_depth, &mem_usage);2086 if (ret < 0)2087 break;2088 else if (ret > 0)2089 best_base = other_idx;2090 }20912092 /*2093 * If we decided to cache the delta data, then it is best2094 * to compress it right away. First because we have to do2095 * it anyway, and doing it here while we're threaded will2096 * save a lot of time in the non threaded write phase,2097 * as well as allow for caching more deltas within2098 * the same cache size limit.2099 * ...2100 * But only if not writing to stdout, since in that case2101 * the network is most likely throttling writes anyway,2102 * and therefore it is best to go to the write phase ASAP2103 * instead, as we can afford spending more time compressing2104 * between writes at that moment.2105 */2106 if (entry->delta_data && !pack_to_stdout) {2107 unsigned long size;21082109 size = do_compress(&entry->delta_data, entry->delta_size);2110 if (size < (1U << OE_Z_DELTA_BITS)) {2111 entry->z_delta_size = size;2112 cache_lock();2113 delta_cache_size -= entry->delta_size;2114 delta_cache_size += entry->z_delta_size;2115 cache_unlock();2116 } else {2117 FREE_AND_NULL(entry->delta_data);2118 entry->z_delta_size = 0;2119 }2120 }21212122 /* if we made n a delta, and if n is already at max2123 * depth, leaving it in the window is pointless. we2124 * should evict it first.2125 */2126 if (DELTA(entry) && max_depth <= n->depth)2127 continue;21282129 /*2130 * Move the best delta base up in the window, after the2131 * currently deltified object, to keep it longer. It will2132 * be the first base object to be attempted next.2133 */2134 if (DELTA(entry)) {2135 struct unpacked swap = array[best_base];2136 int dist = (window + idx - best_base) % window;2137 int dst = best_base;2138 while (dist--) {2139 int src = (dst + 1) % window;2140 array[dst] = array[src];2141 dst = src;2142 }2143 array[dst] = swap;2144 }21452146 next:2147 idx++;2148 if (count + 1 < window)2149 count++;2150 if (idx >= window)2151 idx = 0;2152 }21532154 for (i = 0; i < window; ++i) {2155 free_delta_index(array[i].index);2156 free(array[i].data);2157 }2158 free(array);2159}21602161#ifndef NO_PTHREADS21622163static void try_to_free_from_threads(size_t size)2164{2165 read_lock();2166 release_pack_memory(size);2167 read_unlock();2168}21692170static try_to_free_t old_try_to_free_routine;21712172/*2173 * The main thread waits on the condition that (at least) one of the workers2174 * has stopped working (which is indicated in the .working member of2175 * struct thread_params).2176 * When a work thread has completed its work, it sets .working to 0 and2177 * signals the main thread and waits on the condition that .data_ready2178 * becomes 1.2179 */21802181struct thread_params {2182 pthread_t thread;2183 struct object_entry **list;2184 unsigned list_size;2185 unsigned remaining;2186 int window;2187 int depth;2188 int working;2189 int data_ready;2190 pthread_mutex_t mutex;2191 pthread_cond_t cond;2192 unsigned *processed;2193};21942195static pthread_cond_t progress_cond;21962197/*2198 * Mutex and conditional variable can't be statically-initialized on Windows.2199 */2200static void init_threaded_search(void)2201{2202 init_recursive_mutex(&read_mutex);2203 pthread_mutex_init(&cache_mutex, NULL);2204 pthread_mutex_init(&progress_mutex, NULL);2205 pthread_cond_init(&progress_cond, NULL);2206 old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);2207}22082209static void cleanup_threaded_search(void)2210{2211 set_try_to_free_routine(old_try_to_free_routine);2212 pthread_cond_destroy(&progress_cond);2213 pthread_mutex_destroy(&read_mutex);2214 pthread_mutex_destroy(&cache_mutex);2215 pthread_mutex_destroy(&progress_mutex);2216}22172218static void *threaded_find_deltas(void *arg)2219{2220 struct thread_params *me = arg;22212222 progress_lock();2223 while (me->remaining) {2224 progress_unlock();22252226 find_deltas(me->list, &me->remaining,2227 me->window, me->depth, me->processed);22282229 progress_lock();2230 me->working = 0;2231 pthread_cond_signal(&progress_cond);2232 progress_unlock();22332234 /*2235 * We must not set ->data_ready before we wait on the2236 * condition because the main thread may have set it to 12237 * before we get here. In order to be sure that new2238 * work is available if we see 1 in ->data_ready, it2239 * was initialized to 0 before this thread was spawned2240 * and we reset it to 0 right away.2241 */2242 pthread_mutex_lock(&me->mutex);2243 while (!me->data_ready)2244 pthread_cond_wait(&me->cond, &me->mutex);2245 me->data_ready = 0;2246 pthread_mutex_unlock(&me->mutex);22472248 progress_lock();2249 }2250 progress_unlock();2251 /* leave ->working 1 so that this doesn't get more work assigned */2252 return NULL;2253}22542255static void ll_find_deltas(struct object_entry **list, unsigned list_size,2256 int window, int depth, unsigned *processed)2257{2258 struct thread_params *p;2259 int i, ret, active_threads = 0;22602261 init_threaded_search();22622263 if (delta_search_threads <= 1) {2264 find_deltas(list, &list_size, window, depth, processed);2265 cleanup_threaded_search();2266 return;2267 }2268 if (progress > pack_to_stdout)2269 fprintf(stderr, "Delta compression using up to %d threads.\n",2270 delta_search_threads);2271 p = xcalloc(delta_search_threads, sizeof(*p));22722273 /* Partition the work amongst work threads. */2274 for (i = 0; i < delta_search_threads; i++) {2275 unsigned sub_size = list_size / (delta_search_threads - i);22762277 /* don't use too small segments or no deltas will be found */2278 if (sub_size < 2*window && i+1 < delta_search_threads)2279 sub_size = 0;22802281 p[i].window = window;2282 p[i].depth = depth;2283 p[i].processed = processed;2284 p[i].working = 1;2285 p[i].data_ready = 0;22862287 /* try to split chunks on "path" boundaries */2288 while (sub_size && sub_size < list_size &&2289 list[sub_size]->hash &&2290 list[sub_size]->hash == list[sub_size-1]->hash)2291 sub_size++;22922293 p[i].list = list;2294 p[i].list_size = sub_size;2295 p[i].remaining = sub_size;22962297 list += sub_size;2298 list_size -= sub_size;2299 }23002301 /* Start work threads. */2302 for (i = 0; i < delta_search_threads; i++) {2303 if (!p[i].list_size)2304 continue;2305 pthread_mutex_init(&p[i].mutex, NULL);2306 pthread_cond_init(&p[i].cond, NULL);2307 ret = pthread_create(&p[i].thread, NULL,2308 threaded_find_deltas, &p[i]);2309 if (ret)2310 die("unable to create thread: %s", strerror(ret));2311 active_threads++;2312 }23132314 /*2315 * Now let's wait for work completion. Each time a thread is done2316 * with its work, we steal half of the remaining work from the2317 * thread with the largest number of unprocessed objects and give2318 * it to that newly idle thread. This ensure good load balancing2319 * until the remaining object list segments are simply too short2320 * to be worth splitting anymore.2321 */2322 while (active_threads) {2323 struct thread_params *target = NULL;2324 struct thread_params *victim = NULL;2325 unsigned sub_size = 0;23262327 progress_lock();2328 for (;;) {2329 for (i = 0; !target && i < delta_search_threads; i++)2330 if (!p[i].working)2331 target = &p[i];2332 if (target)2333 break;2334 pthread_cond_wait(&progress_cond, &progress_mutex);2335 }23362337 for (i = 0; i < delta_search_threads; i++)2338 if (p[i].remaining > 2*window &&2339 (!victim || victim->remaining < p[i].remaining))2340 victim = &p[i];2341 if (victim) {2342 sub_size = victim->remaining / 2;2343 list = victim->list + victim->list_size - sub_size;2344 while (sub_size && list[0]->hash &&2345 list[0]->hash == list[-1]->hash) {2346 list++;2347 sub_size--;2348 }2349 if (!sub_size) {2350 /*2351 * It is possible for some "paths" to have2352 * so many objects that no hash boundary2353 * might be found. Let's just steal the2354 * exact half in that case.2355 */2356 sub_size = victim->remaining / 2;2357 list -= sub_size;2358 }2359 target->list = list;2360 victim->list_size -= sub_size;2361 victim->remaining -= sub_size;2362 }2363 target->list_size = sub_size;2364 target->remaining = sub_size;2365 target->working = 1;2366 progress_unlock();23672368 pthread_mutex_lock(&target->mutex);2369 target->data_ready = 1;2370 pthread_cond_signal(&target->cond);2371 pthread_mutex_unlock(&target->mutex);23722373 if (!sub_size) {2374 pthread_join(target->thread, NULL);2375 pthread_cond_destroy(&target->cond);2376 pthread_mutex_destroy(&target->mutex);2377 active_threads--;2378 }2379 }2380 cleanup_threaded_search();2381 free(p);2382}23832384#else2385#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)2386#endif23872388static void add_tag_chain(const struct object_id *oid)2389{2390 struct tag *tag;23912392 /*2393 * We catch duplicates already in add_object_entry(), but we'd2394 * prefer to do this extra check to avoid having to parse the2395 * tag at all if we already know that it's being packed (e.g., if2396 * it was included via bitmaps, we would not have parsed it2397 * previously).2398 */2399 if (packlist_find(&to_pack, oid->hash, NULL))2400 return;24012402 tag = lookup_tag(oid);2403 while (1) {2404 if (!tag || parse_tag(tag) || !tag->tagged)2405 die("unable to pack objects reachable from tag %s",2406 oid_to_hex(oid));24072408 add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);24092410 if (tag->tagged->type != OBJ_TAG)2411 return;24122413 tag = (struct tag *)tag->tagged;2414 }2415}24162417static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)2418{2419 struct object_id peeled;24202421 if (starts_with(path, "refs/tags/") && /* is a tag? */2422 !peel_ref(path, &peeled) && /* peelable? */2423 packlist_find(&to_pack, peeled.hash, NULL)) /* object packed? */2424 add_tag_chain(oid);2425 return 0;2426}24272428static void prepare_pack(int window, int depth)2429{2430 struct object_entry **delta_list;2431 uint32_t i, nr_deltas;2432 unsigned n;24332434 get_object_details();24352436 /*2437 * If we're locally repacking then we need to be doubly careful2438 * from now on in order to make sure no stealth corruption gets2439 * propagated to the new pack. Clients receiving streamed packs2440 * should validate everything they get anyway so no need to incur2441 * the additional cost here in that case.2442 */2443 if (!pack_to_stdout)2444 do_check_packed_object_crc = 1;24452446 if (!to_pack.nr_objects || !window || !depth)2447 return;24482449 ALLOC_ARRAY(delta_list, to_pack.nr_objects);2450 nr_deltas = n = 0;24512452 for (i = 0; i < to_pack.nr_objects; i++) {2453 struct object_entry *entry = to_pack.objects + i;24542455 if (DELTA(entry))2456 /* This happens if we decided to reuse existing2457 * delta from a pack. "reuse_delta &&" is implied.2458 */2459 continue;24602461 if (!entry->type_valid || entry->size < 50)2462 continue;24632464 if (entry->no_try_delta)2465 continue;24662467 if (!entry->preferred_base) {2468 nr_deltas++;2469 if (oe_type(entry) < 0)2470 die("unable to get type of object %s",2471 oid_to_hex(&entry->idx.oid));2472 } else {2473 if (oe_type(entry) < 0) {2474 /*2475 * This object is not found, but we2476 * don't have to include it anyway.2477 */2478 continue;2479 }2480 }24812482 delta_list[n++] = entry;2483 }24842485 if (nr_deltas && n > 1) {2486 unsigned nr_done = 0;2487 if (progress)2488 progress_state = start_progress(_("Compressing objects"),2489 nr_deltas);2490 QSORT(delta_list, n, type_size_sort);2491 ll_find_deltas(delta_list, n, window+1, depth, &nr_done);2492 stop_progress(&progress_state);2493 if (nr_done != nr_deltas)2494 die("inconsistency with delta count");2495 }2496 free(delta_list);2497}24982499static int git_pack_config(const char *k, const char *v, void *cb)2500{2501 if (!strcmp(k, "pack.window")) {2502 window = git_config_int(k, v);2503 return 0;2504 }2505 if (!strcmp(k, "pack.windowmemory")) {2506 window_memory_limit = git_config_ulong(k, v);2507 return 0;2508 }2509 if (!strcmp(k, "pack.depth")) {2510 depth = git_config_int(k, v);2511 return 0;2512 }2513 if (!strcmp(k, "pack.deltacachesize")) {2514 max_delta_cache_size = git_config_int(k, v);2515 return 0;2516 }2517 if (!strcmp(k, "pack.deltacachelimit")) {2518 cache_max_small_delta_size = git_config_int(k, v);2519 return 0;2520 }2521 if (!strcmp(k, "pack.writebitmaphashcache")) {2522 if (git_config_bool(k, v))2523 write_bitmap_options |= BITMAP_OPT_HASH_CACHE;2524 else2525 write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;2526 }2527 if (!strcmp(k, "pack.usebitmaps")) {2528 use_bitmap_index_default = git_config_bool(k, v);2529 return 0;2530 }2531 if (!strcmp(k, "pack.threads")) {2532 delta_search_threads = git_config_int(k, v);2533 if (delta_search_threads < 0)2534 die("invalid number of threads specified (%d)",2535 delta_search_threads);2536#ifdef NO_PTHREADS2537 if (delta_search_threads != 1) {2538 warning("no threads support, ignoring %s", k);2539 delta_search_threads = 0;2540 }2541#endif2542 return 0;2543 }2544 if (!strcmp(k, "pack.indexversion")) {2545 pack_idx_opts.version = git_config_int(k, v);2546 if (pack_idx_opts.version > 2)2547 die("bad pack.indexversion=%"PRIu32,2548 pack_idx_opts.version);2549 return 0;2550 }2551 return git_default_config(k, v, cb);2552}25532554static void read_object_list_from_stdin(void)2555{2556 char line[GIT_MAX_HEXSZ + 1 + PATH_MAX + 2];2557 struct object_id oid;2558 const char *p;25592560 for (;;) {2561 if (!fgets(line, sizeof(line), stdin)) {2562 if (feof(stdin))2563 break;2564 if (!ferror(stdin))2565 die("fgets returned NULL, not EOF, not error!");2566 if (errno != EINTR)2567 die_errno("fgets");2568 clearerr(stdin);2569 continue;2570 }2571 if (line[0] == '-') {2572 if (get_oid_hex(line+1, &oid))2573 die("expected edge object ID, got garbage:\n %s",2574 line);2575 add_preferred_base(&oid);2576 continue;2577 }2578 if (parse_oid_hex(line, &oid, &p))2579 die("expected object ID, got garbage:\n %s", line);25802581 add_preferred_base_object(p + 1);2582 add_object_entry(&oid, OBJ_NONE, p + 1, 0);2583 }2584}25852586/* Remember to update object flag allocation in object.h */2587#define OBJECT_ADDED (1u<<20)25882589static void show_commit(struct commit *commit, void *data)2590{2591 add_object_entry(&commit->object.oid, OBJ_COMMIT, NULL, 0);2592 commit->object.flags |= OBJECT_ADDED;25932594 if (write_bitmap_index)2595 index_commit_for_bitmap(commit);2596}25972598static void show_object(struct object *obj, const char *name, void *data)2599{2600 add_preferred_base_object(name);2601 add_object_entry(&obj->oid, obj->type, name, 0);2602 obj->flags |= OBJECT_ADDED;2603}26042605static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)2606{2607 assert(arg_missing_action == MA_ALLOW_ANY);26082609 /*2610 * Quietly ignore ALL missing objects. This avoids problems with2611 * staging them now and getting an odd error later.2612 */2613 if (!has_object_file(&obj->oid))2614 return;26152616 show_object(obj, name, data);2617}26182619static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)2620{2621 assert(arg_missing_action == MA_ALLOW_PROMISOR);26222623 /*2624 * Quietly ignore EXPECTED missing objects. This avoids problems with2625 * staging them now and getting an odd error later.2626 */2627 if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))2628 return;26292630 show_object(obj, name, data);2631}26322633static int option_parse_missing_action(const struct option *opt,2634 const char *arg, int unset)2635{2636 assert(arg);2637 assert(!unset);26382639 if (!strcmp(arg, "error")) {2640 arg_missing_action = MA_ERROR;2641 fn_show_object = show_object;2642 return 0;2643 }26442645 if (!strcmp(arg, "allow-any")) {2646 arg_missing_action = MA_ALLOW_ANY;2647 fetch_if_missing = 0;2648 fn_show_object = show_object__ma_allow_any;2649 return 0;2650 }26512652 if (!strcmp(arg, "allow-promisor")) {2653 arg_missing_action = MA_ALLOW_PROMISOR;2654 fetch_if_missing = 0;2655 fn_show_object = show_object__ma_allow_promisor;2656 return 0;2657 }26582659 die(_("invalid value for --missing"));2660 return 0;2661}26622663static void show_edge(struct commit *commit)2664{2665 add_preferred_base(&commit->object.oid);2666}26672668struct in_pack_object {2669 off_t offset;2670 struct object *object;2671};26722673struct in_pack {2674 unsigned int alloc;2675 unsigned int nr;2676 struct in_pack_object *array;2677};26782679static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)2680{2681 in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);2682 in_pack->array[in_pack->nr].object = object;2683 in_pack->nr++;2684}26852686/*2687 * Compare the objects in the offset order, in order to emulate the2688 * "git rev-list --objects" output that produced the pack originally.2689 */2690static int ofscmp(const void *a_, const void *b_)2691{2692 struct in_pack_object *a = (struct in_pack_object *)a_;2693 struct in_pack_object *b = (struct in_pack_object *)b_;26942695 if (a->offset < b->offset)2696 return -1;2697 else if (a->offset > b->offset)2698 return 1;2699 else2700 return oidcmp(&a->object->oid, &b->object->oid);2701}27022703static void add_objects_in_unpacked_packs(struct rev_info *revs)2704{2705 struct packed_git *p;2706 struct in_pack in_pack;2707 uint32_t i;27082709 memset(&in_pack, 0, sizeof(in_pack));27102711 for (p = get_packed_git(the_repository); p; p = p->next) {2712 struct object_id oid;2713 struct object *o;27142715 if (!p->pack_local || p->pack_keep)2716 continue;2717 if (open_pack_index(p))2718 die("cannot open pack index");27192720 ALLOC_GROW(in_pack.array,2721 in_pack.nr + p->num_objects,2722 in_pack.alloc);27232724 for (i = 0; i < p->num_objects; i++) {2725 nth_packed_object_oid(&oid, p, i);2726 o = lookup_unknown_object(oid.hash);2727 if (!(o->flags & OBJECT_ADDED))2728 mark_in_pack_object(o, p, &in_pack);2729 o->flags |= OBJECT_ADDED;2730 }2731 }27322733 if (in_pack.nr) {2734 QSORT(in_pack.array, in_pack.nr, ofscmp);2735 for (i = 0; i < in_pack.nr; i++) {2736 struct object *o = in_pack.array[i].object;2737 add_object_entry(&o->oid, o->type, "", 0);2738 }2739 }2740 free(in_pack.array);2741}27422743static int add_loose_object(const struct object_id *oid, const char *path,2744 void *data)2745{2746 enum object_type type = oid_object_info(oid, NULL);27472748 if (type < 0) {2749 warning("loose object at %s could not be examined", path);2750 return 0;2751 }27522753 add_object_entry(oid, type, "", 0);2754 return 0;2755}27562757/*2758 * We actually don't even have to worry about reachability here.2759 * add_object_entry will weed out duplicates, so we just add every2760 * loose object we find.2761 */2762static void add_unreachable_loose_objects(void)2763{2764 for_each_loose_file_in_objdir(get_object_directory(),2765 add_loose_object,2766 NULL, NULL, NULL);2767}27682769static int has_sha1_pack_kept_or_nonlocal(const struct object_id *oid)2770{2771 static struct packed_git *last_found = (void *)1;2772 struct packed_git *p;27732774 p = (last_found != (void *)1) ? last_found :2775 get_packed_git(the_repository);27762777 while (p) {2778 if ((!p->pack_local || p->pack_keep) &&2779 find_pack_entry_one(oid->hash, p)) {2780 last_found = p;2781 return 1;2782 }2783 if (p == last_found)2784 p = get_packed_git(the_repository);2785 else2786 p = p->next;2787 if (p == last_found)2788 p = p->next;2789 }2790 return 0;2791}27922793/*2794 * Store a list of sha1s that are should not be discarded2795 * because they are either written too recently, or are2796 * reachable from another object that was.2797 *2798 * This is filled by get_object_list.2799 */2800static struct oid_array recent_objects;28012802static int loosened_object_can_be_discarded(const struct object_id *oid,2803 timestamp_t mtime)2804{2805 if (!unpack_unreachable_expiration)2806 return 0;2807 if (mtime > unpack_unreachable_expiration)2808 return 0;2809 if (oid_array_lookup(&recent_objects, oid) >= 0)2810 return 0;2811 return 1;2812}28132814static void loosen_unused_packed_objects(struct rev_info *revs)2815{2816 struct packed_git *p;2817 uint32_t i;2818 struct object_id oid;28192820 for (p = get_packed_git(the_repository); p; p = p->next) {2821 if (!p->pack_local || p->pack_keep)2822 continue;28232824 if (open_pack_index(p))2825 die("cannot open pack index");28262827 for (i = 0; i < p->num_objects; i++) {2828 nth_packed_object_oid(&oid, p, i);2829 if (!packlist_find(&to_pack, oid.hash, NULL) &&2830 !has_sha1_pack_kept_or_nonlocal(&oid) &&2831 !loosened_object_can_be_discarded(&oid, p->mtime))2832 if (force_object_loose(&oid, p->mtime))2833 die("unable to force loose object");2834 }2835 }2836}28372838/*2839 * This tracks any options which pack-reuse code expects to be on, or which a2840 * reader of the pack might not understand, and which would therefore prevent2841 * blind reuse of what we have on disk.2842 */2843static int pack_options_allow_reuse(void)2844{2845 return pack_to_stdout &&2846 allow_ofs_delta &&2847 !ignore_packed_keep &&2848 (!local || !have_non_local_packs) &&2849 !incremental;2850}28512852static int get_object_list_from_bitmap(struct rev_info *revs)2853{2854 if (prepare_bitmap_walk(revs) < 0)2855 return -1;28562857 if (pack_options_allow_reuse() &&2858 !reuse_partial_packfile_from_bitmap(2859 &reuse_packfile,2860 &reuse_packfile_objects,2861 &reuse_packfile_offset)) {2862 assert(reuse_packfile_objects);2863 nr_result += reuse_packfile_objects;2864 display_progress(progress_state, nr_result);2865 }28662867 traverse_bitmap_commit_list(&add_object_entry_from_bitmap);2868 return 0;2869}28702871static void record_recent_object(struct object *obj,2872 const char *name,2873 void *data)2874{2875 oid_array_append(&recent_objects, &obj->oid);2876}28772878static void record_recent_commit(struct commit *commit, void *data)2879{2880 oid_array_append(&recent_objects, &commit->object.oid);2881}28822883static void get_object_list(int ac, const char **av)2884{2885 struct rev_info revs;2886 char line[1000];2887 int flags = 0;28882889 init_revisions(&revs, NULL);2890 save_commit_buffer = 0;2891 setup_revisions(ac, av, &revs, NULL);28922893 /* make sure shallows are read */2894 is_repository_shallow();28952896 while (fgets(line, sizeof(line), stdin) != NULL) {2897 int len = strlen(line);2898 if (len && line[len - 1] == '\n')2899 line[--len] = 0;2900 if (!len)2901 break;2902 if (*line == '-') {2903 if (!strcmp(line, "--not")) {2904 flags ^= UNINTERESTING;2905 write_bitmap_index = 0;2906 continue;2907 }2908 if (starts_with(line, "--shallow ")) {2909 struct object_id oid;2910 if (get_oid_hex(line + 10, &oid))2911 die("not an SHA-1 '%s'", line + 10);2912 register_shallow(&oid);2913 use_bitmap_index = 0;2914 continue;2915 }2916 die("not a rev '%s'", line);2917 }2918 if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))2919 die("bad revision '%s'", line);2920 }29212922 if (use_bitmap_index && !get_object_list_from_bitmap(&revs))2923 return;29242925 if (prepare_revision_walk(&revs))2926 die("revision walk setup failed");2927 mark_edges_uninteresting(&revs, show_edge);29282929 if (!fn_show_object)2930 fn_show_object = show_object;2931 traverse_commit_list_filtered(&filter_options, &revs,2932 show_commit, fn_show_object, NULL,2933 NULL);29342935 if (unpack_unreachable_expiration) {2936 revs.ignore_missing_links = 1;2937 if (add_unseen_recent_objects_to_traversal(&revs,2938 unpack_unreachable_expiration))2939 die("unable to add recent objects");2940 if (prepare_revision_walk(&revs))2941 die("revision walk setup failed");2942 traverse_commit_list(&revs, record_recent_commit,2943 record_recent_object, NULL);2944 }29452946 if (keep_unreachable)2947 add_objects_in_unpacked_packs(&revs);2948 if (pack_loose_unreachable)2949 add_unreachable_loose_objects();2950 if (unpack_unreachable)2951 loosen_unused_packed_objects(&revs);29522953 oid_array_clear(&recent_objects);2954}29552956static int option_parse_index_version(const struct option *opt,2957 const char *arg, int unset)2958{2959 char *c;2960 const char *val = arg;2961 pack_idx_opts.version = strtoul(val, &c, 10);2962 if (pack_idx_opts.version > 2)2963 die(_("unsupported index version %s"), val);2964 if (*c == ',' && c[1])2965 pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);2966 if (*c || pack_idx_opts.off32_limit & 0x80000000)2967 die(_("bad index version '%s'"), val);2968 return 0;2969}29702971static int option_parse_unpack_unreachable(const struct option *opt,2972 const char *arg, int unset)2973{2974 if (unset) {2975 unpack_unreachable = 0;2976 unpack_unreachable_expiration = 0;2977 }2978 else {2979 unpack_unreachable = 1;2980 if (arg)2981 unpack_unreachable_expiration = approxidate(arg);2982 }2983 return 0;2984}29852986int cmd_pack_objects(int argc, const char **argv, const char *prefix)2987{2988 int use_internal_rev_list = 0;2989 int thin = 0;2990 int shallow = 0;2991 int all_progress_implied = 0;2992 struct argv_array rp = ARGV_ARRAY_INIT;2993 int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;2994 int rev_list_index = 0;2995 struct option pack_objects_options[] = {2996 OPT_SET_INT('q', "quiet", &progress,2997 N_("do not show progress meter"), 0),2998 OPT_SET_INT(0, "progress", &progress,2999 N_("show progress meter"), 1),3000 OPT_SET_INT(0, "all-progress", &progress,3001 N_("show progress meter during object writing phase"), 2),3002 OPT_BOOL(0, "all-progress-implied",3003 &all_progress_implied,3004 N_("similar to --all-progress when progress meter is shown")),3005 { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),3006 N_("write the pack index file in the specified idx format version"),3007 0, option_parse_index_version },3008 OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,3009 N_("maximum size of each output pack file")),3010 OPT_BOOL(0, "local", &local,3011 N_("ignore borrowed objects from alternate object store")),3012 OPT_BOOL(0, "incremental", &incremental,3013 N_("ignore packed objects")),3014 OPT_INTEGER(0, "window", &window,3015 N_("limit pack window by objects")),3016 OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,3017 N_("limit pack window by memory in addition to object limit")),3018 OPT_INTEGER(0, "depth", &depth,3019 N_("maximum length of delta chain allowed in the resulting pack")),3020 OPT_BOOL(0, "reuse-delta", &reuse_delta,3021 N_("reuse existing deltas")),3022 OPT_BOOL(0, "reuse-object", &reuse_object,3023 N_("reuse existing objects")),3024 OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,3025 N_("use OFS_DELTA objects")),3026 OPT_INTEGER(0, "threads", &delta_search_threads,3027 N_("use threads when searching for best delta matches")),3028 OPT_BOOL(0, "non-empty", &non_empty,3029 N_("do not create an empty pack output")),3030 OPT_BOOL(0, "revs", &use_internal_rev_list,3031 N_("read revision arguments from standard input")),3032 { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,3033 N_("limit the objects to those that are not yet packed"),3034 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },3035 { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,3036 N_("include objects reachable from any reference"),3037 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },3038 { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,3039 N_("include objects referred by reflog entries"),3040 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },3041 { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,3042 N_("include objects referred to by the index"),3043 PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },3044 OPT_BOOL(0, "stdout", &pack_to_stdout,3045 N_("output pack to stdout")),3046 OPT_BOOL(0, "include-tag", &include_tag,3047 N_("include tag objects that refer to objects to be packed")),3048 OPT_BOOL(0, "keep-unreachable", &keep_unreachable,3049 N_("keep unreachable objects")),3050 OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,3051 N_("pack loose unreachable objects")),3052 { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),3053 N_("unpack unreachable objects newer than <time>"),3054 PARSE_OPT_OPTARG, option_parse_unpack_unreachable },3055 OPT_BOOL(0, "thin", &thin,3056 N_("create thin packs")),3057 OPT_BOOL(0, "shallow", &shallow,3058 N_("create packs suitable for shallow fetches")),3059 OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,3060 N_("ignore packs that have companion .keep file")),3061 OPT_INTEGER(0, "compression", &pack_compression_level,3062 N_("pack compression level")),3063 OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,3064 N_("do not hide commits by grafts"), 0),3065 OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,3066 N_("use a bitmap index if available to speed up counting objects")),3067 OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,3068 N_("write a bitmap index together with the pack index")),3069 OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),3070 { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),3071 N_("handling for missing objects"), PARSE_OPT_NONEG,3072 option_parse_missing_action },3073 OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,3074 N_("do not pack objects in promisor packfiles")),3075 OPT_END(),3076 };30773078 if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))3079 BUG("too many dfs states, increase OE_DFS_STATE_BITS");30803081 check_replace_refs = 0;30823083 reset_pack_idx_option(&pack_idx_opts);3084 git_config(git_pack_config, NULL);30853086 progress = isatty(2);3087 argc = parse_options(argc, argv, prefix, pack_objects_options,3088 pack_usage, 0);30893090 if (argc) {3091 base_name = argv[0];3092 argc--;3093 }3094 if (pack_to_stdout != !base_name || argc)3095 usage_with_options(pack_usage, pack_objects_options);30963097 if (depth >= (1 << OE_DEPTH_BITS)) {3098 warning(_("delta chain depth %d is too deep, forcing %d"),3099 depth, (1 << OE_DEPTH_BITS) - 1);3100 depth = (1 << OE_DEPTH_BITS) - 1;3101 }3102 if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {3103 warning(_("pack.deltaCacheLimit is too high, forcing %d"),3104 (1U << OE_Z_DELTA_BITS) - 1);3105 cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;3106 }31073108 argv_array_push(&rp, "pack-objects");3109 if (thin) {3110 use_internal_rev_list = 1;3111 argv_array_push(&rp, shallow3112 ? "--objects-edge-aggressive"3113 : "--objects-edge");3114 } else3115 argv_array_push(&rp, "--objects");31163117 if (rev_list_all) {3118 use_internal_rev_list = 1;3119 argv_array_push(&rp, "--all");3120 }3121 if (rev_list_reflog) {3122 use_internal_rev_list = 1;3123 argv_array_push(&rp, "--reflog");3124 }3125 if (rev_list_index) {3126 use_internal_rev_list = 1;3127 argv_array_push(&rp, "--indexed-objects");3128 }3129 if (rev_list_unpacked) {3130 use_internal_rev_list = 1;3131 argv_array_push(&rp, "--unpacked");3132 }31333134 if (exclude_promisor_objects) {3135 use_internal_rev_list = 1;3136 fetch_if_missing = 0;3137 argv_array_push(&rp, "--exclude-promisor-objects");3138 }31393140 if (!reuse_object)3141 reuse_delta = 0;3142 if (pack_compression_level == -1)3143 pack_compression_level = Z_DEFAULT_COMPRESSION;3144 else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)3145 die("bad pack compression level %d", pack_compression_level);31463147 if (!delta_search_threads) /* --threads=0 means autodetect */3148 delta_search_threads = online_cpus();31493150#ifdef NO_PTHREADS3151 if (delta_search_threads != 1)3152 warning("no threads support, ignoring --threads");3153#endif3154 if (!pack_to_stdout && !pack_size_limit)3155 pack_size_limit = pack_size_limit_cfg;3156 if (pack_to_stdout && pack_size_limit)3157 die("--max-pack-size cannot be used to build a pack for transfer.");3158 if (pack_size_limit && pack_size_limit < 1024*1024) {3159 warning("minimum pack size limit is 1 MiB");3160 pack_size_limit = 1024*1024;3161 }31623163 if (!pack_to_stdout && thin)3164 die("--thin cannot be used to build an indexable pack.");31653166 if (keep_unreachable && unpack_unreachable)3167 die("--keep-unreachable and --unpack-unreachable are incompatible.");3168 if (!rev_list_all || !rev_list_reflog || !rev_list_index)3169 unpack_unreachable_expiration = 0;31703171 if (filter_options.choice) {3172 if (!pack_to_stdout)3173 die("cannot use --filter without --stdout.");3174 use_bitmap_index = 0;3175 }31763177 /*3178 * "soft" reasons not to use bitmaps - for on-disk repack by default we want3179 *3180 * - to produce good pack (with bitmap index not-yet-packed objects are3181 * packed in suboptimal order).3182 *3183 * - to use more robust pack-generation codepath (avoiding possible3184 * bugs in bitmap code and possible bitmap index corruption).3185 */3186 if (!pack_to_stdout)3187 use_bitmap_index_default = 0;31883189 if (use_bitmap_index < 0)3190 use_bitmap_index = use_bitmap_index_default;31913192 /* "hard" reasons not to use bitmaps; these just won't work at all */3193 if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())3194 use_bitmap_index = 0;31953196 if (pack_to_stdout || !rev_list_all)3197 write_bitmap_index = 0;31983199 if (progress && all_progress_implied)3200 progress = 2;32013202 if (ignore_packed_keep) {3203 struct packed_git *p;3204 for (p = get_packed_git(the_repository); p; p = p->next)3205 if (p->pack_local && p->pack_keep)3206 break;3207 if (!p) /* no keep-able packs found */3208 ignore_packed_keep = 0;3209 }3210 if (local) {3211 /*3212 * unlike ignore_packed_keep above, we do not want to3213 * unset "local" based on looking at packs, as it3214 * also covers non-local objects3215 */3216 struct packed_git *p;3217 for (p = get_packed_git(the_repository); p; p = p->next) {3218 if (!p->pack_local) {3219 have_non_local_packs = 1;3220 break;3221 }3222 }3223 }32243225 prepare_packing_data(&to_pack);32263227 if (progress)3228 progress_state = start_progress(_("Counting objects"), 0);3229 if (!use_internal_rev_list)3230 read_object_list_from_stdin();3231 else {3232 get_object_list(rp.argc, rp.argv);3233 argv_array_clear(&rp);3234 }3235 cleanup_preferred_base();3236 if (include_tag && nr_result)3237 for_each_ref(add_ref_tag, NULL);3238 stop_progress(&progress_state);32393240 if (non_empty && !nr_result)3241 return 0;3242 if (nr_result)3243 prepare_pack(window, depth);3244 write_pack_file();3245 if (progress)3246 fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"3247 " reused %"PRIu32" (delta %"PRIu32")\n",3248 written, written_delta, reused, reused_delta);3249 return 0;3250}