#include "builtin.h"
#include "cache.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "progress.h"
#include "refs.h"
#include "thread-utils.h"

static const char pack_usage[] =
	"git pack-objects [ -q | --progress | --all-progress ]\n"
	"        [--all-progress-implied]\n"
	"        [--max-pack-size=<n>] [--local] [--incremental]\n"
	"        [--window=<n>] [--window-memory=<n>] [--depth=<n>]\n"
	"        [--no-reuse-delta] [--no-reuse-object] [--delta-base-offset]\n"
	"        [--threads=<n>] [--non-empty] [--revs [--unpacked | --all]]\n"
	"        [--reflog] [--stdout | base-name] [--include-tag]\n"
	"        [--keep-unreachable | --unpack-unreachable]\n"
	"        [< ref-list | < object-list]";

struct object_entry {
	struct pack_idx_entry idx;
	unsigned long size;	/* uncompressed size */
	struct packed_git *in_pack;	/* already in pack */
	off_t in_pack_offset;
	struct object_entry *delta;	/* delta base object */
	struct object_entry *delta_child; /* deltified objects that use me
					   * as their base
					   */
	struct object_entry *delta_sibling; /* other deltified objects that
					     * use the same base as me
					     */
	void *delta_data;	/* cached delta (uncompressed) */
	unsigned long delta_size;	/* delta data size (uncompressed) */
	unsigned long z_delta_size;	/* delta data size (compressed) */
	unsigned int hash;	/* name hint hash */
	enum object_type type;
	enum object_type in_pack_type;	/* could be delta */
	unsigned char in_pack_header_size;
	unsigned char preferred_base; /* we do not pack this, but it is
				       * available to be used as a base
				       * to delta other objects against.
				       */
	unsigned char no_try_delta;
	unsigned char tagged; /* near the very tip of refs */
	unsigned char filled; /* assigned write-order */
};

/*
 * Objects we are going to pack are collected in the objects array
 * (dynamically expanded). nr_objects & nr_alloc control this array.
 * Entries are stored in the order we see them -- typically the
 * "rev-list --objects" order, which gives us a nice "minimum seek" order.
 */
static struct object_entry *objects;
static struct pack_idx_entry **written_list;
static uint32_t nr_objects, nr_alloc, nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static int local;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit, pack_size_limit_cfg;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

/*
 * The object names in the objects array are hashed with this hash table
 * to help look up entries by object name.
 * The hash table is built after all the objects are seen.
 */
static int *object_ix;
static int object_ix_hashsz;
static struct object_entry *locate_object_entry(const unsigned char *sha1);

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;


static void *get_delta(struct object_entry *entry)
{
	unsigned long size, base_size, delta_size;
	void *buf, *base_buf, *delta_buf;
	enum object_type type;

	buf = read_sha1_file(entry->idx.sha1, &type, &size);
	if (!buf)
		die("unable to read %s", sha1_to_hex(entry->idx.sha1));
	base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
	if (!base_buf)
		die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
	delta_buf = diff_delta(base_buf, base_size,
			       buf, size, &delta_size, 0);
	if (!delta_buf || delta_size != entry->delta_size)
		die("delta size changed");
	free(buf);
	free(base_buf);
	return delta_buf;
}

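/*
 * do_compress() deflates the buffer pointed to by *pptr in one go: it
 * allocates a fresh output buffer sized by git_deflate_bound(), frees
 * the input buffer, makes *pptr point at the compressed data, and
 * returns the compressed size. Callers hand over ownership of the
 * input buffer and receive a malloc'd result in its place.
 */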
static unsigned long do_compress(void **pptr, unsigned long size)
{
	git_zstream stream;
	void *in, *out;
	unsigned long maxsize;

	memset(&stream, 0, sizeof(stream));
	git_deflate_init(&stream, pack_compression_level);
	maxsize = git_deflate_bound(&stream, size);

	in = *pptr;
	out = xmalloc(maxsize);
	*pptr = out;

	stream.next_in = in;
	stream.avail_in = size;
	stream.next_out = out;
	stream.avail_out = maxsize;
	while (git_deflate(&stream, Z_FINISH) == Z_OK)
		; /* nothing */
	git_deflate_end(&stream);

	free(in);
	return stream.total_out;
}

/*
 * We are going to reuse the existing object data as-is; make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len,
		unsigned long expect)
{
	git_zstream stream;
	unsigned char fakebuf[4096], *in;
	int st;

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	do {
		in = use_pack(p, w_curs, offset, &stream.avail_in);
		stream.next_in = in;
		stream.next_out = fakebuf;
		stream.avail_out = sizeof(fakebuf);
		st = git_inflate(&stream, Z_FINISH);
		offset += stream.next_in - in;
	} while (st == Z_OK || st == Z_BUF_ERROR);
	git_inflate_end(&stream);
	return (st == Z_STREAM_END &&
		stream.total_out == expect &&
		stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
		struct packed_git *p,
		struct pack_window **w_curs,
		off_t offset,
		off_t len)
{
	unsigned char *in;
	unsigned long avail;

	while (len) {
		in = use_pack(p, w_curs, offset, &avail);
		if (avail > len)
			avail = (unsigned long)len;
		sha1write(f, in, avail);
		offset += avail;
		len -= avail;
	}
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_object(struct sha1file *f,
				  struct object_entry *entry,
				  off_t write_offset)
{
	unsigned long size, limit, datalen;
	void *buf;
	unsigned char header[10], dheader[10];
	unsigned hdrlen;
	enum object_type type;
	int usable_delta, to_reuse;

	if (!pack_to_stdout)
		crc32_begin(f);

	type = entry->type;

	/* apply size limit if limited packsize and not first object */
	if (!pack_size_limit || !nr_written)
		limit = 0;
	else if (pack_size_limit <= write_offset)
		/*
		 * the earlier object did not fit the limit; avoid
		 * mistaking this with unlimited (i.e. limit = 0).
		 */
		limit = 1;
	else
		limit = pack_size_limit - write_offset;

	if (!entry->delta)
		usable_delta = 0;	/* no delta */
	else if (!pack_size_limit)
		usable_delta = 1;	/* unlimited packfile */
	else if (entry->delta->idx.offset == (off_t)-1)
		usable_delta = 0;	/* base was written to another pack */
	else if (entry->delta->idx.offset)
		usable_delta = 1;	/* base already exists in this pack */
	else
		usable_delta = 0;	/* base could end up in another pack */

	if (!reuse_object)
		to_reuse = 0;	/* explicit */
	else if (!entry->in_pack)
		to_reuse = 0;	/* can't reuse what we don't have */
	else if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA)
		/* check_object() decided it for us ... */
		to_reuse = usable_delta;
		/* ... but pack split may override that */
	else if (type != entry->in_pack_type)
		to_reuse = 0;	/* pack has delta which is unusable */
	else if (entry->delta)
		to_reuse = 0;	/* we want to pack afresh */
	else
		to_reuse = 1;	/* we have it in-pack undeltified,
				 * and we do not need to deltify it.
				 */

	if (!to_reuse) {
	no_reuse:
		if (!usable_delta) {
			buf = read_sha1_file(entry->idx.sha1, &type, &size);
			if (!buf)
				die("unable to read %s", sha1_to_hex(entry->idx.sha1));
			/*
			 * make sure no cached delta data remains from a
			 * previous attempt before a pack split occurred.
			 */
			free(entry->delta_data);
			entry->delta_data = NULL;
			entry->z_delta_size = 0;
		} else if (entry->delta_data) {
			size = entry->delta_size;
			buf = entry->delta_data;
			entry->delta_data = NULL;
			type = (allow_ofs_delta && entry->delta->idx.offset) ?
				OBJ_OFS_DELTA : OBJ_REF_DELTA;
		} else {
			buf = get_delta(entry);
			size = entry->delta_size;
			type = (allow_ofs_delta && entry->delta->idx.offset) ?
				OBJ_OFS_DELTA : OBJ_REF_DELTA;
		}

		if (entry->z_delta_size)
			datalen = entry->z_delta_size;
		else
			datalen = do_compress(&buf, size);

		/*
		 * The object header is a byte of 'type' followed by zero or
		 * more bytes of length.
		 */
		hdrlen = encode_in_pack_object_header(type, size, header);

		if (type == OBJ_OFS_DELTA) {
			/*
			 * Deltas with relative base contain an additional
			 * encoding of the relative offset for the delta
			 * base from this object's position in the pack.
			 */
			off_t ofs = entry->idx.offset - entry->delta->idx.offset;
			unsigned pos = sizeof(dheader) - 1;
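			/*
			 * Encode the offset in the pack's variable-length
			 * format: 7 data bits per byte, most significant
			 * group first, the MSB set on every byte except the
			 * last, and 1 subtracted at each continuation step
			 * so that each encoded length covers a distinct
			 * range. E.g. an offset of 128 is written as the
			 * two bytes 0x80 0x00.
			 */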
			dheader[pos] = ofs & 127;
			while (ofs >>= 7)
				dheader[--pos] = 128 | (--ofs & 127);
			if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
				free(buf);
				return 0;
			}
			sha1write(f, header, hdrlen);
			sha1write(f, dheader + pos, sizeof(dheader) - pos);
			hdrlen += sizeof(dheader) - pos;
		} else if (type == OBJ_REF_DELTA) {
			/*
			 * Deltas with a base reference contain
			 * an additional 20 bytes for the base sha1.
			 */
			if (limit && hdrlen + 20 + datalen + 20 >= limit) {
				free(buf);
				return 0;
			}
			sha1write(f, header, hdrlen);
			sha1write(f, entry->delta->idx.sha1, 20);
			hdrlen += 20;
		} else {
			if (limit && hdrlen + datalen + 20 >= limit) {
				free(buf);
				return 0;
			}
			sha1write(f, header, hdrlen);
		}
		sha1write(f, buf, datalen);
		free(buf);
	}
	else {
		struct packed_git *p = entry->in_pack;
		struct pack_window *w_curs = NULL;
		struct revindex_entry *revidx;
		off_t offset;

		if (entry->delta)
			type = (allow_ofs_delta && entry->delta->idx.offset) ?
				OBJ_OFS_DELTA : OBJ_REF_DELTA;
		hdrlen = encode_in_pack_object_header(type, entry->size, header);

		offset = entry->in_pack_offset;
		revidx = find_pack_revindex(p, offset);
		datalen = revidx[1].offset - offset;
		if (!pack_to_stdout && p->index_version > 1 &&
		    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
			error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
			unuse_pack(&w_curs);
			goto no_reuse;
		}

		offset += entry->in_pack_header_size;
		datalen -= entry->in_pack_header_size;
		if (!pack_to_stdout && p->index_version == 1 &&
		    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
			error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
			unuse_pack(&w_curs);
			goto no_reuse;
		}

		if (type == OBJ_OFS_DELTA) {
			off_t ofs = entry->idx.offset - entry->delta->idx.offset;
			unsigned pos = sizeof(dheader) - 1;
			dheader[pos] = ofs & 127;
			while (ofs >>= 7)
				dheader[--pos] = 128 | (--ofs & 127);
			if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
				unuse_pack(&w_curs);
				return 0;
			}
			sha1write(f, header, hdrlen);
			sha1write(f, dheader + pos, sizeof(dheader) - pos);
			hdrlen += sizeof(dheader) - pos;
			reused_delta++;
		} else if (type == OBJ_REF_DELTA) {
			if (limit && hdrlen + 20 + datalen + 20 >= limit) {
				unuse_pack(&w_curs);
				return 0;
			}
			sha1write(f, header, hdrlen);
			sha1write(f, entry->delta->idx.sha1, 20);
			hdrlen += 20;
			reused_delta++;
		} else {
			if (limit && hdrlen + datalen + 20 >= limit) {
				unuse_pack(&w_curs);
				return 0;
			}
			sha1write(f, header, hdrlen);
		}
		copy_pack_data(f, p, &w_curs, offset, datalen);
		unuse_pack(&w_curs);
		reused++;
	}
	if (usable_delta)
		written_delta++;
	written++;
	if (!pack_to_stdout)
		entry->idx.crc32 = crc32_end(f);
	return hdrlen + datalen;
}

static int write_one(struct sha1file *f,
		     struct object_entry *e,
		     off_t *offset)
{
	unsigned long size;

	/* offset is non-zero if object is written already. */
	if (e->idx.offset || e->preferred_base)
		return -1;

	/* if we are deltified, write out the base object first. */
	if (e->delta && !write_one(f, e->delta, offset))
		return 0;

	e->idx.offset = *offset;
	size = write_object(f, e, *offset);
	if (!size) {
		e->idx.offset = 0;
		return 0;
	}
	written_list[nr_written++] = &e->idx;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(*offset, size))
		die("pack too large for current definition of off_t");
	*offset += size;
	return 1;
}

static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
		       void *cb_data)
{
	unsigned char peeled[20];
	struct object_entry *entry = locate_object_entry(sha1);

	if (entry)
		entry->tagged = 1;
	if (!peel_ref(path, peeled)) {
		entry = locate_object_entry(peeled);
		if (entry)
			entry->tagged = 1;
	}
	return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	if (e->filled)
		return;
	wo[(*endp)++] = e;
	e->filled = 1;
}

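/*
 * Walk a delta tree iteratively (delta chains can be long, so no
 * recursion): a node's whole sibling group is emitted before
 * descending into any of their children, and the delta field doubles
 * as the parent pointer for backtracking on the way up.
 */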
static void add_descendants_to_write_order(struct object_entry **wo,
					   unsigned int *endp,
					   struct object_entry *e)
{
	int add_to_order = 1;
	while (e) {
		if (add_to_order) {
			struct object_entry *s;
			/* add this node... */
			add_to_write_order(wo, endp, e);
			/* all its siblings... */
			for (s = e->delta_sibling; s; s = s->delta_sibling) {
				add_to_write_order(wo, endp, s);
			}
		}
		/* drop down a level to add left subtree nodes if possible */
		if (e->delta_child) {
			add_to_order = 1;
			e = e->delta_child;
		} else {
			add_to_order = 0;
			/* our sibling might have some children, it is next */
			if (e->delta_sibling) {
				e = e->delta_sibling;
				continue;
			}
			/* go back to our parent node */
			e = e->delta;
			while (e && !e->delta_sibling) {
				/* we're on the right side of a subtree, keep
				 * going up until we can go right again */
				e = e->delta;
			}
			if (!e) {
				/* done -- we hit our original root node */
				return;
			}
			/* pass it off to the sibling at this level */
			e = e->delta_sibling;
		}
	}
}

static void add_family_to_write_order(struct object_entry **wo,
				      unsigned int *endp,
				      struct object_entry *e)
{
	struct object_entry *root;

	for (root = e; root->delta; root = root->delta)
		; /* nothing */
	add_descendants_to_write_order(wo, endp, root);
}

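/*
 * Decide the order in which objects are written to the pack:
 * untagged objects in the original recency order, then objects at
 * tagged tips, then the remaining commits and tags, then trees, and
 * finally everything else grouped by delta family, so that delta
 * bases stay close to the objects that use them.
 */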
static struct object_entry **compute_write_order(void)
{
	unsigned int i, wo_end, last_untagged;

	struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));

	for (i = 0; i < nr_objects; i++) {
		objects[i].tagged = 0;
		objects[i].filled = 0;
		objects[i].delta_child = NULL;
		objects[i].delta_sibling = NULL;
	}

	/*
	 * Fully connect delta_child/delta_sibling network.
	 * Make sure delta_sibling is sorted in the original
	 * recency order.
	 */
	for (i = nr_objects; i > 0;) {
		struct object_entry *e = &objects[--i];
		if (!e->delta)
			continue;
		/* Mark me as the first child */
		e->delta_sibling = e->delta->delta_child;
		e->delta->delta_child = e;
	}

	/*
	 * Mark objects that are at the tip of tags.
	 */
	for_each_tag_ref(mark_tagged, NULL);

	/*
	 * Give the objects in the original recency order until
	 * we see a tagged tip.
	 */
	for (i = wo_end = 0; i < nr_objects; i++) {
		if (objects[i].tagged)
			break;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}
	last_untagged = i;

	/*
	 * Then fill all the tagged tips.
	 */
	for (; i < nr_objects; i++) {
		if (objects[i].tagged)
			add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all remaining commits and tags.
	 */
	for (i = last_untagged; i < nr_objects; i++) {
		if (objects[i].type != OBJ_COMMIT &&
		    objects[i].type != OBJ_TAG)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * And then all the trees.
	 */
	for (i = last_untagged; i < nr_objects; i++) {
		if (objects[i].type != OBJ_TREE)
			continue;
		add_to_write_order(wo, &wo_end, &objects[i]);
	}

	/*
	 * Finally all the rest in really tight order
	 */
	for (i = last_untagged; i < nr_objects; i++) {
		if (!objects[i].filled)
			add_family_to_write_order(wo, &wo_end, &objects[i]);
	}

	if (wo_end != nr_objects)
		die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);

	return wo;
}

static void write_pack_file(void)
{
	uint32_t i = 0, j;
	struct sha1file *f;
	off_t offset;
	struct pack_header hdr;
	uint32_t nr_remaining = nr_result;
	time_t last_mtime = 0;
	struct object_entry **write_order;

	if (progress > pack_to_stdout)
		progress_state = start_progress("Writing objects", nr_result);
	written_list = xmalloc(nr_objects * sizeof(*written_list));
	write_order = compute_write_order();

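	/*
	 * Each iteration writes one pack. When a pack-size limit is in
	 * effect, write_one() stops before busting it and the loop
	 * starts a fresh pack for the remaining objects.
	 */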
	do {
		unsigned char sha1[20];
		char *pack_tmp_name = NULL;

		if (pack_to_stdout) {
			f = sha1fd_throughput(1, "<stdout>", progress_state);
		} else {
			char tmpname[PATH_MAX];
			int fd;
			fd = odb_mkstemp(tmpname, sizeof(tmpname),
					 "pack/tmp_pack_XXXXXX");
			pack_tmp_name = xstrdup(tmpname);
			f = sha1fd(fd, pack_tmp_name);
		}

		hdr.hdr_signature = htonl(PACK_SIGNATURE);
		hdr.hdr_version = htonl(PACK_VERSION);
		hdr.hdr_entries = htonl(nr_remaining);
		sha1write(f, &hdr, sizeof(hdr));
		offset = sizeof(hdr);
		nr_written = 0;
		for (; i < nr_objects; i++) {
			struct object_entry *e = write_order[i];
			if (!write_one(f, e, &offset))
				break;
			display_progress(progress_state, written);
		}

		/*
		 * Did we write the wrong # entries in the header?
		 * If so, rewrite it like in fast-import
		 */
		if (pack_to_stdout) {
			sha1close(f, sha1, CSUM_CLOSE);
		} else if (nr_written == nr_remaining) {
			sha1close(f, sha1, CSUM_FSYNC);
		} else {
			int fd = sha1close(f, sha1, 0);
			fixup_pack_header_footer(fd, sha1, pack_tmp_name,
						 nr_written, sha1, offset);
			close(fd);
		}

		if (!pack_to_stdout) {
			struct stat st;
			const char *idx_tmp_name;
			char tmpname[PATH_MAX];

			idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
						      &pack_idx_opts, sha1);

			snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
				 base_name, sha1_to_hex(sha1));
			free_pack_by_name(tmpname);
			if (adjust_shared_perm(pack_tmp_name))
				die_errno("unable to make temporary pack file readable");
			if (rename(pack_tmp_name, tmpname))
				die_errno("unable to rename temporary pack file");

			/*
			 * Packs are runtime accessed in their mtime
			 * order since newer packs are more likely to contain
			 * younger objects.  So if we are creating multiple
			 * packs then we should modify the mtime of later ones
			 * to preserve this property.
			 */
			if (stat(tmpname, &st) < 0) {
				warning("failed to stat %s: %s",
					tmpname, strerror(errno));
			} else if (!last_mtime) {
				last_mtime = st.st_mtime;
			} else {
				struct utimbuf utb;
				utb.actime = st.st_atime;
				utb.modtime = --last_mtime;
				if (utime(tmpname, &utb) < 0)
					warning("failed utime() on %s: %s",
						tmpname, strerror(errno));
			}

			snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
				 base_name, sha1_to_hex(sha1));
			if (adjust_shared_perm(idx_tmp_name))
				die_errno("unable to make temporary index file readable");
			if (rename(idx_tmp_name, tmpname))
				die_errno("unable to rename temporary index file");

			free((void *) idx_tmp_name);
			free(pack_tmp_name);
			puts(sha1_to_hex(sha1));
		}

		/* mark written objects as written to previous pack */
		for (j = 0; j < nr_written; j++) {
			written_list[j]->offset = (off_t)-1;
		}
		nr_remaining -= nr_written;
	} while (nr_remaining && i < nr_objects);

	free(written_list);
	free(write_order);
	stop_progress(&progress_state);
	if (written != nr_result)
		die("wrote %"PRIu32" objects while expecting %"PRIu32,
		    written, nr_result);
}

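/*
 * Open-addressing hash table with linear probing. A slot holds
 * 1 + the index into the objects array (0 means empty), so the probe
 * below either returns the slot of a match, or -1 - slot of the first
 * empty slot where the entry could be inserted.
 */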
static int locate_object_entry_hash(const unsigned char *sha1)
{
	int i;
	unsigned int ui;
	memcpy(&ui, sha1, sizeof(unsigned int));
	i = ui % object_ix_hashsz;
	while (0 < object_ix[i]) {
		if (!hashcmp(sha1, objects[object_ix[i] - 1].idx.sha1))
			return i;
		if (++i == object_ix_hashsz)
			i = 0;
	}
	return -1 - i;
}

static struct object_entry *locate_object_entry(const unsigned char *sha1)
{
	int i;

	if (!object_ix_hashsz)
		return NULL;

	i = locate_object_entry_hash(sha1);
	if (0 <= i)
		return &objects[object_ix[i]-1];
	return NULL;
}

static void rehash_objects(void)
{
	uint32_t i;
	struct object_entry *oe;

	object_ix_hashsz = nr_objects * 3;
	if (object_ix_hashsz < 1024)
		object_ix_hashsz = 1024;
	object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
	memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
	for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
		int ix = locate_object_entry_hash(oe->idx.sha1);
		if (0 <= ix)
			continue;
		ix = -1 - ix;
		object_ix[ix] = i + 1;
	}
}

static unsigned name_hash(const char *name)
{
	unsigned c, hash = 0;

	if (!name)
		return 0;

	/*
	 * This effectively just creates a sortable number from the
	 * last sixteen non-whitespace characters. Last characters
	 * count "most", so things that end in ".c" sort together.
	 */
	while ((c = *name++) != 0) {
		if (isspace(c))
			continue;
		hash = (hash >> 2) + (c << 24);
	}
	return hash;
}

static void setup_delta_attr_check(struct git_attr_check *check)
{
	static struct git_attr *attr_delta;

	if (!attr_delta)
		attr_delta = git_attr("delta");

	check[0].attr = attr_delta;
}

static int no_try_delta(const char *path)
{
	struct git_attr_check check[1];

	setup_delta_attr_check(check);
	if (git_check_attr(path, ARRAY_SIZE(check), check))
		return 0;
	if (ATTR_FALSE(check->value))
		return 1;
	return 0;
}

static int add_object_entry(const unsigned char *sha1, enum object_type type,
			    const char *name, int exclude)
{
	struct object_entry *entry;
	struct packed_git *p, *found_pack = NULL;
	off_t found_offset = 0;
	int ix;
	unsigned hash = name_hash(name);

	ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
	if (ix >= 0) {
		if (exclude) {
			entry = objects + object_ix[ix] - 1;
			if (!entry->preferred_base)
				nr_result--;
			entry->preferred_base = 1;
		}
		return 0;
	}

	if (!exclude && local && has_loose_object_nonlocal(sha1))
		return 0;

	for (p = packed_git; p; p = p->next) {
		off_t offset = find_pack_entry_one(sha1, p);
		if (offset) {
			if (!found_pack) {
				found_offset = offset;
				found_pack = p;
			}
			if (exclude)
				break;
			if (incremental)
				return 0;
			if (local && !p->pack_local)
				return 0;
			if (ignore_packed_keep && p->pack_local && p->pack_keep)
				return 0;
		}
	}

	if (nr_objects >= nr_alloc) {
		nr_alloc = (nr_alloc + 1024) * 3 / 2;
		objects = xrealloc(objects, nr_alloc * sizeof(*entry));
	}

	entry = objects + nr_objects++;
	memset(entry, 0, sizeof(*entry));
	hashcpy(entry->idx.sha1, sha1);
	entry->hash = hash;
	if (type)
		entry->type = type;
	if (exclude)
		entry->preferred_base = 1;
	else
		nr_result++;
	if (found_pack) {
		entry->in_pack = found_pack;
		entry->in_pack_offset = found_offset;
	}

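	/*
	 * Rehash once occupancy reaches 3/4 of the table
	 * (hashsz * 3 <= nr * 4); otherwise reuse the empty slot that
	 * the earlier failed lookup found for us.
	 */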
	if (object_ix_hashsz * 3 <= nr_objects * 4)
		rehash_objects();
	else
		object_ix[-1 - ix] = nr_objects;

	display_progress(progress_state, nr_objects);

	if (name && no_try_delta(name))
		entry->no_try_delta = 1;

	return 1;
}

struct pbase_tree_cache {
	unsigned char sha1[20];
	int ref;
	int temporary;
	void *tree_data;
	unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
	return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
	return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}

static struct pbase_tree {
	struct pbase_tree *next;
	/* This is a phony "cache" entry; we are not
	 * going to evict it nor find it through _get()
	 * mechanism -- this is for the toplevel node that
	 * would almost always change with any commit.
	 */
	struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
	struct pbase_tree_cache *ent, *nent;
	void *data;
	unsigned long size;
	enum object_type type;
	int neigh;
	int my_ix = pbase_tree_cache_ix(sha1);
	int available_ix = -1;

	/* pbase_tree_cache acts as a limited hashtable.
	 * Your object will be found at your index or within a few
	 * slots after that slot if it is cached.
	 */
	for (neigh = 0; neigh < 8; neigh++) {
		ent = pbase_tree_cache[my_ix];
		if (ent && !hashcmp(ent->sha1, sha1)) {
			ent->ref++;
			return ent;
		}
		else if (((available_ix < 0) && (!ent || !ent->ref)) ||
			 ((0 <= available_ix) &&
			  (!ent && pbase_tree_cache[available_ix])))
			available_ix = my_ix;
		if (!ent)
			break;
		my_ix = pbase_tree_cache_ix_incr(my_ix);
	}

	/* Did not find one.  Either we got a bogus request or
	 * we need to read and perhaps cache.
	 */
	data = read_sha1_file(sha1, &type, &size);
	if (!data)
		return NULL;
	if (type != OBJ_TREE) {
		free(data);
		return NULL;
	}

	/* We need to either cache or return a throwaway copy */

	if (available_ix < 0)
		ent = NULL;
	else {
		ent = pbase_tree_cache[available_ix];
		my_ix = available_ix;
	}

	if (!ent) {
		nent = xmalloc(sizeof(*nent));
		nent->temporary = (available_ix < 0);
	}
	else {
		/* evict and reuse */
		free(ent->tree_data);
		nent = ent;
	}
	hashcpy(nent->sha1, sha1);
	nent->tree_data = data;
	nent->tree_size = size;
	nent->ref = 1;
	if (!nent->temporary)
		pbase_tree_cache[my_ix] = nent;
	return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
	if (!cache->temporary) {
		cache->ref--;
		return;
	}
	free(cache->tree_data);
	free(cache);
}

static int name_cmp_len(const char *name)
{
	int i;
	for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
		;
	return i;
}

static void add_pbase_object(struct tree_desc *tree,
			     const char *name,
			     int cmplen,
			     const char *fullname)
{
	struct name_entry entry;
	int cmp;

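	/*
	 * Tree entries come in sorted order, so we can scan forward
	 * past smaller names and stop as soon as we pass where the
	 * wanted name would be (cmp < 0).
	 */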
	while (tree_entry(tree, &entry)) {
		if (S_ISGITLINK(entry.mode))
			continue;
		cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 :
		      memcmp(name, entry.path, cmplen);
		if (cmp > 0)
			continue;
		if (cmp < 0)
			return;
		if (name[cmplen] != '/') {
			add_object_entry(entry.sha1,
					 object_type(entry.mode),
					 fullname, 1);
			return;
		}
		if (S_ISDIR(entry.mode)) {
			struct tree_desc sub;
			struct pbase_tree_cache *tree;
			const char *down = name+cmplen+1;
			int downlen = name_cmp_len(down);

			tree = pbase_tree_get(entry.sha1);
			if (!tree)
				return;
			init_tree_desc(&sub, tree->tree_data, tree->tree_size);

			add_pbase_object(&sub, down, downlen, fullname);
			pbase_tree_put(tree);
		}
	}
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
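/*
 * Binary search in done_pbase_paths, which is kept sorted in
 * descending order. Returns the index of a match, or -lo-1 on a miss
 * with lo being the insertion point (the same negated-position
 * convention as the object_ix probe above).
 */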
static int done_pbase_path_pos(unsigned hash)
{
	int lo = 0;
	int hi = done_pbase_paths_num;
	while (lo < hi) {
		int mi = (hi + lo) / 2;
		if (done_pbase_paths[mi] == hash)
			return mi;
		if (done_pbase_paths[mi] < hash)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
	int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
	if (0 <= pos)
		return 1;
	pos = -pos - 1;
	if (done_pbase_paths_alloc <= done_pbase_paths_num) {
		done_pbase_paths_alloc = alloc_nr(done_pbase_paths_alloc);
		done_pbase_paths = xrealloc(done_pbase_paths,
					    done_pbase_paths_alloc *
					    sizeof(unsigned));
	}
	done_pbase_paths_num++;
	if (pos < done_pbase_paths_num)
		memmove(done_pbase_paths + pos + 1,
			done_pbase_paths + pos,
			(done_pbase_paths_num - pos - 1) * sizeof(unsigned));
	done_pbase_paths[pos] = hash;
	return 0;
}

static void add_preferred_base_object(const char *name)
{
	struct pbase_tree *it;
	int cmplen;
	unsigned hash = name_hash(name);

	if (!num_preferred_base || check_pbase_path(hash))
		return;

	cmplen = name_cmp_len(name);
	for (it = pbase_tree; it; it = it->next) {
		if (cmplen == 0) {
			add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
		}
		else {
			struct tree_desc tree;
			init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
			add_pbase_object(&tree, name, cmplen, name);
		}
	}
}

static void add_preferred_base(unsigned char *sha1)
{
	struct pbase_tree *it;
	void *data;
	unsigned long size;
	unsigned char tree_sha1[20];

	if (window <= num_preferred_base++)
		return;

	data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
	if (!data)
		return;

	for (it = pbase_tree; it; it = it->next) {
		if (!hashcmp(it->pcache.sha1, tree_sha1)) {
			free(data);
			return;
		}
	}

	it = xcalloc(1, sizeof(*it));
	it->next = pbase_tree;
	pbase_tree = it;

	hashcpy(it->pcache.sha1, tree_sha1);
	it->pcache.tree_data = data;
	it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
	struct pbase_tree *it;
	unsigned i;

	it = pbase_tree;
	pbase_tree = NULL;
	while (it) {
		struct pbase_tree *this = it;
		it = this->next;
		free(this->pcache.tree_data);
		free(this);
	}

	for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
		if (!pbase_tree_cache[i])
			continue;
		free(pbase_tree_cache[i]->tree_data);
		free(pbase_tree_cache[i]);
		pbase_tree_cache[i] = NULL;
	}

	free(done_pbase_paths);
	done_pbase_paths = NULL;
	done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

static void check_object(struct object_entry *entry)
{
	if (entry->in_pack) {
		struct packed_git *p = entry->in_pack;
		struct pack_window *w_curs = NULL;
		const unsigned char *base_ref = NULL;
		struct object_entry *base_entry;
		unsigned long used, used_0;
		unsigned long avail;
		off_t ofs;
		unsigned char *buf, c;

		buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

		/*
		 * We want in_pack_type even if we do not reuse delta
		 * since non-delta representations could still be reused.
		 */
		used = unpack_object_header_buffer(buf, avail,
						   &entry->in_pack_type,
						   &entry->size);
		if (used == 0)
			goto give_up;

		/*
		 * Determine if this is a delta and if so whether we can
		 * reuse it or not.  Otherwise let's find out as cheaply as
		 * possible what the actual type and size for this object is.
		 */
		switch (entry->in_pack_type) {
		default:
			/* Not a delta hence we've already got all we need. */
			entry->type = entry->in_pack_type;
			entry->in_pack_header_size = used;
			if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		case OBJ_REF_DELTA:
			if (reuse_delta && !entry->preferred_base)
				base_ref = use_pack(p, &w_curs,
						entry->in_pack_offset + used, NULL);
			entry->in_pack_header_size = used + 20;
			break;
		case OBJ_OFS_DELTA:
			buf = use_pack(p, &w_curs,
				       entry->in_pack_offset + used, NULL);
			used_0 = 0;
			c = buf[used_0++];
			ofs = c & 127;
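			/*
			 * Decode the variable-length offset; the "ofs += 1"
			 * undoes the "--ofs" applied per continuation byte
			 * on the encoding side in write_object().
			 */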
			while (c & 128) {
				ofs += 1;
				if (!ofs || MSB(ofs, 7)) {
					error("delta base offset overflow in pack for %s",
					      sha1_to_hex(entry->idx.sha1));
					goto give_up;
				}
				c = buf[used_0++];
				ofs = (ofs << 7) + (c & 127);
			}
			ofs = entry->in_pack_offset - ofs;
			if (ofs <= 0 || ofs >= entry->in_pack_offset) {
				error("delta base offset out of bounds for %s",
				      sha1_to_hex(entry->idx.sha1));
				goto give_up;
			}
			if (reuse_delta && !entry->preferred_base) {
				struct revindex_entry *revidx;
				revidx = find_pack_revindex(p, ofs);
				if (!revidx)
					goto give_up;
				base_ref = nth_packed_object_sha1(p, revidx->nr);
			}
			entry->in_pack_header_size = used + used_0;
			break;
		}

		if (base_ref && (base_entry = locate_object_entry(base_ref))) {
			/*
			 * If base_ref was set above that means we wish to
			 * reuse delta data, and we even found that base
			 * in the list of objects we want to pack. Goodie!
			 *
			 * Depth value does not matter - find_deltas() will
			 * never consider reused delta as the base object to
			 * deltify other objects against, in order to avoid
			 * circular deltas.
			 */
			entry->type = entry->in_pack_type;
			entry->delta = base_entry;
			entry->delta_size = entry->size;
			entry->delta_sibling = base_entry->delta_child;
			base_entry->delta_child = entry;
			unuse_pack(&w_curs);
			return;
		}

		if (entry->type) {
			/*
			 * This must be a delta and we already know what the
			 * final object type is.  Let's extract the actual
			 * object size from the delta header.
			 */
			entry->size = get_size_from_delta(p, &w_curs,
					entry->in_pack_offset + entry->in_pack_header_size);
			if (entry->size == 0)
				goto give_up;
			unuse_pack(&w_curs);
			return;
		}

		/*
		 * No choice but to fall back to the recursive delta walk
		 * with sha1_object_info() to find out the object type
		 * at this point...
		 */
		give_up:
		unuse_pack(&w_curs);
	}

	entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
	/*
	 * The error condition is checked in prepare_pack().  This is
	 * to permit a missing preferred base object to be ignored
	 * as a preferred base.  Doing so can result in a larger
	 * pack file, but the transfer will still take place.
	 */
}

static int pack_offset_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	/* avoid filesystem thrashing with loose objects */
	if (!a->in_pack && !b->in_pack)
		return hashcmp(a->idx.sha1, b->idx.sha1);

	if (a->in_pack < b->in_pack)
		return -1;
	if (a->in_pack > b->in_pack)
		return 1;
	return a->in_pack_offset < b->in_pack_offset ? -1 :
	       (a->in_pack_offset > b->in_pack_offset);
}

static void get_object_details(void)
{
	uint32_t i;
	struct object_entry **sorted_by_offset;

	sorted_by_offset = xcalloc(nr_objects, sizeof(struct object_entry *));
	for (i = 0; i < nr_objects; i++)
		sorted_by_offset[i] = objects + i;
	qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *entry = sorted_by_offset[i];
		check_object(entry);
		if (big_file_threshold <= entry->size)
			entry->no_try_delta = 1;
	}

	free(sorted_by_offset);
}

/*
 * We search for deltas in a list sorted by type, by filename hash, and
 * then by size, so that we see progressively smaller and smaller files.
 * That's because we prefer deltas to be from the bigger file
 * to the smaller -- deletes are potentially cheaper, but perhaps
 * more importantly, the bigger file is likely the more recent
 * one.  The deepest deltas are therefore the oldest objects, which are
 * less likely to be accessed often.
 */
static int type_size_sort(const void *_a, const void *_b)
{
	const struct object_entry *a = *(struct object_entry **)_a;
	const struct object_entry *b = *(struct object_entry **)_b;

	if (a->type > b->type)
		return -1;
	if (a->type < b->type)
		return 1;
	if (a->hash > b->hash)
		return -1;
	if (a->hash < b->hash)
		return 1;
	if (a->preferred_base > b->preferred_base)
		return -1;
	if (a->preferred_base < b->preferred_base)
		return 1;
	if (a->size > b->size)
		return -1;
	if (a->size < b->size)
		return 1;
	return a < b ? -1 : (a > b);  /* newest first */
}

struct unpacked {
	struct object_entry *entry;
	void *data;
	struct delta_index *index;
	unsigned depth;
};

static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
			   unsigned long delta_size)
{
	if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
		return 0;

	if (delta_size < cache_max_small_delta_size)
		return 1;

	/* cache delta, if objects are large enough compared to delta size */
	if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
		return 1;

	return 0;
}

#ifndef NO_PTHREADS

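/*
 * read_mutex is initialized as a recursive mutex (see
 * init_threaded_search()): try_to_free_from_threads() takes it, and
 * that free routine can be triggered by allocations made while the
 * lock is already held, so a plain mutex would self-deadlock.
 */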
static pthread_mutex_t read_mutex;
#define read_lock()		pthread_mutex_lock(&read_mutex)
#define read_unlock()		pthread_mutex_unlock(&read_mutex)

static pthread_mutex_t cache_mutex;
#define cache_lock()		pthread_mutex_lock(&cache_mutex)
#define cache_unlock()		pthread_mutex_unlock(&cache_mutex)

static pthread_mutex_t progress_mutex;
#define progress_lock()		pthread_mutex_lock(&progress_mutex)
#define progress_unlock()	pthread_mutex_unlock(&progress_mutex)

#else

#define read_lock()		(void)0
#define read_unlock()		(void)0
#define cache_lock()		(void)0
#define cache_unlock()		(void)0
#define progress_lock()		(void)0
#define progress_unlock()	(void)0

#endif

static int try_delta(struct unpacked *trg, struct unpacked *src,
		     unsigned max_depth, unsigned long *mem_usage)
{
	struct object_entry *trg_entry = trg->entry;
	struct object_entry *src_entry = src->entry;
	unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
	unsigned ref_depth;
	enum object_type type;
	void *delta_buf;

	/* Don't bother doing diffs between different types */
	if (trg_entry->type != src_entry->type)
		return -1;

	/*
	 * We do not bother to try a delta that we discarded
	 * on an earlier try, but only when reusing delta data.
	 */
	if (reuse_delta && trg_entry->in_pack &&
	    trg_entry->in_pack == src_entry->in_pack &&
	    trg_entry->in_pack_type != OBJ_REF_DELTA &&
	    trg_entry->in_pack_type != OBJ_OFS_DELTA)
		return 0;

	/* Let's not bust the allowed depth. */
	if (src->depth >= max_depth)
		return 0;

	/* Now some size filtering heuristics. */
	trg_size = trg_entry->size;
	if (!trg_entry->delta) {
		max_size = trg_size/2 - 20;
		ref_depth = 1;
	} else {
		max_size = trg_entry->delta_size;
		ref_depth = trg->depth;
	}
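	/*
	 * Scale the size cap by the depth still available below the
	 * candidate base: a delta that would land deep in the chain
	 * must be proportionally smaller than the one it replaces to
	 * be worth the extra depth.
	 */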
	max_size = (uint64_t)max_size * (max_depth - src->depth) /
						(max_depth - ref_depth + 1);
	if (max_size == 0)
		return 0;
	src_size = src_entry->size;
	sizediff = src_size < trg_size ? trg_size - src_size : 0;
	if (sizediff >= max_size)
		return 0;
	if (trg_size < src_size / 32)
		return 0;

	/* Load data if not already done */
	if (!trg->data) {
		read_lock();
		trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
		read_unlock();
		if (!trg->data)
			die("object %s cannot be read",
			    sha1_to_hex(trg_entry->idx.sha1));
		if (sz != trg_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
		*mem_usage += sz;
	}
	if (!src->data) {
		read_lock();
		src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
		read_unlock();
		if (!src->data) {
			if (src_entry->preferred_base) {
				static int warned = 0;
				if (!warned++)
					warning("object %s cannot be read",
						sha1_to_hex(src_entry->idx.sha1));
				/*
				 * Those objects are not included in the
				 * resulting pack.  Be resilient and ignore
				 * them if they can't be read, in case the
				 * pack could be created nevertheless.
				 */
				return 0;
			}
			die("object %s cannot be read",
			    sha1_to_hex(src_entry->idx.sha1));
		}
		if (sz != src_size)
			die("object %s inconsistent object length (%lu vs %lu)",
			    sha1_to_hex(src_entry->idx.sha1), sz, src_size);
		*mem_usage += sz;
	}
	if (!src->index) {
		src->index = create_delta_index(src->data, src_size);
		if (!src->index) {
			static int warned = 0;
			if (!warned++)
				warning("suboptimal pack - out of memory");
			return 0;
		}
		*mem_usage += sizeof_delta_index(src->index);
	}

	delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
	if (!delta_buf)
		return 0;

	if (trg_entry->delta) {
		/* Prefer only shallower same-sized deltas. */
		if (delta_size == trg_entry->delta_size &&
		    src->depth + 1 >= trg->depth) {
			free(delta_buf);
			return 0;
		}
	}

	/*
	 * Handle memory allocation outside of the cache
	 * accounting lock.  Compiler will optimize the strangeness
	 * away when NO_PTHREADS is defined.
	 */
	free(trg_entry->delta_data);
	cache_lock();
	if (trg_entry->delta_data) {
		delta_cache_size -= trg_entry->delta_size;
		trg_entry->delta_data = NULL;
	}
	if (delta_cacheable(src_size, trg_size, delta_size)) {
		delta_cache_size += delta_size;
		cache_unlock();
		trg_entry->delta_data = xrealloc(delta_buf, delta_size);
	} else {
		cache_unlock();
		free(delta_buf);
	}

	trg_entry->delta = src_entry;
	trg_entry->delta_size = delta_size;
	trg->depth = src->depth + 1;

	return 1;
}

static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
	struct object_entry *child = me->delta_child;
	unsigned int m = n;
	while (child) {
		unsigned int c = check_delta_limit(child, n + 1);
		if (m < c)
			m = c;
		child = child->delta_sibling;
	}
	return m;
}

static unsigned long free_unpacked(struct unpacked *n)
{
	unsigned long freed_mem = sizeof_delta_index(n->index);
	free_delta_index(n->index);
	n->index = NULL;
	if (n->data) {
		freed_mem += n->entry->size;
		free(n->data);
		n->data = NULL;
	}
	n->entry = NULL;
	n->depth = 0;
	return freed_mem;
}

static void find_deltas(struct object_entry **list, unsigned *list_size,
			int window, int depth, unsigned *processed)
{
	uint32_t i, idx = 0, count = 0;
	struct unpacked *array;
	unsigned long mem_usage = 0;

	array = xcalloc(window, sizeof(struct unpacked));

	for (;;) {
		struct object_entry *entry;
		struct unpacked *n = array + idx;
		int j, max_depth, best_base = -1;

		progress_lock();
		if (!*list_size) {
			progress_unlock();
			break;
		}
		entry = *list++;
		(*list_size)--;
		if (!entry->preferred_base) {
			(*processed)++;
			display_progress(progress_state, *processed);
		}
		progress_unlock();

		mem_usage -= free_unpacked(n);
		n->entry = entry;

		while (window_memory_limit &&
		       mem_usage > window_memory_limit &&
		       count > 1) {
			uint32_t tail = (idx + window - count) % window;
			mem_usage -= free_unpacked(array + tail);
			count--;
		}

		/* We do not compute deltas to *create* objects we are not
		 * going to pack.
		 */
		if (entry->preferred_base)
			goto next;

		/*
		 * If the current object is at the pack edge, take the depth
		 * of the objects that depend on the current object into
		 * account; otherwise they would become too deep.
		 */
		max_depth = depth;
		if (entry->delta_child) {
			max_depth -= check_delta_limit(entry, 0);
			if (max_depth <= 0)
				goto next;
		}

		j = window;
		while (--j > 0) {
			int ret;
			uint32_t other_idx = idx + j;
			struct unpacked *m;
			if (other_idx >= window)
				other_idx -= window;
			m = array + other_idx;
			if (!m->entry)
				break;
			ret = try_delta(n, m, max_depth, &mem_usage);
			if (ret < 0)
				break;
			else if (ret > 0)
				best_base = other_idx;
		}

		/*
		 * If we decided to cache the delta data, then it is best
		 * to compress it right away.  First because we have to do
		 * it anyway, and doing it here while we're threaded will
		 * save a lot of time in the non threaded write phase,
		 * as well as allow for caching more deltas within
		 * the same cache size limit.
		 * ...
		 * But only if not writing to stdout, since in that case
		 * the network is most likely throttling writes anyway,
		 * and therefore it is best to go to the write phase ASAP
		 * instead, as we can afford spending more time compressing
		 * between writes at that moment.
		 */
		if (entry->delta_data && !pack_to_stdout) {
			entry->z_delta_size = do_compress(&entry->delta_data,
							  entry->delta_size);
			cache_lock();
			delta_cache_size -= entry->delta_size;
			delta_cache_size += entry->z_delta_size;
			cache_unlock();
		}

		/* If we made n a delta, and if n is already at max
		 * depth, leaving it in the window is pointless.  We
		 * should evict it first.
		 */
		if (entry->delta && max_depth <= n->depth)
			continue;

		/*
		 * Move the best delta base up in the window, after the
		 * currently deltified object, to keep it longer.  It will
		 * be the first base object to be attempted next.
		 */
		if (entry->delta) {
			struct unpacked swap = array[best_base];
			int dist = (window + idx - best_base) % window;
			int dst = best_base;
			while (dist--) {
				int src = (dst + 1) % window;
				array[dst] = array[src];
				dst = src;
			}
			array[dst] = swap;
		}

		next:
		idx++;
		if (count + 1 < window)
			count++;
		if (idx >= window)
			idx = 0;
	}

	for (i = 0; i < window; ++i) {
		free_delta_index(array[i].index);
		free(array[i].data);
	}
	free(array);
}

#ifndef NO_PTHREADS

static void try_to_free_from_threads(size_t size)
{
	read_lock();
	release_pack_memory(size, -1);
	read_unlock();
}

static try_to_free_t old_try_to_free_routine;

/*
 * The main thread waits on the condition that (at least) one of the workers
 * has stopped working (which is indicated in the .working member of
 * struct thread_params).
 * When a work thread has completed its work, it sets .working to 0 and
 * signals the main thread and waits on the condition that .data_ready
 * becomes 1.
 */

struct thread_params {
	pthread_t thread;
	struct object_entry **list;
	unsigned list_size;
	unsigned remaining;
	int window;
	int depth;
	int working;
	int data_ready;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	unsigned *processed;
};

static pthread_cond_t progress_cond;

/*
 * Mutexes and condition variables can't be statically initialized
 * on Windows.
 */
static void init_threaded_search(void)
{
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&cache_mutex, NULL);
	pthread_mutex_init(&progress_mutex, NULL);
	pthread_cond_init(&progress_cond, NULL);
	old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}

static void cleanup_threaded_search(void)
{
	set_try_to_free_routine(old_try_to_free_routine);
	pthread_cond_destroy(&progress_cond);
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&cache_mutex);
	pthread_mutex_destroy(&progress_mutex);
}

static void *threaded_find_deltas(void *arg)
{
	struct thread_params *me = arg;

	while (me->remaining) {
		find_deltas(me->list, &me->remaining,
			    me->window, me->depth, me->processed);

		progress_lock();
		me->working = 0;
		pthread_cond_signal(&progress_cond);
		progress_unlock();

		/*
		 * We must not set ->data_ready before we wait on the
		 * condition because the main thread may have set it to 1
		 * before we get here.  In order to be sure that new
		 * work is available if we see 1 in ->data_ready, it
		 * was initialized to 0 before this thread was spawned
		 * and we reset it to 0 right away.
		 */
		pthread_mutex_lock(&me->mutex);
		while (!me->data_ready)
			pthread_cond_wait(&me->cond, &me->mutex);
		me->data_ready = 0;
		pthread_mutex_unlock(&me->mutex);
	}
	/* leave ->working 1 so that this doesn't get more work assigned */
	return NULL;
}

static void ll_find_deltas(struct object_entry **list, unsigned list_size,
			   int window, int depth, unsigned *processed)
{
	struct thread_params *p;
	int i, ret, active_threads = 0;

	init_threaded_search();

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();
	if (delta_search_threads <= 1) {
		find_deltas(list, &list_size, window, depth, processed);
		cleanup_threaded_search();
		return;
	}
	if (progress > pack_to_stdout)
		fprintf(stderr, "Delta compression using up to %d threads.\n",
			delta_search_threads);
	p = xcalloc(delta_search_threads, sizeof(*p));

	/* Partition the work amongst work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		unsigned sub_size = list_size / (delta_search_threads - i);

		/* don't use too small segments or no deltas will be found */
		if (sub_size < 2*window && i+1 < delta_search_threads)
			sub_size = 0;

		p[i].window = window;
		p[i].depth = depth;
		p[i].processed = processed;
		p[i].working = 1;
		p[i].data_ready = 0;

		/* try to split chunks on "path" boundaries */
		while (sub_size && sub_size < list_size &&
		       list[sub_size]->hash &&
		       list[sub_size]->hash == list[sub_size-1]->hash)
			sub_size++;

		p[i].list = list;
		p[i].list_size = sub_size;
		p[i].remaining = sub_size;

		list += sub_size;
		list_size -= sub_size;
	}

	/* Start work threads. */
	for (i = 0; i < delta_search_threads; i++) {
		if (!p[i].list_size)
			continue;
		pthread_mutex_init(&p[i].mutex, NULL);
		pthread_cond_init(&p[i].cond, NULL);
		ret = pthread_create(&p[i].thread, NULL,
				     threaded_find_deltas, &p[i]);
		if (ret)
			die("unable to create thread: %s", strerror(ret));
		active_threads++;
	}

	/*
	 * Now let's wait for work completion.  Each time a thread is done
	 * with its work, we steal half of the remaining work from the
	 * thread with the largest number of unprocessed objects and give
	 * it to that newly idle thread.  This ensures good load balancing
	 * until the remaining object list segments are simply too short
	 * to be worth splitting anymore.
	 */
	while (active_threads) {
		struct thread_params *target = NULL;
		struct thread_params *victim = NULL;
		unsigned sub_size = 0;

		progress_lock();
		for (;;) {
			for (i = 0; !target && i < delta_search_threads; i++)
				if (!p[i].working)
					target = &p[i];
			if (target)
				break;
			pthread_cond_wait(&progress_cond, &progress_mutex);
		}

		for (i = 0; i < delta_search_threads; i++)
			if (p[i].remaining > 2*window &&
			    (!victim || victim->remaining < p[i].remaining))
				victim = &p[i];
		if (victim) {
			sub_size = victim->remaining / 2;
			list = victim->list + victim->list_size - sub_size;
			while (sub_size && list[0]->hash &&
			       list[0]->hash == list[-1]->hash) {
				list++;
				sub_size--;
			}
			if (!sub_size) {
				/*
				 * It is possible for some "paths" to have
				 * so many objects that no hash boundary
				 * might be found.  Let's just steal the
				 * exact half in that case.
				 */
				sub_size = victim->remaining / 2;
				list -= sub_size;
			}
			target->list = list;
			victim->list_size -= sub_size;
			victim->remaining -= sub_size;
		}
		target->list_size = sub_size;
		target->remaining = sub_size;
		target->working = 1;
		progress_unlock();

		pthread_mutex_lock(&target->mutex);
		target->data_ready = 1;
		pthread_cond_signal(&target->cond);
		pthread_mutex_unlock(&target->mutex);

		if (!sub_size) {
			pthread_join(target->thread, NULL);
			pthread_cond_destroy(&target->cond);
			pthread_mutex_destroy(&target->mutex);
			active_threads--;
		}
	}
	cleanup_threaded_search();
	free(p);
}

#else
#define ll_find_deltas(l, s, w, d, p)	find_deltas(l, &s, w, d, p)
#endif

static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, void *cb_data)
{
	unsigned char peeled[20];

	if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
	    !peel_ref(path, peeled)        && /* peelable? */
	    !is_null_sha1(peeled)          && /* annotated tag? */
	    locate_object_entry(peeled))      /* object packed? */
		add_object_entry(sha1, OBJ_TAG, NULL, 0);
	return 0;
}

static void prepare_pack(int window, int depth)
{
	struct object_entry **delta_list;
	uint32_t i, nr_deltas;
	unsigned n;

	get_object_details();

	/*
	 * If we're locally repacking then we need to be doubly careful
	 * from now on in order to make sure no stealth corruption gets
	 * propagated to the new pack.  Clients receiving streamed packs
	 * should validate everything they get anyway so no need to incur
	 * the additional cost here in that case.
	 */
	if (!pack_to_stdout)
		do_check_packed_object_crc = 1;

	if (!nr_objects || !window || !depth)
		return;

	delta_list = xmalloc(nr_objects * sizeof(*delta_list));
	nr_deltas = n = 0;

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *entry = objects + i;

		if (entry->delta)
			/* This happens if we decided to reuse existing
			 * delta from a pack.  "reuse_delta &&" is implied.
			 */
			continue;

		if (entry->size < 50)
			continue;

		if (entry->no_try_delta)
			continue;

		if (!entry->preferred_base) {
			nr_deltas++;
			if (entry->type < 0)
				die("unable to get type of object %s",
				    sha1_to_hex(entry->idx.sha1));
		} else {
			if (entry->type < 0) {
				/*
				 * This object is not found, but we
				 * don't have to include it anyway.
				 */
				continue;
			}
		}

		delta_list[n++] = entry;
	}

	if (nr_deltas && n > 1) {
		unsigned nr_done = 0;
		if (progress)
			progress_state = start_progress("Compressing objects",
							nr_deltas);
		qsort(delta_list, n, sizeof(*delta_list), type_size_sort);
		ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
		stop_progress(&progress_state);
		if (nr_done != nr_deltas)
			die("inconsistency with delta count");
	}
	free(delta_list);
}
2015
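/*
 * Parse the pack.* configuration variables; anything unrecognized is
 * handed to git_default_config().
 */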
static int git_pack_config(const char *k, const char *v, void *cb)
{
	if (!strcmp(k, "pack.window")) {
		window = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.windowmemory")) {
		window_memory_limit = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.depth")) {
		depth = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.compression")) {
		int level = git_config_int(k, v);
		if (level == -1)
			level = Z_DEFAULT_COMPRESSION;
		else if (level < 0 || level > Z_BEST_COMPRESSION)
			die("bad pack compression level %d", level);
		pack_compression_level = level;
		pack_compression_seen = 1;
		return 0;
	}
	if (!strcmp(k, "pack.deltacachesize")) {
		/* max_delta_cache_size is an unsigned long; parse it as one */
		max_delta_cache_size = git_config_ulong(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.deltacachelimit")) {
		cache_max_small_delta_size = git_config_int(k, v);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		delta_search_threads = git_config_int(k, v);
		if (delta_search_threads < 0)
			die("invalid number of threads specified (%d)",
			    delta_search_threads);
#ifdef NO_PTHREADS
		if (delta_search_threads != 1)
			warning("no threads support, ignoring %s", k);
#endif
		return 0;
	}
	if (!strcmp(k, "pack.indexversion")) {
		pack_idx_opts.version = git_config_int(k, v);
		if (pack_idx_opts.version > 2)
			die("bad pack.indexversion=%"PRIu32,
			    pack_idx_opts.version);
		return 0;
	}
	if (!strcmp(k, "pack.packsizelimit")) {
		pack_size_limit_cfg = git_config_ulong(k, v);
		return 0;
	}
	return git_default_config(k, v, cb);
}

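/*
 * Read the list of objects to pack from stdin.  Each line is a 40-hex
 * object name followed by a space and a name hint; lines starting with
 * '-' name "edge" objects to be used only as preferred delta bases.
 */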
static void read_object_list_from_stdin(void)
{
	char line[40 + 1 + PATH_MAX + 2];
	unsigned char sha1[20];

	for (;;) {
		if (!fgets(line, sizeof(line), stdin)) {
			if (feof(stdin))
				break;
			if (!ferror(stdin))
				die("fgets returned NULL, not EOF, not error!");
			if (errno != EINTR)
				die_errno("fgets");
			clearerr(stdin);
			continue;
		}
		if (line[0] == '-') {
			if (get_sha1_hex(line+1, sha1))
				die("expected edge sha1, got garbage:\n %s",
				    line);
			add_preferred_base(sha1);
			continue;
		}
		if (get_sha1_hex(line, sha1))
			die("expected sha1, got garbage:\n %s", line);

		add_preferred_base_object(line+41);
		add_object_entry(sha1, 0, line+41, 0);
	}
}

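/*
 * Flag bit set on in-core objects that have already been queued for
 * packing, so the scans over existing packs below can skip them.
 */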
#define OBJECT_ADDED (1u<<20)

static void show_commit(struct commit *commit, void *data)
{
	add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
	commit->object.flags |= OBJECT_ADDED;
}

static void show_object(struct object *obj, const struct name_path *path, const char *last)
{
	char *name = path_name(path, last);

	add_preferred_base_object(name);
	add_object_entry(obj->sha1, obj->type, name, 0);
	obj->flags |= OBJECT_ADDED;

	/*
	 * We will have generated the hash from the name, but not
	 * saved a pointer to it, so we can free it now.
	 */
	free(name);
}

static void show_edge(struct commit *commit)
{
	add_preferred_base(commit->object.sha1);
}

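/*
 * Used by --keep-unreachable: remembers objects that live in local
 * packs but were not picked up by the revision walk, along with their
 * pack offsets, so they can be re-added in the original pack order.
 */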
struct in_pack_object {
	off_t offset;
	struct object *object;
};

struct in_pack {
	int alloc;
	int nr;
	struct in_pack_object *array;
};

static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
{
	in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->sha1, p);
	in_pack->array[in_pack->nr].object = object;
	in_pack->nr++;
}

2150/*
2151 * Compare the objects in the offset order, in order to emulate the
2152 * "git rev-list --objects" output that produced the pack originally.
2153 */
static int ofscmp(const void *a_, const void *b_)
{
	struct in_pack_object *a = (struct in_pack_object *)a_;
	struct in_pack_object *b = (struct in_pack_object *)b_;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	else
		return hashcmp(a->object->sha1, b->object->sha1);
}

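/*
 * Add, in pack offset order, every object in a local non-kept pack
 * that has not already been queued for packing.
 */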
static void add_objects_in_unpacked_packs(struct rev_info *revs)
{
	struct packed_git *p;
	struct in_pack in_pack;
	uint32_t i;

	memset(&in_pack, 0, sizeof(in_pack));

	for (p = packed_git; p; p = p->next) {
		const unsigned char *sha1;
		struct object *o;

		if (!p->pack_local || p->pack_keep)
			continue;
		if (open_pack_index(p))
			die("cannot open pack index");

		ALLOC_GROW(in_pack.array,
			   in_pack.nr + p->num_objects,
			   in_pack.alloc);

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			o = lookup_unknown_object(sha1);
			if (!(o->flags & OBJECT_ADDED))
				mark_in_pack_object(o, p, &in_pack);
			o->flags |= OBJECT_ADDED;
		}
	}

	if (in_pack.nr) {
		qsort(in_pack.array, in_pack.nr, sizeof(in_pack.array[0]),
		      ofscmp);
		for (i = 0; i < in_pack.nr; i++) {
			struct object *o = in_pack.array[i].object;
			add_object_entry(o->sha1, o->type, "", 0);
		}
	}
	free(in_pack.array);
}

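/*
 * Check whether the object is available from a kept or non-local pack.
 * last_found caches the pack that matched last time ((void *)1 means
 * "no previous hit"); the scan starts there, wraps around to the head
 * of the pack list, and stops before revisiting the cached pack.
 */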
static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
{
	static struct packed_git *last_found = (void *)1;
	struct packed_git *p;

	p = (last_found != (void *)1) ? last_found : packed_git;

	while (p) {
		if ((!p->pack_local || p->pack_keep) &&
		    find_pack_entry_one(sha1, p)) {
			last_found = p;
			return 1;
		}
		if (p == last_found)
			p = packed_git;
		else
			p = p->next;
		if (p == last_found)
			p = p->next;
	}
	return 0;
}

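/*
 * Used by --unpack-unreachable: turn objects that sit in local non-kept
 * packs, but are neither going into the new pack nor available from a
 * kept or non-local pack, back into loose objects.
 */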
static void loosen_unused_packed_objects(struct rev_info *revs)
{
	struct packed_git *p;
	uint32_t i;
	const unsigned char *sha1;

	for (p = packed_git; p; p = p->next) {
		if (!p->pack_local || p->pack_keep)
			continue;

		if (open_pack_index(p))
			die("cannot open pack index");

		for (i = 0; i < p->num_objects; i++) {
			sha1 = nth_packed_object_sha1(p, i);
			if (!locate_object_entry(sha1) &&
			    !has_sha1_pack_kept_or_nonlocal(sha1))
				if (force_object_loose(sha1, p->mtime))
					die("unable to force loose object");
		}
	}
}

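/*
 * Run the internal revision walker.  The rev-list style arguments
 * collected during option parsing arrive in av; additional revs are
 * read from stdin, one per line, until EOF or an empty line, with
 * "--not" toggling the UNINTERESTING flag.
 */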
static void get_object_list(int ac, const char **av)
{
	struct rev_info revs;
	char line[1000];
	int flags = 0;

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	while (fgets(line, sizeof(line), stdin) != NULL) {
		int len = strlen(line);
		if (len && line[len - 1] == '\n')
			line[--len] = 0;
		if (!len)
			break;
		if (*line == '-') {
			if (!strcmp(line, "--not")) {
				flags ^= UNINTERESTING;
				continue;
			}
			die("not a rev '%s'", line);
		}
		if (handle_revision_arg(line, &revs, flags, 1))
			die("bad revision '%s'", line);
	}

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(revs.commits, &revs, show_edge);
	traverse_commit_list(&revs, show_commit, show_object, NULL);

	if (keep_unreachable)
		add_objects_in_unpacked_packs(&revs);
	if (unpack_unreachable)
		loosen_unused_packed_objects(&revs);
}

int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
	int use_internal_rev_list = 0;
	int thin = 0;
	int all_progress_implied = 0;
	uint32_t i;
	const char **rp_av;
	int rp_ac_alloc = 64;
	int rp_ac;

	read_replace_refs = 0;

	rp_av = xcalloc(rp_ac_alloc, sizeof(*rp_av));

	rp_av[0] = "pack-objects";
	rp_av[1] = "--objects"; /* --thin will make it --objects-edge */
	rp_ac = 2;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);
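	/* If pack.compression was not given, fall back to core.compression */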
	if (!pack_compression_seen && core_compression_seen)
		pack_compression_level = core_compression_level;

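	/* Progress defaults to on only when stderr is a terminal */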
	progress = isatty(2);
	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		if (*arg != '-')
			break;

		if (!strcmp("--non-empty", arg)) {
			non_empty = 1;
			continue;
		}
		if (!strcmp("--local", arg)) {
			local = 1;
			continue;
		}
		if (!strcmp("--incremental", arg)) {
			incremental = 1;
			continue;
		}
		if (!strcmp("--honor-pack-keep", arg)) {
			ignore_packed_keep = 1;
			continue;
		}
		if (!prefixcmp(arg, "--compression=")) {
			char *end;
			int level = strtoul(arg+14, &end, 0);
			if (!arg[14] || *end)
				usage(pack_usage);
			if (level == -1)
				level = Z_DEFAULT_COMPRESSION;
			else if (level < 0 || level > Z_BEST_COMPRESSION)
				die("bad pack compression level %d", level);
			pack_compression_level = level;
			continue;
		}
		if (!prefixcmp(arg, "--max-pack-size=")) {
			pack_size_limit_cfg = 0;
			if (!git_parse_ulong(arg+16, &pack_size_limit))
				usage(pack_usage);
			continue;
		}
		if (!prefixcmp(arg, "--window=")) {
			char *end;
			window = strtoul(arg+9, &end, 0);
			if (!arg[9] || *end)
				usage(pack_usage);
			continue;
		}
		if (!prefixcmp(arg, "--window-memory=")) {
			if (!git_parse_ulong(arg+16, &window_memory_limit))
				usage(pack_usage);
			continue;
		}
		if (!prefixcmp(arg, "--threads=")) {
			char *end;
			delta_search_threads = strtoul(arg+10, &end, 0);
			if (!arg[10] || *end || delta_search_threads < 0)
				usage(pack_usage);
#ifdef NO_PTHREADS
			if (delta_search_threads != 1)
				warning("no threads support, "
					"ignoring %s", arg);
#endif
			continue;
		}
		if (!prefixcmp(arg, "--depth=")) {
			char *end;
			depth = strtoul(arg+8, &end, 0);
			if (!arg[8] || *end)
				usage(pack_usage);
			continue;
		}
		if (!strcmp("--progress", arg)) {
			progress = 1;
			continue;
		}
		if (!strcmp("--all-progress", arg)) {
			progress = 2;
			continue;
		}
		if (!strcmp("--all-progress-implied", arg)) {
			all_progress_implied = 1;
			continue;
		}
		if (!strcmp("-q", arg)) {
			progress = 0;
			continue;
		}
		if (!strcmp("--no-reuse-delta", arg)) {
			reuse_delta = 0;
			continue;
		}
		if (!strcmp("--no-reuse-object", arg)) {
			reuse_object = reuse_delta = 0;
			continue;
		}
		if (!strcmp("--delta-base-offset", arg)) {
			allow_ofs_delta = 1;
			continue;
		}
		if (!strcmp("--stdout", arg)) {
			pack_to_stdout = 1;
			continue;
		}
		if (!strcmp("--revs", arg)) {
			use_internal_rev_list = 1;
			continue;
		}
		if (!strcmp("--keep-unreachable", arg)) {
			keep_unreachable = 1;
			continue;
		}
		if (!strcmp("--unpack-unreachable", arg)) {
			unpack_unreachable = 1;
			continue;
		}
		if (!strcmp("--include-tag", arg)) {
			include_tag = 1;
			continue;
		}
		if (!strcmp("--unpacked", arg) ||
		    !strcmp("--reflog", arg) ||
		    !strcmp("--all", arg)) {
			use_internal_rev_list = 1;
			if (rp_ac >= rp_ac_alloc - 1) {
				rp_ac_alloc = alloc_nr(rp_ac_alloc);
				rp_av = xrealloc(rp_av,
						 rp_ac_alloc * sizeof(*rp_av));
			}
			rp_av[rp_ac++] = arg;
			continue;
		}
		if (!strcmp("--thin", arg)) {
			use_internal_rev_list = 1;
			thin = 1;
			rp_av[1] = "--objects-edge";
			continue;
		}
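		/* --index-version=<version>[,<offset-limit>] */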
		if (!prefixcmp(arg, "--index-version=")) {
			char *c;
			pack_idx_opts.version = strtoul(arg + 16, &c, 10);
			if (pack_idx_opts.version > 2)
				die("bad %s", arg);
			if (*c == ',')
				pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
			if (*c || pack_idx_opts.off32_limit & 0x80000000)
				die("bad %s", arg);
			continue;
		}
		if (!strcmp(arg, "--keep-true-parents")) {
			grafts_replace_parents = 0;
			continue;
		}
		usage(pack_usage);
	}

	/*
	 * Traditionally "pack-objects [options] base extra" failed;
	 * we would, however, want to take the refs that would have
	 * been given to the upstream rev-list ourselves, which means
	 * we somehow need to say what the base name is.  So the
	 * syntax is:
	 *
	 *	pack-objects [options] base <refs...>
	 *
	 * In other words, we treat the first non-option argument as
	 * the base_name and send everything else to the internal
	 * revision walker.
	 */

	if (!pack_to_stdout)
		base_name = argv[i++];

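	/* Exactly one of --stdout and a base name must have been given */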
	if (pack_to_stdout != !base_name)
		usage(pack_usage);

	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer.");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack.");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible.");

	if (progress && all_progress_implied)
		progress = 2;

	prepare_packed_git();

	if (progress)
		progress_state = start_progress("Counting objects", 0);
	if (!use_internal_rev_list)
		read_object_list_from_stdin();
	else {
		rp_av[rp_ac] = NULL;
		get_object_list(rp_ac, rp_av);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}