#include "builtin.h"
#include "config.h"
#include "delta.h"
#include "pack.h"
#include "csum-file.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "progress.h"
#include "fsck.h"
#include "exec-cmd.h"
#include "streaming.h"
#include "thread-utils.h"
#include "packfile.h"
#include "object-store.h"

static const char index_pack_usage[] =
"git index-pack [-v] [-o <index-file>] [--keep | --keep=<msg>] [--verify] [--strict] (<pack-file> | --stdin [--fix-thin] [<pack-file>])";

struct object_entry {
	struct pack_idx_entry idx;
	unsigned long size;
	unsigned char hdr_size;
	signed char type;
	signed char real_type;
};

struct object_stat {
	unsigned delta_depth;
	int base_object_no;
};

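/*
 * Each undeltified object that still has unresolved children forms a node
 * in a chain of base_data structs, linked through the base/child pointers.
 * The expanded contents ("data") are kept only while children remain to be
 * resolved, and may be dropped again by prune_base_data() once the
 * per-thread cache grows past delta_base_cache_limit.
 */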
struct base_data {
	struct base_data *base;
	struct base_data *child;
	struct object_entry *obj;
	void *data;
	unsigned long size;
	int ref_first, ref_last;
	int ofs_first, ofs_last;
};

struct thread_local {
	pthread_t thread;
	struct base_data *base_cache;
	size_t base_cache_used;
	int pack_fd;
};

/* Remember to update object flag allocation in object.h */
#define FLAG_LINK (1u<<20)
#define FLAG_CHECKED (1u<<21)

struct ofs_delta_entry {
	off_t offset;
	int obj_no;
};

struct ref_delta_entry {
	struct object_id oid;
	int obj_no;
};

static struct object_entry *objects;
static struct object_stat *obj_stat;
static struct ofs_delta_entry *ofs_deltas;
static struct ref_delta_entry *ref_deltas;
static struct thread_local nothread_data;
static int nr_objects;
static int nr_ofs_deltas;
static int nr_ref_deltas;
static int ref_deltas_alloc;
static int nr_resolved_deltas;
static int nr_threads;

static int from_stdin;
static int strict;
static int do_fsck_object;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
static int verbose;
static int show_resolving_progress;
static int show_stat;
static int check_self_contained_and_connected;

static struct progress *progress;

/* We always read in 4kB chunks. */
static unsigned char input_buffer[4096];
static unsigned int input_offset, input_len;
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
static git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;

static struct thread_local *thread_data;
static int nr_dispatched;
static int threads_active;

static pthread_mutex_t read_mutex;
#define read_lock() lock_mutex(&read_mutex)
#define read_unlock() unlock_mutex(&read_mutex)

static pthread_mutex_t counter_mutex;
#define counter_lock() lock_mutex(&counter_mutex)
#define counter_unlock() unlock_mutex(&counter_mutex)

static pthread_mutex_t work_mutex;
#define work_lock() lock_mutex(&work_mutex)
#define work_unlock() unlock_mutex(&work_mutex)

static pthread_mutex_t deepest_delta_mutex;
#define deepest_delta_lock() lock_mutex(&deepest_delta_mutex)
#define deepest_delta_unlock() unlock_mutex(&deepest_delta_mutex)

static pthread_mutex_t type_cas_mutex;
#define type_cas_lock() lock_mutex(&type_cas_mutex)
#define type_cas_unlock() unlock_mutex(&type_cas_mutex)

static pthread_key_t key;

static inline void lock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_lock(mutex);
}

static inline void unlock_mutex(pthread_mutex_t *mutex)
{
	if (threads_active)
		pthread_mutex_unlock(mutex);
}

/*
 * Mutex and conditional variable can't be statically-initialized on Windows.
 */
static void init_thread(void)
{
	int i;
	init_recursive_mutex(&read_mutex);
	pthread_mutex_init(&counter_mutex, NULL);
	pthread_mutex_init(&work_mutex, NULL);
	pthread_mutex_init(&type_cas_mutex, NULL);
	if (show_stat)
		pthread_mutex_init(&deepest_delta_mutex, NULL);
	pthread_key_create(&key, NULL);
	thread_data = xcalloc(nr_threads, sizeof(*thread_data));
	for (i = 0; i < nr_threads; i++) {
		thread_data[i].pack_fd = open(curr_pack, O_RDONLY);
		if (thread_data[i].pack_fd == -1)
			die_errno(_("unable to open %s"), curr_pack);
	}

	threads_active = 1;
}

static void cleanup_thread(void)
{
	int i;
	if (!threads_active)
		return;
	threads_active = 0;
	pthread_mutex_destroy(&read_mutex);
	pthread_mutex_destroy(&counter_mutex);
	pthread_mutex_destroy(&work_mutex);
	pthread_mutex_destroy(&type_cas_mutex);
	if (show_stat)
		pthread_mutex_destroy(&deepest_delta_mutex);
	for (i = 0; i < nr_threads; i++)
		close(thread_data[i].pack_fd);
	pthread_key_delete(key);
	free(thread_data);
}

static int mark_link(struct object *obj, int type, void *data, struct fsck_options *options)
{
	if (!obj)
		return -1;

	if (type != OBJ_ANY && obj->type != type)
		die(_("object type mismatch at %s"), oid_to_hex(&obj->oid));

	obj->flags |= FLAG_LINK;
	return 0;
}

/* The content of each linked object must have been checked
   or it must be already present in the object database */
static unsigned check_object(struct object *obj)
{
	if (!obj)
		return 0;

	if (!(obj->flags & FLAG_LINK))
		return 0;

	if (!(obj->flags & FLAG_CHECKED)) {
		unsigned long size;
		int type = oid_object_info(the_repository, &obj->oid, &size);
		if (type <= 0)
			die(_("did not receive expected object %s"),
			    oid_to_hex(&obj->oid));
		if (type != obj->type)
			die(_("object %s: expected type %s, found %s"),
			    oid_to_hex(&obj->oid),
			    type_name(obj->type), type_name(type));
		obj->flags |= FLAG_CHECKED;
		return 1;
	}

	return 0;
}

static unsigned check_objects(void)
{
	unsigned i, max, foreign_nr = 0;

	max = get_max_object_index();
	for (i = 0; i < max; i++)
		foreign_nr += check_object(get_indexed_object(i));
	return foreign_nr;
}


/* Discard current buffer used content. */
static void flush(void)
{
	if (input_offset) {
		if (output_fd >= 0)
			write_or_die(output_fd, input_buffer, input_offset);
		the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
		memmove(input_buffer, input_buffer + input_offset, input_len);
		input_offset = 0;
	}
}

/*
 * Make sure at least "min" bytes are available in the buffer, and
 * return the pointer to the buffer.
 */
static void *fill(int min)
{
	if (min <= input_len)
		return input_buffer + input_offset;
	if (min > sizeof(input_buffer))
		die(Q_("cannot fill %d byte",
		       "cannot fill %d bytes",
		       min),
		    min);
	flush();
	do {
		ssize_t ret = xread(input_fd, input_buffer + input_len,
				    sizeof(input_buffer) - input_len);
		if (ret <= 0) {
			if (!ret)
				die(_("early EOF"));
			die_errno(_("read error on input"));
		}
		input_len += ret;
		if (from_stdin)
			display_throughput(progress, consumed_bytes + input_len);
	} while (input_len < min);
	return input_buffer;
}

static void use(int bytes)
{
	if (bytes > input_len)
		die(_("used more bytes than were available"));
	input_crc32 = crc32(input_crc32, input_buffer + input_offset, bytes);
	input_len -= bytes;
	input_offset += bytes;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(consumed_bytes, bytes))
		die(_("pack too large for current definition of off_t"));
	consumed_bytes += bytes;
	if (max_input_size && consumed_bytes > max_input_size)
		die(_("pack exceeds maximum allowed size"));
}
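
/*
 * Typical usage of the two helpers above: peek at the next N bytes with
 * fill(N), parse them, then account for them with use(N), e.g.
 *
 *	p = fill(1);
 *	c = *p;
 *	use(1);
 *
 * fill() refreshes input_buffer from input_fd as needed; use() advances the
 * read position and feeds the consumed bytes into the running CRC32 of the
 * current object.
 */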

static const char *open_pack_file(const char *pack_name)
{
	if (from_stdin) {
		input_fd = 0;
		if (!pack_name) {
			struct strbuf tmp_file = STRBUF_INIT;
			output_fd = odb_mkstemp(&tmp_file,
						"pack/tmp_pack_XXXXXX");
			pack_name = strbuf_detach(&tmp_file, NULL);
		} else {
			output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600);
			if (output_fd < 0)
				die_errno(_("unable to create '%s'"), pack_name);
		}
		nothread_data.pack_fd = output_fd;
	} else {
		input_fd = open(pack_name, O_RDONLY);
		if (input_fd < 0)
			die_errno(_("cannot open packfile '%s'"), pack_name);
		output_fd = -1;
		nothread_data.pack_fd = input_fd;
	}
	the_hash_algo->init_fn(&input_ctx);
	return pack_name;
}

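/*
 * A pack stream starts with a 12-byte header: the 4-byte signature "PACK",
 * a 4-byte version number and a 4-byte object count, all in network byte
 * order.  For example, a version-2 pack holding three objects begins with
 * "PACK", 00 00 00 02, 00 00 00 03.
 */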
static void parse_pack_header(void)
{
	struct pack_header *hdr = fill(sizeof(struct pack_header));

	/* Header consistency check */
	if (hdr->hdr_signature != htonl(PACK_SIGNATURE))
		die(_("pack signature mismatch"));
	if (!pack_version_ok(hdr->hdr_version))
		die(_("pack version %"PRIu32" unsupported"),
		    ntohl(hdr->hdr_version));

	nr_objects = ntohl(hdr->hdr_entries);
	use(sizeof(struct pack_header));
}

static NORETURN void bad_object(off_t offset, const char *format,
				...) __attribute__((format (printf, 2, 3)));

static NORETURN void bad_object(off_t offset, const char *format, ...)
{
	va_list params;
	char buf[1024];

	va_start(params, format);
	vsnprintf(buf, sizeof(buf), format, params);
	va_end(params);
	die(_("pack has bad object at offset %"PRIuMAX": %s"),
	    (uintmax_t)offset, buf);
}

static inline struct thread_local *get_thread_data(void)
{
	if (HAVE_THREADS) {
		if (threads_active)
			return pthread_getspecific(key);
		assert(!threads_active &&
		       "This should only be reached when all threads are gone");
	}
	return &nothread_data;
}

static void set_thread_data(struct thread_local *data)
{
	if (threads_active)
		pthread_setspecific(key, data);
}

static struct base_data *alloc_base_data(void)
{
	struct base_data *base = xcalloc(1, sizeof(struct base_data));
	base->ref_last = -1;
	base->ofs_last = -1;
	return base;
}

static void free_base_data(struct base_data *c)
{
	if (c->data) {
		FREE_AND_NULL(c->data);
		get_thread_data()->base_cache_used -= c->size;
	}
}

static void prune_base_data(struct base_data *retain)
{
	struct base_data *b;
	struct thread_local *data = get_thread_data();
	for (b = data->base_cache;
	     data->base_cache_used > delta_base_cache_limit && b;
	     b = b->child) {
		if (b->data && b != retain)
			free_base_data(b);
	}
}

static void link_base_data(struct base_data *base, struct base_data *c)
{
	if (base)
		base->child = c;
	else
		get_thread_data()->base_cache = c;

	c->base = base;
	c->child = NULL;
	if (c->data)
		get_thread_data()->base_cache_used += c->size;
	prune_base_data(c);
}

static void unlink_base_data(struct base_data *c)
{
	struct base_data *base = c->base;
	if (base)
		base->child = NULL;
	else
		get_thread_data()->base_cache = NULL;
	free_base_data(c);
}

static int is_delta_type(enum object_type type)
{
	return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
}

static void *unpack_entry_data(off_t offset, unsigned long size,
			       enum object_type type, struct object_id *oid)
{
	static char fixed_buf[8192];
	int status;
	git_zstream stream;
	void *buf;
	git_hash_ctx c;
	char hdr[32];
	int hdrlen;

	if (!is_delta_type(type)) {
		hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX,
				   type_name(type),(uintmax_t)size) + 1;
		the_hash_algo->init_fn(&c);
		the_hash_algo->update_fn(&c, hdr, hdrlen);
	} else
		oid = NULL;
	if (type == OBJ_BLOB && size > big_file_threshold)
		buf = fixed_buf;
	else
		buf = xmallocz(size);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = buf;
	stream.avail_out = buf == fixed_buf ? sizeof(fixed_buf) : size;

	do {
		unsigned char *last_out = stream.next_out;
		stream.next_in = fill(1);
		stream.avail_in = input_len;
		status = git_inflate(&stream, 0);
		use(input_len - stream.avail_in);
		if (oid)
			the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
		if (buf == fixed_buf) {
			stream.next_out = buf;
			stream.avail_out = sizeof(fixed_buf);
		}
	} while (status == Z_OK);
	if (stream.total_out != size || status != Z_STREAM_END)
		bad_object(offset, _("inflate returned %d"), status);
	git_inflate_end(&stream);
	if (oid)
		the_hash_algo->final_fn(oid->hash, &c);
	return buf == fixed_buf ? NULL : buf;
}

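/*
 * Each pack entry starts with a variable-length header: the low four bits
 * of the first byte hold the low bits of the size, the next three bits the
 * object type, and the MSB says whether more size bytes follow (seven bits
 * each, little-endian).  For instance, the bytes 0xb5 0x03 describe a blob
 * (type 3) of size 5 + (3 << 4) = 53.  OBJ_REF_DELTA entries are followed
 * by the base object id, and OBJ_OFS_DELTA entries by a variable-length
 * negative offset to the base within the same pack.
 */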
static void *unpack_raw_entry(struct object_entry *obj,
			      off_t *ofs_offset,
			      struct object_id *ref_oid,
			      struct object_id *oid)
{
	unsigned char *p;
	unsigned long size, c;
	off_t base_offset;
	unsigned shift;
	void *data;

	obj->idx.offset = consumed_bytes;
	input_crc32 = crc32(0, NULL, 0);

	p = fill(1);
	c = *p;
	use(1);
	obj->type = (c >> 4) & 7;
	size = (c & 15);
	shift = 4;
	while (c & 0x80) {
		p = fill(1);
		c = *p;
		use(1);
		size += (c & 0x7f) << shift;
		shift += 7;
	}
	obj->size = size;

	switch (obj->type) {
	case OBJ_REF_DELTA:
		hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
		use(the_hash_algo->rawsz);
		break;
	case OBJ_OFS_DELTA:
		p = fill(1);
		c = *p;
		use(1);
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			if (!base_offset || MSB(base_offset, 7))
				bad_object(obj->idx.offset, _("offset value overflow for delta base object"));
			p = fill(1);
			c = *p;
			use(1);
			base_offset = (base_offset << 7) + (c & 127);
		}
		*ofs_offset = obj->idx.offset - base_offset;
		if (*ofs_offset <= 0 || *ofs_offset >= obj->idx.offset)
			bad_object(obj->idx.offset, _("delta base offset is out of bound"));
		break;
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_BLOB:
	case OBJ_TAG:
		break;
	default:
		bad_object(obj->idx.offset, _("unknown object type %d"), obj->type);
	}
	obj->hdr_size = consumed_bytes - obj->idx.offset;

	data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
	obj->idx.crc32 = input_crc32;
	return data;
}

static void *unpack_data(struct object_entry *obj,
			 int (*consume)(const unsigned char *, unsigned long, void *),
			 void *cb_data)
{
	off_t from = obj[0].idx.offset + obj[0].hdr_size;
	off_t len = obj[1].idx.offset - from;
	unsigned char *data, *inbuf;
	git_zstream stream;
	int status;

	data = xmallocz(consume ? 64*1024 : obj->size);
	inbuf = xmalloc((len < 64*1024) ? (int)len : 64*1024);

	memset(&stream, 0, sizeof(stream));
	git_inflate_init(&stream);
	stream.next_out = data;
	stream.avail_out = consume ? 64*1024 : obj->size;

	do {
		ssize_t n = (len < 64*1024) ? (ssize_t)len : 64*1024;
		n = xpread(get_thread_data()->pack_fd, inbuf, n, from);
		if (n < 0)
			die_errno(_("cannot pread pack file"));
		if (!n)
			die(Q_("premature end of pack file, %"PRIuMAX" byte missing",
			       "premature end of pack file, %"PRIuMAX" bytes missing",
			       (unsigned int)len),
			    (uintmax_t)len);
		from += n;
		len -= n;
		stream.next_in = inbuf;
		stream.avail_in = n;
		if (!consume)
			status = git_inflate(&stream, 0);
		else {
			do {
				status = git_inflate(&stream, 0);
				if (consume(data, stream.next_out - data, cb_data)) {
					free(inbuf);
					free(data);
					return NULL;
				}
				stream.next_out = data;
				stream.avail_out = 64*1024;
			} while (status == Z_OK && stream.avail_in);
		}
	} while (len && status == Z_OK && !stream.avail_in);

	/* This has been inflated OK when first encountered, so... */
	if (status != Z_STREAM_END || stream.total_out != obj->size)
		die(_("serious inflate inconsistency"));

	git_inflate_end(&stream);
	free(inbuf);
	if (consume) {
		FREE_AND_NULL(data);
	}
	return data;
}

static void *get_data_from_pack(struct object_entry *obj)
{
	return unpack_data(obj, NULL, NULL);
}

static int compare_ofs_delta_bases(off_t offset1, off_t offset2,
				   enum object_type type1,
				   enum object_type type2)
{
	int cmp = type1 - type2;
	if (cmp)
		return cmp;
	return offset1 < offset2 ? -1 :
	       offset1 > offset2 ?  1 :
	       0;
}

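/*
 * Binary search over the ofs_deltas[] array, which resolve_deltas() has
 * sorted by base offset.  Returns the index of a matching entry, or
 * -first-1 (always negative) when no delta uses this base, in the usual
 * "negated insertion point" convention.
 */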
static int find_ofs_delta(const off_t offset, enum object_type type)
{
	int first = 0, last = nr_ofs_deltas;

	while (first < last) {
		int next = first + (last - first) / 2;
		struct ofs_delta_entry *delta = &ofs_deltas[next];
		int cmp;

		cmp = compare_ofs_delta_bases(offset, delta->offset,
					      type, objects[delta->obj_no].type);
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

static void find_ofs_delta_children(off_t offset,
				    int *first_index, int *last_index,
				    enum object_type type)
{
	int first = find_ofs_delta(offset, type);
	int last = first;
	int end = nr_ofs_deltas - 1;

	if (first < 0) {
		*first_index = 0;
		*last_index = -1;
		return;
	}
	while (first > 0 && ofs_deltas[first - 1].offset == offset)
		--first;
	while (last < end && ofs_deltas[last + 1].offset == offset)
		++last;
	*first_index = first;
	*last_index = last;
}

static int compare_ref_delta_bases(const struct object_id *oid1,
				   const struct object_id *oid2,
				   enum object_type type1,
				   enum object_type type2)
{
	int cmp = type1 - type2;
	if (cmp)
		return cmp;
	return oidcmp(oid1, oid2);
}

static int find_ref_delta(const struct object_id *oid, enum object_type type)
{
	int first = 0, last = nr_ref_deltas;

	while (first < last) {
		int next = first + (last - first) / 2;
		struct ref_delta_entry *delta = &ref_deltas[next];
		int cmp;

		cmp = compare_ref_delta_bases(oid, &delta->oid,
					      type, objects[delta->obj_no].type);
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

static void find_ref_delta_children(const struct object_id *oid,
				    int *first_index, int *last_index,
				    enum object_type type)
{
	int first = find_ref_delta(oid, type);
	int last = first;
	int end = nr_ref_deltas - 1;

	if (first < 0) {
		*first_index = 0;
		*last_index = -1;
		return;
	}
	while (first > 0 && oideq(&ref_deltas[first - 1].oid, oid))
		--first;
	while (last < end && oideq(&ref_deltas[last + 1].oid, oid))
		++last;
	*first_index = first;
	*last_index = last;
}

struct compare_data {
	struct object_entry *entry;
	struct git_istream *st;
	unsigned char *buf;
	unsigned long buf_size;
};

static int compare_objects(const unsigned char *buf, unsigned long size,
			   void *cb_data)
{
	struct compare_data *data = cb_data;

	if (data->buf_size < size) {
		free(data->buf);
		data->buf = xmalloc(size);
		data->buf_size = size;
	}

	while (size) {
		ssize_t len = read_istream(data->st, data->buf, size);
		if (len == 0)
			die(_("SHA1 COLLISION FOUND WITH %s !"),
			    oid_to_hex(&data->entry->idx.oid));
		if (len < 0)
			die(_("unable to read %s"),
			    oid_to_hex(&data->entry->idx.oid));
		if (memcmp(buf, data->buf, len))
			die(_("SHA1 COLLISION FOUND WITH %s !"),
			    oid_to_hex(&data->entry->idx.oid));
		size -= len;
		buf += len;
	}
	return 0;
}

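/*
 * Streaming variant of the collision check, used for blobs above
 * big_file_threshold: instead of loading both copies into memory, the
 * existing object is read through an istream and compared chunk by chunk
 * against the data re-inflated from the pack.  Returns 0 if the comparison
 * was performed, -1 if the caller has to fall back to the in-core check in
 * sha1_object().
 */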
static int check_collision(struct object_entry *entry)
{
	struct compare_data data;
	enum object_type type;
	unsigned long size;

	if (entry->size <= big_file_threshold || entry->type != OBJ_BLOB)
		return -1;

	memset(&data, 0, sizeof(data));
	data.entry = entry;
	data.st = open_istream(&entry->idx.oid, &type, &size, NULL);
	if (!data.st)
		return -1;
	if (size != entry->size || type != entry->type)
		die(_("SHA1 COLLISION FOUND WITH %s !"),
		    oid_to_hex(&entry->idx.oid));
	unpack_data(entry, compare_objects, &data);
	close_istream(data.st);
	free(data.buf);
	return 0;
}

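/*
 * Hash, collision-check and (optionally) fsck one object.  "data" may be
 * NULL for blobs larger than big_file_threshold, in which case "obj_entry"
 * is used to re-read the contents from the pack on demand.
 */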
static void sha1_object(const void *data, struct object_entry *obj_entry,
			unsigned long size, enum object_type type,
			const struct object_id *oid)
{
	void *new_data = NULL;
	int collision_test_needed = 0;

	assert(data || obj_entry);

	if (startup_info->have_repository) {
		read_lock();
		collision_test_needed =
			has_sha1_file_with_flags(oid->hash, OBJECT_INFO_QUICK);
		read_unlock();
	}

	if (collision_test_needed && !data) {
		read_lock();
		if (!check_collision(obj_entry))
			collision_test_needed = 0;
		read_unlock();
	}
	if (collision_test_needed) {
		void *has_data;
		enum object_type has_type;
		unsigned long has_size;
		read_lock();
		has_type = oid_object_info(the_repository, oid, &has_size);
		if (has_type < 0)
			die(_("cannot read existing object info %s"), oid_to_hex(oid));
		if (has_type != type || has_size != size)
			die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
		has_data = read_object_file(oid, &has_type, &has_size);
		read_unlock();
		if (!data)
			data = new_data = get_data_from_pack(obj_entry);
		if (!has_data)
			die(_("cannot read existing object %s"), oid_to_hex(oid));
		if (size != has_size || type != has_type ||
		    memcmp(data, has_data, size) != 0)
			die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
		free(has_data);
	}

	if (strict || do_fsck_object) {
		read_lock();
		if (type == OBJ_BLOB) {
			struct blob *blob = lookup_blob(the_repository, oid);
			if (blob)
				blob->object.flags |= FLAG_CHECKED;
			else
				die(_("invalid blob object %s"), oid_to_hex(oid));
			if (do_fsck_object &&
			    fsck_object(&blob->object, (void *)data, size, &fsck_options))
				die(_("fsck error in packed object"));
		} else {
			struct object *obj;
			int eaten;
			void *buf = (void *) data;

			assert(data && "data can only be NULL for large _blobs_");

			/*
			 * we do not need to free the memory here, as the
			 * buf is deleted by the caller.
			 */
			obj = parse_object_buffer(the_repository, oid, type,
						  size, buf,
						  &eaten);
			if (!obj)
				die(_("invalid %s"), type_name(type));
			if (do_fsck_object &&
			    fsck_object(obj, buf, size, &fsck_options))
				die(_("fsck error in packed object"));
			if (strict && fsck_walk(obj, NULL, &fsck_options))
				die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));

			if (obj->type == OBJ_TREE) {
				struct tree *item = (struct tree *) obj;
				item->buffer = NULL;
				obj->parsed = 0;
			}
			if (obj->type == OBJ_COMMIT) {
				struct commit *commit = (struct commit *) obj;
				if (detach_commit_buffer(commit, NULL) != data)
					BUG("parse_object_buffer transmogrified our buffer");
			}
			obj->flags |= FLAG_CHECKED;
		}
		read_unlock();
	}

	free(new_data);
}

/*
 * This function is part of find_unresolved_deltas(). There are two
 * walkers going in opposite directions.
 *
 * The first one in find_unresolved_deltas() traverses down from the
 * parent node to its children, expanding nodes along the way. However,
 * memory for expanded nodes is limited by delta_base_cache_limit, so at
 * some point the parent node's expanded content may be freed.
 *
 * The second walker is this function, which goes from the current node
 * up to the top parent if necessary to expand the node. In the normal
 * situation its parent node is already expanded, so it only needs to
 * apply its delta.
 *
 * In the worst case scenario, the parent node is no longer expanded
 * because we ran out of delta_base_cache_limit; we have to re-expand
 * the parents, possibly all the way up to the top base.
 *
 * All expanded objects here are subject to being freed if we exceed
 * delta_base_cache_limit; just as in find_unresolved_deltas(), we only
 * need to make sure the last node is not freed.
 */
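/*
 * Example of the worst case: for a chain A <- B <- C where the contents of
 * A and B were already evicted from the cache, asking for C's base walks up
 * to A, re-reads A from the pack, re-applies B's delta, and only then can
 * C's delta be applied.
 */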
static void *get_base_data(struct base_data *c)
{
	if (!c->data) {
		struct object_entry *obj = c->obj;
		struct base_data **delta = NULL;
		int delta_nr = 0, delta_alloc = 0;

		while (is_delta_type(c->obj->type) && !c->data) {
			ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
			delta[delta_nr++] = c;
			c = c->base;
		}
		if (!delta_nr) {
			c->data = get_data_from_pack(obj);
			c->size = obj->size;
			get_thread_data()->base_cache_used += c->size;
			prune_base_data(c);
		}
		for (; delta_nr > 0; delta_nr--) {
			void *base, *raw;
			c = delta[delta_nr - 1];
			obj = c->obj;
			base = get_base_data(c->base);
			raw = get_data_from_pack(obj);
			c->data = patch_delta(
				base, c->base->size,
				raw, obj->size,
				&c->size);
			free(raw);
			if (!c->data)
				bad_object(obj->idx.offset, _("failed to apply delta"));
			get_thread_data()->base_cache_used += c->size;
			prune_base_data(c);
		}
		free(delta);
	}
	return c->data;
}

static void resolve_delta(struct object_entry *delta_obj,
			  struct base_data *base, struct base_data *result)
{
	void *base_data, *delta_data;

	if (show_stat) {
		int i = delta_obj - objects;
		int j = base->obj - objects;
		obj_stat[i].delta_depth = obj_stat[j].delta_depth + 1;
		deepest_delta_lock();
		if (deepest_delta < obj_stat[i].delta_depth)
			deepest_delta = obj_stat[i].delta_depth;
		deepest_delta_unlock();
		obj_stat[i].base_object_no = j;
	}
	delta_data = get_data_from_pack(delta_obj);
	base_data = get_base_data(base);
	result->obj = delta_obj;
	result->data = patch_delta(base_data, base->size,
				   delta_data, delta_obj->size, &result->size);
	free(delta_data);
	if (!result->data)
		bad_object(delta_obj->idx.offset, _("failed to apply delta"));
	hash_object_file(result->data, result->size,
			 type_name(delta_obj->real_type), &delta_obj->idx.oid);
	sha1_object(result->data, NULL, result->size, delta_obj->real_type,
		    &delta_obj->idx.oid);
	counter_lock();
	nr_resolved_deltas++;
	counter_unlock();
}

/*
 * Standard boolean compare-and-swap: atomically check whether "*type" is
 * "want"; if so, swap in "set" and return true. Otherwise, leave it untouched
 * and return false.
 */
static int compare_and_swap_type(signed char *type,
				 enum object_type want,
				 enum object_type set)
{
	enum object_type old;

	type_cas_lock();
	old = *type;
	if (old == want)
		*type = set;
	type_cas_unlock();

	return old == want;
}

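/*
 * Resolve one child delta of "base" (ref deltas first, then ofs deltas) and
 * return its base_data so the caller can descend into it, or NULL once no
 * children are left.  Updating real_type through compare_and_swap_type()
 * keeps the claim of a REF_DELTA child atomic with respect to the other
 * resolving threads.
 */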
static struct base_data *find_unresolved_deltas_1(struct base_data *base,
						  struct base_data *prev_base)
{
	if (base->ref_last == -1 && base->ofs_last == -1) {
		find_ref_delta_children(&base->obj->idx.oid,
					&base->ref_first, &base->ref_last,
					OBJ_REF_DELTA);

		find_ofs_delta_children(base->obj->idx.offset,
					&base->ofs_first, &base->ofs_last,
					OBJ_OFS_DELTA);

		if (base->ref_last == -1 && base->ofs_last == -1) {
			free(base->data);
			return NULL;
		}

		link_base_data(prev_base, base);
	}

	if (base->ref_first <= base->ref_last) {
		struct object_entry *child = objects + ref_deltas[base->ref_first].obj_no;
		struct base_data *result = alloc_base_data();

		if (!compare_and_swap_type(&child->real_type, OBJ_REF_DELTA,
					   base->obj->real_type))
			BUG("child->real_type != OBJ_REF_DELTA");

		resolve_delta(child, base, result);
		if (base->ref_first == base->ref_last && base->ofs_last == -1)
			free_base_data(base);

		base->ref_first++;
		return result;
	}

	if (base->ofs_first <= base->ofs_last) {
		struct object_entry *child = objects + ofs_deltas[base->ofs_first].obj_no;
		struct base_data *result = alloc_base_data();

		assert(child->real_type == OBJ_OFS_DELTA);
		child->real_type = base->obj->real_type;
		resolve_delta(child, base, result);
		if (base->ofs_first == base->ofs_last)
			free_base_data(base);

		base->ofs_first++;
		return result;
	}

	unlink_base_data(base);
	return NULL;
}

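/*
 * Iterative depth-first walk over the delta tree rooted at "base": instead
 * of recursing, we descend into the base_data returned by
 * find_unresolved_deltas_1() and climb back up through the ->base pointers
 * once a node has no children left, so very deep delta chains cannot
 * overflow the stack.
 */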
static void find_unresolved_deltas(struct base_data *base)
{
	struct base_data *new_base, *prev_base = NULL;
	for (;;) {
		new_base = find_unresolved_deltas_1(base, prev_base);

		if (new_base) {
			prev_base = base;
			base = new_base;
		} else {
			free(base);
			base = prev_base;
			if (!base)
				return;
			prev_base = base->base;
		}
	}
}

static int compare_ofs_delta_entry(const void *a, const void *b)
{
	const struct ofs_delta_entry *delta_a = a;
	const struct ofs_delta_entry *delta_b = b;

	return delta_a->offset < delta_b->offset ? -1 :
	       delta_a->offset > delta_b->offset ?  1 :
	       0;
}

static int compare_ref_delta_entry(const void *a, const void *b)
{
	const struct ref_delta_entry *delta_a = a;
	const struct ref_delta_entry *delta_b = b;

	return oidcmp(&delta_a->oid, &delta_b->oid);
}

static void resolve_base(struct object_entry *obj)
{
	struct base_data *base_obj = alloc_base_data();
	base_obj->obj = obj;
	base_obj->data = NULL;
	find_unresolved_deltas(base_obj);
}

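/*
 * Worker body for the resolving phase.  Each thread repeatedly takes the
 * next non-delta object under work_mutex and resolves every delta that
 * (directly or transitively) uses it as a base; delta entries themselves
 * are skipped here because they are reached through their bases.
 */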
static void *threaded_second_pass(void *data)
{
	set_thread_data(data);
	for (;;) {
		int i;
		counter_lock();
		display_progress(progress, nr_resolved_deltas);
		counter_unlock();
		work_lock();
		while (nr_dispatched < nr_objects &&
		       is_delta_type(objects[nr_dispatched].type))
			nr_dispatched++;
		if (nr_dispatched >= nr_objects) {
			work_unlock();
			break;
		}
		i = nr_dispatched++;
		work_unlock();

		resolve_base(&objects[i]);
	}
	return NULL;
}

/*
 * First pass:
 * - find locations of all objects;
 * - calculate SHA1 of all non-delta objects;
 * - remember base (SHA1 or offset) for all deltas.
 */
static void parse_pack_objects(unsigned char *hash)
{
	int i, nr_delays = 0;
	struct ofs_delta_entry *ofs_delta = ofs_deltas;
	struct object_id ref_delta_oid;
	struct stat st;

	if (verbose)
		progress = start_progress(
				from_stdin ? _("Receiving objects") : _("Indexing objects"),
				nr_objects);
	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];
		void *data = unpack_raw_entry(obj, &ofs_delta->offset,
					      &ref_delta_oid,
					      &obj->idx.oid);
		obj->real_type = obj->type;
		if (obj->type == OBJ_OFS_DELTA) {
			nr_ofs_deltas++;
			ofs_delta->obj_no = i;
			ofs_delta++;
		} else if (obj->type == OBJ_REF_DELTA) {
			ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
			oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
			ref_deltas[nr_ref_deltas].obj_no = i;
			nr_ref_deltas++;
		} else if (!data) {
			/* large blobs, check later */
			obj->real_type = OBJ_BAD;
			nr_delays++;
		} else
			sha1_object(data, NULL, obj->size, obj->type,
				    &obj->idx.oid);
		free(data);
		display_progress(progress, i+1);
	}
	objects[i].idx.offset = consumed_bytes;
	stop_progress(&progress);

	/* Check pack integrity */
	flush();
	the_hash_algo->final_fn(hash, &input_ctx);
	if (!hasheq(fill(the_hash_algo->rawsz), hash))
		die(_("pack is corrupted (SHA1 mismatch)"));
	use(the_hash_algo->rawsz);

	/* If input_fd is a file, we should have reached its end now. */
	if (fstat(input_fd, &st))
		die_errno(_("cannot fstat packfile"));
	if (S_ISREG(st.st_mode) &&
	    lseek(input_fd, 0, SEEK_CUR) - input_len != st.st_size)
		die(_("pack has junk at the end"));

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];
		if (obj->real_type != OBJ_BAD)
			continue;
		obj->real_type = obj->type;
		sha1_object(NULL, obj, obj->size, obj->type,
			    &obj->idx.oid);
		nr_delays--;
	}
	if (nr_delays)
		die(_("confusion beyond insanity in parse_pack_objects()"));
}

/*
 * Second pass:
 * - for all non-delta objects, look if it is used as a base for
 *   deltas;
 * - if used as a base, uncompress the object and apply all deltas,
 *   recursively checking if the resulting object is used as a base
 *   for some more deltas.
 */
static void resolve_deltas(void)
{
	int i;

	if (!nr_ofs_deltas && !nr_ref_deltas)
		return;

	/* Sort deltas by base SHA1/offset for fast searching */
	QSORT(ofs_deltas, nr_ofs_deltas, compare_ofs_delta_entry);
	QSORT(ref_deltas, nr_ref_deltas, compare_ref_delta_entry);

	if (verbose || show_resolving_progress)
		progress = start_progress(_("Resolving deltas"),
					  nr_ref_deltas + nr_ofs_deltas);

	nr_dispatched = 0;
	if (nr_threads > 1 || getenv("GIT_FORCE_THREADS")) {
		init_thread();
		for (i = 0; i < nr_threads; i++) {
			int ret = pthread_create(&thread_data[i].thread, NULL,
						 threaded_second_pass, thread_data + i);
			if (ret)
				die(_("unable to create thread: %s"),
				    strerror(ret));
		}
		for (i = 0; i < nr_threads; i++)
			pthread_join(thread_data[i].thread, NULL);
		cleanup_thread();
		return;
	}

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];

		if (is_delta_type(obj->type))
			continue;
		resolve_base(obj);
		display_progress(progress, nr_resolved_deltas);
	}
}

/*
 * Third pass:
 * - append objects to convert thin pack to full pack if required
 * - write the final pack hash
 */
static void fix_unresolved_deltas(struct hashfile *f);
static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
	if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
		stop_progress(&progress);
		/* Flush remaining pack final hash. */
		flush();
		return;
	}

	if (fix_thin_pack) {
		struct hashfile *f;
		unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
		struct strbuf msg = STRBUF_INIT;
		int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
		int nr_objects_initial = nr_objects;
		if (nr_unresolved <= 0)
			die(_("confusion beyond insanity"));
		REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
		memset(objects + nr_objects + 1, 0,
		       nr_unresolved * sizeof(*objects));
		f = hashfd(output_fd, curr_pack);
		fix_unresolved_deltas(f);
		strbuf_addf(&msg, Q_("completed with %d local object",
				     "completed with %d local objects",
				     nr_objects - nr_objects_initial),
			    nr_objects - nr_objects_initial);
		stop_progress_msg(&progress, msg.buf);
		strbuf_release(&msg);
		finalize_hashfile(f, tail_hash, 0);
		hashcpy(read_hash, pack_hash);
		fixup_pack_header_footer(output_fd, pack_hash,
					 curr_pack, nr_objects,
					 read_hash, consumed_bytes-the_hash_algo->rawsz);
		if (!hasheq(read_hash, tail_hash))
			die(_("Unexpected tail checksum for %s "
			      "(disk corruption?)"), curr_pack);
	}
	if (nr_ofs_deltas + nr_ref_deltas != nr_resolved_deltas)
		die(Q_("pack has %d unresolved delta",
		       "pack has %d unresolved deltas",
		       nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas),
		    nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}

static int write_compressed(struct hashfile *f, void *in, unsigned int size)
{
	git_zstream stream;
	int status;
	unsigned char outbuf[4096];

	git_deflate_init(&stream, zlib_compression_level);
	stream.next_in = in;
	stream.avail_in = size;

	do {
		stream.next_out = outbuf;
		stream.avail_out = sizeof(outbuf);
		status = git_deflate(&stream, Z_FINISH);
		hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
	} while (status == Z_OK);

	if (status != Z_STREAM_END)
		die(_("unable to deflate appended object (%d)"), status);
	size = stream.total_out;
	git_deflate_end(&stream);
	return size;
}

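/*
 * Append a local object needed by a thin pack to the end of the pack being
 * indexed, writing the same type/size header encoding that
 * unpack_raw_entry() parses.  obj[1].idx.offset is filled in as well, since
 * the next entry's offset doubles as the end marker of this one.
 */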
static struct object_entry *append_obj_to_pack(struct hashfile *f,
			       const unsigned char *sha1, void *buf,
			       unsigned long size, enum object_type type)
{
	struct object_entry *obj = &objects[nr_objects++];
	unsigned char header[10];
	unsigned long s = size;
	int n = 0;
	unsigned char c = (type << 4) | (s & 15);
	s >>= 4;
	while (s) {
		header[n++] = c | 0x80;
		c = s & 0x7f;
		s >>= 7;
	}
	header[n++] = c;
	crc32_begin(f);
	hashwrite(f, header, n);
	obj[0].size = size;
	obj[0].hdr_size = n;
	obj[0].type = type;
	obj[0].real_type = type;
	obj[1].idx.offset = obj[0].idx.offset + n;
	obj[1].idx.offset += write_compressed(f, buf, size);
	obj[0].idx.crc32 = crc32_end(f);
	hashflush(f);
	hashcpy(obj->idx.oid.hash, sha1);
	return obj;
}

static int delta_pos_compare(const void *_a, const void *_b)
{
	struct ref_delta_entry *a = *(struct ref_delta_entry **)_a;
	struct ref_delta_entry *b = *(struct ref_delta_entry **)_b;
	return a->obj_no - b->obj_no;
}

static void fix_unresolved_deltas(struct hashfile *f)
{
	struct ref_delta_entry **sorted_by_pos;
	int i;

	/*
	 * Since many unresolved deltas may well be themselves base objects
	 * for more unresolved deltas, we really want to include the
	 * smallest number of base objects that would cover as much delta
	 * as possible by picking the deltas closest to the trunk of each
	 * chain first, allowing other deltas to resolve without additional
	 * base objects.  Since most base objects are to be found before the
	 * deltas depending on them, a good heuristic is to start resolving
	 * deltas in the same order as their position in the pack.
	 */
	ALLOC_ARRAY(sorted_by_pos, nr_ref_deltas);
	for (i = 0; i < nr_ref_deltas; i++)
		sorted_by_pos[i] = &ref_deltas[i];
	QSORT(sorted_by_pos, nr_ref_deltas, delta_pos_compare);

	for (i = 0; i < nr_ref_deltas; i++) {
		struct ref_delta_entry *d = sorted_by_pos[i];
		enum object_type type;
		struct base_data *base_obj = alloc_base_data();

		if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
			continue;
		base_obj->data = read_object_file(&d->oid, &type,
						  &base_obj->size);
		if (!base_obj->data)
			continue;

		if (check_object_signature(&d->oid, base_obj->data,
					   base_obj->size, type_name(type)))
			die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
		base_obj->obj = append_obj_to_pack(f, d->oid.hash,
						   base_obj->data, base_obj->size, type);
		find_unresolved_deltas(base_obj);
		display_progress(progress, nr_resolved_deltas);
	}
	free(sorted_by_pos);
}

static const char *derive_filename(const char *pack_name, const char *suffix,
				   struct strbuf *buf)
{
	size_t len;
	if (!strip_suffix(pack_name, ".pack", &len))
		die(_("packfile name '%s' does not end with '.pack'"),
		    pack_name);
	strbuf_add(buf, pack_name, len);
	strbuf_addch(buf, '.');
	strbuf_addstr(buf, suffix);
	return buf->buf;
}

static void write_special_file(const char *suffix, const char *msg,
			       const char *pack_name, const unsigned char *hash,
			       const char **report)
{
	struct strbuf name_buf = STRBUF_INIT;
	const char *filename;
	int fd;
	int msg_len = strlen(msg);

	if (pack_name)
		filename = derive_filename(pack_name, suffix, &name_buf);
	else
		filename = odb_pack_name(&name_buf, hash, suffix);

	fd = odb_pack_keep(filename);
	if (fd < 0) {
		if (errno != EEXIST)
			die_errno(_("cannot write %s file '%s'"),
				  suffix, filename);
	} else {
		if (msg_len > 0) {
			write_or_die(fd, msg, msg_len);
			write_or_die(fd, "\n", 1);
		}
		if (close(fd) != 0)
			die_errno(_("cannot close written %s file '%s'"),
				  suffix, filename);
		if (report)
			*report = suffix;
	}
	strbuf_release(&name_buf);
}

static void final(const char *final_pack_name, const char *curr_pack_name,
		  const char *final_index_name, const char *curr_index_name,
		  const char *keep_msg, const char *promisor_msg,
		  unsigned char *hash)
{
	const char *report = "pack";
	struct strbuf pack_name = STRBUF_INIT;
	struct strbuf index_name = STRBUF_INIT;
	int err;

	if (!from_stdin) {
		close(input_fd);
	} else {
		fsync_or_die(output_fd, curr_pack_name);
		err = close(output_fd);
		if (err)
			die_errno(_("error while closing pack file"));
	}

	if (keep_msg)
		write_special_file("keep", keep_msg, final_pack_name, hash,
				   &report);
	if (promisor_msg)
		write_special_file("promisor", promisor_msg, final_pack_name,
				   hash, NULL);

	if (final_pack_name != curr_pack_name) {
		if (!final_pack_name)
			final_pack_name = odb_pack_name(&pack_name, hash, "pack");
		if (finalize_object_file(curr_pack_name, final_pack_name))
			die(_("cannot store pack file"));
	} else if (from_stdin)
		chmod(final_pack_name, 0444);

	if (final_index_name != curr_index_name) {
		if (!final_index_name)
			final_index_name = odb_pack_name(&index_name, hash, "idx");
		if (finalize_object_file(curr_index_name, final_index_name))
			die(_("cannot store index file"));
	} else
		chmod(final_index_name, 0444);

	if (do_fsck_object) {
		struct packed_git *p;
		p = add_packed_git(final_index_name, strlen(final_index_name), 0);
		if (p)
			install_packed_git(the_repository, p);
	}

	if (!from_stdin) {
		printf("%s\n", sha1_to_hex(hash));
	} else {
		struct strbuf buf = STRBUF_INIT;

		strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
		write_or_die(1, buf.buf, buf.len);
		strbuf_release(&buf);

		/*
		 * Let's just mimic git-unpack-objects here and write
		 * the last part of the input buffer to stdout.
		 */
		while (input_len) {
			err = xwrite(1, input_buffer + input_offset, input_len);
			if (err <= 0)
				break;
			input_len -= err;
			input_offset += err;
		}
	}

	strbuf_release(&index_name);
	strbuf_release(&pack_name);
}

static int git_index_pack_config(const char *k, const char *v, void *cb)
{
	struct pack_idx_option *opts = cb;

	if (!strcmp(k, "pack.indexversion")) {
		opts->version = git_config_int(k, v);
		if (opts->version > 2)
			die(_("bad pack.indexversion=%"PRIu32), opts->version);
		return 0;
	}
	if (!strcmp(k, "pack.threads")) {
		nr_threads = git_config_int(k, v);
		if (nr_threads < 0)
			die(_("invalid number of threads specified (%d)"),
			    nr_threads);
		if (!HAVE_THREADS && nr_threads != 1) {
			warning(_("no threads support, ignoring %s"), k);
			nr_threads = 1;
		}
		return 0;
	}
	return git_default_config(k, v, cb);
}

static int cmp_uint32(const void *a_, const void *b_)
{
	uint32_t a = *((uint32_t *)a_);
	uint32_t b = *((uint32_t *)b_);

	return (a < b) ? -1 : (a != b);
}

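/*
 * A version-2 .idx file stores 31-bit offsets in its 4-byte table and
 * redirects larger ones (MSB set) into an 8-byte table.  Offsets that would
 * have fit in the 4-byte table normally never take that detour; when the
 * existing index did record some that way, remember them as "anomalies" so
 * that the index written by --verify matches the existing one.
 */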
static void read_v2_anomalous_offsets(struct packed_git *p,
				      struct pack_idx_option *opts)
{
	const uint32_t *idx1, *idx2;
	uint32_t i;
	const uint32_t hashwords = the_hash_algo->rawsz / sizeof(uint32_t);

	/* The address of the 4-byte offset table */
	idx1 = (((const uint32_t *)p->index_data)
		+ 2 /* 8-byte header */
		+ 256 /* fan out */
		+ hashwords * p->num_objects /* object ID table */
		+ p->num_objects /* CRC32 table */
		);

	/* The address of the 8-byte offset table */
	idx2 = idx1 + p->num_objects;

	for (i = 0; i < p->num_objects; i++) {
		uint32_t off = ntohl(idx1[i]);
		if (!(off & 0x80000000))
			continue;
		off = off & 0x7fffffff;
		check_pack_index_ptr(p, &idx2[off * 2]);
		if (idx2[off * 2])
			continue;
		/*
		 * The real offset is ntohl(idx2[off * 2]) in high 4
		 * octets, and ntohl(idx2[off * 2 + 1]) in low 4
		 * octets. But idx2[off * 2] is Zero!!!
		 */
		ALLOC_GROW(opts->anomaly, opts->anomaly_nr + 1, opts->anomaly_alloc);
		opts->anomaly[opts->anomaly_nr++] = ntohl(idx2[off * 2 + 1]);
	}

	QSORT(opts->anomaly, opts->anomaly_nr, cmp_uint32);
}

static void read_idx_option(struct pack_idx_option *opts, const char *pack_name)
{
	struct packed_git *p = add_packed_git(pack_name, strlen(pack_name), 1);

	if (!p)
		die(_("Cannot open existing pack file '%s'"), pack_name);
	if (open_pack_index(p))
		die(_("Cannot open existing pack idx file for '%s'"), pack_name);

	/* Read the attributes from the existing idx file */
	opts->version = p->index_version;

	if (opts->version == 2)
		read_v2_anomalous_offsets(p, opts);

	/*
	 * Get rid of the idx file as we do not need it anymore.
	 * NEEDSWORK: extract this bit from free_pack_by_name() in
	 * sha1-file.c, perhaps?  It shouldn't matter very much as we
	 * know we haven't installed this pack (hence we never have
	 * read anything from it).
	 */
	close_pack_index(p);
	free(p);
}

static void show_pack_info(int stat_only)
{
	int i, baseobjects = nr_objects - nr_ref_deltas - nr_ofs_deltas;
	unsigned long *chain_histogram = NULL;

	if (deepest_delta)
		chain_histogram = xcalloc(deepest_delta, sizeof(unsigned long));

	for (i = 0; i < nr_objects; i++) {
		struct object_entry *obj = &objects[i];

		if (is_delta_type(obj->type))
			chain_histogram[obj_stat[i].delta_depth - 1]++;
		if (stat_only)
			continue;
		printf("%s %-6s %"PRIuMAX" %"PRIuMAX" %"PRIuMAX,
		       oid_to_hex(&obj->idx.oid),
		       type_name(obj->real_type), (uintmax_t)obj->size,
		       (uintmax_t)(obj[1].idx.offset - obj->idx.offset),
		       (uintmax_t)obj->idx.offset);
		if (is_delta_type(obj->type)) {
			struct object_entry *bobj = &objects[obj_stat[i].base_object_no];
			printf(" %u %s", obj_stat[i].delta_depth,
			       oid_to_hex(&bobj->idx.oid));
		}
		putchar('\n');
	}

	if (baseobjects)
		printf_ln(Q_("non delta: %d object",
			     "non delta: %d objects",
			     baseobjects),
			  baseobjects);
	for (i = 0; i < deepest_delta; i++) {
		if (!chain_histogram[i])
			continue;
		printf_ln(Q_("chain length = %d: %lu object",
			     "chain length = %d: %lu objects",
			     chain_histogram[i]),
			  i + 1,
			  chain_histogram[i]);
	}
}

int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
	int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
	const char *curr_index;
	const char *index_name = NULL, *pack_name = NULL;
	const char *keep_msg = NULL;
	const char *promisor_msg = NULL;
	struct strbuf index_name_buf = STRBUF_INIT;
	struct pack_idx_entry **idx_objects;
	struct pack_idx_option opts;
	unsigned char pack_hash[GIT_MAX_RAWSZ];
	unsigned foreign_nr = 1;	/* zero is a "good" value, assume bad */
	int report_end_of_input = 0;

	/*
	 * index-pack never needs to fetch missing objects, since it only
	 * accesses the repo to do hash collision checks
	 */
	fetch_if_missing = 0;

	if (argc == 2 && !strcmp(argv[1], "-h"))
		usage(index_pack_usage);

	read_replace_refs = 0;
	fsck_options.walk = mark_link;

	reset_pack_idx_option(&opts);
	git_config(git_index_pack_config, &opts);
	if (prefix && chdir(prefix))
		die(_("Cannot come back to cwd"));

	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		if (*arg == '-') {
			if (!strcmp(arg, "--stdin")) {
				from_stdin = 1;
			} else if (!strcmp(arg, "--fix-thin")) {
				fix_thin_pack = 1;
			} else if (skip_to_optional_arg(arg, "--strict", &arg)) {
				strict = 1;
				do_fsck_object = 1;
				fsck_set_msg_types(&fsck_options, arg);
			} else if (!strcmp(arg, "--check-self-contained-and-connected")) {
				strict = 1;
				check_self_contained_and_connected = 1;
			} else if (!strcmp(arg, "--fsck-objects")) {
				do_fsck_object = 1;
			} else if (!strcmp(arg, "--verify")) {
				verify = 1;
			} else if (!strcmp(arg, "--verify-stat")) {
				verify = 1;
				show_stat = 1;
			} else if (!strcmp(arg, "--verify-stat-only")) {
				verify = 1;
				show_stat = 1;
				stat_only = 1;
			} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
				; /* nothing to do */
			} else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
				; /* already parsed */
			} else if (starts_with(arg, "--threads=")) {
				char *end;
				nr_threads = strtoul(arg+10, &end, 0);
				if (!arg[10] || *end || nr_threads < 0)
					usage(index_pack_usage);
				if (!HAVE_THREADS && nr_threads != 1) {
					warning(_("no threads support, ignoring %s"), arg);
					nr_threads = 1;
				}
			} else if (starts_with(arg, "--pack_header=")) {
				struct pack_header *hdr;
				char *c;

				hdr = (struct pack_header *)input_buffer;
				hdr->hdr_signature = htonl(PACK_SIGNATURE);
				hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
				if (*c != ',')
					die(_("bad %s"), arg);
				hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
				if (*c)
					die(_("bad %s"), arg);
				input_len = sizeof(*hdr);
			} else if (!strcmp(arg, "-v")) {
				verbose = 1;
			} else if (!strcmp(arg, "--show-resolving-progress")) {
				show_resolving_progress = 1;
			} else if (!strcmp(arg, "--report-end-of-input")) {
				report_end_of_input = 1;
			} else if (!strcmp(arg, "-o")) {
				if (index_name || (i+1) >= argc)
					usage(index_pack_usage);
				index_name = argv[++i];
			} else if (starts_with(arg, "--index-version=")) {
				char *c;
				opts.version = strtoul(arg + 16, &c, 10);
				if (opts.version > 2)
					die(_("bad %s"), arg);
				if (*c == ',')
					opts.off32_limit = strtoul(c+1, &c, 0);
				if (*c || opts.off32_limit & 0x80000000)
					die(_("bad %s"), arg);
			} else if (skip_prefix(arg, "--max-input-size=", &arg)) {
				max_input_size = strtoumax(arg, NULL, 10);
			} else
				usage(index_pack_usage);
			continue;
		}

		if (pack_name)
			usage(index_pack_usage);
		pack_name = arg;
	}

	if (!pack_name && !from_stdin)
		usage(index_pack_usage);
	if (fix_thin_pack && !from_stdin)
		die(_("--fix-thin cannot be used without --stdin"));
	if (from_stdin && !startup_info->have_repository)
		die(_("--stdin requires a git repository"));
	if (!index_name && pack_name)
		index_name = derive_filename(pack_name, "idx", &index_name_buf);

	if (verify) {
		if (!index_name)
			die(_("--verify with no packfile name given"));
		read_idx_option(&opts, index_name);
		opts.flags |= WRITE_IDX_VERIFY | WRITE_IDX_STRICT;
	}
	if (strict)
		opts.flags |= WRITE_IDX_STRICT;

	if (HAVE_THREADS && !nr_threads) {
		nr_threads = online_cpus();
		/* An experiment showed that more threads does not mean faster */
		if (nr_threads > 3)
			nr_threads = 3;
	}

	curr_pack = open_pack_file(pack_name);
	parse_pack_header();
	objects = xcalloc(st_add(nr_objects, 1), sizeof(struct object_entry));
	if (show_stat)
		obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
	ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
	parse_pack_objects(pack_hash);
	if (report_end_of_input)
		write_in_full(2, "\0", 1);
	resolve_deltas();
	conclude_pack(fix_thin_pack, curr_pack, pack_hash);
	free(ofs_deltas);
	free(ref_deltas);
	if (strict)
		foreign_nr = check_objects();

	if (show_stat)
		show_pack_info(stat_only);

	ALLOC_ARRAY(idx_objects, nr_objects);
	for (i = 0; i < nr_objects; i++)
		idx_objects[i] = &objects[i].idx;
	curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
	free(idx_objects);

	if (!verify)
		final(pack_name, curr_pack,
		      index_name, curr_index,
		      keep_msg, promisor_msg,
		      pack_hash);
	else
		close(input_fd);

	if (do_fsck_object && fsck_finish(&fsck_options))
		die(_("fsck error in pack objects"));

	free(objects);
	strbuf_release(&index_name_buf);
	if (pack_name == NULL)
		free((void *) curr_pack);
	if (index_name == NULL)
		free((void *) curr_index);

	/*
	 * Let the caller know this pack is not self contained
	 */
	if (check_self_contained_and_connected && foreign_nr)
		return 1;

	return 0;
}