#include "cache.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
#define COMMON (1U << 1)
#define COMMON_REF (1U << 2)
#define SEEN (1U << 3)
#define POPPED (1U << 4)
#define ALTERNATE (1U << 5)

static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1 02
static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid->hash);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

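/*
 * Queue a commit for the negotiation walk. The given mark (e.g. SEEN)
 * is recorded on the object so it is not pushed twice; commits that
 * cannot be parsed are silently skipped.
 */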
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const unsigned char *sha1)
{
	struct object *o = deref_tag(parse_object(sha1), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid->hash);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid->hash), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
  This function marks a rev and its ancestors as common.
  In some cases, it is desirable to mark only the ancestors (for example
  when only the server does not yet know that they are common).
*/

static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
  Get the next rev to send, ignoring the common.
*/

static const unsigned char *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return commit->object.oid.hash;
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}

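/*
 * Read one ACK/NAK line from the server and classify it. On any flavor
 * of "ACK" the acknowledged object id is written to result_sha1.
 */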
static enum ack_type get_ack(int fd, unsigned char *result_sha1)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!len)
		die(_("git fetch-pack: expected ACK/NAK, got EOF"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_sha1_hex(arg, result_sha1)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

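/*
 * Send the buffered request upstream: over stateless RPC it goes out
 * as one sideband-framed block followed by a flush packet, otherwise
 * it is written directly to the connection.
 */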
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, obj->oid.hash);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

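/*
 * Compute the next "have" count at which we flush and wait for ACKs.
 * The window doubles at first and then grows more slowly (10% per
 * round for stateless RPC, a fixed PIPESAFE_FLUSH step otherwise) to
 * limit round trips when walking a large history.
 */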
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

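/*
 * Negotiate the set of common commits with the server: send "want"
 * lines (with capabilities on the first one) plus any shallow/deepen
 * requests, then stream "have" lines in growing batches until the
 * server ACKs enough history or we give up (MAX_IN_VAIN).  Returns 1
 * when there is nothing to want, 0 when a common base was found (or
 * the local repository is empty), and -1 otherwise.
 */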
static int find_common(struct fetch_pack_args *args,
		       int fd[2], unsigned char *result_sha1,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const unsigned char *sha1;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		unsigned char *remote = refs->old_oid.hash;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = sha1_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		unsigned long max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %lu", max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		unsigned char sha1[20];

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die(_("invalid shallow line: %s"), line);
				register_shallow(sha1);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_sha1_hex(arg, sha1))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(sha1))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(sha1))
					die(_("error in object: %s"), line);
				if (unregister_shallow(sha1))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((sha1 = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
		print_verbose(args, "have %s", sha1_to_hex(sha1));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_sha1);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, sha1_to_hex(result_sha1));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_sha1);
					if (!commit)
						die(_("invalid commit %s"), sha1_to_hex(result_sha1));
					if (args->stateless_rpc
					    && ack == ACK_common
					    && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = sha1_to_hex(result_sha1);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_sha1);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, sha1_to_hex(result_sha1));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

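/*
 * Mark a local ref tip as COMPLETE: peel any tags (marking each peeled
 * layer complete as well) and, if the tip is a commit, add it to the
 * "complete" list used for the date-based cutoff walk.
 */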
static int mark_complete(const unsigned char *sha1)
{
	struct object *o = parse_object(sha1);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(t->tagged->oid.hash);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid->hash);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 unsigned long cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

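/*
 * Trim the advertised ref list down to what the caller asked for (or
 * everything, for a fetch_all request), recording a match status on
 * each "sought" entry.  Requests for raw object ids that were not
 * advertised are appended only when the server allows unadvertised
 * object requests.
 */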
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *ref, *next;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			free(ref);
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		unsigned char sha1[20];

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (get_sha1_hex(ref->name, sha1) ||
		    ref->name[40] != '\0' ||
		    hashcmp(sha1, ref->old_oid.hash))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1))) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}
	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(obj->oid.hash);
}

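/*
 * Check whether everything we were asked for is already present
 * locally.  Local ref tips are marked COMPLETE down to a date cutoff,
 * complete remote tips are pushed as COMMON_REF for the negotiation,
 * and the ref list is filtered.  Returns non-zero when no fetch is
 * needed at all.
 */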
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	unsigned long cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file(&ref->old_oid))
			continue;

		o = parse_object(ref->old_oid.hash);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const unsigned char *remote = ref->old_oid.hash;
		struct object *o;

		o = lookup_object(remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", sha1_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), sha1_to_hex(remote),
			      ref->name);
	}
	return retval;
}

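/* Async callback: strip the sideband framing coming from upload-pack. */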
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

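/*
 * Receive the pack, demultiplexing the sideband first if it is in use,
 * and feed it to either index-pack (keeping the pack on disk) or
 * unpack-objects, depending on keep_pack and the unpack limit checked
 * against the entry count in the pack header.
 */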
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (args->lock_pack || unpack_limit) {
			char hostname[256];
			if (gethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		argv_array_push(&cmd.args, "--strict");

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

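/*
 * Core of the fetch: interpret the server's capability advertisement,
 * negotiate common commits unless everything is already local, set up
 * any alternate shallow file, and receive the pack.
 */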
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	unsigned char sha1[20];
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, sha1, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

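/*
 * Drop later duplicates from the sought ref array in place and return
 * the new number of entries.
 */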
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

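/*
 * Reconcile .git/shallow with what the fetch brought in: commit or
 * roll back the shallow lock for --depth/--unshallow fetches, accept
 * the remote's shallow roots that ended up in the pack when cloning
 * (or those reachable from the new refs with --update-shallow), and
 * otherwise reject refs that would require adding new shallow roots.
 */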
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

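/*
 * Public entry point: read configuration, de-duplicate the sought
 * refs, run the fetch against an already-advertised ref list, and
 * update the shallow bookkeeping afterwards.
 */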
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}

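/*
 * Report refs the user asked for but did not receive.  Returns
 * non-zero if any were unmatched or disallowed.
 */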
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}