#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)

static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband, allow_tip_sha1_in_want;

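/*
 * Queue a commit for the "have" walk unless it already carries the
 * given mark; commits not yet known to be common are counted in
 * non_common_revs.
 */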
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(sha1), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int clear_marks(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(sha1), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
   This function marks a rev and its ancestors as common.
   In some cases, it is desirable to mark only the ancestors (for example
   when only the server does not yet know that they are common).
*/

static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
  Get the next rev to send, ignoring the common.
*/

static const unsigned char *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return commit->object.sha1;
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->depth > 0) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die("git fetch-pack: expected shallow list");
		}
	}
}

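/*
 * Read one ACK/NAK line from the server and classify it; on ACK the
 * advertised object name is stored in result_sha1.
 */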
static enum ack_type get_ack(int fd, unsigned char *result_sha1)
{
	int len;
	char *line = packet_read_line(fd, &len);

	if (!len)
		die("git fetch-pack: expected ACK/NAK, got EOF");
	if (!strcmp(line, "NAK"))
		return NAK;
	if (starts_with(line, "ACK ")) {
		if (!get_sha1_hex(line+4, result_sha1)) {
			if (len < 45)
				return ACK;
			if (strstr(line+45, "continue"))
				return ACK_continue;
			if (strstr(line+45, "common"))
				return ACK_common;
			if (strstr(line+45, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die("git fetch-pack: expected ACK/NAK, got '%s'", line);
}

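/*
 * Send the accumulated request to the server.  Over stateless RPC the
 * buffer is re-chunked into packets no larger than LARGE_PACKET_MAX
 * and terminated with a flush; otherwise it is written out verbatim.
 */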
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_ref(const struct ref *ref, void *unused)
{
	rev_list_insert_ref(NULL, ref->old_sha1, 0, NULL);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 1024

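/*
 * Compute how many "have"s to send before the next flush: the window
 * doubles up to the flush limit and then grows linearly, with a larger
 * limit for stateless RPC where each flush costs a round trip.
 */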
static int next_flush(struct fetch_pack_args *args, int count)
{
	int flush_limit = args->stateless_rpc ? LARGE_FLUSH : PIPESAFE_FLUSH;

	if (count < flush_limit)
		count <<= 1;
	else
		count += flush_limit;
	return count;
}

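/*
 * Negotiate a common history with the server: send "want" lines for
 * the refs we are fetching, then stream "have" lines from our own
 * history until the server ACKs a common commit or we give up.
 * Returns 0 when negotiation succeeded (the last ACKed object is left
 * in result_sha1), -1 when no common commit was found.
 */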
static int find_common(struct fetch_pack_args *args,
		       int fd[2], unsigned char *result_sha1,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const unsigned char *sha1;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die("--stateless-rpc requires multi_ack_detailed");
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref, NULL);
	for_each_alternate_ref(insert_one_alternate_ref, NULL);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		unsigned char *remote = refs->old_sha1;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote)) != NULL) &&
				(o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = sha1_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)      strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)      strbuf_addstr(&c, " multi_ack");
			if (no_done)             strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)   strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)   strbuf_addstr(&c, " side-band");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)    strbuf_addstr(&c, " ofs-delta");
			if (agent_supported)     strbuf_addf(&c, " agent=%s",
							     git_user_agent_sanitized());
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->depth > 0) {
		char *line;
		unsigned char sha1[20];

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (starts_with(line, "shallow ")) {
				if (get_sha1_hex(line + 8, sha1))
					die("invalid shallow line: %s", line);
				register_shallow(sha1);
				continue;
			}
			if (starts_with(line, "unshallow ")) {
				if (get_sha1_hex(line + 10, sha1))
					die("invalid unshallow line: %s", line);
				if (!lookup_object(sha1))
					die("object not found: %s", line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(sha1))
					die("error in object: %s", line);
				if (unregister_shallow(sha1))
					die("no shallow found: %s", line);
				continue;
			}
			die("expected shallow/unshallow, got %s", line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	while ((sha1 = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", sha1_to_hex(sha1));
		if (args->verbose)
			fprintf(stderr, "have %s\n", sha1_to_hex(sha1));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_sha1);
				if (args->verbose && ack)
					fprintf(stderr, "got ack %d %s\n", ack,
						sha1_to_hex(result_sha1));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_sha1);
					if (!commit)
						die("invalid commit %s", sha1_to_hex(result_sha1));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = sha1_to_hex(result_sha1);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
					}
					mark_common(commit, 0, 1);
					retval = 0;
					in_vain = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				if (args->verbose)
					fprintf(stderr, "giving up\n");
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	if (args->verbose)
		fprintf(stderr, "done\n");
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_sha1);
		if (ack) {
			if (args->verbose)
				fprintf(stderr, "got ack (%d) %s\n", ack,
					sha1_to_hex(result_sha1));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

static int mark_complete(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
	struct object *o = parse_object(sha1);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(t->tagged->sha1);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 unsigned long cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		if (args->verbose)
			fprintf(stderr, "Marking %s as complete\n",
				sha1_to_hex(complete->item->object.sha1));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

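/*
 * Trim the advertised ref list down to the refs we were asked to
 * fetch (plus everything when args->fetch_all is set), marking each
 * request that was matched.  If the server allows it, unmatched
 * requests given as raw SHA-1s are appended as wants of their own.
 */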
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *ref, *next;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->matched = 1;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->depth || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			free(ref);
		}
	}

	/* Append unmatched requests to the list */
	if (allow_tip_sha1_in_want) {
		for (i = 0; i < nr_sought; i++) {
			ref = sought[i];
			if (ref->matched)
				continue;
			if (get_sha1_hex(ref->name, ref->old_sha1))
				continue;

			ref->matched = 1;
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		}
	}
	*refs = newlist;
}

static void mark_alternate_complete(const struct ref *ref, void *unused)
{
	mark_complete(NULL, ref->old_sha1, 0, NULL);
}

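/*
 * Figure out which advertised refs we already have: mark the tips of
 * our local refs (and their recent ancestors) as COMPLETE, queue
 * complete remote refs as starting points for the "have" walk, and
 * filter the advertised list down to what was asked for.  Returns
 * non-zero when everything we want is already present locally.
 */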
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	unsigned long cutoff = 0;

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_sha1_file(ref->old_sha1))
			continue;

		o = parse_object(ref->old_sha1);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->depth) {
		for_each_ref(mark_complete, NULL);
		for_each_alternate_ref(mark_alternate_complete, NULL);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(lookup_object(ref->old_sha1),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		if (!(o->flags & SEEN)) {
			rev_list_push((struct commit *)o, COMMON_REF | SEEN);

			mark_common((struct commit *)o, 1, 1);
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const unsigned char *remote = ref->old_sha1;
		struct object *o;

		o = lookup_object(remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			if (!args->verbose)
				continue;
			fprintf(stderr,
				"want %s (%s)\n", sha1_to_hex(remote),
				ref->name);
			continue;
		}

		if (!args->verbose)
			continue;
		fprintf(stderr,
			"already have %s (%s)\n", sha1_to_hex(remote),
			ref->name);
	}
	return retval;
}


static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;

	int ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

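/*
 * Receive the pack data, demultiplexing the sideband if it is in use,
 * and feed it to either index-pack (keeping the pack on disk) or
 * unpack-objects, depending on the keep settings and on the number of
 * objects relative to unpack_limit.
 */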
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	const char *argv[22];
	char keep_arg[256];
	char hdr_arg[256];
	const char **av, *cmd_name;
	int do_keep = args->keep_pack;
	struct child_process cmd;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		if (start_async(&demux))
			die("fetch-pack: unable to fork off sideband"
			    " demultiplexer");
	}
	else
		demux.out = xd[0];

	memset(&cmd, 0, sizeof(cmd));
	cmd.argv = argv;
	av = argv;
	*hdr_arg = 0;
	if (!args->keep_pack && unpack_limit) {
		struct pack_header header;

		if (read_pack_header(demux.out, &header))
			die("protocol error: bad pack header");
		snprintf(hdr_arg, sizeof(hdr_arg),
			 "--pack_header=%"PRIu32",%"PRIu32,
			 ntohl(header.hdr_version), ntohl(header.hdr_entries));
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		*av++ = "--shallow-file";
		*av++ = alternate_shallow_file;
	}

	if (do_keep) {
		if (pack_lockfile)
			cmd.out = -1;
		*av++ = cmd_name = "index-pack";
		*av++ = "--stdin";
		if (!args->quiet && !args->no_progress)
			*av++ = "-v";
		if (args->use_thin_pack)
			*av++ = "--fix-thin";
		if (args->lock_pack || unpack_limit) {
			int s = sprintf(keep_arg,
				"--keep=fetch-pack %"PRIuMAX " on ", (uintmax_t) getpid());
			if (gethostname(keep_arg + s, sizeof(keep_arg) - s))
				strcpy(keep_arg + s, "localhost");
			*av++ = keep_arg;
		}
		if (args->check_self_contained_and_connected)
			*av++ = "--check-self-contained-and-connected";
	}
	else {
		*av++ = cmd_name = "unpack-objects";
		if (args->quiet || args->no_progress)
			*av++ = "-q";
		args->check_self_contained_and_connected = 0;
	}
	if (*hdr_arg)
		*av++ = hdr_arg;
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0)
		*av++ = "--strict";
	*av++ = NULL;

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die("fetch-pack: unable to fork off %s", cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die("%s failed", cmd_name);
	if (use_sideband && finish_async(&demux))
		die("error in sideband demultiplexer");
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

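/*
 * Drive one fetch: parse the capabilities the server advertised,
 * check whether we need a pack at all, negotiate a common base, and
 * finally download and store the pack.
 */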
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	unsigned char sha1[20];
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	qsort(sought, nr_sought, sizeof(*sought), cmp_ref_by_name);

	if (is_repository_shallow() && !server_supports("shallow"))
		die("Server does not support shallow clients");
	if (server_supports("multi_ack_detailed")) {
		if (args->verbose)
			fprintf(stderr, "Server supports multi_ack_detailed\n");
		multi_ack = 2;
		if (server_supports("no-done")) {
			if (args->verbose)
				fprintf(stderr, "Server supports no-done\n");
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		if (args->verbose)
			fprintf(stderr, "Server supports multi_ack\n");
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		if (args->verbose)
			fprintf(stderr, "Server supports side-band-64k\n");
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		if (args->verbose)
			fprintf(stderr, "Server supports side-band\n");
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		if (args->verbose)
			fprintf(stderr, "Server supports allow-tip-sha1-in-want\n");
		allow_tip_sha1_in_want = 1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta")) {
		if (args->verbose)
			fprintf(stderr, "Server supports ofs-delta\n");
	} else
		prefer_ofs_delta = 0;

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (args->verbose && agent_len)
			fprintf(stderr, "Server version is %.*s\n",
				agent_len, agent_feature);
	}

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, sha1, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning("no common commits");

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->depth > 0)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die("git fetch-pack: fetch failed.");

 all_done:
	return ref;
}

static int fetch_pack_config(const char *var, const char *value, void *cb)
{
	if (strcmp(var, "fetch.unpacklimit") == 0) {
		fetch_unpack_limit = git_config_int(var, value);
		return 0;
	}

	if (strcmp(var, "transfer.unpacklimit") == 0) {
		transfer_unpack_limit = git_config_int(var, value);
		return 0;
	}

	if (strcmp(var, "repack.usedeltabaseoffset") == 0) {
		prefer_ofs_delta = git_config_bool(var, value);
		return 0;
	}

	if (!strcmp(var, "fetch.fsckobjects")) {
		fetch_fsck_objects = git_config_bool(var, value);
		return 0;
	}

	if (!strcmp(var, "transfer.fsckobjects")) {
		transfer_fsck_objects = git_config_bool(var, value);
		return 0;
	}

	return git_default_config(var, value, cb);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	git_config(fetch_pack_config, NULL);
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

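/*
 * Drop later duplicates from the array of requested refs, keeping the
 * first occurrence of each name; returns the new number of entries.
 */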
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

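/*
 * Update .git/shallow after the pack has been stored: commit or roll
 * back the shallow lock for --depth/--unshallow fetches, accept new
 * shallow roots when cloning or when --update-shallow is in effect,
 * and otherwise reject refs that would require .git/shallow to change.
 */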
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct sha1_array ref = SHA1_ARRAY_INIT;
	int *status;
	int i;

	if (args->depth > 0 && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path("shallow"));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_sha1_file(sha1[i]))
				sha1_array_append(&extra, sha1[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		sha1_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		sha1_array_append(&ref, sought[i]->old_sha1);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct sha1_array extra = SHA1_ARRAY_INIT;
		unsigned char (*sha1)[20] = si->shallow->sha1;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			sha1_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			sha1_array_append(&extra, sha1[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			sha1_array_append(&extra, sha1[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		sha1_array_clear(&extra);
		sha1_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	sha1_array_clear(&ref);
}

struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct sha1_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die("no matching remote head");
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git();
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}