1#include "cache.h"
2#include "repository.h"
3#include "config.h"
4#include "lockfile.h"
5#include "refs.h"
6#include "pkt-line.h"
7#include "commit.h"
8#include "tag.h"
9#include "exec-cmd.h"
10#include "pack.h"
11#include "sideband.h"
12#include "fetch-pack.h"
13#include "remote.h"
14#include "run-command.h"
15#include "connect.h"
16#include "transport.h"
17#include "version.h"
18#include "prio-queue.h"
19#include "sha1-array.h"
20#include "oidset.h"
21#include "packfile.h"
22#include "object-store.h"
23#include "connected.h"
24
/*
 * Unpack limits; -1 means "not set".  Presumably resolved from the
 * transfer.unpackLimit / fetch.unpackLimit configuration elsewhere in
 * this file (config parsing is outside this chunk) -- confirm there.
 */
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;		/* server advertised "deepen-since" */
static int deepen_not_ok;		/* server advertised "deepen-not" */
static int fetch_fsck_objects = -1;	/* -1 = unset */
static int transfer_fsck_objects = -1;	/* -1 = unset */
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
#define COMMON (1U << 1)
#define COMMON_REF (1U << 2)
#define SEEN (1U << 3)
#define POPPED (1U << 4)
#define ALTERNATE (1U << 5)

/* Set once find_common() has put marks on refs that may need clearing. */
static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

/* Commits we may still offer as "have", most recent first. */
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1 02
static unsigned int allow_unadvertised_object_request;
62
63__attribute__((format (printf, 2, 3)))
64static inline void print_verbose(const struct fetch_pack_args *args,
65 const char *fmt, ...)
66{
67 va_list params;
68
69 if (!args->verbose)
70 return;
71
72 va_start(params, fmt);
73 vfprintf(stderr, fmt, params);
74 va_end(params);
75 fputc('\n', stderr);
76}
77
/* Growable array of objects found at alternates' ref tips. */
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;	/* used / allocated entries in items[] */
};
82
83static void cache_one_alternate(const char *refname,
84 const struct object_id *oid,
85 void *vcache)
86{
87 struct alternate_object_cache *cache = vcache;
88 struct object *obj = parse_object(oid);
89
90 if (!obj || (obj->flags & ALTERNATE))
91 return;
92
93 obj->flags |= ALTERNATE;
94 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
95 cache->items[cache->nr++] = obj;
96}
97
98static void for_each_cached_alternate(void (*cb)(struct object *))
99{
100 static int initialized;
101 static struct alternate_object_cache cache;
102 size_t i;
103
104 if (!initialized) {
105 for_each_alternate_ref(cache_one_alternate, &cache);
106 initialized = 1;
107 }
108
109 for (i = 0; i < cache.nr; i++)
110 cb(cache.items[i]);
111}
112
113static void rev_list_push(struct commit *commit, int mark)
114{
115 if (!(commit->object.flags & mark)) {
116 commit->object.flags |= mark;
117
118 if (parse_commit(commit))
119 return;
120
121 prio_queue_put(&rev_list, commit);
122
123 if (!(commit->object.flags & COMMON))
124 non_common_revs++;
125 }
126}
127
128static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
129{
130 struct object *o = deref_tag(parse_object(oid), refname, 0);
131
132 if (o && o->type == OBJ_COMMIT)
133 rev_list_push((struct commit *)o, SEEN);
134
135 return 0;
136}
137
/* for_each_ref() adapter around rev_list_insert_ref(); flag/cb_data unused. */
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
143
144static int clear_marks(const char *refname, const struct object_id *oid,
145 int flag, void *cb_data)
146{
147 struct object *o = deref_tag(parse_object(oid), refname, 0);
148
149 if (o && o->type == OBJ_COMMIT)
150 clear_commit_marks((struct commit *)o,
151 COMMON | COMMON_REF | SEEN | POPPED);
152 return 0;
153}
154
155/*
156 This function marks a rev and its ancestors as common.
157 In some cases, it is desirable to mark only the ancestors (for example
158 when only the server does not yet know that they are common).
159*/
160
161static void mark_common(struct commit *commit,
162 int ancestors_only, int dont_parse)
163{
164 if (commit != NULL && !(commit->object.flags & COMMON)) {
165 struct object *o = (struct object *)commit;
166
167 if (!ancestors_only)
168 o->flags |= COMMON;
169
170 if (!(o->flags & SEEN))
171 rev_list_push(commit, SEEN);
172 else {
173 struct commit_list *parents;
174
175 if (!ancestors_only && !(o->flags & POPPED))
176 non_common_revs--;
177 if (!o->parsed && !dont_parse)
178 if (parse_commit(commit))
179 return;
180
181 for (parents = commit->parents;
182 parents;
183 parents = parents->next)
184 mark_common(parents->item, 0, dont_parse);
185 }
186 }
187}
188
189/*
190 Get the next rev to send, ignoring the common.
191*/
192
193static const struct object_id *get_rev(void)
194{
195 struct commit *commit = NULL;
196
197 while (commit == NULL) {
198 unsigned int mark;
199 struct commit_list *parents;
200
201 if (rev_list.nr == 0 || non_common_revs == 0)
202 return NULL;
203
204 commit = prio_queue_get(&rev_list);
205 parse_commit(commit);
206 parents = commit->parents;
207
208 commit->object.flags |= POPPED;
209 if (!(commit->object.flags & COMMON))
210 non_common_revs--;
211
212 if (commit->object.flags & COMMON) {
213 /* do not send "have", and ignore ancestors */
214 commit = NULL;
215 mark = COMMON | SEEN;
216 } else if (commit->object.flags & COMMON_REF)
217 /* send "have", and ignore ancestors */
218 mark = COMMON | SEEN;
219 else
220 /* send "have", also for its ancestors */
221 mark = SEEN;
222
223 while (parents) {
224 if (!(parents->item->object.flags & SEEN))
225 rev_list_push(parents->item, mark);
226 if (mark & COMMON)
227 mark_common(parents->item, 1, 0);
228 parents = parents->next;
229 }
230 }
231
232 return &commit->object.oid;
233}
234
/*
 * Server responses to our "have" lines.  The ACK_* variants beyond
 * plain ACK are only produced under multi_ack / multi_ack_detailed.
 */
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};
242
243static void consume_shallow_list(struct fetch_pack_args *args, int fd)
244{
245 if (args->stateless_rpc && args->deepen) {
246 /* If we sent a depth we will get back "duplicate"
247 * shallow and unshallow commands every time there
248 * is a block of have lines exchanged.
249 */
250 char *line;
251 while ((line = packet_read_line(fd, NULL))) {
252 if (starts_with(line, "shallow "))
253 continue;
254 if (starts_with(line, "unshallow "))
255 continue;
256 die(_("git fetch-pack: expected shallow list"));
257 }
258 }
259}
260
261static enum ack_type get_ack(int fd, struct object_id *result_oid)
262{
263 int len;
264 char *line = packet_read_line(fd, &len);
265 const char *arg;
266
267 if (!line)
268 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
269 if (!strcmp(line, "NAK"))
270 return NAK;
271 if (skip_prefix(line, "ACK ", &arg)) {
272 if (!get_oid_hex(arg, result_oid)) {
273 arg += 40;
274 len -= arg - line;
275 if (len < 1)
276 return ACK;
277 if (strstr(arg, "continue"))
278 return ACK_continue;
279 if (strstr(arg, "common"))
280 return ACK_common;
281 if (strstr(arg, "ready"))
282 return ACK_ready;
283 return ACK;
284 }
285 }
286 if (skip_prefix(line, "ERR ", &arg))
287 die(_("remote error: %s"), arg);
288 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
289}
290
291static void send_request(struct fetch_pack_args *args,
292 int fd, struct strbuf *buf)
293{
294 if (args->stateless_rpc) {
295 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
296 packet_flush(fd);
297 } else
298 write_or_die(fd, buf->buf, buf->len);
299}
300
/* for_each_cached_alternate() callback: queue an alternate's object as a rev. */
static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
305
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

/*
 * Compute how many "have"s to send before the next flush, given how
 * many we have sent so far.  Both modes double the window while it is
 * small; stateless-rpc then grows it by 10% per round (big windows keep
 * HTTP round trips down), while the bidirectional case grows linearly
 * in pipe-safe increments.
 */
static int next_flush(int stateless_rpc, int count)
{
	int doubling_cap = stateless_rpc ? LARGE_FLUSH : PIPESAFE_FLUSH;

	if (count < doubling_cap)
		return count << 1;

	return stateless_rpc ? count * 11 / 10 : count + PIPESAFE_FLUSH;
}
325
/*
 * Negotiate a common commit set with the server (protocol v0/v1):
 * send the "want"s (capabilities piggy-backed on the first one) and
 * any shallow/deepen requests, then stream "have" lines in growing
 * batches, reading ACK/NAK responses until the server is ready or we
 * give up.  The last ACKed object id is left in *result_oid.
 *
 * Returns 1 when there is nothing to fetch, 0 when a common base was
 * found, -1 otherwise -- except that fetching into an empty repository
 * (no "have" ever sent) is not an error; see the final return.
 */
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	/* drop marks left by a previous negotiation round */
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	/* seed rev_list with our ref tips and alternates' tips */
	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			/* the first want line carries our capability list */
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		/* everything wanted is already complete locally */
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	/* remember how much of req_buf is per-request preamble (rpc replay) */
	state_len = req_buf.len;

	if (args->deepen) {
		/* the server answers a deepen request before the haves begin */
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			/* keep only the replayable preamble for the next round */
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					/* single-ack server: negotiation is over */
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						/* server has enough; stop offering haves */
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	/* drain outstanding responses for the flushes still in flight */
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
569
570static struct commit_list *complete;
571
572static int mark_complete(const struct object_id *oid)
573{
574 struct object *o = parse_object(oid);
575
576 while (o && o->type == OBJ_TAG) {
577 struct tag *t = (struct tag *) o;
578 if (!t->tagged)
579 break; /* broken repository */
580 o->flags |= COMPLETE;
581 o = parse_object(&t->tagged->oid);
582 }
583 if (o && o->type == OBJ_COMMIT) {
584 struct commit *commit = (struct commit *)o;
585 if (!(commit->object.flags & COMPLETE)) {
586 commit->object.flags |= COMPLETE;
587 commit_list_insert(commit, &complete);
588 }
589 }
590 return 0;
591}
592
/* for_each_ref() adapter around mark_complete(); flag/cb_data unused. */
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
598
599static void mark_recent_complete_commits(struct fetch_pack_args *args,
600 timestamp_t cutoff)
601{
602 while (complete && cutoff <= complete->item->date) {
603 print_verbose(args, _("Marking %s as complete"),
604 oid_to_hex(&complete->item->object.oid));
605 pop_most_recent_commit(&complete, COMPLETE);
606 }
607}
608
609static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
610{
611 for (; refs; refs = refs->next)
612 oidset_insert(oids, &refs->old_oid);
613}
614
/*
 * Does "id" appear among the tips of either ref list?  The set is
 * filled lazily from both lists on the first call (detected by the
 * map still being unallocated) and reused afterwards.
 */
static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
631
/*
 * Trim the advertised ref list down to what the caller asked for.
 *
 * On return, *refs holds the advertised refs that matched "sought" (or
 * everything kept by fetch_all), followed by copies of sought entries
 * that name a raw object id the server did not advertise but that may
 * be requested anyway.  Matched sought entries get match_status set;
 * discarded advertised refs are freed.
 *
 * The single-pass merge below relies on both *refs and sought[] being
 * sorted by refname (done in do_fetch_pack() before calling here).
 */
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			/* advance through the sorted sought[] up to this name */
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		/* only a full hex object name can be requested unadvertised */
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	/* free the advertised refs we are not interested in */
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
711
/* for_each_cached_alternate() callback: alternates' objects count as complete. */
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}
716
717struct loose_object_iter {
718 struct oidset *loose_object_set;
719 struct ref *refs;
720};
721
722/*
723 * If the number of refs is not larger than the number of loose objects,
724 * this function stops inserting.
725 */
726static int add_loose_objects_to_set(const struct object_id *oid,
727 const char *path,
728 void *data)
729{
730 struct loose_object_iter *iter = data;
731 oidset_insert(iter->loose_object_set, oid);
732 if (iter->refs == NULL)
733 return 1;
734
735 iter->refs = iter->refs->next;
736 return 0;
737}
738
/*
 * Decide whether the fetch can be satisfied entirely from local
 * objects.  Scans the advertised refs for objects we already have,
 * marks complete local history (bounded by the newest remote commit
 * date), flags complete remote tips as COMMON_REF for the upcoming
 * negotiation, and filters *refs down to what was sought.
 *
 * Returns 1 if every remaining ref is already complete locally
 * (nothing to fetch), 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects or know refs are not so many. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	/* anything left that is not COMPLETE still needs fetching */
	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}
838
/*
 * Async callback: demultiplex the sideband stream coming from
 * upload-pack (data points at the fd pair; we read fds[0]) onto "out",
 * then close "out" so the consumer sees EOF.  "in" is unused.
 */
static int sideband_demux(int in, int out, void *data)
{
	int *fds = data;
	int ret = recv_sideband("fetch-pack", fds[0], out);

	close(out);
	return ret;
}
848
/*
 * Receive the packfile from upload-pack over xd[] and feed it to a
 * child process: index-pack (keeping the pack) when it is large, comes
 * from a promisor remote, or --keep was asked for; unpack-objects
 * otherwise.  When sideband is active, a demux async task sits between
 * the socket and the child.  On success returns 0 and, if requested,
 * stores the keep-file path in *pack_lockfile (caller frees); dies on
 * any failure.
 */
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		/* peek at the header to count objects and pick the consumer */
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		/* we already consumed the header; replay it for the child */
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
967
968static int cmp_ref_by_name(const void *a_, const void *b_)
969{
970 const struct ref *a = *((const struct ref **)a_);
971 const struct ref *b = *((const struct ref **)b_);
972 return strcmp(a->name, b->name);
973}
974
/*
 * Drive a protocol v0/v1 fetch over fd[]: parse the server's
 * capability advertisement into the module-level flags, short-circuit
 * when everything is already local, otherwise negotiate common commits
 * (find_common), set up shallow bookkeeping, and receive the pack
 * (get_pack).  Returns the (sorted, filtered) copy of the advertised
 * refs; caller owns the list.
 */
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	/* both lists must be name-sorted; filter_refs() merges them */
	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow(the_repository)) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	/* downgrade requests the server cannot honor */
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
1084
1085static void add_shallow_requests(struct strbuf *req_buf,
1086 const struct fetch_pack_args *args)
1087{
1088 if (is_repository_shallow(the_repository))
1089 write_shallow_commits(req_buf, 1, NULL);
1090 if (args->depth > 0)
1091 packet_buf_write(req_buf, "deepen %d", args->depth);
1092 if (args->deepen_since) {
1093 timestamp_t max_age = approxidate(args->deepen_since);
1094 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1095 }
1096 if (args->deepen_not) {
1097 int i;
1098 for (i = 0; i < args->deepen_not->nr; i++) {
1099 struct string_list_item *s = args->deepen_not->items + i;
1100 packet_buf_write(req_buf, "deepen-not %s", s->string);
1101 }
1102 }
1103}
1104
1105static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1106{
1107 int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
1108
1109 for ( ; wants ; wants = wants->next) {
1110 const struct object_id *remote = &wants->old_oid;
1111 struct object *o;
1112
1113 /*
1114 * If that object is complete (i.e. it is an ancestor of a
1115 * local ref), we tell them we have it but do not have to
1116 * tell them about its ancestors, which they already know
1117 * about.
1118 *
1119 * We use lookup_object here because we are only
1120 * interested in the case we *know* the object is
1121 * reachable and we have already scanned it.
1122 */
1123 if (((o = lookup_object(remote->hash)) != NULL) &&
1124 (o->flags & COMPLETE)) {
1125 continue;
1126 }
1127
1128 if (!use_ref_in_want || wants->exact_oid)
1129 packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
1130 else
1131 packet_buf_write(req_buf, "want-ref %s\n", wants->name);
1132 }
1133}
1134
1135static void add_common(struct strbuf *req_buf, struct oidset *common)
1136{
1137 struct oidset_iter iter;
1138 const struct object_id *oid;
1139 oidset_iter_init(common, &iter);
1140
1141 while ((oid = oidset_iter_next(&iter))) {
1142 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1143 }
1144}
1145
1146static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
1147{
1148 int ret = 0;
1149 int haves_added = 0;
1150 const struct object_id *oid;
1151
1152 while ((oid = get_rev())) {
1153 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1154 if (++haves_added >= *haves_to_send)
1155 break;
1156 }
1157
1158 *in_vain += haves_added;
1159 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1160 /* Send Done */
1161 packet_buf_write(req_buf, "done\n");
1162 ret = 1;
1163 }
1164
1165 /* Increase haves to send on next round */
1166 *haves_to_send = next_flush(1, *haves_to_send);
1167
1168 return ret;
1169}
1170
1171static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
1172 const struct ref *wants, struct oidset *common,
1173 int *haves_to_send, int *in_vain)
1174{
1175 int ret = 0;
1176 struct strbuf req_buf = STRBUF_INIT;
1177
1178 if (server_supports_v2("fetch", 1))
1179 packet_buf_write(&req_buf, "command=fetch");
1180 if (server_supports_v2("agent", 0))
1181 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1182 if (args->server_options && args->server_options->nr &&
1183 server_supports_v2("server-option", 1)) {
1184 int i;
1185 for (i = 0; i < args->server_options->nr; i++)
1186 packet_write_fmt(fd_out, "server-option=%s",
1187 args->server_options->items[i].string);
1188 }
1189
1190 packet_buf_delim(&req_buf);
1191 if (args->use_thin_pack)
1192 packet_buf_write(&req_buf, "thin-pack");
1193 if (args->no_progress)
1194 packet_buf_write(&req_buf, "no-progress");
1195 if (args->include_tag)
1196 packet_buf_write(&req_buf, "include-tag");
1197 if (prefer_ofs_delta)
1198 packet_buf_write(&req_buf, "ofs-delta");
1199
1200 /* Add shallow-info and deepen request */
1201 if (server_supports_feature("fetch", "shallow", 0))
1202 add_shallow_requests(&req_buf, args);
1203 else if (is_repository_shallow(the_repository) || args->deepen)
1204 die(_("Server does not support shallow requests"));
1205
1206 /* Add filter */
1207 if (server_supports_feature("fetch", "filter", 0) &&
1208 args->filter_options.choice) {
1209 print_verbose(args, _("Server supports filter"));
1210 packet_buf_write(&req_buf, "filter %s",
1211 args->filter_options.filter_spec);
1212 } else if (args->filter_options.choice) {
1213 warning("filtering not recognized by server, ignoring");
1214 }
1215
1216 /* add wants */
1217 add_wants(wants, &req_buf);
1218
1219 if (args->no_dependents) {
1220 packet_buf_write(&req_buf, "done");
1221 ret = 1;
1222 } else {
1223 /* Add all of the common commits we've found in previous rounds */
1224 add_common(&req_buf, common);
1225
1226 /* Add initial haves */
1227 ret = add_haves(&req_buf, haves_to_send, in_vain);
1228 }
1229
1230 /* Send request */
1231 packet_buf_flush(&req_buf);
1232 write_or_die(fd_out, req_buf.buf, req_buf.len);
1233
1234 strbuf_release(&req_buf);
1235 return ret;
1236}
1237
1238/*
1239 * Processes a section header in a server's response and checks if it matches
1240 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1241 * not consumed); if 0, the line will be consumed and the function will die if
1242 * the section header doesn't match what was expected.
1243 */
1244static int process_section_header(struct packet_reader *reader,
1245 const char *section, int peek)
1246{
1247 int ret;
1248
1249 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1250 die("error reading section header '%s'", section);
1251
1252 ret = !strcmp(reader->line, section);
1253
1254 if (!peek) {
1255 if (!ret)
1256 die("expected '%s', received '%s'",
1257 section, reader->line);
1258 packet_reader_read(reader);
1259 }
1260
1261 return ret;
1262}
1263
1264static int process_acks(struct packet_reader *reader, struct oidset *common)
1265{
1266 /* received */
1267 int received_ready = 0;
1268 int received_ack = 0;
1269
1270 process_section_header(reader, "acknowledgments", 0);
1271 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1272 const char *arg;
1273
1274 if (!strcmp(reader->line, "NAK"))
1275 continue;
1276
1277 if (skip_prefix(reader->line, "ACK ", &arg)) {
1278 struct object_id oid;
1279 if (!get_oid_hex(arg, &oid)) {
1280 struct commit *commit;
1281 oidset_insert(common, &oid);
1282 commit = lookup_commit(&oid);
1283 mark_common(commit, 0, 1);
1284 }
1285 continue;
1286 }
1287
1288 if (!strcmp(reader->line, "ready")) {
1289 clear_prio_queue(&rev_list);
1290 received_ready = 1;
1291 continue;
1292 }
1293
1294 die("unexpected acknowledgment line: '%s'", reader->line);
1295 }
1296
1297 if (reader->status != PACKET_READ_FLUSH &&
1298 reader->status != PACKET_READ_DELIM)
1299 die("error processing acks: %d", reader->status);
1300
1301 /* return 0 if no common, 1 if there are common, or 2 if ready */
1302 return received_ready ? 2 : (received_ack ? 1 : 0);
1303}
1304
1305static void receive_shallow_info(struct fetch_pack_args *args,
1306 struct packet_reader *reader)
1307{
1308 process_section_header(reader, "shallow-info", 0);
1309 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1310 const char *arg;
1311 struct object_id oid;
1312
1313 if (skip_prefix(reader->line, "shallow ", &arg)) {
1314 if (get_oid_hex(arg, &oid))
1315 die(_("invalid shallow line: %s"), reader->line);
1316 register_shallow(the_repository, &oid);
1317 continue;
1318 }
1319 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1320 if (get_oid_hex(arg, &oid))
1321 die(_("invalid unshallow line: %s"), reader->line);
1322 if (!lookup_object(oid.hash))
1323 die(_("object not found: %s"), reader->line);
1324 /* make sure that it is parsed as shallow */
1325 if (!parse_object(&oid))
1326 die(_("error in object: %s"), reader->line);
1327 if (unregister_shallow(&oid))
1328 die(_("no shallow found: %s"), reader->line);
1329 continue;
1330 }
1331 die(_("expected shallow/unshallow, got %s"), reader->line);
1332 }
1333
1334 if (reader->status != PACKET_READ_FLUSH &&
1335 reader->status != PACKET_READ_DELIM)
1336 die("error processing shallow info: %d", reader->status);
1337
1338 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
1339 args->deepen = 1;
1340}
1341
1342static void receive_wanted_refs(struct packet_reader *reader, struct ref *refs)
1343{
1344 process_section_header(reader, "wanted-refs", 0);
1345 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1346 struct object_id oid;
1347 const char *end;
1348 struct ref *r = NULL;
1349
1350 if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
1351 die("expected wanted-ref, got '%s'", reader->line);
1352
1353 for (r = refs; r; r = r->next) {
1354 if (!strcmp(end, r->name)) {
1355 oidcpy(&r->old_oid, &oid);
1356 break;
1357 }
1358 }
1359
1360 if (!r)
1361 die("unexpected wanted-ref: '%s'", reader->line);
1362 }
1363
1364 if (reader->status != PACKET_READ_DELIM)
1365 die("error processing wanted refs: %d", reader->status);
1366}
1367
/*
 * States of the protocol-v2 fetch state machine driven by
 * do_fetch_pack_v2() below.
 */
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,	/* mark complete refs; decide whether to fetch */
	FETCH_SEND_REQUEST,	/* send wants/haves to the server */
	FETCH_PROCESS_ACKS,	/* read the server's acknowledgments section */
	FETCH_GET_PACK,		/* read shallow/wanted-refs info and the pack */
	FETCH_DONE,		/* terminal state */
};
1375
/*
 * Perform a protocol-v2 fetch negotiation over fd (fd[0] = read from
 * server, fd[1] = write to server) and download the resulting pack.
 * Returns a copy of orig_ref; dies on fetch failure.
 */
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;			/* haves sent since the last ACK */
	int haves_to_send = INITIAL_FLUSH;	/* batch size; grown by add_haves() */
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* Clear marks left over from an earlier negotiation. */
			if (marked)
				for_each_ref(clear_marks, NULL);
			marked = 1;

			/* Seed the revision walk with all local ref tips. */
			for_each_ref(rev_list_insert_ref_oid, NULL);
			for_each_cached_alternate(insert_one_alternate_object);

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (everything_local(args, &ref, sought, nr_sought))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;
			break;
		case FETCH_SEND_REQUEST:
			/* Nonzero means we sent "done"; expect a pack next. */
			if (send_fetch_request(fd[1], args, ref, &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&reader, &common)) {
			case 2:
				/* Server is "ready": pack follows. */
				state = FETCH_GET_PACK;
				break;
			case 1:
				/* At least one ACK: progress was made. */
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, ref);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	oidset_clear(&common);
	return ref;
}
1460
/*
 * Read the configuration variables that drive fetch-pack into the
 * file-scope settings above.  Variables left unset keep their
 * compiled-in defaults (the limits use -1 as the "unset" sentinel,
 * see fetch_pack_setup()).
 */
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	/* Pick up the remaining standard settings. */
	git_config(git_default_config, NULL);
}
1471
1472static void fetch_pack_setup(void)
1473{
1474 static int did_setup;
1475 if (did_setup)
1476 return;
1477 fetch_pack_config();
1478 if (0 <= transfer_unpack_limit)
1479 unpack_limit = transfer_unpack_limit;
1480 else if (0 <= fetch_unpack_limit)
1481 unpack_limit = fetch_unpack_limit;
1482 did_setup = 1;
1483}
1484
1485static int remove_duplicates_in_refs(struct ref **ref, int nr)
1486{
1487 struct string_list names = STRING_LIST_INIT_NODUP;
1488 int src, dst;
1489
1490 for (src = dst = 0; src < nr; src++) {
1491 struct string_list_item *item;
1492 item = string_list_insert(&names, ref[src]->name);
1493 if (item->util)
1494 continue; /* already have it */
1495 item->util = ref[src];
1496 if (src != dst)
1497 ref[dst] = ref[src];
1498 dst++;
1499 }
1500 for (src = dst; src < nr; src++)
1501 ref[src] = NULL;
1502 string_list_clear(&names, 0);
1503 return dst;
1504}
1505
/*
 * After a successful fetch, reconcile .git/shallow with what the server
 * sent: commit or roll back the pending shallow-file lock for deepening
 * fetches, accept the remote's shallow roots where safe, and mark refs
 * that cannot be updated without changing .git/shallow as rejected.
 */
static void update_shallow(struct fetch_pack_args *args,
			   struct ref *refs,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;
	struct ref *r;

	/* A deepening fetch wrote a new shallow file under lock. */
	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	/* Drop "their" shallow roots that did not make it into our repo. */
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	/* Collect the fetched ref tips for reachability analysis. */
	for (r = refs; r; r = r->next)
		oid_array_append(&ref, &r->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(ref.nr, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		/* status[i] is set for refs that would need new shallow roots. */
		for (r = refs, i = 0; r; r = r->next, i++)
			if (status[i])
				r->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
1600
1601static int iterate_ref_map(void *cb_data, struct object_id *oid)
1602{
1603 struct ref **rm = cb_data;
1604 struct ref *ref = *rm;
1605
1606 if (!ref)
1607 return -1; /* end of the list */
1608 *rm = ref->next;
1609 oidcpy(oid, &ref->old_oid);
1610 return 0;
1611}
1612
1613struct ref *fetch_pack(struct fetch_pack_args *args,
1614 int fd[], struct child_process *conn,
1615 const struct ref *ref,
1616 const char *dest,
1617 struct ref **sought, int nr_sought,
1618 struct oid_array *shallow,
1619 char **pack_lockfile,
1620 enum protocol_version version)
1621{
1622 struct ref *ref_cpy;
1623 struct shallow_info si;
1624
1625 fetch_pack_setup();
1626 if (nr_sought)
1627 nr_sought = remove_duplicates_in_refs(sought, nr_sought);
1628
1629 if (!ref) {
1630 packet_flush(fd[1]);
1631 die(_("no matching remote head"));
1632 }
1633 prepare_shallow_info(&si, shallow);
1634 if (version == protocol_v2)
1635 ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
1636 pack_lockfile);
1637 else
1638 ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
1639 &si, pack_lockfile);
1640 reprepare_packed_git(the_repository);
1641
1642 if (!args->cloning && args->deepen) {
1643 struct check_connected_options opt = CHECK_CONNECTED_INIT;
1644 struct ref *iterator = ref_cpy;
1645 opt.shallow_file = alternate_shallow_file;
1646 if (args->deepen)
1647 opt.is_deepening_fetch = 1;
1648 if (check_connected(iterate_ref_map, &iterator, &opt)) {
1649 error(_("remote did not send all necessary objects"));
1650 free_refs(ref_cpy);
1651 ref_cpy = NULL;
1652 rollback_lock_file(&shallow_lock);
1653 goto cleanup;
1654 }
1655 args->connectivity_checked = 1;
1656 }
1657
1658 update_shallow(args, ref_cpy, &si);
1659cleanup:
1660 clear_shallow_info(&si);
1661 return ref_cpy;
1662}
1663
1664int report_unmatched_refs(struct ref **sought, int nr_sought)
1665{
1666 int i, ret = 0;
1667
1668 for (i = 0; i < nr_sought; i++) {
1669 if (!sought[i])
1670 continue;
1671 switch (sought[i]->match_status) {
1672 case REF_MATCHED:
1673 continue;
1674 case REF_NOT_MATCHED:
1675 error(_("no such remote ref %s"), sought[i]->name);
1676 break;
1677 case REF_UNADVERTISED_NOT_ALLOWED:
1678 error(_("Server does not allow request for unadvertised object %s"),
1679 sought[i]->name);
1680 break;
1681 }
1682 ret = 1;
1683 }
1684 return ret;
1685}