1#include "cache.h"
2#include "repository.h"
3#include "config.h"
4#include "lockfile.h"
5#include "refs.h"
6#include "pkt-line.h"
7#include "commit.h"
8#include "tag.h"
9#include "exec-cmd.h"
10#include "pack.h"
11#include "sideband.h"
12#include "fetch-pack.h"
13#include "remote.h"
14#include "run-command.h"
15#include "connect.h"
16#include "transport.h"
17#include "version.h"
18#include "prio-queue.h"
19#include "sha1-array.h"
20#include "oidset.h"
21#include "packfile.h"
22
/* Config knobs: <0 means "unset"; unpack_limit is the effective value. */
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
/* Server-capability flags, set while parsing the advertisement. */
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
/* Lock and path for the temporary shallow file used during deepening. */
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)	/* reachable from a local ref */
#define COMMON (1U << 1)	/* known to be on both sides */
#define COMMON_REF (1U << 2)	/* remote ref tip that is COMPLETE locally */
#define SEEN (1U << 3)		/* already pushed onto rev_list */
#define POPPED (1U << 4)	/* already taken off rev_list */
#define ALTERNATE (1U << 5)	/* cached alternate-repository tip */

/* Non-zero once object flags have been set and may need clearing. */
static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK, we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

/* Per-negotiation walk state: candidate "have" commits, newest first. */
struct negotiation_state {
	struct prio_queue rev_list;
	int non_common_revs;	/* queued commits not yet known COMMON */
};

static int multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1 02
static unsigned int allow_unadvertised_object_request;
64
65__attribute__((format (printf, 2, 3)))
66static inline void print_verbose(const struct fetch_pack_args *args,
67 const char *fmt, ...)
68{
69 va_list params;
70
71 if (!args->verbose)
72 return;
73
74 va_start(params, fmt);
75 vfprintf(stderr, fmt, params);
76 va_end(params);
77 fputc('\n', stderr);
78}
79
/* Growable list of tip objects advertised by alternate object stores. */
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;	/* used/allocated counts for ALLOC_GROW */
};
84
85static void cache_one_alternate(const char *refname,
86 const struct object_id *oid,
87 void *vcache)
88{
89 struct alternate_object_cache *cache = vcache;
90 struct object *obj = parse_object(oid);
91
92 if (!obj || (obj->flags & ALTERNATE))
93 return;
94
95 obj->flags |= ALTERNATE;
96 ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
97 cache->items[cache->nr++] = obj;
98}
99
100static void for_each_cached_alternate(struct negotiation_state *ns,
101 void (*cb)(struct negotiation_state *,
102 struct object *))
103{
104 static int initialized;
105 static struct alternate_object_cache cache;
106 size_t i;
107
108 if (!initialized) {
109 for_each_alternate_ref(cache_one_alternate, &cache);
110 initialized = 1;
111 }
112
113 for (i = 0; i < cache.nr; i++)
114 cb(ns, cache.items[i]);
115}
116
117static void rev_list_push(struct negotiation_state *ns,
118 struct commit *commit, int mark)
119{
120 if (!(commit->object.flags & mark)) {
121 commit->object.flags |= mark;
122
123 if (parse_commit(commit))
124 return;
125
126 prio_queue_put(&ns->rev_list, commit);
127
128 if (!(commit->object.flags & COMMON))
129 ns->non_common_revs++;
130 }
131}
132
133static int rev_list_insert_ref(struct negotiation_state *ns,
134 const char *refname,
135 const struct object_id *oid)
136{
137 struct object *o = deref_tag(parse_object(oid), refname, 0);
138
139 if (o && o->type == OBJ_COMMIT)
140 rev_list_push(ns, (struct commit *)o, SEEN);
141
142 return 0;
143}
144
/* for_each_ref() adapter around rev_list_insert_ref(). */
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	struct negotiation_state *ns = cb_data;

	return rev_list_insert_ref(ns, refname, oid);
}
150
151static int clear_marks(const char *refname, const struct object_id *oid,
152 int flag, void *cb_data)
153{
154 struct object *o = deref_tag(parse_object(oid), refname, 0);
155
156 if (o && o->type == OBJ_COMMIT)
157 clear_commit_marks((struct commit *)o,
158 COMMON | COMMON_REF | SEEN | POPPED);
159 return 0;
160}
161
162/*
163 This function marks a rev and its ancestors as common.
164 In some cases, it is desirable to mark only the ancestors (for example
165 when only the server does not yet know that they are common).
166*/
167
168static void mark_common(struct negotiation_state *ns, struct commit *commit,
169 int ancestors_only, int dont_parse)
170{
171 if (commit != NULL && !(commit->object.flags & COMMON)) {
172 struct object *o = (struct object *)commit;
173
174 if (!ancestors_only)
175 o->flags |= COMMON;
176
177 if (!(o->flags & SEEN))
178 rev_list_push(ns, commit, SEEN);
179 else {
180 struct commit_list *parents;
181
182 if (!ancestors_only && !(o->flags & POPPED))
183 ns->non_common_revs--;
184 if (!o->parsed && !dont_parse)
185 if (parse_commit(commit))
186 return;
187
188 for (parents = commit->parents;
189 parents;
190 parents = parents->next)
191 mark_common(ns, parents->item, 0,
192 dont_parse);
193 }
194 }
195}
196
197/*
198 Get the next rev to send, ignoring the common.
199*/
200
201static const struct object_id *get_rev(struct negotiation_state *ns)
202{
203 struct commit *commit = NULL;
204
205 while (commit == NULL) {
206 unsigned int mark;
207 struct commit_list *parents;
208
209 if (ns->rev_list.nr == 0 || ns->non_common_revs == 0)
210 return NULL;
211
212 commit = prio_queue_get(&ns->rev_list);
213 parse_commit(commit);
214 parents = commit->parents;
215
216 commit->object.flags |= POPPED;
217 if (!(commit->object.flags & COMMON))
218 ns->non_common_revs--;
219
220 if (commit->object.flags & COMMON) {
221 /* do not send "have", and ignore ancestors */
222 commit = NULL;
223 mark = COMMON | SEEN;
224 } else if (commit->object.flags & COMMON_REF)
225 /* send "have", and ignore ancestors */
226 mark = COMMON | SEEN;
227 else
228 /* send "have", also for its ancestors */
229 mark = SEEN;
230
231 while (parents) {
232 if (!(parents->item->object.flags & SEEN))
233 rev_list_push(ns, parents->item, mark);
234 if (mark & COMMON)
235 mark_common(ns, parents->item, 1, 0);
236 parents = parents->next;
237 }
238 }
239
240 return &commit->object.oid;
241}
242
/*
 * Server responses during have/ACK negotiation; see find_common() for
 * how each one is acted upon.
 */
enum ack_type {
	NAK = 0,	/* nothing in the last batch was common */
	ACK,		/* plain ACK: negotiation is over */
	ACK_continue,	/* commit is common; keep sending haves */
	ACK_common,	/* commit is common (multi_ack_detailed) */
	ACK_ready	/* server can already build the pack */
};
250
251static void consume_shallow_list(struct fetch_pack_args *args, int fd)
252{
253 if (args->stateless_rpc && args->deepen) {
254 /* If we sent a depth we will get back "duplicate"
255 * shallow and unshallow commands every time there
256 * is a block of have lines exchanged.
257 */
258 char *line;
259 while ((line = packet_read_line(fd, NULL))) {
260 if (starts_with(line, "shallow "))
261 continue;
262 if (starts_with(line, "unshallow "))
263 continue;
264 die(_("git fetch-pack: expected shallow list"));
265 }
266 }
267}
268
269static enum ack_type get_ack(int fd, struct object_id *result_oid)
270{
271 int len;
272 char *line = packet_read_line(fd, &len);
273 const char *arg;
274
275 if (!line)
276 die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
277 if (!strcmp(line, "NAK"))
278 return NAK;
279 if (skip_prefix(line, "ACK ", &arg)) {
280 if (!get_oid_hex(arg, result_oid)) {
281 arg += 40;
282 len -= arg - line;
283 if (len < 1)
284 return ACK;
285 if (strstr(arg, "continue"))
286 return ACK_continue;
287 if (strstr(arg, "common"))
288 return ACK_common;
289 if (strstr(arg, "ready"))
290 return ACK_ready;
291 return ACK;
292 }
293 }
294 if (skip_prefix(line, "ERR ", &arg))
295 die(_("remote error: %s"), arg);
296 die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
297}
298
299static void send_request(struct fetch_pack_args *args,
300 int fd, struct strbuf *buf)
301{
302 if (args->stateless_rpc) {
303 send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
304 packet_flush(fd);
305 } else
306 write_or_die(fd, buf->buf, buf->len);
307}
308
309static void insert_one_alternate_object(struct negotiation_state *ns,
310 struct object *obj)
311{
312 rev_list_insert_ref(ns, NULL, &obj->oid);
313}
314
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

/*
 * Size of the next "have" window before we flush and wait: it doubles
 * while small, then grows gently — by 10% per round over stateless RPC,
 * by a fixed step otherwise.
 */
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc)
		return count < LARGE_FLUSH ? count << 1 : count * 11 / 10;

	return count < PIPESAFE_FLUSH ? count << 1 : count + PIPESAFE_FLUSH;
}
334
/*
 * Negotiate over fd[] which commits both sides already share.
 *
 * Sends "want" lines (the first one annotated with our capabilities)
 * for every ref we still need, handles the shallow/deepen exchange if
 * requested, then streams "have" lines in windows sized by next_flush()
 * until the server ACKs a common commit or we give up.  The last ACKed
 * object id is left in *result_oid.
 *
 * Returns 1 when everything is complete locally and nothing needs
 * fetching, 0 when a common base was found (or the local repository is
 * empty), and -1 otherwise.
 */
static int find_common(struct negotiation_state *ns,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	/* seed the "have" walk with all local ref tips and alternates */
	for_each_ref(rev_list_insert_ref_oid, ns);
	for_each_cached_alternate(ns, insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			/* capabilities ride along on the first want line */
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		/* nothing to fetch; tell the server we are done */
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		/* the server answers deepening with a shallow/unshallow block */
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	/* main have/ACK loop */
	while ((oid = get_rev(ns))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					/* negotiation is over */
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					int was_common;
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					was_common = commit->object.flags & COMMON;
					mark_common(ns, commit, 0, 1);
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !was_common) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready)
						got_ready = 1;
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
			if (got_ready)
				break;
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	/* drain remaining server output so the connection stays in sync */
	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
578
579static struct commit_list *complete;
580
581static int mark_complete(const struct object_id *oid)
582{
583 struct object *o = parse_object(oid);
584
585 while (o && o->type == OBJ_TAG) {
586 struct tag *t = (struct tag *) o;
587 if (!t->tagged)
588 break; /* broken repository */
589 o->flags |= COMPLETE;
590 o = parse_object(&t->tagged->oid);
591 }
592 if (o && o->type == OBJ_COMMIT) {
593 struct commit *commit = (struct commit *)o;
594 if (!(commit->object.flags & COMPLETE)) {
595 commit->object.flags |= COMPLETE;
596 commit_list_insert(commit, &complete);
597 }
598 }
599 return 0;
600}
601
/* for_each_ref() adapter; refname, flag, and cb_data are unused. */
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
607
608static void mark_recent_complete_commits(struct fetch_pack_args *args,
609 timestamp_t cutoff)
610{
611 while (complete && cutoff <= complete->item->date) {
612 print_verbose(args, _("Marking %s as complete"),
613 oid_to_hex(&complete->item->object.oid));
614 pop_most_recent_commit(&complete, COMPLETE);
615 }
616}
617
618static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
619{
620 for (; refs; refs = refs->next)
621 oidset_insert(oids, &refs->old_oid);
622}
623
624static int tip_oids_contain(struct oidset *tip_oids,
625 struct ref *unmatched, struct ref *newlist,
626 const struct object_id *id)
627{
628 /*
629 * Note that this only looks at the ref lists the first time it's
630 * called. This works out in filter_refs() because even though it may
631 * add to "newlist" between calls, the additions will always be for
632 * oids that are already in the set.
633 */
634 if (!tip_oids->map.map.tablesize) {
635 add_refs_to_oidset(tip_oids, unmatched);
636 add_refs_to_oidset(tip_oids, newlist);
637 }
638 return oidset_contains(tip_oids, id);
639}
640
/*
 * Reduce *refs to the subset we will actually ask for: refs matching an
 * entry in sought[] (both lists are sorted by name, so one index walk
 * suffices — see sort_ref_list()/QSORT in do_fetch_pack()), plus
 * everything when fetch_all is set.  Sought entries that name a raw
 * object id are appended if the server allows requesting unadvertised
 * objects or the id is an advertised tip.  Each sought entry's
 * match_status is updated; unmatched advertised refs are freed.
 */
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			/* both lists sorted: advance i past names < ref->name */
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		/* when deepening, tags are not fetched wholesale */
		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		/* only entries whose "name" is exactly a full object id */
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
720
721static void mark_alternate_complete(struct negotiation_state *unused,
722 struct object *obj)
723{
724 mark_complete(&obj->oid);
725}
726
/*
 * State for add_loose_objects_to_set(): collects loose-object ids into
 * loose_object_set while stepping through "refs" in parallel, so the
 * enumeration can stop early once loose objects outnumber refs.
 */
struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;	/* remaining refs; NULL means stop */
};
731
732/*
733 * If the number of refs is not larger than the number of loose objects,
734 * this function stops inserting.
735 */
736static int add_loose_objects_to_set(const struct object_id *oid,
737 const char *path,
738 void *data)
739{
740 struct loose_object_iter *iter = data;
741 oidset_insert(iter->loose_object_set, oid);
742 if (iter->refs == NULL)
743 return 1;
744
745 iter->refs = iter->refs->next;
746 return 0;
747}
748
749/*
750 * Mark recent commits available locally and reachable from a local ref as
751 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
752 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
753 * thus do not need COMMON_REF marks).
754 *
755 * The cutoff time for recency is determined by this heuristic: it is the
756 * earliest commit time of the objects in refs that are commits and that we know
757 * the commit time of.
758 */
759static void mark_complete_and_common_ref(struct negotiation_state *ns,
760 struct fetch_pack_args *args,
761 struct ref **refs)
762{
763 struct ref *ref;
764 int old_save_commit_buffer = save_commit_buffer;
765 timestamp_t cutoff = 0;
766 struct oidset loose_oid_set = OIDSET_INIT;
767 int use_oidset = 0;
768 struct loose_object_iter iter = {&loose_oid_set, *refs};
769
770 /* Enumerate all loose objects or know refs are not so many. */
771 use_oidset = !for_each_loose_object(add_loose_objects_to_set,
772 &iter, 0);
773
774 save_commit_buffer = 0;
775
776 for (ref = *refs; ref; ref = ref->next) {
777 struct object *o;
778 unsigned int flags = OBJECT_INFO_QUICK;
779
780 if (use_oidset &&
781 !oidset_contains(&loose_oid_set, &ref->old_oid)) {
782 /*
783 * I know this does not exist in the loose form,
784 * so check if it exists in a non-loose form.
785 */
786 flags |= OBJECT_INFO_IGNORE_LOOSE;
787 }
788
789 if (!has_object_file_with_flags(&ref->old_oid, flags))
790 continue;
791 o = parse_object(&ref->old_oid);
792 if (!o)
793 continue;
794
795 /* We already have it -- which may mean that we were
796 * in sync with the other side at some time after
797 * that (it is OK if we guess wrong here).
798 */
799 if (o->type == OBJ_COMMIT) {
800 struct commit *commit = (struct commit *)o;
801 if (!cutoff || cutoff < commit->date)
802 cutoff = commit->date;
803 }
804 }
805
806 oidset_clear(&loose_oid_set);
807
808 if (!args->no_dependents) {
809 if (!args->deepen) {
810 for_each_ref(mark_complete_oid, NULL);
811 for_each_cached_alternate(NULL, mark_alternate_complete);
812 commit_list_sort_by_date(&complete);
813 if (cutoff)
814 mark_recent_complete_commits(args, cutoff);
815 }
816
817 /*
818 * Mark all complete remote refs as common refs.
819 * Don't mark them common yet; the server has to be told so first.
820 */
821 for (ref = *refs; ref; ref = ref->next) {
822 struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
823 NULL, 0);
824
825 if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
826 continue;
827
828 if (!(o->flags & SEEN)) {
829 rev_list_push(ns, (struct commit *)o,
830 COMMON_REF | SEEN);
831
832 mark_common(ns, (struct commit *)o, 1, 1);
833 }
834 }
835 }
836
837 save_commit_buffer = old_save_commit_buffer;
838}
839
840/*
841 * Returns 1 if every object pointed to by the given remote refs is available
842 * locally and reachable from a local ref, and 0 otherwise.
843 */
844static int everything_local(struct fetch_pack_args *args,
845 struct ref **refs)
846{
847 struct ref *ref;
848 int retval;
849
850 for (retval = 1, ref = *refs; ref ; ref = ref->next) {
851 const struct object_id *remote = &ref->old_oid;
852 struct object *o;
853
854 o = lookup_object(remote->hash);
855 if (!o || !(o->flags & COMPLETE)) {
856 retval = 0;
857 print_verbose(args, "want %s (%s)", oid_to_hex(remote),
858 ref->name);
859 continue;
860 }
861 print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
862 ref->name);
863 }
864
865 return retval;
866}
867
/*
 * Async callback: demultiplex the sideband stream from upload-pack
 * (data is the xd[] fd pair), forwarding band #1 to "out".
 */
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret = recv_sideband("fetch-pack", xd[0], out);

	close(out);
	return ret;
}
877
/*
 * Receive the pack data on xd[] and feed it to a child process —
 * "index-pack" (keeping the pack, optionally recording its .keep file
 * in *pack_lockfile) or "unpack-objects" (exploding into loose
 * objects), chosen by keep_pack/unpack_limit and the pack's object
 * count.  Returns 0 on success and dies on any failure.
 */
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		/* peek at the object count to pick index-pack vs unpack-objects */
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			/* the .keep message records who holds the pack */
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		/* replay the header we consumed above for the child */
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	/* fetch.fsckObjects overrides transfer.fsckObjects; default off */
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
996
997static int cmp_ref_by_name(const void *a_, const void *b_)
998{
999 const struct ref *a = *((const struct ref **)a_);
1000 const struct ref *b = *((const struct ref **)b_);
1001 return strcmp(a->name, b->name);
1002}
1003
/*
 * Drive a protocol-v0 fetch over fd[]: parse the server's capability
 * advertisement into the file-scope flags, mark what we already have,
 * filter the advertised refs against sought[], negotiate a common base
 * with find_common(), and receive the pack via get_pack() (setting up a
 * temporary shallow file first when deepening).  Returns the filtered
 * ref list (ownership passes to the caller); dies on protocol errors.
 */
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;
	struct negotiation_state ns = { { compare_commits_by_commit_date } };

	/* both lists sorted by name, as filter_refs() requires */
	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	/* drop request options the server cannot honor */
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	/* clear leftover flags from any previous fetch in this process */
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;
	mark_complete_and_common_ref(&ns, args, &ref);
	filter_refs(args, &ref, sought, nr_sought);
	if (everything_local(args, &ref)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(&ns, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	clear_prio_queue(&ns.rev_list);
	return ref;
}
1120
1121static void add_shallow_requests(struct strbuf *req_buf,
1122 const struct fetch_pack_args *args)
1123{
1124 if (is_repository_shallow())
1125 write_shallow_commits(req_buf, 1, NULL);
1126 if (args->depth > 0)
1127 packet_buf_write(req_buf, "deepen %d", args->depth);
1128 if (args->deepen_since) {
1129 timestamp_t max_age = approxidate(args->deepen_since);
1130 packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
1131 }
1132 if (args->deepen_not) {
1133 int i;
1134 for (i = 0; i < args->deepen_not->nr; i++) {
1135 struct string_list_item *s = args->deepen_not->items + i;
1136 packet_buf_write(req_buf, "deepen-not %s", s->string);
1137 }
1138 }
1139}
1140
1141static void add_wants(const struct ref *wants, struct strbuf *req_buf)
1142{
1143 for ( ; wants ; wants = wants->next) {
1144 const struct object_id *remote = &wants->old_oid;
1145 const char *remote_hex;
1146 struct object *o;
1147
1148 /*
1149 * If that object is complete (i.e. it is an ancestor of a
1150 * local ref), we tell them we have it but do not have to
1151 * tell them about its ancestors, which they already know
1152 * about.
1153 *
1154 * We use lookup_object here because we are only
1155 * interested in the case we *know* the object is
1156 * reachable and we have already scanned it.
1157 */
1158 if (((o = lookup_object(remote->hash)) != NULL) &&
1159 (o->flags & COMPLETE)) {
1160 continue;
1161 }
1162
1163 remote_hex = oid_to_hex(remote);
1164 packet_buf_write(req_buf, "want %s\n", remote_hex);
1165 }
1166}
1167
1168static void add_common(struct strbuf *req_buf, struct oidset *common)
1169{
1170 struct oidset_iter iter;
1171 const struct object_id *oid;
1172 oidset_iter_init(common, &iter);
1173
1174 while ((oid = oidset_iter_next(&iter))) {
1175 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1176 }
1177}
1178
1179static int add_haves(struct negotiation_state *ns, struct strbuf *req_buf,
1180 int *haves_to_send, int *in_vain)
1181{
1182 int ret = 0;
1183 int haves_added = 0;
1184 const struct object_id *oid;
1185
1186 while ((oid = get_rev(ns))) {
1187 packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
1188 if (++haves_added >= *haves_to_send)
1189 break;
1190 }
1191
1192 *in_vain += haves_added;
1193 if (!haves_added || *in_vain >= MAX_IN_VAIN) {
1194 /* Send Done */
1195 packet_buf_write(req_buf, "done\n");
1196 ret = 1;
1197 }
1198
1199 /* Increase haves to send on next round */
1200 *haves_to_send = next_flush(1, *haves_to_send);
1201
1202 return ret;
1203}
1204
1205static int send_fetch_request(struct negotiation_state *ns, int fd_out,
1206 const struct fetch_pack_args *args,
1207 const struct ref *wants, struct oidset *common,
1208 int *haves_to_send, int *in_vain)
1209{
1210 int ret = 0;
1211 struct strbuf req_buf = STRBUF_INIT;
1212
1213 if (server_supports_v2("fetch", 1))
1214 packet_buf_write(&req_buf, "command=fetch");
1215 if (server_supports_v2("agent", 0))
1216 packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
1217 if (args->server_options && args->server_options->nr &&
1218 server_supports_v2("server-option", 1)) {
1219 int i;
1220 for (i = 0; i < args->server_options->nr; i++)
1221 packet_write_fmt(fd_out, "server-option=%s",
1222 args->server_options->items[i].string);
1223 }
1224
1225 packet_buf_delim(&req_buf);
1226 if (args->use_thin_pack)
1227 packet_buf_write(&req_buf, "thin-pack");
1228 if (args->no_progress)
1229 packet_buf_write(&req_buf, "no-progress");
1230 if (args->include_tag)
1231 packet_buf_write(&req_buf, "include-tag");
1232 if (prefer_ofs_delta)
1233 packet_buf_write(&req_buf, "ofs-delta");
1234
1235 /* Add shallow-info and deepen request */
1236 if (server_supports_feature("fetch", "shallow", 0))
1237 add_shallow_requests(&req_buf, args);
1238 else if (is_repository_shallow() || args->deepen)
1239 die(_("Server does not support shallow requests"));
1240
1241 /* Add filter */
1242 if (server_supports_feature("fetch", "filter", 0) &&
1243 args->filter_options.choice) {
1244 print_verbose(args, _("Server supports filter"));
1245 packet_buf_write(&req_buf, "filter %s",
1246 args->filter_options.filter_spec);
1247 } else if (args->filter_options.choice) {
1248 warning("filtering not recognized by server, ignoring");
1249 }
1250
1251 /* add wants */
1252 add_wants(wants, &req_buf);
1253
1254 if (args->no_dependents) {
1255 packet_buf_write(&req_buf, "done");
1256 ret = 1;
1257 } else {
1258 /* Add all of the common commits we've found in previous rounds */
1259 add_common(&req_buf, common);
1260
1261 /* Add initial haves */
1262 ret = add_haves(ns, &req_buf, haves_to_send, in_vain);
1263 }
1264
1265 /* Send request */
1266 packet_buf_flush(&req_buf);
1267 write_or_die(fd_out, req_buf.buf, req_buf.len);
1268
1269 strbuf_release(&req_buf);
1270 return ret;
1271}
1272
1273/*
1274 * Processes a section header in a server's response and checks if it matches
1275 * `section`. If the value of `peek` is 1, the header line will be peeked (and
1276 * not consumed); if 0, the line will be consumed and the function will die if
1277 * the section header doesn't match what was expected.
1278 */
1279static int process_section_header(struct packet_reader *reader,
1280 const char *section, int peek)
1281{
1282 int ret;
1283
1284 if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
1285 die("error reading section header '%s'", section);
1286
1287 ret = !strcmp(reader->line, section);
1288
1289 if (!peek) {
1290 if (!ret)
1291 die("expected '%s', received '%s'",
1292 section, reader->line);
1293 packet_reader_read(reader);
1294 }
1295
1296 return ret;
1297}
1298
1299static int process_acks(struct negotiation_state *ns,
1300 struct packet_reader *reader,
1301 struct oidset *common)
1302{
1303 /* received */
1304 int received_ready = 0;
1305 int received_ack = 0;
1306
1307 process_section_header(reader, "acknowledgments", 0);
1308 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1309 const char *arg;
1310
1311 if (!strcmp(reader->line, "NAK"))
1312 continue;
1313
1314 if (skip_prefix(reader->line, "ACK ", &arg)) {
1315 struct object_id oid;
1316 if (!get_oid_hex(arg, &oid)) {
1317 struct commit *commit;
1318 oidset_insert(common, &oid);
1319 commit = lookup_commit(&oid);
1320 mark_common(ns, commit, 0, 1);
1321 }
1322 continue;
1323 }
1324
1325 if (!strcmp(reader->line, "ready")) {
1326 received_ready = 1;
1327 continue;
1328 }
1329
1330 die("unexpected acknowledgment line: '%s'", reader->line);
1331 }
1332
1333 if (reader->status != PACKET_READ_FLUSH &&
1334 reader->status != PACKET_READ_DELIM)
1335 die("error processing acks: %d", reader->status);
1336
1337 /* return 0 if no common, 1 if there are common, or 2 if ready */
1338 return received_ready ? 2 : (received_ack ? 1 : 0);
1339}
1340
1341static void receive_shallow_info(struct fetch_pack_args *args,
1342 struct packet_reader *reader)
1343{
1344 process_section_header(reader, "shallow-info", 0);
1345 while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
1346 const char *arg;
1347 struct object_id oid;
1348
1349 if (skip_prefix(reader->line, "shallow ", &arg)) {
1350 if (get_oid_hex(arg, &oid))
1351 die(_("invalid shallow line: %s"), reader->line);
1352 register_shallow(&oid);
1353 continue;
1354 }
1355 if (skip_prefix(reader->line, "unshallow ", &arg)) {
1356 if (get_oid_hex(arg, &oid))
1357 die(_("invalid unshallow line: %s"), reader->line);
1358 if (!lookup_object(oid.hash))
1359 die(_("object not found: %s"), reader->line);
1360 /* make sure that it is parsed as shallow */
1361 if (!parse_object(&oid))
1362 die(_("error in object: %s"), reader->line);
1363 if (unregister_shallow(&oid))
1364 die(_("no shallow found: %s"), reader->line);
1365 continue;
1366 }
1367 die(_("expected shallow/unshallow, got %s"), reader->line);
1368 }
1369
1370 if (reader->status != PACKET_READ_FLUSH &&
1371 reader->status != PACKET_READ_DELIM)
1372 die("error processing shallow info: %d", reader->status);
1373
1374 setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
1375 args->deepen = 1;
1376}
1377
/*
 * States of the protocol-v2 fetch state machine driven by
 * do_fetch_pack_v2().
 */
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,	/* check what is already available locally */
	FETCH_SEND_REQUEST,	/* send a want/have request to the server */
	FETCH_PROCESS_ACKS,	/* parse the server's acknowledgments section */
	FETCH_GET_PACK,		/* receive shallow info (if any) and the pack */
	FETCH_DONE,		/* negotiation finished; stop looping */
};
1385
/*
 * Protocol-v2 counterpart of do_fetch_pack(): drives a small state
 * machine that alternates between sending want/have requests and
 * processing the server's acknowledgments until a packfile is received
 * (or everything turned out to be available locally).
 *
 * Returns a copy of 'orig_ref' filtered by filter_refs(); the caller
 * owns the returned list.
 */
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;	/* haves sent since the last ACK (see add_haves) */
	int haves_to_send = INITIAL_FLUSH;
	struct negotiation_state ns = { { compare_commits_by_commit_date } };
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* clear object flags left over by a previous fetch */
			if (marked)
				for_each_ref(clear_marks, NULL);
			marked = 1;

			/* Filter 'ref' by 'sought' and those that aren't local */
			mark_complete_and_common_ref(&ns, args, &ref);
			filter_refs(args, &ref, sought, nr_sought);
			if (everything_local(args, &ref))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;

			/* seed the rev walk that generates "have" lines */
			for_each_ref(rev_list_insert_ref_oid, &ns);
			for_each_cached_alternate(&ns,
						  insert_one_alternate_object);
			break;
		case FETCH_SEND_REQUEST:
			/* a nonzero return means "done" was sent */
			if (send_fetch_request(&ns, fd[1], args, ref, &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&ns, &reader, &common)) {
			case 2:
				/* server said "ready": the pack follows */
				state = FETCH_GET_PACK;
				break;
			case 1:
				/* got at least one ACK: progress was made */
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	clear_prio_queue(&ns.rev_list);
	oidset_clear(&common);
	return ref;
}
1472
/*
 * Load the configuration knobs consulted by fetch-pack into the
 * file-scope variables declared above; keys absent from the config
 * leave the variables at their defaults.
 */
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	/* apply the remaining, generic defaults */
	git_config(git_default_config, NULL);
}
1483
1484static void fetch_pack_setup(void)
1485{
1486 static int did_setup;
1487 if (did_setup)
1488 return;
1489 fetch_pack_config();
1490 if (0 <= transfer_unpack_limit)
1491 unpack_limit = transfer_unpack_limit;
1492 else if (0 <= fetch_unpack_limit)
1493 unpack_limit = fetch_unpack_limit;
1494 did_setup = 1;
1495}
1496
1497static int remove_duplicates_in_refs(struct ref **ref, int nr)
1498{
1499 struct string_list names = STRING_LIST_INIT_NODUP;
1500 int src, dst;
1501
1502 for (src = dst = 0; src < nr; src++) {
1503 struct string_list_item *item;
1504 item = string_list_insert(&names, ref[src]->name);
1505 if (item->util)
1506 continue; /* already have it */
1507 item->util = ref[src];
1508 if (src != dst)
1509 ref[dst] = ref[src];
1510 dst++;
1511 }
1512 for (src = dst; src < nr; src++)
1513 ref[src] = NULL;
1514 string_list_clear(&names, 0);
1515 return dst;
1516}
1517
/*
 * After a fetch, reconcile .git/shallow with what was negotiated:
 * commit or roll back the shallow lock file for deepen/unshallow
 * fetches, accept the remote's shallow roots when cloning or when
 * --update-shallow was given, and otherwise mark refs that cannot be
 * updated without touching .git/shallow as REF_STATUS_REJECT_SHALLOW.
 */
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	/* nothing shallow-related to reconcile with the remote */
	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	/* collect the tips we are about to update, for reachability checks */
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
1611
/*
 * Public entry point: negotiate with the remote over fd[] and fetch a
 * pack, dispatching to the protocol-v2 or pre-v2 implementation based
 * on 'version'.  'sought' is deduplicated in place; shallow state is
 * reconciled after the pack is indexed.  Returns the (caller-owned)
 * filtered copy of 'ref'; dies if no remote head matched.
 */
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	if (version == protocol_v2)
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   pack_lockfile);
	else
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	/* make the newly fetched pack visible before updating shallow info */
	reprepare_packed_git(the_repository);
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}
1644
1645int report_unmatched_refs(struct ref **sought, int nr_sought)
1646{
1647 int i, ret = 0;
1648
1649 for (i = 0; i < nr_sought; i++) {
1650 if (!sought[i])
1651 continue;
1652 switch (sought[i]->match_status) {
1653 case REF_MATCHED:
1654 continue;
1655 case REF_NOT_MATCHED:
1656 error(_("no such remote ref %s"), sought[i]->name);
1657 break;
1658 case REF_UNADVERTISED_NOT_ALLOWED:
1659 error(_("Server does not allow request for unadvertised object %s"),
1660 sought[i]->name);
1661 break;
1662 }
1663 ret = 1;
1664 }
1665 return ret;
1666}