#include "cache.h"
#include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
#include "object-store.h"
#include "connected.h"
#include "fetch-negotiator.h"
#include "fsck.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
static char *negotiation_algorithm;
static struct strbuf fsck_msg_types = STRBUF_INIT;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define ALTERNATE	(1U << 1)

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static int multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(the_repository, oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
				      void (*cb)(struct fetch_negotiator *,
						 struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(negotiator, cache.items[i]);
}

static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
			       const char *refname,
			       const struct object_id *oid)
{
	struct object *o = deref_tag(the_repository,
				     parse_object(the_repository, oid),
				     refname, 0);

	if (o && o->type == OBJ_COMMIT)
		negotiator->add_tip(negotiator, (struct commit *)o);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(cb_data, refname, oid);
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}

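/*
 * Read one ACK/NAK packet from the server and classify it. On
 * "ACK <oid> <status>" lines the object id is copied into result_oid;
 * a bare "ACK" (no status suffix) is the final acknowledgment of the
 * non-multi_ack protocol. Dies on "ERR" lines and on anything else
 * unexpected.
 */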
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
					struct object *obj)
{
	rev_list_insert_ref(negotiator, NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

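/*
 * Compute how many "have"s to send before the next flush. Over a
 * bidirectional connection the window doubles until PIPESAFE_FLUSH and
 * then grows linearly; over stateless RPC it doubles until LARGE_FLUSH
 * and then grows by roughly 10% per round.
 */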
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

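/*
 * Seed the negotiator with the tips it is allowed to use: either the
 * caller-supplied negotiation tips, or (by default) every local ref.
 */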
static void mark_tips(struct fetch_negotiator *negotiator,
		      const struct oid_array *negotiation_tips)
{
	int i;

	if (!negotiation_tips) {
		for_each_ref(rev_list_insert_ref_oid, negotiator);
		return;
	}

	for (i = 0; i < negotiation_tips->nr; i++)
		rev_list_insert_ref(negotiator, NULL,
				    &negotiation_tips->oid[i]);
	return;
}

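/*
 * Protocol v0/v1 negotiation: send "want" lines (with the capability
 * list appended to the first one), then walk our history emitting
 * batches of "have" lines and reading ACK/NAK responses until the
 * server says it is ready, we run out of candidates, or MAX_IN_VAIN
 * haves go unacknowledged. Returns 0 if a common base was found, 1 if
 * there was nothing to fetch, and a negative value if no common commit
 * was found (which is not an error when fetching into an empty
 * repository). The most recently ACKed object id is left in result_oid.
 */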
static int find_common(struct fetch_negotiator *negotiator,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	if (!args->no_dependents) {
		mark_tips(negotiator, args->negotiation_tips);
		for_each_cached_alternate(negotiator, insert_one_alternate_object);
	}

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!args->no_dependents &&
		    ((o = lookup_object(the_repository, remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(the_repository, oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(the_repository,
							      result_oid);
					int was_common;

					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					was_common = negotiator->ack(negotiator, commit);
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !was_common) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready)
						got_ready = 1;
					break;
				}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
			if (got_ready)
				break;
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

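/*
 * Mark the commit referred to by oid (peeling any tags along the way)
 * as COMPLETE and add it to the "complete" list, so that
 * mark_recent_complete_commits() can later propagate the flag to its
 * recent ancestors.
 */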
static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(the_repository, oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(the_repository, &t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int is_unmatched_ref(const struct ref *ref)
{
	struct object_id oid;
	const char *p;
	return ref->match_status == REF_NOT_MATCHED &&
	       !parse_oid_hex(ref->name, &oid, &p) &&
	       *p == '\0' &&
	       oideq(&oid, &ref->old_oid);
}

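/*
 * Trim the advertised ref list down to the refs we were asked for
 * (plus everything when args->fetch_all is set, except tags during a
 * deepening fetch), moving the rest to an "unmatched" list. Requests
 * for exact object ids that were not advertised are appended only if
 * the server allows unadvertised objects or the id matches an
 * advertised tip.
 */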
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;
	int strict = !(allow_unadvertised_object_request &
		       (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	if (strict) {
		for (i = 0; i < nr_sought; i++) {
			ref = sought[i];
			if (!is_unmatched_ref(ref))
				continue;

			add_refs_to_oidset(&tip_oids, unmatched);
			add_refs_to_oidset(&tip_oids, newlist);
			break;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		ref = sought[i];
		if (!is_unmatched_ref(ref))
			continue;

		if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}

static void mark_alternate_complete(struct fetch_negotiator *unused,
				    struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};

/*
 * If the number of refs is not larger than the number of loose objects,
 * this function stops inserting.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}

/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * earliest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
					 struct fetch_pack_args *args,
					 struct ref **refs)
{
	struct ref *ref;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects, or learn that there are more of them than refs. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(the_repository, &ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(NULL, mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(the_repository,
					     lookup_object(the_repository,
							   ref->old_oid.hash),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		negotiator->known_common(negotiator,
					 (struct commit *)o);
	}

	save_commit_buffer = old_save_commit_buffer;
}

/*
 * Returns 1 if every object pointed to by the given remote refs is available
 * locally and reachable from a local ref, and 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs)
{
	struct ref *ref;
	int retval;

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(the_repository, remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

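/*
 * Receive the packfile from the remote, optionally demultiplexing the
 * sideband, and feed it to either index-pack (keeping the pack on
 * disk) or unpack-objects, depending on args->keep_pack and on how the
 * received object count compares to unpack_limit.
 */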
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_pushf(&cmd.args, "--strict%s",
					 fsck_msg_types.buf);
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

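/*
 * Protocol v0/v1 driver: parse the capabilities advertised with the
 * ref list, mark what we already have, negotiate a common base with
 * find_common(), and finally download the pack with get_pack().
 */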
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(&negotiator, negotiation_algorithm);

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow(the_repository)) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (!args->no_dependents) {
		mark_complete_and_common_ref(&negotiator, args, &ref);
		filter_refs(args, &ref, sought, nr_sought);
		if (everything_local(args, &ref)) {
			packet_flush(fd[1]);
			goto all_done;
		}
	} else {
		filter_refs(args, &ref, sought, nr_sought);
	}
	if (find_common(&negotiator, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	negotiator.release(&negotiator);
	return ref;
}

static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
{
	if (is_repository_shallow(the_repository))
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
		}
	}
}

static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
{
	int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);

	for ( ; wants ; wants = wants->next) {
		const struct object_id *remote = &wants->old_oid;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!no_dependents &&
		    ((o = lookup_object(the_repository, remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		if (!use_ref_in_want || wants->exact_oid)
			packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
		else
			packet_buf_write(req_buf, "want-ref %s\n", wants->name);
	}
}

static void add_common(struct strbuf *req_buf, struct oidset *common)
{
	struct oidset_iter iter;
	const struct object_id *oid;
	oidset_iter_init(common, &iter);

	while ((oid = oidset_iter_next(&iter))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
	}
}

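/*
 * Append up to *haves_to_send "have" lines supplied by the negotiator
 * to req_buf, and grow the window for the next round. Returns 1 (after
 * also appending "done") when negotiation should stop, either because
 * the negotiator ran out of candidates or because too many haves have
 * gone unacknowledged; returns 0 otherwise.
 */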
static int add_haves(struct fetch_negotiator *negotiator,
		     struct strbuf *req_buf,
		     int *haves_to_send, int *in_vain)
{
	int ret = 0;
	int haves_added = 0;
	const struct object_id *oid;

	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
		if (++haves_added >= *haves_to_send)
			break;
	}

	*in_vain += haves_added;
	if (!haves_added || *in_vain >= MAX_IN_VAIN) {
		/* Send Done */
		packet_buf_write(req_buf, "done\n");
		ret = 1;
	}

	/* Increase haves to send on next round */
	*haves_to_send = next_flush(1, *haves_to_send);

	return ret;
}

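/*
 * Write one complete protocol v2 "fetch" request: the command and
 * capability arguments, shallow and filter parameters, the wants, and
 * (unless args->no_dependents) the common oids found so far plus a
 * fresh batch of haves. Returns 1 if "done" was sent, i.e. the next
 * response is expected to carry the packfile.
 */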
static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
			      const struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_write_fmt(fd_out, "server-option=%s",
					 args->server_options->items[i].string);
	}

	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow(the_repository) || args->deepen)
		die(_("Server does not support shallow requests"));

	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	/* add wants */
	add_wants(args->no_dependents, wants, &req_buf);

	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
	}

	/* Send request */
	packet_buf_flush(&req_buf);
	write_or_die(fd_out, req_buf.buf, req_buf.len);

	strbuf_release(&req_buf);
	return ret;
}

/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
{
	int ret;

	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die(_("error reading section header '%s'"), section);

	ret = !strcmp(reader->line, section);

	if (!peek) {
		if (!ret)
			die(_("expected '%s', received '%s'"),
			    section, reader->line);
		packet_reader_read(reader);
	}

	return ret;
}

static int process_acks(struct fetch_negotiator *negotiator,
			struct packet_reader *reader,
			struct oidset *common)
{
	/* received */
	int received_ready = 0;
	int received_ack = 0;

	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;

		if (!strcmp(reader->line, "NAK"))
			continue;

		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(the_repository, &oid);
				negotiator->ack(negotiator, commit);
			}
			continue;
		}

		if (!strcmp(reader->line, "ready")) {
			received_ready = 1;
			continue;
		}

		die(_("unexpected acknowledgment line: '%s'"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing acks: %d"), reader->status);

	/*
	 * If an "acknowledgments" section is sent, a packfile is sent if and
	 * only if "ready" was sent in this section. The other sections
	 * ("shallow-info" and "wanted-refs") are sent only if a packfile is
	 * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
	 * otherwise.
	 */
	if (received_ready && reader->status != PACKET_READ_DELIM)
		die(_("expected packfile to be sent after 'ready'"));
	if (!received_ready && reader->status != PACKET_READ_FLUSH)
		die(_("expected no other sections to be sent after no 'ready'"));

	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
}

static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader)
{
	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		struct object_id oid;

		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			register_shallow(the_repository, &oid);
			continue;
		}
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(the_repository, oid.hash))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(the_repository, &oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
			continue;
		}
		die(_("expected shallow/unshallow, got %s"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing shallow info: %d"), reader->status);

	setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
	args->deepen = 1;
}

static void receive_wanted_refs(struct packet_reader *reader,
				struct ref **sought, int nr_sought)
{
	process_section_header(reader, "wanted-refs", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		struct object_id oid;
		const char *end;
		int i;

		if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
			die(_("expected wanted-ref, got '%s'"), reader->line);

		for (i = 0; i < nr_sought; i++) {
			if (!strcmp(end, sought[i]->name)) {
				oidcpy(&sought[i]->old_oid, &oid);
				break;
			}
		}

		if (i == nr_sought)
			die(_("unexpected wanted-ref: '%s'"), reader->line);
	}

	if (reader->status != PACKET_READ_DELIM)
		die(_("error processing wanted refs: %d"), reader->status);
}

enum fetch_state {
	FETCH_CHECK_LOCAL = 0,
	FETCH_SEND_REQUEST,
	FETCH_PROCESS_ACKS,
	FETCH_GET_PACK,
	FETCH_DONE,
};

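/*
 * Protocol v2 driver: a small state machine that first checks what we
 * already have locally, then alternates between sending fetch requests
 * and processing acknowledgment sections until the server is ready to
 * send a pack, at which point the shallow-info, wanted-refs and
 * packfile sections are consumed.
 */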
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;
	int haves_to_send = INITIAL_FLUSH;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(&negotiator, negotiation_algorithm);
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (!args->no_dependents) {
				mark_complete_and_common_ref(&negotiator, args, &ref);
				filter_refs(args, &ref, sought, nr_sought);
				if (everything_local(args, &ref))
					state = FETCH_DONE;
				else
					state = FETCH_SEND_REQUEST;

				mark_tips(&negotiator, args->negotiation_tips);
				for_each_cached_alternate(&negotiator,
							  insert_one_alternate_object);
			} else {
				filter_refs(args, &ref, sought, nr_sought);
				state = FETCH_SEND_REQUEST;
			}
			break;
		case FETCH_SEND_REQUEST:
			if (send_fetch_request(&negotiator, fd[1], args, ref,
					       &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&negotiator, &reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, sought, nr_sought);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	negotiator.release(&negotiator);
	oidset_clear(&common);
	return ref;
}

static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
{
	if (strcmp(var, "fetch.fsck.skiplist") == 0) {
		const char *path;

		if (git_config_pathname(&path, var, value))
			return 1;
		strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
			fsck_msg_types.len ? ',' : '=', path);
		free((char *)path);
		return 0;
	}

	if (skip_prefix(var, "fetch.fsck.", &var)) {
		if (is_valid_msg_type(var, value))
			strbuf_addf(&fsck_msg_types, "%c%s=%s",
				fsck_msg_types.len ? ',' : '=', var, value);
		else
			warning("Skipping unknown msg id '%s'", var);
		return 0;
	}

	return git_default_config(var, value, cb);
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
	git_config_get_string("fetch.negotiationalgorithm",
			      &negotiation_algorithm);

	git_config(fetch_pack_config_cb, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

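/*
 * Drop refs whose name has already been seen earlier in the array,
 * compacting the survivors to the front. Returns the new number of
 * refs.
 */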
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

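/*
 * Reconcile .git/shallow with what the fetch produced: commit or roll
 * back the shallow lock for deepening fetches, accept the remote's
 * shallow roots when cloning or when args->update_shallow is set, and
 * otherwise reject sought refs whose update would require registering
 * new shallow entries.
 */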
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

static int iterate_ref_map(void *cb_data, struct object_id *oid)
{
	struct ref **rm = cb_data;
	struct ref *ref = *rm;

	if (!ref)
		return -1; /* end of the list */
	*rm = ref->next;
	oidcpy(oid, &ref->old_oid);
	return 0;
}

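/*
 * Public entry point: dispatch to the protocol v2 or v0/v1
 * implementation, re-scan the pack directory, verify connectivity for
 * deepening fetches that are not clones, and update the shallow file.
 * Returns the (copied) list of refs that were fetched, or NULL if the
 * connectivity check failed.
 */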
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (args->no_dependents && !args->filter_options.choice) {
		/*
		 * The protocol does not support requesting that only the
		 * wanted objects be sent, so approximate this by setting a
		 * "blob:none" filter if no filter is already set. This works
		 * for all object types: note that wanted blobs will still be
		 * sent because they are directly specified as a "want".
		 *
		 * NEEDSWORK: Add an option in the protocol to request that
		 * only the wanted objects be sent, and implement it.
		 */
		parse_list_objects_filter(&args->filter_options, "blob:none");
	}

	if (version != protocol_v2 && !ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	if (version == protocol_v2)
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   pack_lockfile);
	else
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	reprepare_packed_git(the_repository);

	if (!args->cloning && args->deepen) {
		struct check_connected_options opt = CHECK_CONNECTED_INIT;
		struct ref *iterator = ref_cpy;
		opt.shallow_file = alternate_shallow_file;
		if (args->deepen)
			opt.is_deepening_fetch = 1;
		if (check_connected(iterate_ref_map, &iterator, &opt)) {
			error(_("remote did not send all necessary objects"));
			free_refs(ref_cpy);
			ref_cpy = NULL;
			rollback_lock_file(&shallow_lock);
			goto cleanup;
		}
		args->connectivity_checked = 1;
	}

	update_shallow(args, sought, nr_sought, &si);
cleanup:
	clear_shallow_info(&si);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}