#include "cache.h"
#include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
#include "object-store.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)

static int marked;
/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(the_repository, oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}

static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}

static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(the_repository, oid),
				     refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}

static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(the_repository, oid),
				     refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}

/*
   This function marks a rev and its ancestors as common.
   In some cases, it is desirable to mark only the ancestors (for example
   when only the server does not yet know that they are common).
*/

static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
			     parents;
			     parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}

/*
   Get the next rev to send, ignoring the common.
*/

static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}

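/*
 * Read one ACK/NAK line from the server and classify it. On "ACK <oid>"
 * the object id is stored in result_oid; a trailing "continue", "common"
 * or "ready" keyword selects the corresponding multi_ack variant.
 */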
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}

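/*
 * Hand a request buffer to the server. Over stateless RPC the payload
 * is chunked into packets no larger than LARGE_PACKET_MAX and terminated
 * with a flush; otherwise it is written to the connection verbatim.
 */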
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

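/*
 * Compute how many "have"s to send before the next flush: the window
 * doubles while it is small, then grows by about 10% per round over
 * stateless RPC and by PIPESAFE_FLUSH per round over a bidirectional
 * connection.
 */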
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

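/*
 * Negotiate the set of common commits with the server over protocol
 * v0/v1: advertise our "want"s, then send "have" lines in ever larger
 * batches (see next_flush()) and process the ACKs that come back.
 * Returns 1 if there was nothing left to request, 0 if the server
 * acknowledged a common commit (or we had no "have" to offer), and -1
 * otherwise; the caller only treats a negative result as "no common
 * commits".
 */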
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack)   strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)     strbuf_addstr(&c, " no-progress");
			if (args->include_tag)     strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)      strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)       strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)         strbuf_addstr(&c, " deepen-not");
			if (agent_supported)       strbuf_addf(&c, " agent=%s",
							       git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(the_repository, oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(the_repository, oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(the_repository, &t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}

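/*
 * Reduce the advertised ref list to what was actually asked for. Refs
 * named in "sought" (and, with fetch_all, everything except tags when
 * deepening) are kept; sought entries that name a raw object id are
 * appended afterwards if the server allows unadvertised requests or the
 * id happens to be a ref tip. Everything else is freed.
 */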
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}

static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};

/*
 * If the number of refs is not larger than the number of loose objects,
 * this function stops inserting.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}

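/*
 * Check which of the wanted refs we already have, mark our complete
 * commits so the negotiation can advertise them, and filter the ref
 * list down to what we still need. Returns nonzero when everything we
 * are asking for is already complete locally.
 */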
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/*
	 * Enumerate all loose objects, or learn that there are more loose
	 * objects than refs (in which case the oidset is not worth using).
	 */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(the_repository, &ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(the_repository,
								   ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(the_repository, remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

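/*
 * Consume the incoming packfile: feed it (via the sideband demuxer when
 * side-band is in use) either to "index-pack --stdin", when the pack is
 * being kept or comes from a promisor remote, or to "unpack-objects"
 * for small transfers below unpack_limit.
 */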
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow(the_repository)) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}

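/*
 * Append the client's shallow state and any deepen specification
 * (depth, deepen-since, deepen-not) to a protocol v2 fetch request.
 */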
static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
{
	if (is_repository_shallow(the_repository))
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
		}
	}
}

static void add_wants(const struct ref *wants, struct strbuf *req_buf)
{
	for ( ; wants ; wants = wants->next) {
		const struct object_id *remote = &wants->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		packet_buf_write(req_buf, "want %s\n", remote_hex);
	}
}

static void add_common(struct strbuf *req_buf, struct oidset *common)
{
	struct oidset_iter iter;
	const struct object_id *oid;
	oidset_iter_init(common, &iter);

	while ((oid = oidset_iter_next(&iter))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
	}
}

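/*
 * Write up to *haves_to_send "have" lines to the request. If nothing was
 * left to send, or too many haves have gone unacknowledged (MAX_IN_VAIN),
 * "done" is written as well; returns 1 in that case and 0 otherwise. The
 * window is widened for the next round via next_flush().
 */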
static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
{
	int ret = 0;
	int haves_added = 0;
	const struct object_id *oid;

	while ((oid = get_rev())) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
		if (++haves_added >= *haves_to_send)
			break;
	}

	*in_vain += haves_added;
	if (!haves_added || *in_vain >= MAX_IN_VAIN) {
		/* Send Done */
		packet_buf_write(req_buf, "done\n");
		ret = 1;
	}

	/* Increase haves to send on next round */
	*haves_to_send = next_flush(1, *haves_to_send);

	return ret;
}

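/*
 * Write one protocol v2 "fetch" request: capabilities, fetch arguments,
 * wants, previously established common commits and a fresh batch of
 * haves. Returns 1 if "done" was sent (so the pack should follow) and 0
 * if another negotiation round is expected.
 */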
static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_write_fmt(fd_out, "server-option=%s",
					 args->server_options->items[i].string);
	}

	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow(the_repository) || args->deepen)
		die(_("Server does not support shallow requests"));

	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	/* add wants */
	add_wants(wants, &req_buf);

	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(&req_buf, haves_to_send, in_vain);
	}

	/* Send request */
	packet_buf_flush(&req_buf);
	write_or_die(fd_out, req_buf.buf, req_buf.len);

	strbuf_release(&req_buf);
	return ret;
}

/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
{
	int ret;

	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die("error reading section header '%s'", section);

	ret = !strcmp(reader->line, section);

	if (!peek) {
		if (!ret)
			die("expected '%s', received '%s'",
			    section, reader->line);
		packet_reader_read(reader);
	}

	return ret;
}

static int process_acks(struct packet_reader *reader, struct oidset *common)
{
	/* received */
	int received_ready = 0;
	int received_ack = 0;

	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;

		if (!strcmp(reader->line, "NAK"))
			continue;

		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(&oid);
				mark_common(commit, 0, 1);
				received_ack = 1;
			}
			continue;
		}

		if (!strcmp(reader->line, "ready")) {
			clear_prio_queue(&rev_list);
			received_ready = 1;
			continue;
		}

		die("unexpected acknowledgment line: '%s'", reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die("error processing acks: %d", reader->status);

	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
}

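/*
 * Read the "shallow-info" section of a protocol v2 response, updating
 * the local shallow set, and write out the new shallow file so the pack
 * can be indexed against it.
 */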
static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader)
{
	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		struct object_id oid;

		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			register_shallow(the_repository, &oid);
			continue;
		}
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(the_repository, oid.hash))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(the_repository, &oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
			continue;
		}
		die(_("expected shallow/unshallow, got %s"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die("error processing shallow info: %d", reader->status);

	setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
	args->deepen = 1;
}

enum fetch_state {
	FETCH_CHECK_LOCAL = 0,
	FETCH_SEND_REQUEST,
	FETCH_PROCESS_ACKS,
	FETCH_GET_PACK,
	FETCH_DONE,
};

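/*
 * Protocol v2 fetch as a small state machine: check what is already
 * local, then alternate between sending fetch requests and processing
 * acknowledgments until the server says "ready", and finally read the
 * (optional) shallow-info section and the packfile itself.
 */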
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0;
	int haves_to_send = INITIAL_FLUSH;
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			if (marked)
				for_each_ref(clear_marks, NULL);
			marked = 1;

			for_each_ref(rev_list_insert_ref_oid, NULL);
			for_each_cached_alternate(insert_one_alternate_object);

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (everything_local(args, &ref, sought, nr_sought))
				state = FETCH_DONE;
			else
				state = FETCH_SEND_REQUEST;
			break;
		case FETCH_SEND_REQUEST:
			if (send_fetch_request(fd[1], args, ref, &common,
					       &haves_to_send, &in_vain))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	oidset_clear(&common);
	return ref;
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

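/*
 * Collapse refs that were asked for more than once, keeping the first
 * occurrence of each name; returns the number of remaining entries.
 */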
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

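/*
 * After the pack has been fetched, reconcile our shallow file with the
 * shallow information negotiated with the server: commit or roll back
 * the lock taken earlier, accept new shallow roots where that is safe
 * (or explicitly requested with --update-shallow), and reject refs that
 * would require an unwanted update of .git/shallow.
 */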
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	if (version == protocol_v2)
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   pack_lockfile);
	else
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	reprepare_packed_git(the_repository);
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}