#include "cache.h"
#include "repository.h"
#include "tempfile.h"
#include "lockfile.h"
#include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "remote.h"
#include "refs.h"
#include "sha1-array.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"

static int is_shallow = -1;
static struct stat_validity shallow_stat;
static char *alternate_shallow_file;

void set_alternate_shallow_file_the_repository(const char *path, int override)
{
	if (is_shallow != -1)
		die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
	if (alternate_shallow_file && !override)
		return;
	free(alternate_shallow_file);
	alternate_shallow_file = xstrdup_or_null(path);
}

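/*
 * Registering a shallow commit records a graft with nr_parent == -1
 * and drops any already-parsed parents, so later traversals treat the
 * commit as a cut point. That nr_parent == -1 marker is what
 * write_one_shallow() and the lookup_commit_graft() callers below use
 * to tell shallow entries apart from ordinary grafts.
 */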
int register_shallow_the_repository(const struct object_id *oid)
{
	struct commit_graft *graft =
		xmalloc(sizeof(struct commit_graft));
	struct commit *commit = lookup_commit(oid);

	oidcpy(&graft->oid, oid);
	graft->nr_parent = -1;
	if (commit && commit->object.parsed)
		commit->parents = NULL;
	return register_commit_graft(the_repository, graft, 0);
}

int is_repository_shallow_the_repository(void)
{
	FILE *fp;
	char buf[1024];
	const char *path = alternate_shallow_file;

	if (is_shallow >= 0)
		return is_shallow;

	if (!path)
		path = git_path_shallow(the_repository);
	/*
	 * fetch-pack sets '--shallow-file ""' as an indicator that no
	 * shallow file should be used. We could just open it and it
	 * will likely fail. But let's do an explicit check instead.
	 */
	if (!*path || (fp = fopen(path, "r")) == NULL) {
		stat_validity_clear(&shallow_stat);
		is_shallow = 0;
		return is_shallow;
	}
	stat_validity_update(&shallow_stat, fileno(fp));
	is_shallow = 1;

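	/*
	 * Format of the shallow file: one commit object ID in hex per
	 * line, nothing else. Each entry is registered below as a graft
	 * with nr_parent == -1, i.e. a shallow boundary.
	 */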
	while (fgets(buf, sizeof(buf), fp)) {
		struct object_id oid;
		if (get_oid_hex(buf, &oid))
			die("bad shallow line: %s", buf);
		register_shallow(the_repository, &oid);
	}
	fclose(fp);
	return is_shallow;
}

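/*
 * Compute the shallow boundary for a depth-limited walk from "heads".
 * commit->util tracks the smallest depth at which each commit has been
 * reached so far; commits that hit "depth" (or an existing shallow
 * graft) are marked with shallow_flag and returned, everything closer
 * to the tips gets not_shallow_flag. For example, with depth == 1 only
 * the tip commits themselves end up in the returned list; with
 * depth == 2 their parents do.
 */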
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;

	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			if (i < heads->nr) {
				commit = (struct commit *)
					deref_tag(heads->objects[i++].item, NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				if (!commit->util)
					commit->util = xmalloc(sizeof(int));
				*(int *)commit->util = 0;
				cur_depth = 0;
			} else {
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = *(int *)commit->util;
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow(the_repository) && !commit->parents &&
		     (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			if (!p->item->util) {
				int *pointer = xmalloc(sizeof(int));
				p->item->util = pointer;
				*pointer = cur_depth;
			} else {
				int *pointer = p->item->util;
				if (cur_depth >= *pointer)
					continue;
				*pointer = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						 NULL, &stack);
			else {
				commit = p->item;
				cur_depth = *(int *)commit->util;
			}
		}
	}

	return result;
}

static void show_commit(struct commit *commit, void *data)
{
	commit_list_insert(commit, data);
}

/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
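/*
 * A "border" commit here is one the walk reached whose parent list
 * contains at least one commit the walk did not reach (e.g. because
 * the rev-list arguments limited the walk with something like --not
 * or --max-age); the loop over c->parents below detects exactly that.
 */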
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(the_repository); /* make sure shallows are read */

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * Mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}

#define check_shallow_file_for_update(r) check_shallow_file_for_update_##r()
static void check_shallow_file_for_update_the_repository(void)
{
	if (is_shallow == -1)
		die("BUG: shallow must be initialized by now");

	if (!stat_validity_check(&shallow_stat, git_path_shallow(the_repository)))
		die("shallow file has changed since we read it");
}

#define SEEN_ONLY 1
#define VERBOSE 2

struct write_shallow_data {
	struct strbuf *out;
	int use_pack_protocol;
	int count;
	unsigned flags;
};

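/*
 * Emit one entry per registered shallow graft. With use_pack_protocol
 * the entry is written as a pkt-line payload ("shallow <hex>");
 * otherwise it is a plain hex line, which is the on-disk shallow file
 * format.
 */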
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
	struct write_shallow_data *data = cb_data;
	const char *hex = oid_to_hex(&graft->oid);
	if (graft->nr_parent != -1)
		return 0;
	if (data->flags & SEEN_ONLY) {
		struct commit *c = lookup_commit(&graft->oid);
		if (!c || !(c->object.flags & SEEN)) {
			if (data->flags & VERBOSE)
				/* "c" may be NULL here; graft->oid is the same ID */
				printf("Removing %s from .git/shallow\n", hex);
			return 0;
		}
	}
	data->count++;
	if (data->use_pack_protocol)
		packet_buf_write(data->out, "shallow %s", hex);
	else {
		strbuf_addstr(data->out, hex);
		strbuf_addch(data->out, '\n');
	}
	return 0;
}

static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
				   const struct oid_array *extra,
				   unsigned flags)
{
	struct write_shallow_data data;
	int i;
	data.out = out;
	data.use_pack_protocol = use_pack_protocol;
	data.count = 0;
	data.flags = flags;
	for_each_commit_graft(write_one_shallow, &data);
	if (!extra)
		return data.count;
	for (i = 0; i < extra->nr; i++) {
		strbuf_addstr(out, oid_to_hex(extra->oid + i));
		strbuf_addch(out, '\n');
		data.count++;
	}
	return data.count;
}

int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
			  const struct oid_array *extra)
{
	return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}

const char *setup_temporary_shallow(const struct oid_array *extra)
{
	struct tempfile *temp;
	struct strbuf sb = STRBUF_INIT;

	if (write_shallow_commits(&sb, 0, extra)) {
		temp = xmks_tempfile(git_path("shallow_XXXXXX"));

		if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
		    close_tempfile_gently(temp) < 0)
			die_errno("failed to write to %s",
				  get_tempfile_path(temp));
		strbuf_release(&sb);
		return get_tempfile_path(temp);
	}
	/*
	 * is_repository_shallow() sees empty string as "no shallow
	 * file".
	 */
	return "";
}

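/*
 * Write the current shallow list (plus "extra") to a lock file next to
 * $GIT_DIR/shallow and point *alternate_shallow_file at it. When there
 * is nothing to write, *alternate_shallow_file is set to "", which
 * is_repository_shallow() treats as "no shallow file".
 */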
void setup_alternate_shallow(struct lock_file *shallow_lock,
			     const char **alternate_shallow_file,
			     const struct oid_array *extra)
{
	struct strbuf sb = STRBUF_INIT;
	int fd;

	fd = hold_lock_file_for_update(shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits(&sb, 0, extra)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(shallow_lock));
		*alternate_shallow_file = get_lock_file_path(shallow_lock);
	} else
		/*
		 * is_repository_shallow() sees empty string as "no
		 * shallow file".
		 */
		*alternate_shallow_file = "";
	strbuf_release(&sb);
}

static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
	int fd = *(int *)cb;
	if (graft->nr_parent == -1)
		packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
	return 0;
}

void advertise_shallow_grafts(int fd)
{
	if (!is_repository_shallow(the_repository))
		return;
	for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}

/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN".
 */
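/*
 * With show_only, the SEEN_ONLY | VERBOSE pass merely reports which
 * entries would be dropped from .git/shallow; nothing is rewritten.
 * Otherwise the surviving entries are written out under the lock file
 * and committed into place, and the shallow file is removed entirely
 * if none survive.
 */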
void prune_shallow(int show_only)
{
	static struct lock_file shallow_lock;
	struct strbuf sb = STRBUF_INIT;
	int fd;

	if (show_only) {
		write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY | VERBOSE);
		strbuf_release(&sb);
		return;
	}
	fd = hold_lock_file_for_update(&shallow_lock,
				       git_path_shallow(the_repository),
				       LOCK_DIE_ON_ERROR);
	check_shallow_file_for_update(the_repository);
	if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
		if (write_in_full(fd, sb.buf, sb.len) < 0)
			die_errno("failed to write to %s",
				  get_lock_file_path(&shallow_lock));
		commit_lock_file(&shallow_lock);
	} else {
		unlink(git_path_shallow(the_repository));
		rollback_lock_file(&shallow_lock);
	}
	strbuf_release(&sb);
}

struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
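/*
 * In other words: a shallow commit advertised by the sender lands in
 * "ours" if we have the object locally (unless it is already recorded
 * in our own .git/shallow, in which case it is skipped entirely), and
 * in "theirs" if we do not have it yet. Both arrays store indices into
 * info->shallow rather than the object IDs themselves.
 */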
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
	int i;
	trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
	memset(info, 0, sizeof(*info));
	info->shallow = sa;
	if (!sa)
		return;
	ALLOC_ARRAY(info->ours, sa->nr);
	ALLOC_ARRAY(info->theirs, sa->nr);
	for (i = 0; i < sa->nr; i++) {
		if (has_object_file(sa->oid + i)) {
			struct commit_graft *graft;
			graft = lookup_commit_graft(the_repository,
						    &sa->oid[i]);
			if (graft && graft->nr_parent < 0)
				continue;
			info->ours[info->nr_ours++] = i;
		} else
			info->theirs[info->nr_theirs++] = i;
	}
}

void clear_shallow_info(struct shallow_info *info)
{
	free(info->ours);
	free(info->theirs);
}

/* Step 4, remove non-existent ones in "theirs" after getting the pack */

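/*
 * Compact info->theirs in place: entries whose objects still do not
 * exist locally after the pack has arrived are dropped, everything
 * else keeps its relative order.
 */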
void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
	struct object_id *oid = info->shallow->oid;
	int i, dst;
	trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		if (has_object_file(oid + info->theirs[i]))
			dst++;
	}
	info->nr_theirs = dst;
}

define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
	struct ref_bitmap ref_bitmap;
	unsigned nr_bits;
	char **pools;
	char *free, *end;
	unsigned pool_count;
};

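/*
 * Hand out one bitmap's worth of memory (nr_bits bits, rounded up to
 * whole uint32_t words) from a set of POOL_SIZE pools, growing the
 * pool list as needed. The memory is only released wholesale when the
 * paint_info is torn down in assign_shallow_commits_to_refs().
 */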
static uint32_t *paint_alloc(struct paint_info *info)
{
	unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
	unsigned size = nr * sizeof(uint32_t);
	void *p;
	if (!info->pool_count || size > info->end - info->free) {
		if (size > POOL_SIZE)
			die("BUG: pool size too small for %d in paint_alloc()",
			    size);
		info->pool_count++;
		REALLOC_ARRAY(info->pools, info->pool_count);
		info->free = xmalloc(POOL_SIZE);
		info->pools[info->pool_count - 1] = info->free;
		info->end = info->free + POOL_SIZE;
	}
	p = info->free;
	info->free += size;
	return p;
}

/*
 * Given a commit object ID, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
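/*
 * Bit "id" lives in word id / 32, at position id % 32, so e.g. ref 40
 * is tracked by bit 8 of bitmap[1]. A NULL slot in ref_bitmap means
 * "reached by no ref so far".
 */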
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(oid, 1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));
	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;

		if (*refs == NULL)
			*refs = bitmap;
		else {
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}

static int mark_uninteresting(const char *refname, const struct object_id *oid,
			      int flags, void *cb_data)
{
	struct commit *commit = lookup_commit_reference_gently(oid, 1);
	if (!commit)
		return 0;
	commit->object.flags |= UNINTERESTING;
	mark_parents_uninteresting(commit);
	return 0;
}

static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
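/*
 * For example, with three sender shallow commits and two new refs,
 * "ref[1] still needs shallow commit 2" is recorded as bit 1 being set
 * in used[2]; used[m] stays NULL when no ref reaches shallow commit m.
 */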
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;

		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bounds */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(&oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(&oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * Unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}

struct commit_array {
	struct commit **commits;
	int nr, alloc;
};

static int add_ref(const char *refname, const struct object_id *oid,
		   int flags, void *cb_data)
{
	struct commit_array *ca = cb_data;
	ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
	ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
	if (ca->commits[ca->nr])
		ca->nr++;
	return 0;
}

static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
	unsigned int i;
	if (!ref_status)
		return;
	for (i = 0; i < nr; i++)
		if (bitmap[i / 32] & (1U << (i % 32)))
			ref_status[i]++;
}

/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(&oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(&oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}

/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
	if (si->need_reachability_test[c]) {
		struct commit *commit = lookup_commit(&si->shallow->oid[c]);

		if (!si->commits) {
			struct commit_array ca;

			memset(&ca, 0, sizeof(ca));
			head_ref(add_ref, &ca);
			for_each_ref(add_ref, &ca);
			si->commits = ca.commits;
			si->nr_commits = ca.nr;
		}

		si->reachable[c] = in_merge_bases_many(commit,
						       si->nr_commits,
						       si->commits);
		si->need_reachability_test[c] = 0;
	}
	return si->reachable[c];
}