shallow.c on commit Merge branch 'en/merge-recursive-tests' (93b74a7)
#include "cache.h"
#include "tempfile.h"
#include "lockfile.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "remote.h"
#include "refs.h"
#include "sha1-array.h"
#include "diff.h"
#include "revision.h"
#include "commit-slab.h"
#include "list-objects.h"

static int is_shallow = -1;
static struct stat_validity shallow_stat;
static char *alternate_shallow_file;

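/*
 * Override the path of the shallow file consulted by this process
 * (e.g. fetch-pack passes a temporary file via --shallow-file). Must
 * be called before is_repository_shallow() caches its answer.
 */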
void set_alternate_shallow_file(const char *path, int override)
{
        if (is_shallow != -1)
                BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
        if (alternate_shallow_file && !override)
                return;
        free(alternate_shallow_file);
        alternate_shallow_file = xstrdup_or_null(path);
}

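/*
 * Record a commit as shallow: register a graft with nr_parent == -1
 * so the commit is treated as having no parents, and drop any parents
 * already parsed into the in-core commit object.
 */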
int register_shallow(const struct object_id *oid)
{
        struct commit_graft *graft =
                xmalloc(sizeof(struct commit_graft));
        struct commit *commit = lookup_commit(oid);

        oidcpy(&graft->oid, oid);
        graft->nr_parent = -1;
        if (commit && commit->object.parsed)
                commit->parents = NULL;
        return register_commit_graft(graft, 0);
}

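/*
 * Answer whether this repository is shallow. On the first call, read
 * $GIT_DIR/shallow (or the alternate shallow file, if one was set),
 * register every commit listed there as shallow, and cache the result
 * in is_shallow for subsequent calls.
 */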
int is_repository_shallow(void)
{
        FILE *fp;
        char buf[1024];
        const char *path = alternate_shallow_file;

        if (is_shallow >= 0)
                return is_shallow;

        if (!path)
                path = git_path_shallow();
        /*
         * fetch-pack sets '--shallow-file ""' as an indicator that no
         * shallow file should be used. We could just open it and it
         * will likely fail. But let's do an explicit check instead.
         */
        if (!*path || (fp = fopen(path, "r")) == NULL) {
                stat_validity_clear(&shallow_stat);
                is_shallow = 0;
                return is_shallow;
        }
        stat_validity_update(&shallow_stat, fileno(fp));
        is_shallow = 1;

        while (fgets(buf, sizeof(buf), fp)) {
                struct object_id oid;
                if (get_oid_hex(buf, &oid))
                        die("bad shallow line: %s", buf);
                register_shallow(&oid);
        }
        fclose(fp);
        return is_shallow;
}

/*
 * TODO: use "int" elemtype instead of "int *" when/if commit-slab
 * supports a "valid" flag.
 */
define_commit_slab(commit_depth, int *);
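
/*
 * Walk down from the given heads and compute the shallow boundary for
 * "depth": commits closer to a head than "depth" are marked with
 * not_shallow_flag, while boundary commits (those at the cut-off, or
 * existing shallow grafts) are marked with shallow_flag and collected
 * into the returned list.
 */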
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
                int shallow_flag, int not_shallow_flag)
{
        int i = 0, cur_depth = 0;
        struct commit_list *result = NULL;
        struct object_array stack = OBJECT_ARRAY_INIT;
        struct commit *commit = NULL;
        struct commit_graft *graft;
        struct commit_depth depths;

        init_commit_depth(&depths);
        while (commit || i < heads->nr || stack.nr) {
                struct commit_list *p;
                if (!commit) {
                        if (i < heads->nr) {
                                int **depth_slot;
                                commit = (struct commit *)
                                        deref_tag(heads->objects[i++].item, NULL, 0);
                                if (!commit || commit->object.type != OBJ_COMMIT) {
                                        commit = NULL;
                                        continue;
                                }
                                depth_slot = commit_depth_at(&depths, commit);
                                if (!*depth_slot)
                                        *depth_slot = xmalloc(sizeof(int));
                                **depth_slot = 0;
                                cur_depth = 0;
                        } else {
                                commit = (struct commit *)
                                        object_array_pop(&stack);
                                cur_depth = **commit_depth_at(&depths, commit);
                        }
                }
                parse_commit_or_die(commit);
                cur_depth++;
                if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
                    (is_repository_shallow() && !commit->parents &&
                     (graft = lookup_commit_graft(&commit->object.oid)) != NULL &&
                     graft->nr_parent < 0)) {
                        commit_list_insert(commit, &result);
                        commit->object.flags |= shallow_flag;
                        commit = NULL;
                        continue;
                }
                commit->object.flags |= not_shallow_flag;
                for (p = commit->parents, commit = NULL; p; p = p->next) {
                        int **depth_slot = commit_depth_at(&depths, p->item);
                        if (!*depth_slot) {
                                *depth_slot = xmalloc(sizeof(int));
                                **depth_slot = cur_depth;
                        } else {
                                if (cur_depth >= **depth_slot)
                                        continue;
                                **depth_slot = cur_depth;
                        }
                        if (p->next)
                                add_object_array(&p->item->object,
                                                NULL, &stack);
                        else {
                                commit = p->item;
                                cur_depth = **commit_depth_at(&depths, commit);
                        }
                }
        }
        for (i = 0; i < depths.slab_count; i++) {
                int j;

                for (j = 0; j < depths.slab_size; j++)
                        free(depths.slab[i][j]);
        }
        clear_commit_depth(&depths);

        return result;
}

static void show_commit(struct commit *commit, void *data)
{
        commit_list_insert(commit, data);
}

/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * is also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
                                                    int shallow_flag,
                                                    int not_shallow_flag)
{
        struct commit_list *result = NULL, *p;
        struct commit_list *not_shallow_list = NULL;
        struct rev_info revs;
        int both_flags = shallow_flag | not_shallow_flag;

        /*
         * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
         * set at this point. But better safe than sorry.
         */
        clear_object_flags(both_flags);

        is_repository_shallow(); /* make sure shallows are read */

        init_revisions(&revs, NULL);
        save_commit_buffer = 0;
        setup_revisions(ac, av, &revs, NULL);

        if (prepare_revision_walk(&revs))
                die("revision walk setup failed");
        traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

        /* Mark all reachable commits as NOT_SHALLOW */
        for (p = not_shallow_list; p; p = p->next)
                p->item->object.flags |= not_shallow_flag;

        /*
         * Mark border commits SHALLOW + NOT_SHALLOW.
         * We cannot clear NOT_SHALLOW right away. Imagine border
         * commit A is processed first, and commit B, whose parent is
         * A, is processed later. If NOT_SHALLOW on A were cleared in
         * the first step, B itself would be considered a border
         * commit in the second step, which is incorrect.
         */
        for (p = not_shallow_list; p; p = p->next) {
                struct commit *c = p->item;
                struct commit_list *parent;

                if (parse_commit(c))
                        die("unable to parse commit %s",
                            oid_to_hex(&c->object.oid));

                for (parent = c->parents; parent; parent = parent->next)
                        if (!(parent->item->object.flags & not_shallow_flag)) {
                                c->object.flags |= shallow_flag;
                                commit_list_insert(c, &result);
                                break;
                        }
        }
        free_commit_list(not_shallow_list);

        /*
         * Now we can clean up NOT_SHALLOW on border commits. Having
         * both flags set can confuse the caller.
         */
        for (p = result; p; p = p->next) {
                struct object *o = &p->item->object;
                if ((o->flags & both_flags) == both_flags)
                        o->flags &= ~not_shallow_flag;
        }
        return result;
}

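/*
 * Die if $GIT_DIR/shallow has been modified since we last read it, so
 * that we never overwrite changes made by another process.
 */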
static void check_shallow_file_for_update(void)
{
        if (is_shallow == -1)
                BUG("shallow must be initialized by now");

        if (!stat_validity_check(&shallow_stat, git_path_shallow()))
                die("shallow file has changed since we read it");
}

#define SEEN_ONLY 1
#define VERBOSE   2

struct write_shallow_data {
        struct strbuf *out;
        int use_pack_protocol;
        int count;
        unsigned flags;
};

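/*
 * for_each_commit_graft() callback: append one shallow graft to
 * data->out, either as a pkt-line ("shallow <hex>") or as a plain hex
 * line. With SEEN_ONLY, grafts whose commits are not marked SEEN are
 * skipped (and reported when VERBOSE is set).
 */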
static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
{
        struct write_shallow_data *data = cb_data;
        const char *hex = oid_to_hex(&graft->oid);
        if (graft->nr_parent != -1)
                return 0;
        if (data->flags & SEEN_ONLY) {
                struct commit *c = lookup_commit(&graft->oid);
                if (!c || !(c->object.flags & SEEN)) {
                        if (data->flags & VERBOSE)
                                printf("Removing %s from .git/shallow\n", hex);
                        return 0;
                }
        }
        data->count++;
        if (data->use_pack_protocol)
                packet_buf_write(data->out, "shallow %s", hex);
        else {
                strbuf_addstr(data->out, hex);
                strbuf_addch(data->out, '\n');
        }
        return 0;
}

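/*
 * Write the current shallow list (optionally followed by the "extra"
 * object IDs) into "out" and return the number of entries written.
 */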
static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
                                   const struct oid_array *extra,
                                   unsigned flags)
{
        struct write_shallow_data data;
        int i;
        data.out = out;
        data.use_pack_protocol = use_pack_protocol;
        data.count = 0;
        data.flags = flags;
        for_each_commit_graft(write_one_shallow, &data);
        if (!extra)
                return data.count;
        for (i = 0; i < extra->nr; i++) {
                strbuf_addstr(out, oid_to_hex(extra->oid + i));
                strbuf_addch(out, '\n');
                data.count++;
        }
        return data.count;
}

int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
                          const struct oid_array *extra)
{
        return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
}

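/*
 * Write the shallow list (plus "extra") to a temporary file and
 * return its path. Return an empty string when there is nothing to
 * write, which is_repository_shallow() takes as "no shallow file".
 */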
const char *setup_temporary_shallow(const struct oid_array *extra)
{
        struct tempfile *temp;
        struct strbuf sb = STRBUF_INIT;

        if (write_shallow_commits(&sb, 0, extra)) {
                temp = xmks_tempfile(git_path("shallow_XXXXXX"));

                if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
                    close_tempfile_gently(temp) < 0)
                        die_errno("failed to write to %s",
                                  get_tempfile_path(temp));
                strbuf_release(&sb);
                return get_tempfile_path(temp);
        }
        /*
         * is_repository_shallow() sees empty string as "no shallow
         * file".
         */
        return "";
}

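/*
 * Take the lock on $GIT_DIR/shallow, write the shallow list (plus
 * "extra") to the lockfile, and point *alternate_shallow_file at it.
 * The caller is responsible for committing or rolling back the lock.
 */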
void setup_alternate_shallow(struct lock_file *shallow_lock,
                             const char **alternate_shallow_file,
                             const struct oid_array *extra)
{
        struct strbuf sb = STRBUF_INIT;
        int fd;

        fd = hold_lock_file_for_update(shallow_lock, git_path_shallow(),
                                       LOCK_DIE_ON_ERROR);
        check_shallow_file_for_update();
        if (write_shallow_commits(&sb, 0, extra)) {
                if (write_in_full(fd, sb.buf, sb.len) < 0)
                        die_errno("failed to write to %s",
                                  get_lock_file_path(shallow_lock));
                *alternate_shallow_file = get_lock_file_path(shallow_lock);
        } else
                /*
                 * is_repository_shallow() sees empty string as "no
                 * shallow file".
                 */
                *alternate_shallow_file = "";
        strbuf_release(&sb);
}

static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
{
        int fd = *(int *)cb;
        if (graft->nr_parent == -1)
                packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
        return 0;
}

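/*
 * Advertise all shallow grafts as "shallow <hex>" pkt-lines on fd,
 * doing nothing if the repository is not shallow.
 */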
void advertise_shallow_grafts(int fd)
{
        if (!is_repository_shallow())
                return;
        for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}

/*
 * mark_reachable_objects() should have been run prior to this and all
 * reachable commits marked as "SEEN".
 */
void prune_shallow(int show_only)
{
        struct lock_file shallow_lock = LOCK_INIT;
        struct strbuf sb = STRBUF_INIT;
        int fd;

        if (show_only) {
                write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY | VERBOSE);
                strbuf_release(&sb);
                return;
        }
        fd = hold_lock_file_for_update(&shallow_lock, git_path_shallow(),
                                       LOCK_DIE_ON_ERROR);
        check_shallow_file_for_update();
        if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
                if (write_in_full(fd, sb.buf, sb.len) < 0)
                        die_errno("failed to write to %s",
                                  get_lock_file_path(&shallow_lock));
                commit_lock_file(&shallow_lock);
        } else {
                unlink(git_path_shallow());
                rollback_lock_file(&shallow_lock);
        }
        strbuf_release(&sb);
}

struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);

/*
 * Step 1, split sender shallow commits into "ours" and "theirs"
 * Step 2, clean "ours" based on .git/shallow
 */
void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
{
        int i;
        trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
        memset(info, 0, sizeof(*info));
        info->shallow = sa;
        if (!sa)
                return;
        ALLOC_ARRAY(info->ours, sa->nr);
        ALLOC_ARRAY(info->theirs, sa->nr);
        for (i = 0; i < sa->nr; i++) {
                if (has_object_file(sa->oid + i)) {
                        struct commit_graft *graft;
                        graft = lookup_commit_graft(&sa->oid[i]);
                        if (graft && graft->nr_parent < 0)
                                continue;
                        info->ours[info->nr_ours++] = i;
                } else
                        info->theirs[info->nr_theirs++] = i;
        }
}

void clear_shallow_info(struct shallow_info *info)
{
        free(info->ours);
        free(info->theirs);
}

/* Step 4, remove non-existent ones in "theirs" after getting the pack */

void remove_nonexistent_theirs_shallow(struct shallow_info *info)
{
        struct object_id *oid = info->shallow->oid;
        int i, dst;
        trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
        for (i = dst = 0; i < info->nr_theirs; i++) {
                if (i != dst)
                        info->theirs[dst] = info->theirs[i];
                if (has_object_file(oid + info->theirs[i]))
                        dst++;
        }
        info->nr_theirs = dst;
}

define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

struct paint_info {
        struct ref_bitmap ref_bitmap;
        unsigned nr_bits;
        char **pools;
        char *free, *end;
        unsigned pool_count;
};

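/*
 * Carve one ref bitmap (nr_bits wide, rounded up to whole uint32_t
 * words) out of the current pool, allocating a new POOL_SIZE pool
 * when the current one runs out of room.
 */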
static uint32_t *paint_alloc(struct paint_info *info)
{
        unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
        unsigned size = nr * sizeof(uint32_t);
        void *p;
        if (!info->pool_count || size > info->end - info->free) {
                if (size > POOL_SIZE)
                        BUG("pool size too small for %u in paint_alloc()",
                            size);
                info->pool_count++;
                REALLOC_ARRAY(info->pools, info->pool_count);
                info->free = xmalloc(POOL_SIZE);
                info->pools[info->pool_count - 1] = info->free;
                info->end = info->free + POOL_SIZE;
        }
        p = info->free;
        info->free += size;
        return p;
}

/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
                       unsigned int id)
{
        unsigned int i, nr;
        struct commit_list *head = NULL;
        int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
        size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
        struct commit *c = lookup_commit_reference_gently(oid, 1);
        uint32_t *tmp; /* to be freed before return */
        uint32_t *bitmap;

        if (!c)
                return;

        tmp = xmalloc(bitmap_size);
        bitmap = paint_alloc(info);
        memset(bitmap, 0, bitmap_size);
        bitmap[id / 32] |= (1U << (id % 32));
        commit_list_insert(c, &head);
        while (head) {
                struct commit_list *p;
                struct commit *c = pop_commit(&head);
                uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

                /* XXX check "UNINTERESTING" from pack bitmaps if available */
                if (c->object.flags & (SEEN | UNINTERESTING))
                        continue;
                else
                        c->object.flags |= SEEN;

                if (*refs == NULL)
                        *refs = bitmap;
                else {
                        memcpy(tmp, *refs, bitmap_size);
                        for (i = 0; i < bitmap_nr; i++)
                                tmp[i] |= bitmap[i];
                        if (memcmp(tmp, *refs, bitmap_size)) {
                                *refs = paint_alloc(info);
                                memcpy(*refs, tmp, bitmap_size);
                        }
                }

                if (c->object.flags & BOTTOM)
                        continue;

                if (parse_commit(c))
                        die("unable to parse commit %s",
                            oid_to_hex(&c->object.oid));

                for (p = c->parents; p; p = p->next) {
                        if (p->item->object.flags & SEEN)
                                continue;
                        commit_list_insert(p->item, &head);
                }
        }

        nr = get_max_object_index();
        for (i = 0; i < nr; i++) {
                struct object *o = get_indexed_object(i);
                if (o && o->type == OBJ_COMMIT)
                        o->flags &= ~SEEN;
        }

        free(tmp);
}

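/*
 * for_each_ref() callback: mark the commit at the tip of each
 * existing ref, and its ancestors, UNINTERESTING so that paint_down()
 * stops as soon as it reaches history we already have.
 */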
static int mark_uninteresting(const char *refname, const struct object_id *oid,
                              int flags, void *cb_data)
{
        struct commit *commit = lookup_commit_reference_gently(oid, 1);
        if (!commit)
                return 0;
        commit->object.flags |= UNINTERESTING;
        mark_parents_uninteresting(commit);
        return 0;
}

static void post_assign_shallow(struct shallow_info *info,
                                struct ref_bitmap *ref_bitmap,
                                int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit is set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL, it's an array of ref->nr ints; ref_status[i] is true
 * if the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
                                    uint32_t **used, int *ref_status)
{
        struct object_id *oid = info->shallow->oid;
        struct oid_array *ref = info->ref;
        unsigned int i, nr;
        int *shallow, nr_shallow = 0;
        struct paint_info pi;

        trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
        ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
        for (i = 0; i < info->nr_ours; i++)
                shallow[nr_shallow++] = info->ours[i];
        for (i = 0; i < info->nr_theirs; i++)
                shallow[nr_shallow++] = info->theirs[i];

        /*
         * Prepare the commit graph to track what refs can reach what
         * (new) shallow commits.
         */
        nr = get_max_object_index();
        for (i = 0; i < nr; i++) {
                struct object *o = get_indexed_object(i);
                if (!o || o->type != OBJ_COMMIT)
                        continue;

                o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
        }

        memset(&pi, 0, sizeof(pi));
        init_ref_bitmap(&pi.ref_bitmap);
        pi.nr_bits = ref->nr;

        /*
         * "--not --all" to cut the traversal short when new refs
         * connect to old refs. If they do not (e.g. forced ref
         * updates), the traversal has to go all the way down to the
         * current shallow commits.
         */
        head_ref(mark_uninteresting, NULL);
        for_each_ref(mark_uninteresting, NULL);

        /* Mark potential bottoms so we won't go out of bounds */
        for (i = 0; i < nr_shallow; i++) {
                struct commit *c = lookup_commit(&oid[shallow[i]]);
                c->object.flags |= BOTTOM;
        }

        for (i = 0; i < ref->nr; i++)
                paint_down(&pi, ref->oid + i, i);

        if (used) {
                int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
                memset(used, 0, sizeof(*used) * info->shallow->nr);
                for (i = 0; i < nr_shallow; i++) {
                        const struct commit *c = lookup_commit(&oid[shallow[i]]);
                        uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
                        if (*map)
                                used[shallow[i]] = xmemdupz(*map, bitmap_size);
                }
                /*
                 * unreachable shallow commits are not removed from
                 * "ours" and "theirs". The user is supposed to run
                 * step 7 on every ref separately and not trust "ours"
                 * and "theirs" any more.
                 */
        } else
                post_assign_shallow(info, &pi.ref_bitmap, ref_status);

        clear_ref_bitmap(&pi.ref_bitmap);
        for (i = 0; i < pi.pool_count; i++)
                free(pi.pools[i]);
        free(pi.pools);
        free(shallow);
}

struct commit_array {
        struct commit **commits;
        int nr, alloc;
};

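/*
 * for_each_ref() callback: collect the commits at the tips of all
 * existing refs into a commit_array, for use with
 * in_merge_bases_many() reachability tests below.
 */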
static int add_ref(const char *refname, const struct object_id *oid,
                   int flags, void *cb_data)
{
        struct commit_array *ca = cb_data;
        ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
        ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
        if (ca->commits[ca->nr])
                ca->nr++;
        return 0;
}

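/*
 * For every ref whose bit is set in "bitmap", count it in ref_status
 * (if the caller asked for ref_status at all).
 */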
static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
{
        unsigned int i;
        if (!ref_status)
                return;
        for (i = 0; i < nr; i++)
                if (bitmap[i / 32] & (1U << (i % 32)))
                        ref_status[i]++;
}

/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
                                struct ref_bitmap *ref_bitmap,
                                int *ref_status)
{
        struct object_id *oid = info->shallow->oid;
        struct commit *c;
        uint32_t **bitmap;
        int dst, i, j;
        int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
        struct commit_array ca;

        trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
        if (ref_status)
                memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

        /* Remove unreachable shallow commits from "theirs" */
        for (i = dst = 0; i < info->nr_theirs; i++) {
                if (i != dst)
                        info->theirs[dst] = info->theirs[i];
                c = lookup_commit(&oid[info->theirs[i]]);
                bitmap = ref_bitmap_at(ref_bitmap, c);
                if (!*bitmap)
                        continue;
                for (j = 0; j < bitmap_nr; j++)
                        if (bitmap[0][j]) {
                                update_refstatus(ref_status, info->ref->nr, *bitmap);
                                dst++;
                                break;
                        }
        }
        info->nr_theirs = dst;

        memset(&ca, 0, sizeof(ca));
        head_ref(add_ref, &ca);
        for_each_ref(add_ref, &ca);

        /* Remove unreachable shallow commits from "ours" */
        for (i = dst = 0; i < info->nr_ours; i++) {
                if (i != dst)
                        info->ours[dst] = info->ours[i];
                c = lookup_commit(&oid[info->ours[i]]);
                bitmap = ref_bitmap_at(ref_bitmap, c);
                if (!*bitmap)
                        continue;
                for (j = 0; j < bitmap_nr; j++)
                        if (bitmap[0][j] &&
                            /* Step 7, reachability test at commit level */
                            !in_merge_bases_many(c, ca.nr, ca.commits)) {
                                update_refstatus(ref_status, info->ref->nr, *bitmap);
                                dst++;
                                break;
                        }
        }
        info->nr_ours = dst;

        free(ca.commits);
}

/* (Delayed) step 7, reachability test at commit level */
int delayed_reachability_test(struct shallow_info *si, int c)
{
        if (si->need_reachability_test[c]) {
                struct commit *commit = lookup_commit(&si->shallow->oid[c]);

                if (!si->commits) {
                        struct commit_array ca;

                        memset(&ca, 0, sizeof(ca));
                        head_ref(add_ref, &ca);
                        for_each_ref(add_ref, &ca);
                        si->commits = ca.commits;
                        si->nr_commits = ca.nr;
                }

                si->reachable[c] = in_merge_bases_many(commit,
                                                       si->nr_commits,
                                                       si->commits);
                si->need_reachability_test[c] = 0;
        }
        return si->reachable[c];
}