for_each_ref(add_pending_uninteresting_ref, &revs);
add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+ /* Save pending objects, so they can be cleaned up later. */
refs = revs.pending;
revs.leak_pending = 1;
+ /*
+ * prepare_revision_walk (together with .leak_pending = 1) makes us
+ * the sole owner of the list of pending objects.
+ */
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
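+ /*
+ * After the walk, everything reachable from the refs or from the new
+ * HEAD carries the UNINTERESTING flag; if the old HEAD escaped it,
+ * no ref reaches that commit and the checkout would leave it behind.
+ */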
if (!(old->object.flags & UNINTERESTING))
suggest_reattach(old, &revs);
else
describe_detached_head(_("Previous HEAD position was"), old);
+ /* Clean up objects used, as they will be reused. */
clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
- free(refs.objects);
+
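+ /*
+ * Unlike the bare free() above, object_array_clear() also releases
+ * the name string allocated for each entry, not just the array.
+ */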
+ object_array_clear(&refs);
}
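
The hunk above implements a hand-off of ownership. Below is a minimal
sketch of the resulting pattern, assuming the historical revision-walk
API used here (init_revisions(), the leak_pending flag, and the same
placeholder setup_revisions() arguments); it is an illustration, not a
verbatim copy of the function:

	struct rev_info revs;
	struct object_array refs;

	init_revisions(&revs, NULL);
	setup_revisions(0, NULL, &revs, NULL);
	/* ... add interesting and UNINTERESTING pending objects ... */

	refs = revs.pending;	/* shallow copy of the pending array */
	revs.leak_pending = 1;	/* the walk must not free that array */

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");

	/* the caller is now the sole owner and must clean up */
	clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
	object_array_clear(&refs);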
static int checkout_branch(struct checkout_opts *opts,
if (new->path && !opts->force_detach && !opts->new_branch &&
!opts->ignore_other_worktrees) {
- struct object_id oid;
int flag;
- char *head_ref = resolve_refdup("HEAD", 0, oid.hash, &flag);
+ char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
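+ /*
+ * Passing NULL as the hash buffer tells resolve_refdup() that only
+ * the flags and the returned ref name are wanted; the removed
+ * object_id was written to but never read.
+ */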
if (head_ref &&
(!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
die_if_checked_out(new->path, 1);
free(head_ref);
}
int cmd_checkout(int argc, const char **argv, const char *prefix)
strbuf_release(&buf);
}
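+ /*
+ * UNLEAK() marks a variable whose resources live until the process
+ * exits: freeing them here would be pointless work, but without the
+ * annotation leak checkers would report them as leaked.
+ */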
+ UNLEAK(opts);
if (opts.patch_mode || opts.pathspec.nr)
return checkout_paths(&opts, new.name);
else