struct object_id rev;
struct commit *head;
int errs = 0;
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
die(_("'%s' cannot be used with updating paths"), "--track");
return run_add_interactive(revision, "--patch=checkout",
&opts->pathspec);
- lock_file = xcalloc(1, sizeof(struct lock_file));
-
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
if (read_cache_preload(&opts->pathspec) < 0)
return error(_("index file corrupt"));
}
errs |= finish_delayed_checkout(&state);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
read_ref_full("HEAD", 0, rev.hash, NULL);
* update paths in the work tree, and we cannot revert
* them.
*/
+ /* fallthrough */
case 0:
return 0;
default:
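
The bare /* fallthrough */ comment is one of the markers GCC's -Wimplicit-fallthrough (as enabled by -Wextra) accepts as a declaration of intent, so the drop into case 0: stops warning. A tiny self-contained illustration with made-up cases:

#include <stdio.h>

/* Hypothetical classifier; the annotated fallthrough is intentional. */
static const char *classify(int code)
{
	switch (code) {
	case 1:
		fprintf(stderr, "hard failure\n");
		/* fallthrough */
	case 2:
		return "error";
	case 0:
		return "ok";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", classify(1));
	return 0;
}
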
int *writeout_error)
{
int ret;
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock_file = LOCK_INIT;
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
if (read_cache_preload(NULL) < 0)
return error(_("index file corrupt"));
if (!cache_tree_fully_valid(active_cache_tree))
cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
if (!opts->force && !opts->quiet)
for_each_ref(add_pending_uninteresting_ref, &revs);
add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+ /* Save pending objects, so they can be cleaned up later. */
refs = revs.pending;
revs.leak_pending = 1;
+ /*
+ * prepare_revision_walk (together with .leak_pending = 1) makes us
+ * the sole owner of the list of pending objects.
+ */
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
if (!(old->object.flags & UNINTERESTING))
suggest_reattach(old, &revs);
else
describe_detached_head(_("Previous HEAD position was"), old);
+ /* Clean up objects used, as they will be reused. */
clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
- free(refs.objects);
+
+ object_array_clear(&refs);
}
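
Two things worth spelling out in this hunk. First, setting revs.leak_pending = 1 before prepare_revision_walk() stops the walk from freeing revs.pending, which is why the saved copy in refs stays valid and becomes the caller's to clean up. Second, if I read the old code right, free(refs.objects) released only the backing array and leaked the name/path strings owned by each entry, whereas object_array_clear() releases the entries too and leaves the array empty and reusable. A standalone sketch of that ownership pattern; entry_array and its helpers are hypothetical, not git's object_array API:

#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature of the object_array ownership pattern. */
struct entry {
	char *name;	/* owned by the array */
};
struct entry_array {
	struct entry *items;
	size_t nr, alloc;
};

static void entry_array_push(struct entry_array *a, const char *name)
{
	if (a->nr == a->alloc) {
		a->alloc = a->alloc ? 2 * a->alloc : 8;
		a->items = realloc(a->items, a->alloc * sizeof(*a->items));
		if (!a->items)
			abort();	/* allocation failure handling elided */
	}
	a->items[a->nr++].name = strdup(name);
}

/* Release each entry's owned string, then the backing array itself. */
static void entry_array_clear(struct entry_array *a)
{
	for (size_t i = 0; i < a->nr; i++)
		free(a->items[i].name);
	free(a->items);
	a->items = NULL;
	a->nr = a->alloc = 0;
}

int main(void)
{
	struct entry_array refs = { 0 };

	entry_array_push(&refs, "refs/heads/master");
	entry_array_push(&refs, "HEAD");

	/*
	 * free(refs.items) alone would leak both strdup()'d names;
	 * clearing releases the entries and leaves refs reusable.
	 */
	entry_array_clear(&refs);
	return 0;
}
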
static int switch_branches(const struct checkout_opts *opts,
if (new->path && !opts->force_detach && !opts->new_branch &&
!opts->ignore_other_worktrees) {
- struct object_id oid;
int flag;
- char *head_ref = resolve_refdup("HEAD", 0, oid.hash, &flag);
+ char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
if (head_ref &&
(!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
die_if_checked_out(new->path, 1);
strbuf_release(&buf);
}
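
Passing NULL for the hash output relies on the ref-resolution functions treating that parameter as optional; only flag is consulted here, so the throwaway oid buffer can go. The same optional-out-parameter idiom in standalone form (all names hypothetical):

#include <stdio.h>

/*
 * Optional out-parameter idiom, standalone sketch (not git's API):
 * callers that do not need `value` pass NULL and the store is skipped.
 */
static int lookup(const char *key, int *value, int *flags)
{
	(void)key;	/* a real implementation would use it */
	if (value)
		*value = 42;
	if (flags)
		*flags = 1;
	return 0;
}

int main(void)
{
	int flags;

	/* Only the flags matter here; no throwaway `value` variable. */
	if (!lookup("HEAD", NULL, &flags))
		printf("flags = %d\n", flags);
	return 0;
}
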
+ UNLEAK(opts);
if (opts.patch_mode || opts.pathspec.nr)
return checkout_paths(&opts, new.name);
else
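
UNLEAK(opts) annotates a variable that is deliberately not freed on an about-to-exit path: in normal builds it compiles to (nearly) nothing, and in leak-checking builds it keeps the pointed-to memory reachable so tools like LeakSanitizer stay quiet. A rough sketch of how such an annotation can work, modeled on the idea rather than claimed to be git's exact implementation:

#include <stdlib.h>
#include <string.h>

/*
 * In leak-checking builds, copy the annotated variable's bytes into a
 * permanently reachable list so any pointers inside it keep their
 * targets reachable; otherwise, just reference the variable.
 */
#ifdef SUPPRESS_ANNOTATED_LEAKS
static void unleak_memory(const void *ptr, size_t len)
{
	static struct suppressed {
		struct suppressed *prev;
	} *last;
	struct suppressed *s = malloc(sizeof(*s) + len);

	if (!s)
		return;
	memcpy(s + 1, ptr, len);
	s->prev = last;
	last = s;
}
#define UNLEAK(var) unleak_memory(&(var), sizeof(var))
#else
#define UNLEAK(var) ((void)(var))
#endif

int main(void)
{
	char *opts = strdup("example");

	/* About to exit: freeing `opts` is busywork, but annotate it. */
	UNLEAK(opts);
	return 0;
}
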