#include "builtin.h"
+#include "config.h"
#include "lockfile.h"
#include "parse-options.h"
#include "refs.h"
static int post_checkout_hook(struct commit *old, struct commit *new, int changed)
{
return run_hook_le(NULL, "post-checkout",
- sha1_to_hex(old ? old->object.oid.hash : null_sha1),
- sha1_to_hex(new ? new->object.oid.hash : null_sha1),
+ oid_to_hex(old ? &old->object.oid : &null_oid),
+ oid_to_hex(new ? &new->object.oid : &null_oid),
changed ? "1" : "0", NULL);
/* "new" can be NULL when checking out from the index before
a commit exists. */
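/*
 * oid_to_hex() formats a struct object_id as a hex string, and &null_oid
 * is the all-zero object ID, so the hook still receives an all-zero hash
 * when there is no old or new commit (for example on an initial checkout).
 */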
len = base->len + strlen(pathname);
ce = xcalloc(1, cache_entry_size(len));
- hashcpy(ce->sha1, sha1);
+ hashcpy(ce->oid.hash, sha1);
memcpy(ce->name, base->buf, base->len);
memcpy(ce->name + base->len, pathname, len - base->len);
ce->ce_flags = create_ce_flags(0) | CE_UPDATE;
if (pos >= 0) {
struct cache_entry *old = active_cache[pos];
if (ce->ce_mode == old->ce_mode &&
- !hashcmp(ce->sha1, old->sha1)) {
+ !oidcmp(&ce->oid, &old->oid)) {
old->ce_flags |= CE_UPDATE;
free(ce);
return 0;
const char *path = ce->name;
mmfile_t ancestor, ours, theirs;
int status;
- unsigned char sha1[20];
+ struct object_id oid;
mmbuffer_t result_buf;
- unsigned char threeway[3][20];
+ struct object_id threeway[3];
unsigned mode = 0;
memset(threeway, 0, sizeof(threeway));
stage = ce_stage(ce);
if (!stage || strcmp(path, ce->name))
break;
- hashcpy(threeway[stage - 1], ce->sha1);
+ oidcpy(&threeway[stage - 1], &ce->oid);
if (stage == 2)
mode = create_ce_mode(ce->ce_mode);
pos++;
ce = active_cache[pos];
}
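/*
 * The loop above gathers the conflict stages recorded in the index for
 * "path": stage 1 is the common ancestor, stage 2 is "ours", stage 3 is
 * "theirs".  threeway[] is indexed by stage - 1 and the file mode is taken
 * from the "ours" entry.
 */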
- if (is_null_sha1(threeway[1]) || is_null_sha1(threeway[2]))
+ if (is_null_oid(&threeway[1]) || is_null_oid(&threeway[2]))
return error(_("path '%s' does not have necessary versions"), path);
- read_mmblob(&ancestor, threeway[0]);
- read_mmblob(&ours, threeway[1]);
- read_mmblob(&theirs, threeway[2]);
+ read_mmblob(&ancestor, &threeway[0]);
+ read_mmblob(&ours, &threeway[1]);
+ read_mmblob(&theirs, &threeway[2]);
/*
 * NEEDSWORK: re-create conflicts from merges with
 * merge.renormalize set, too
 */
/*
* NEEDSWORK:
* There is absolutely no reason to write this as a blob object
- * and create a phony cache entry just to leak. This hack is
- * primarily to get to the write_entry() machinery that massages
- * the contents to work-tree format and writes out which only
- * allows it for a cache entry. The code in write_entry() needs
- * to be refactored to allow us to feed a <buffer, size, mode>
- * instead of a cache entry. Such a refactoring would help
- * merge_recursive as well (it also writes the merge result to the
- * object database even when it may contain conflicts).
+ * and create a phony cache entry. This hack is primarily to get
+ * to the write_entry() machinery that massages the contents to
+ * work-tree format and writes out which only allows it for a
+ * cache entry. The code in write_entry() needs to be refactored
+ * to allow us to feed a <buffer, size, mode> instead of a cache
+ * entry. Such a refactoring would help merge_recursive as well
+ * (it also writes the merge result to the object database even
+ * when it may contain conflicts).
*/
if (write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, sha1))
+ blob_type, oid.hash))
die(_("Unable to add merge result for '%s'"), path);
- ce = make_cache_entry(mode, sha1, path, 2, 0);
+ free(result_buf.ptr);
+ ce = make_cache_entry(mode, oid.hash, path, 2, 0);
if (!ce)
die(_("make_cache_entry failed for path '%s'"), path);
status = checkout_entry(ce, state, NULL);
+ free(ce);
return status;
}
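/*
 * The refactoring the NEEDSWORK comment above asks for would give
 * write_entry() a buffer-based entry point, roughly of this shape.
 * write_entry_from_buffer() is hypothetical; nothing in this series
 * provides it:
 */
static int write_entry_from_buffer(const struct checkout *state,
		const char *path, unsigned mode,
		const char *buf, size_t size);
/*
 * checkout_merged() could then pass result_buf directly to the work tree
 * and skip the write_sha1_file()/make_cache_entry() round trip.
 */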
static int checkout_paths(const struct checkout_opts *opts, const char *revision)
{
int pos;
- struct checkout state;
+ struct checkout state = CHECKOUT_INIT;
static char *ps_matched;
- unsigned char rev[20];
+ struct object_id rev;
struct commit *head;
int errs = 0;
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
die(_("'%s' cannot be used with updating paths"), "--track");
return run_add_interactive(revision, "--patch=checkout",
&opts->pathspec);
- lock_file = xcalloc(1, sizeof(struct lock_file));
-
- hold_locked_index(lock_file, 1);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
if (read_cache_preload(&opts->pathspec) < 0)
return error(_("index file corrupt"));
return 1;
/* Now we are committed to check them out */
- memset(&state, 0, sizeof(state));
state.force = 1;
state.refresh_cache = 1;
state.istate = &the_index;
+
+ enable_delayed_checkout(&state);
for (pos = 0; pos < active_nr; pos++) {
struct cache_entry *ce = active_cache[pos];
if (ce->ce_flags & CE_MATCHED) {
pos = skip_same_name(ce, pos) - 1;
}
}
+ errs |= finish_delayed_checkout(&state);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
- read_ref_full("HEAD", 0, rev, NULL);
- head = lookup_commit_reference_gently(rev, 1);
+ read_ref_full("HEAD", 0, &rev, NULL);
+ head = lookup_commit_reference_gently(&rev, 1);
errs |= post_checkout_hook(head, head, 0);
return errs;
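/*
 * checkout_paths() above follows git's usual index-update shape, plus the
 * delayed-checkout bracket for long-running filter processes.  A minimal
 * sketch of that shape, assuming the cache.h/lockfile.h/entry APIs as they
 * stand at this point in the series (the function itself is illustrative
 * only):
 */
static int example_update_worktree(void)
{
	struct lock_file lock = LOCK_INIT;	/* lives on the stack, no xcalloc() */
	struct checkout state = CHECKOUT_INIT;
	int errs = 0;

	hold_locked_index(&lock, LOCK_DIE_ON_ERROR);	/* take .git/index.lock */
	if (read_cache_preload(NULL) < 0)
		return error(_("index file corrupt"));

	enable_delayed_checkout(&state);	/* filters may answer "delayed" */
	/* ... checkout_entry() calls for the matched cache entries ... */
	errs |= finish_delayed_checkout(&state);	/* collect delayed blobs */

	if (write_locked_index(&the_index, &lock, COMMIT_LOCK))
		die(_("unable to write new index file"));
	return errs;
}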
* update paths in the work tree, and we cannot revert
* them.
*/
+ /* fallthrough */
case 0:
return 0;
default:
{
struct strbuf buf = STRBUF_INIT;
- strbuf_branchname(&buf, branch->name);
+ strbuf_branchname(&buf, branch->name, INTERPRET_BRANCH_LOCAL);
if (strcmp(buf.buf, branch->name))
branch->name = xstrdup(buf.buf);
strbuf_splice(&buf, 0, 0, "refs/heads/", 11);
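/*
 * strbuf_branchname() with INTERPRET_BRANCH_LOCAL expands only shorthands
 * that can name a local branch (such as "@{-1}"), and the strbuf_splice()
 * call prepends the 11-byte "refs/heads/" prefix to form the full ref.
 */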
static int merge_working_tree(const struct checkout_opts *opts,
		struct branch_info *old, struct branch_info *new,
		int *writeout_error)
{
int ret;
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock_file = LOCK_INIT;
- hold_locked_index(lock_file, 1);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
if (read_cache_preload(NULL) < 0)
return error(_("index file corrupt"));
setup_standard_excludes(topts.dir);
}
tree = parse_tree_indirect(old->commit ?
- old->commit->object.oid.hash :
- EMPTY_TREE_SHA1_BIN);
+ &old->commit->object.oid :
+ &empty_tree_oid);
init_tree_desc(&trees[0], tree->buffer, tree->size);
- tree = parse_tree_indirect(new->commit->object.oid.hash);
+ tree = parse_tree_indirect(&new->commit->object.oid);
init_tree_desc(&trees[1], tree->buffer, tree->size);
ret = unpack_trees(2, trees, &topts);
if (!cache_tree_fully_valid(active_cache_tree))
cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
if (!opts->force && !opts->quiet)
const char *old_desc, *reflog_msg;
if (opts->new_branch) {
if (opts->new_orphan_branch) {
- if (opts->new_branch_log && !log_all_ref_updates) {
+ char *refname;
+
+ refname = mkpathdup("refs/heads/%s", opts->new_orphan_branch);
+ if (opts->new_branch_log &&
+ !should_autocreate_reflog(refname)) {
int ret;
- char *refname;
struct strbuf err = STRBUF_INIT;
- refname = mkpathdup("refs/heads/%s", opts->new_orphan_branch);
ret = safe_create_reflog(refname, 1, &err);
- free(refname);
if (ret) {
fprintf(stderr, _("Can not do reflog for '%s': %s\n"),
opts->new_orphan_branch, err.buf);
strbuf_release(&err);
+ free(refname);
return;
}
strbuf_release(&err);
}
+ free(refname);
}
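/*
 * should_autocreate_reflog() consults core.logAllRefUpdates (including its
 * "always" setting) for the given refname instead of the bare
 * log_all_ref_updates check, and the refname built with mkpathdup() is now
 * freed on every path out of this block.
 */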
else
create_branch(opts->new_branch, new->name,
if (!strcmp(new->name, "HEAD") && !new->path && !opts->force_detach) {
/* Nothing to do. */
} else if (opts->force_detach || !new->path) { /* No longer on any branch. */
- update_ref(msg.buf, "HEAD", new->commit->object.oid.hash, NULL,
- REF_NODEREF, UPDATE_REFS_DIE_ON_ERR);
+ update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL,
+ REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
if (!opts->quiet) {
if (old->path &&
advice_detached_head && !opts->force_detach)
static int add_pending_uninteresting_ref(const char *refname,
		const struct object_id *oid,
int flags, void *cb_data)
{
- add_pending_sha1(cb_data, refname, oid->hash, UNINTERESTING);
+ add_pending_oid(cb_data, refname, oid, UNINTERESTING);
return 0;
}
add_pending_object(&revs, object, oid_to_hex(&object->oid));
for_each_ref(add_pending_uninteresting_ref, &revs);
- add_pending_sha1(&revs, "HEAD", new->object.oid.hash, UNINTERESTING);
+ add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+ /* Save pending objects, so they can be cleaned up later. */
refs = revs.pending;
revs.leak_pending = 1;
+ /*
+ * prepare_revision_walk (together with .leak_pending = 1) makes us
+ * the sole owner of the list of pending objects.
+ */
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
if (!(old->object.flags & UNINTERESTING))
else
describe_detached_head(_("Previous HEAD position was"), old);
+ /* Clean up objects used, as they will be reused. */
clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
- free(refs.objects);
+
+ object_array_clear(&refs);
}
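/*
 * With revs.leak_pending set, prepare_revision_walk() leaves the pending
 * array to us, so the copy saved in "refs" stays valid for
 * clear_commit_marks_for_object_array(); object_array_clear() then frees
 * both the array and the per-entry names that the old free(refs.objects)
 * left behind.
 */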
static int switch_branches(const struct checkout_opts *opts,
		struct branch_info *new)
{
int ret = 0;
struct branch_info old;
void *path_to_free;
- unsigned char rev[20];
+ struct object_id rev;
int flag, writeout_error = 0;
memset(&old, 0, sizeof(old));
- old.path = path_to_free = resolve_refdup("HEAD", 0, rev, &flag);
- old.commit = lookup_commit_reference_gently(rev, 1);
+ old.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
+ if (old.path)
+ old.commit = lookup_commit_reference_gently(&rev, 1);
if (!(flag & REF_ISSYMREF))
old.path = NULL;
}
if (starts_with(var, "submodule."))
- return parse_submodule_config_option(var, value);
+ return git_default_submodule_config(var, value, NULL);
return git_xmerge_config(var, value, NULL);
}
struct tracking_name_data {
/* const */ char *src_ref;
char *dst_ref;
- unsigned char *dst_sha1;
+ struct object_id *dst_oid;
int unique;
};
memset(&query, 0, sizeof(struct refspec));
query.src = cb->src_ref;
if (remote_find_tracking(remote, &query) ||
- get_sha1(query.dst, cb->dst_sha1)) {
+ get_oid(query.dst, cb->dst_oid)) {
free(query.dst);
return 0;
}
return 0;
}
-static const char *unique_tracking_name(const char *name, unsigned char *sha1)
+static const char *unique_tracking_name(const char *name, struct object_id *oid)
{
struct tracking_name_data cb_data = { NULL, NULL, NULL, 1 };
- char src_ref[PATH_MAX];
- snprintf(src_ref, PATH_MAX, "refs/heads/%s", name);
- cb_data.src_ref = src_ref;
- cb_data.dst_sha1 = sha1;
+ cb_data.src_ref = xstrfmt("refs/heads/%s", name);
+ cb_data.dst_oid = oid;
for_each_remote(check_tracking_name, &cb_data);
+ free(cb_data.src_ref);
if (cb_data.unique)
return cb_data.dst_ref;
free(cb_data.dst_ref);
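/*
 * xstrfmt() (from strbuf.h) returns a heap-allocated string sized to fit,
 * so long branch names are no longer silently truncated to PATH_MAX by
 * snprintf(); the free(cb_data.src_ref) added above releases it.
 */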
static int parse_branchname_arg(int argc, const char **argv,
		int dwim_new_local_branch_ok,
struct branch_info *new,
struct checkout_opts *opts,
- unsigned char rev[20])
+ struct object_id *rev)
{
struct tree **source_tree = &opts->source_tree;
const char **new_branch = &opts->new_branch;
int argcount = 0;
- unsigned char branch_rev[20];
+ struct object_id branch_rev;
const char *arg;
int dash_dash_pos;
int has_dash_dash = 0;
if (!strcmp(arg, "-"))
arg = "@{-1}";
- if (get_sha1_mb(arg, rev)) {
+ if (get_oid_mb(arg, rev)) {
/*
* Either case (3) or (4), with <something> not being
* a commit, or an attempt to use case (1) with an
setup_branch_path(new);
if (!check_refname_format(new->path, 0) &&
- !read_ref(new->path, branch_rev))
- hashcpy(rev, branch_rev);
+ !read_ref(new->path, &branch_rev))
+ oidcpy(rev, &branch_rev);
else
new->path = NULL; /* not an existing branch */
if (new->path && !opts->force_detach && !opts->new_branch &&
!opts->ignore_other_worktrees) {
- unsigned char sha1[20];
int flag;
- char *head_ref = resolve_refdup("HEAD", 0, sha1, &flag);
+ char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
if (head_ref &&
(!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
die_if_checked_out(new->path, 1);
}
if (!new->commit && opts->new_branch) {
- unsigned char rev[20];
+ struct object_id rev;
int flag;
- if (!read_ref_full("HEAD", 0, rev, &flag) &&
- (flag & REF_ISSYMREF) && is_null_sha1(rev))
+ if (!read_ref_full("HEAD", 0, &rev, &flag) &&
+ (flag & REF_ISSYMREF) && is_null_oid(&rev))
return switch_unborn_to_new_branch(opts);
}
return switch_branches(opts, new);
N_("second guess 'git checkout <no-such-branch>'")),
OPT_BOOL(0, "ignore-other-worktrees", &opts.ignore_other_worktrees,
N_("do not check if another worktree is holding the given ref")),
+ { OPTION_CALLBACK, 0, "recurse-submodules", NULL,
+ "checkout", "control recursive updating of submodules",
+ PARSE_OPT_OPTARG, option_parse_recurse_submodules_worktree_updater },
OPT_BOOL(0, "progress", &opts.show_progress, N_("force progress reporting")),
OPT_END(),
};
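/*
 * The --recurse-submodules entry is an OPTION_CALLBACK with
 * PARSE_OPT_OPTARG, so its value is optional and
 * option_parse_recurse_submodules_worktree_updater() decides how submodule
 * work trees are updated during the checkout.
 */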
opts.prefix = prefix;
opts.show_progress = -1;
- gitmodules_config();
git_config(git_checkout_config, &opts);
opts.track = BRANCH_TRACK_UNSPECIFIED;
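/*
 * The explicit gitmodules_config() call is dropped because submodule
 * configuration is now read lazily through the submodule-config API (see
 * git_default_submodule_config() above).
 */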
* remote branches, erroring out for invalid or ambiguous cases.
*/
if (argc) {
- unsigned char rev[20];
+ struct object_id rev;
int dwim_ok =
!opts.patch_mode &&
dwim_new_local_branch &&
opts.track == BRANCH_TRACK_UNSPECIFIED &&
!opts.new_branch;
int n = parse_branchname_arg(argc, argv, dwim_ok,
- &new, &opts, rev);
+ &new, &opts, &rev);
argv += n;
argc -= n;
}
* new_branch && argc > 1 will be caught later.
*/
if (opts.new_branch && argc == 1)
- die(_("Cannot update paths and switch to branch '%s' at the same time.\n"
- "Did you intend to checkout '%s' which can not be resolved as commit?"),
- opts.new_branch, argv[0]);
+ die(_("'%s' is not a commit and a branch '%s' cannot be created from it"),
+ argv[0], opts.new_branch);
if (opts.force_detach)
die(_("git checkout: --detach does not take a path argument '%s'"),
if (opts.new_branch) {
struct strbuf buf = STRBUF_INIT;
- opts.branch_exists =
- validate_new_branchname(opts.new_branch, &buf,
- !!opts.new_branch_force,
- !!opts.new_branch_force);
-
+ if (opts.new_branch_force)
+ opts.branch_exists = validate_branchname(opts.new_branch, &buf);
+ else
+ opts.branch_exists =
+ validate_new_branchname(opts.new_branch, &buf, 0);
strbuf_release(&buf);
}
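/*
 * With --force (-B) the branch may already exist, so only
 * validate_branchname() runs and records whether it does; without force,
 * validate_new_branchname() additionally dies if the branch exists.
 */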
+ UNLEAK(opts);
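/*
 * UNLEAK() marks "opts" as deliberately not freed before the process
 * exits, keeping leak checkers quiet about allocations that live for the
 * program's whole lifetime.
 */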
if (opts.patch_mode || opts.pathspec.nr)
return checkout_paths(&opts, new.name);
else