}
int init_apply_state(struct apply_state *state,
- const char *prefix,
- struct lock_file *lock_file)
+ const char *prefix)
{
memset(state, 0, sizeof(*state));
state->prefix = prefix;
- state->lock_file = lock_file;
- state->newfd = -1;
state->apply = 1;
state->line_termination = '\n';
state->p_value = 1;
}
if (state->check_index)
state->unsafe_paths = 0;
- if (!state->lock_file)
- return error("BUG: state->lock_file should not be NULL");
if (state->apply_verbosity <= verbosity_silent) {
state->saved_error_routine = get_error_routine();
state->apply = 0;
state->update_index = state->check_index && state->apply;
- if (state->update_index && state->newfd < 0) {
+ if (state->update_index && !is_lock_file_locked(&state->lock_file)) {
if (state->index_file)
- state->newfd = hold_lock_file_for_update(state->lock_file,
- state->index_file,
- LOCK_DIE_ON_ERROR);
+ hold_lock_file_for_update(&state->lock_file,
+ state->index_file,
+ LOCK_DIE_ON_ERROR);
else
- state->newfd = hold_locked_index(state->lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&state->lock_file, LOCK_DIE_ON_ERROR);
}
if (state->check_index && read_apply_cache(state) < 0) {
}
if (state->update_index) {
- res = write_locked_index(&the_index, state->lock_file, COMMIT_LOCK);
+ res = write_locked_index(&the_index, &state->lock_file, COMMIT_LOCK);
if (res) {
error(_("Unable to write new index file"));
res = -128;
goto end;
}
- state->newfd = -1;
}
res = !!errs;
end:
- if (state->newfd >= 0) {
- rollback_lock_file(state->lock_file);
- state->newfd = -1;
- }
+ rollback_lock_file(&state->lock_file);
if (state->apply_verbosity <= verbosity_silent) {
set_error_routine(state->saved_error_routine);
struct apply_state {
const char *prefix;
- /* These are lock_file related */
- struct lock_file *lock_file;
- int newfd;
+ /* Lock file */
+ struct lock_file lock_file;
/* These control what gets looked at and modified */
int apply; /* this is not a dry-run */
int *force_apply, int *options,
const char * const *apply_usage);
extern int init_apply_state(struct apply_state *state,
- const char *prefix,
- struct lock_file *lock_file);
+ const char *prefix);
extern void clear_apply_state(struct apply_state *state);
extern int check_apply_state(struct apply_state *state, int force_apply);
*/
static void refresh_and_write_cache(void)
{
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock_file = LOCK_INIT;
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write index file"));
}
struct argv_array apply_opts = ARGV_ARRAY_INIT;
struct apply_state apply_state;
int res, opts_left;
- static struct lock_file lock_file;
int force_apply = 0;
int options = 0;
- if (init_apply_state(&apply_state, NULL, &lock_file))
+ if (init_apply_state(&apply_state, NULL))
die("BUG: init_apply_state() failed");
argv_array_push(&apply_opts, "apply");
*/
static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
{
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
struct unpack_trees_options opts;
struct tree_desc t[2];
if (parse_tree(head) || parse_tree(remote))
return -1;
- lock_file = xcalloc(1, sizeof(struct lock_file));
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
init_tree_desc(&t[1], remote->buffer, remote->size);
if (unpack_trees(2, t, &opts)) {
- rollback_lock_file(lock_file);
+ rollback_lock_file(&lock_file);
return -1;
}
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
return 0;
*/
static int merge_tree(struct tree *tree)
{
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
struct unpack_trees_options opts;
struct tree_desc t[1];
if (parse_tree(tree))
return -1;
- lock_file = xcalloc(1, sizeof(struct lock_file));
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
memset(&opts, 0, sizeof(opts));
opts.head_idx = 1;
init_tree_desc(&t[0], tree->buffer, tree->size);
if (unpack_trees(1, t, &opts)) {
- rollback_lock_file(lock_file);
+ rollback_lock_file(&lock_file);
return -1;
}
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
return 0;
NULL
};
-static struct lock_file lock_file;
-
int cmd_apply(int argc, const char **argv, const char *prefix)
{
int force_apply = 0;
int ret;
struct apply_state state;
- if (init_apply_state(&state, prefix, &lock_file))
+ if (init_apply_state(&state, prefix))
exit(128);
argc = apply_parse_options(argc, argv,
NULL
};
-static struct lock_file lock_file;
-
static int option_parse_stage(const struct option *opt,
const char *arg, int unset)
{
int cmd_checkout_index(int argc, const char **argv, const char *prefix)
{
int i;
- int newfd = -1;
+ struct lock_file lock_file = LOCK_INIT;
int all = 0;
int read_from_stdin = 0;
int prefix_length;
if (index_opt && !state.base_dir_len && !to_tempfile) {
state.refresh_cache = 1;
state.istate = &the_index;
- newfd = hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
}
/* Check out named files first */
if (all)
checkout_all(prefix, prefix_length);
- if (0 <= newfd &&
+ if (is_lock_file_locked(&lock_file) &&
write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die("Unable to write new index file");
return 0;
struct object_id rev;
struct commit *head;
int errs = 0;
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
die(_("'%s' cannot be used with updating paths"), "--track");
return run_add_interactive(revision, "--patch=checkout",
&opts->pathspec);
- lock_file = xcalloc(1, sizeof(struct lock_file));
-
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
if (read_cache_preload(&opts->pathspec) < 0)
return error(_("index file corrupt"));
}
errs |= finish_delayed_checkout(&state);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
read_ref_full("HEAD", 0, rev.hash, NULL);
int *writeout_error)
{
int ret;
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock_file = LOCK_INIT;
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
if (read_cache_preload(NULL) < 0)
return error(_("index file corrupt"));
if (!cache_tree_fully_valid(active_cache_tree))
cache_tree_update(&the_index, WRITE_TREE_SILENT | WRITE_TREE_REPAIR);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
if (!opts->force && !opts->quiet)
{
struct object_id oid;
char *head;
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
struct unpack_trees_options opts;
struct tree *tree;
struct tree_desc t;
/* We need to be in the new work tree for the checkout */
setup_work_tree();
- lock_file = xcalloc(1, sizeof(struct lock_file));
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
memset(&opts, 0, sizeof opts);
opts.update = 1;
if (unpack_trees(1, &t, &opts) < 0)
die(_("unable to checkout working tree"));
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
err |= run_hook_le(NULL, "post-checkout", sha1_to_hex(null_sha1),
refresh_cache_or_die(refresh_flags);
- if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
+ if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to create temporary index"));
old_index_env = getenv(INDEX_ENVIRONMENT);
if (update_main_cache_tree(WRITE_TREE_SILENT) == 0) {
if (reopen_lock_file(&index_lock) < 0)
die(_("unable to write index file"));
- if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
+ if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to update temporary index"));
} else
warning(_("Failed to update main cache tree"));
add_files_to_cache(also ? prefix : NULL, &pathspec, 0);
refresh_cache_or_die(refresh_flags);
update_main_cache_tree(WRITE_TREE_SILENT);
- if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
+ if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to write new_index file"));
commit_style = COMMIT_NORMAL;
ret = get_lock_file_path(&index_lock);
add_remove_files(&partial);
refresh_cache(REFRESH_QUIET);
update_main_cache_tree(WRITE_TREE_SILENT);
- if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
+ if (write_locked_index(&the_index, &index_lock, 0))
die(_("unable to write new_index file"));
hold_lock_file_for_update(&false_lock,
add_remove_files(&partial);
refresh_cache(REFRESH_QUIET);
- if (write_locked_index(&the_index, &false_lock, CLOSE_LOCK))
+ if (write_locked_index(&the_index, &false_lock, 0))
die(_("unable to write temporary index file"));
discard_cache();
static void refresh_index_quietly(void)
{
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
int fd;
- lock_file = xcalloc(1, sizeof(struct lock_file));
- fd = hold_locked_index(lock_file, 0);
+ fd = hold_locked_index(&lock_file, 0);
if (fd < 0)
return;
discard_cache();
read_cache();
refresh_cache(REFRESH_QUIET|REFRESH_UNMERGED);
- update_index_if_able(&the_index, lock_file);
+ update_index_if_able(&the_index, &lock_file);
}
static int builtin_diff_files(struct rev_info *revs, int argc, const char **argv)
if (hold_lock_file_for_update(&lock, buf.buf, 0) < 0 ||
write_locked_index(&wtindex, &lock, COMMIT_LOCK)) {
ret = error("could not write %s", buf.buf);
- rollback_lock_file(&lock);
goto finish;
}
changed_files(&wt_modified, buf.buf, workdir);
int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
- int entries, was_valid, newfd;
+ int entries, was_valid;
struct lock_file lock_file = LOCK_INIT;
int ret = 0;
- newfd = hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
+ hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
entries = read_index_from(index_state, index_path);
if (entries < 0) {
ret = WRITE_TREE_UNMERGED_INDEX;
goto out;
}
- if (0 <= newfd) {
- if (!write_locked_index(index_state, &lock_file, COMMIT_LOCK))
- newfd = -1;
- }
+ write_locked_index(index_state, &lock_file, COMMIT_LOCK);
/* Not being able to write is fine -- we are only interested
* in updating the cache-tree part, and if the next caller
* ends up using the old index with unupdated cache-tree part
hashcpy(sha1, index_state->cache_tree->oid.hash);
out:
- if (0 <= newfd)
- rollback_lock_file(&lock_file);
+ rollback_lock_file(&lock_file);
return ret;
}
extern int read_index_from(struct index_state *, const char *path);
extern int is_index_unborn(struct index_state *);
extern int read_index_unmerged(struct index_state *);
+
+/* For use with `write_locked_index()`. */
#define COMMIT_LOCK (1 << 0)
-#define CLOSE_LOCK (1 << 1)
+
+/*
+ * Write the index while holding an already-taken lock. Close the lock,
+ * and if `COMMIT_LOCK` is given, commit it.
+ *
+ * Unless a split index is in use, write the index into the lockfile.
+ *
+ * With a split index, write the shared index to a temporary file,
+ * adjust its permissions and rename it into place, then write the
+ * split index to the lockfile. If the temporary file for the shared
+ * index cannot be created, fall back to the behavior described in
+ * the previous paragraph.
+ *
+ * With `COMMIT_LOCK`, the lock is always committed or rolled back.
+ * Without it, the lock is closed, but neither committed nor rolled
+ * back.
+ */
extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
+
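A minimal usage sketch of these semantics, not part of the patch (the function below is hypothetical; only the lockfile/index calls are the in-tree API):

static int update_index_example(void)
{
	struct lock_file lock = LOCK_INIT;

	hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
	/* ... modify the_index here ... */

	/*
	 * With COMMIT_LOCK the lock is committed on success and rolled
	 * back on failure; with flags == 0 it is only closed, and the
	 * caller must later commit_lock_file() or rollback_lock_file(),
	 * as builtin/commit.c does with its temporary indexes above.
	 */
	if (write_locked_index(&the_index, &lock, COMMIT_LOCK))
		return error(_("unable to write new index file"));
	return 0;
}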
extern int discard_index(struct index_state *);
extern void move_index_extensions(struct index_state *dst, struct index_state *src);
extern int unmerged_index(const struct index_state *);
extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
extern struct cache_entry *refresh_cache_entry(struct cache_entry *, unsigned int);
+/*
+ * Opportunistically update the index but do not complain if we can't.
+ * The lockfile is always committed or rolled back.
+ */
extern void update_index_if_able(struct index_state *, struct lock_file *);
extern int hold_locked_index(struct lock_file *, int);
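For illustration only, a sketch modeled on `refresh_index_quietly()` earlier in this patch (the function name is made up):

static void refresh_index_opportunistically(void)
{
	struct lock_file lock = LOCK_INIT;

	if (hold_locked_index(&lock, 0) < 0)
		return; /* somebody else holds the lock; not a problem */
	refresh_cache(REFRESH_QUIET);
	/* Commits the lockfile if the index changed, rolls it back otherwise. */
	update_index_if_able(&the_index, &lock);
}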
{
int ret = 0, remove = 0;
char *filename_buf = NULL;
- struct lock_file *lock;
+ struct lock_file lock = LOCK_INIT;
int out_fd;
char buf[1024];
FILE *config_file = NULL;
if (!config_filename)
config_filename = filename_buf = git_pathdup("config");
- lock = xcalloc(1, sizeof(struct lock_file));
- out_fd = hold_lock_file_for_update(lock, config_filename, 0);
+ out_fd = hold_lock_file_for_update(&lock, config_filename, 0);
if (out_fd < 0) {
ret = error("could not lock config file %s", config_filename);
goto out;
goto out;
}
- if (chmod(get_lock_file_path(lock), st.st_mode & 07777) < 0) {
+ if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) {
ret = error_errno("chmod on %s failed",
- get_lock_file_path(lock));
+ get_lock_file_path(&lock));
goto out;
}
*/
if (copystr.len > 0) {
if (write_in_full(out_fd, copystr.buf, copystr.len) != copystr.len) {
- ret = write_error(get_lock_file_path(lock));
+ ret = write_error(get_lock_file_path(&lock));
goto out;
}
strbuf_reset(&copystr);
store.baselen = strlen(new_name);
if (!copy) {
if (write_section(out_fd, new_name) < 0) {
- ret = write_error(get_lock_file_path(lock));
+ ret = write_error(get_lock_file_path(&lock));
goto out;
}
/*
}
if (write_in_full(out_fd, output, length) < 0) {
- ret = write_error(get_lock_file_path(lock));
+ ret = write_error(get_lock_file_path(&lock));
goto out;
}
}
*/
if (copystr.len > 0) {
if (write_in_full(out_fd, copystr.buf, copystr.len) != copystr.len) {
- ret = write_error(get_lock_file_path(lock));
+ ret = write_error(get_lock_file_path(&lock));
goto out;
}
strbuf_reset(&copystr);
fclose(config_file);
config_file = NULL;
commit_and_out:
- if (commit_lock_file(lock) < 0)
+ if (commit_lock_file(&lock) < 0)
ret = error_errno("could not write config file %s",
config_filename);
out:
if (config_file)
fclose(config_file);
- rollback_lock_file(lock);
+ rollback_lock_file(&lock);
out_no_rollback:
free(filename_buf);
return ret;
* If the lockfile is still open, close it (and the file pointer if it
* has been opened using `fdopen_lock_file()`) without renaming the
* lockfile over the file being locked. Return 0 upon success. On
- * failure to `close(2)`, return a negative value and roll back the
- * lock file. Usually `commit_lock_file()`, `commit_lock_file_to()`,
+ * failure to `close(2)`, return a negative value (the lockfile is not
+ * rolled back). Usually `commit_lock_file()`, `commit_lock_file_to()`,
* or `rollback_lock_file()` should eventually be called.
*/
static inline int close_lock_file_gently(struct lock_file *lk)
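A hedged sketch of the intended calling sequence (the function, path and payload are invented; the lockfile calls are the in-tree API). Note that after a failed gentle close the caller still has to roll the lock back itself:

static int overwrite_via_lockfile(const char *path, const char *payload)
{
	struct lock_file lock = LOCK_INIT;
	int fd = hold_lock_file_for_update(&lock, path, 0);

	if (fd < 0)
		return error_errno("could not lock '%s'", path);
	if (write_in_full(fd, payload, strlen(payload)) < 0 ||
	    close_lock_file_gently(&lock) < 0) {
		/* Not rolled back for us -- do it explicitly. */
		rollback_lock_file(&lock);
		return error_errno("could not write '%s'", path);
	}
	return commit_lock_file(&lock);
}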
struct commit **result)
{
int clean;
- struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock = LOCK_INIT;
struct commit *head_commit = get_ref(head, o->branch1);
struct commit *next_commit = get_ref(merge, o->branch2);
struct commit_list *ca = NULL;
}
}
- hold_locked_index(lock, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
clean = merge_recursive(o, head_commit, next_commit, ca,
result);
if (clean < 0)
return clean;
if (active_cache_changed &&
- write_locked_index(&the_index, lock, COMMIT_LOCK))
+ write_locked_index(&the_index, &lock, COMMIT_LOCK))
return err(o, _("Unable to write index."));
return clean ? 0 : 1;
struct tree_desc t[MAX_UNPACK_TREES];
int i, nr_trees = 0;
struct dir_struct dir;
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock_file = LOCK_INIT;
refresh_cache(REFRESH_QUIET);
- if (hold_locked_index(lock_file, LOCK_REPORT_ON_ERROR) < 0)
+ if (hold_locked_index(&lock_file, LOCK_REPORT_ON_ERROR) < 0)
return -1;
memset(&trees, 0, sizeof(trees));
}
if (unpack_trees(nr_trees, t, &opts))
return -1;
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK)) {
- rollback_lock_file(lock_file);
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
return error(_("unable to write new index file"));
- }
return 0;
}
return 0;
}
-/*
- * Opportunistically update the index but do not complain if we can't
- */
void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
{
if ((istate->cache_changed || has_racy_timestamp(istate)) &&
- verify_index(istate) &&
- write_locked_index(istate, lockfile, COMMIT_LOCK))
+ verify_index(istate))
+ write_locked_index(istate, lockfile, COMMIT_LOCK);
+ else
rollback_lock_file(lockfile);
}
+/*
+ * On success, `tempfile` is closed. If it is the temporary file
+ * of a `struct lock_file`, we will therefore effectively perform
+ * a `close_lock_file_gently()`. Since that is an implementation
+ * detail of lockfiles, callers of `do_write_index()` should not
+ * rely on it.
+ */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int strip_extensions)
{
return -1;
if (close_tempfile_gently(tempfile)) {
error(_("could not close '%s'"), tempfile->filename.buf);
- delete_tempfile(&tempfile);
return -1;
}
if (stat(tempfile->filename.buf, &st))
int ret = do_write_index(istate, lock->tempfile, 0);
if (ret)
return ret;
- assert((flags & (COMMIT_LOCK | CLOSE_LOCK)) !=
- (COMMIT_LOCK | CLOSE_LOCK));
if (flags & COMMIT_LOCK)
return commit_locked_index(lock);
- else if (flags & CLOSE_LOCK)
- return close_lock_file_gently(lock);
- else
- return ret;
+ return close_lock_file_gently(lock);
}
static int write_split_index(struct index_state *istate,
(istate->cache_changed & ~EXTMASK)) {
if (si)
hashclr(si->base_sha1);
- return do_write_locked_index(istate, lock, flags);
+ ret = do_write_locked_index(istate, lock, flags);
+ goto out;
}
if (getenv("GIT_TEST_SPLIT_INDEX")) {
if (new_shared_index) {
ret = write_shared_index(istate, lock, flags);
if (ret)
- return ret;
+ goto out;
}
ret = write_split_index(istate, lock, flags);
if (!ret && !new_shared_index)
freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+out:
+ if (flags & COMMIT_LOCK)
+ rollback_lock_file(lock);
return ret;
}
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
if (the_index.cache_changed && index_fd >= 0) {
if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) {
- rollback_lock_file(&index_lock);
return error(_("git %s: failed to refresh the index"),
_(action_name(opts)));
}
void add_to_alternates_file(const char *reference)
{
- struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock = LOCK_INIT;
char *alts = git_pathdup("objects/info/alternates");
FILE *in, *out;
+ int found = 0;
- hold_lock_file_for_update(lock, alts, LOCK_DIE_ON_ERROR);
- out = fdopen_lock_file(lock, "w");
+ hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
+ out = fdopen_lock_file(&lock, "w");
if (!out)
die_errno("unable to fdopen alternates lockfile");
in = fopen(alts, "r");
if (in) {
struct strbuf line = STRBUF_INIT;
- int found = 0;
while (strbuf_getline(&line, in) != EOF) {
if (!strcmp(reference, line.buf)) {
strbuf_release(&line);
fclose(in);
-
- if (found) {
- rollback_lock_file(lock);
- lock = NULL;
- }
}
else if (errno != ENOENT)
die_errno("unable to read alternates file");
- if (lock) {
+ if (found) {
+ rollback_lock_file(&lock);
+ } else {
fprintf_or_die(out, "%s\n", reference);
- if (commit_lock_file(lock))
+ if (commit_lock_file(&lock))
die_errno("unable to move new alternates file into place");
if (alt_odb_tail)
link_alt_odb_entries(reference, '\n', NULL, 0);
* `create_tempfile()` returns an allocated tempfile on success or NULL
* on failure. On errors, `errno` describes the reason for failure.
*
- * `delete_tempfile()`, `rename_tempfile()`, and `close_tempfile_gently()`
- * return 0 on success. On failure they set `errno` appropriately and return
- * -1. `delete` and `rename` (but not `close`) do their best to delete the
- * temporary file before returning.
+ * `rename_tempfile()` and `close_tempfile_gently()` return 0 on success.
+ * On failure they set `errno` appropriately and return -1.
+ * `delete_tempfile()` and `rename_tempfile()` (but not
+ * `close_tempfile_gently()`) do their best to delete the temporary file
+ * before returning.
*/
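A hedged sketch of these conventions (hypothetical helper and paths; assumes the `get_tempfile_fd()` accessor):

static int write_then_rename(const char *tmp_path, const char *dest, const char *data)
{
	struct tempfile *t = create_tempfile(tmp_path);

	if (!t)
		return error_errno("could not create '%s'", tmp_path);
	if (write_in_full(get_tempfile_fd(t), data, strlen(data)) < 0 ||
	    close_tempfile_gently(t) < 0) {
		delete_tempfile(&t); /* best-effort removal */
		return error_errno("could not write '%s'", tmp_path);
	}
	/* rename_tempfile() deletes the temporary file itself if the rename fails. */
	if (rename_tempfile(&t, dest) < 0)
		return error_errno("could not rename '%s' to '%s'", tmp_path, dest);
	return 0;
}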
struct tempfile {
*/
int require_clean_work_tree(const char *action, const char *hint, int ignore_submodules, int gently)
{
- struct lock_file *lock_file = xcalloc(1, sizeof(*lock_file));
+ struct lock_file lock_file = LOCK_INIT;
int err = 0, fd;
- fd = hold_locked_index(lock_file, 0);
+ fd = hold_locked_index(&lock_file, 0);
refresh_cache(REFRESH_QUIET);
if (0 <= fd)
- update_index_if_able(&the_index, lock_file);
- rollback_lock_file(lock_file);
+ update_index_if_able(&the_index, &lock_file);
+ rollback_lock_file(&lock_file);
if (has_unstaged_changes(ignore_submodules)) {
/* TRANSLATORS: the action is e.g. "pull with rebase" */