if (!get_oid("HEAD", &curr_head)) {
write_state_text(state, "abort-safety", oid_to_hex(&curr_head));
if (!state->rebasing)
- update_ref_oid("am", "ORIG_HEAD", &curr_head, NULL, 0,
- UPDATE_REFS_DIE_ON_ERR);
+ update_ref("am", "ORIG_HEAD", &curr_head, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
} else {
write_state_text(state, "abort-safety", "");
if (!state->rebasing)
*/
static void refresh_and_write_cache(void)
{
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+ struct lock_file lock_file = LOCK_INIT;
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write index file"));
}
struct argv_array apply_opts = ARGV_ARRAY_INIT;
struct apply_state apply_state;
int res, opts_left;
- static struct lock_file lock_file;
int force_apply = 0;
int options = 0;
- if (init_apply_state(&apply_state, NULL, &lock_file))
+ if (init_apply_state(&apply_state, NULL))
die("BUG: init_apply_state() failed");
argv_array_push(&apply_opts, "apply");
strbuf_addf(&sb, "%s: %.*s", reflog_msg, linelen(state->msg),
state->msg);
- update_ref_oid(sb.buf, "HEAD", &commit, old_oid, 0,
- UPDATE_REFS_DIE_ON_ERR);
+ update_ref(sb.buf, "HEAD", &commit, old_oid, 0,
+ UPDATE_REFS_DIE_ON_ERR);
if (state->rebasing) {
FILE *fp = xfopen(am_path(state, "rewritten"), "a");
*/
static int fast_forward_to(struct tree *head, struct tree *remote, int reset)
{
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
struct unpack_trees_options opts;
struct tree_desc t[2];
if (parse_tree(head) || parse_tree(remote))
return -1;
- lock_file = xcalloc(1, sizeof(struct lock_file));
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
init_tree_desc(&t[1], remote->buffer, remote->size);
if (unpack_trees(2, t, &opts)) {
- rollback_lock_file(lock_file);
+ rollback_lock_file(&lock_file);
return -1;
}
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
return 0;
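The lockfile hunks above all make the same conversion: the lock_file used to be heap-allocated with xcalloc() (or declared static) and never freed, because the old lockfile API required the struct to stay valid until the process exits; with the current API the lock can live on the stack and be initialized with LOCK_INIT. The sketch below is illustrative only, not part of the patch: it shows the resulting pattern, with both the rollback and commit paths. Everything in it comes from git's internal cache.h/lockfile.h API as used elsewhere in this diff, except do_something_with_the_index(), which is a made-up placeholder.

	/* placeholder for whatever work the caller does on the in-core index */
	static int do_something_with_the_index(void);

	static int example_rewrite_index(void)
	{
		struct lock_file lock_file = LOCK_INIT;	/* stack-allocated, no xcalloc() */

		/* take $GIT_DIR/index.lock, dying if it cannot be acquired */
		hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);

		if (do_something_with_the_index() < 0) {
			/* error path: drop index.lock and leave the index untouched */
			rollback_lock_file(&lock_file);
			return -1;
		}

		/* COMMIT_LOCK renames index.lock into place and releases the lock */
		if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
			die(_("unable to write new index file"));
		return 0;
	}

Because the lock is always either committed or rolled back before the function returns, the struct no longer needs to outlive the call, which is what makes the stack conversion safe.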
*/
static int merge_tree(struct tree *tree)
{
- struct lock_file *lock_file;
+ struct lock_file lock_file = LOCK_INIT;
struct unpack_trees_options opts;
struct tree_desc t[1];
if (parse_tree(tree))
return -1;
- lock_file = xcalloc(1, sizeof(struct lock_file));
- hold_locked_index(lock_file, LOCK_DIE_ON_ERROR);
+ hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);
memset(&opts, 0, sizeof(opts));
opts.head_idx = 1;
init_tree_desc(&t[0], tree->buffer, tree->size);
if (unpack_trees(1, t, &opts)) {
- rollback_lock_file(lock_file);
+ rollback_lock_file(&lock_file);
return -1;
}
- if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
return 0;
am_rerere_clear();
- curr_branch = resolve_refdup("HEAD", 0, curr_head.hash, NULL);
+ curr_branch = resolve_refdup("HEAD", 0, &curr_head, NULL);
has_curr_head = curr_branch && !is_null_oid(&curr_head);
if (!has_curr_head)
hashcpy(curr_head.hash, EMPTY_TREE_SHA1_BIN);
clean_index(&curr_head, &orig_head);
if (has_orig_head)
- update_ref_oid("am --abort", "HEAD", &orig_head,
- has_curr_head ? &curr_head : NULL, 0,
- UPDATE_REFS_DIE_ON_ERR);
+ update_ref("am --abort", "HEAD", &orig_head,
+ has_curr_head ? &curr_head : NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
else if (curr_branch)
delete_ref(NULL, curr_branch, NULL, REF_NODEREF);
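The ref hunks are the other half of the conversion: update_ref() now takes struct object_id pointers for the new and (optional) old value, replacing the separate update_ref_oid() wrapper, and resolve_refdup() likewise fills a struct object_id instead of a raw hash buffer, so am can pass &curr_head / &orig_head directly. A sketch of a call with the post-conversion signature follows; example_move_head() and its reflog message are invented for the example, while update_ref() and UPDATE_REFS_DIE_ON_ERR are the real refs.h API shown above.

	static void example_move_head(const struct object_id *new_head,
				      const struct object_id *old_head)
	{
		/*
		 * Passing old_head makes the update conditional on HEAD still
		 * pointing at that commit (pass NULL to skip the check);
		 * UPDATE_REFS_DIE_ON_ERR turns any failure into die().
		 */
		update_ref("example: move HEAD", "HEAD", new_head, old_head, 0,
			   UPDATE_REFS_DIE_ON_ERR);
	}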