struct ref_lock {
char *ref_name;
- struct lock_file *lk;
+ struct lock_file lk;
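+ /* lk is embedded, so it no longer needs to be allocated on the heap. */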
struct object_id old_oid;
};
struct strbuf *sb,
const char *refname)
{
- if (!refname) {
- /*
- * FIXME: of course this is wrong in multi worktree
- * setting. To be fixed real soon.
- */
- strbuf_addf(sb, "%s/logs", refs->gitcommondir);
- return;
- }
-
switch (ref_type(refname)) {
case REF_TYPE_PER_WORKTREE:
case REF_TYPE_PSEUDOREF:
static void unlock_ref(struct ref_lock *lock)
{
- /* Do not free lock->lk -- atexit() still looks at them */
- if (lock->lk)
- rollback_lock_file(lock->lk);
+ rollback_lock_file(&lock->lk);
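+ /* (Safe even if the lock was never acquired: rollback is then a no-op.) */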
free(lock->ref_name);
free(lock);
}
goto error_return;
}
- if (!lock->lk)
- lock->lk = xcalloc(1, sizeof(struct lock_file));
-
if (hold_lock_file_for_update_timeout(
- lock->lk, ref_file.buf, LOCK_NO_DEREF,
+ &lock->lk, ref_file.buf, LOCK_NO_DEREF,
get_files_ref_lock_timeout_ms()) < 0) {
if (errno == ENOENT && --attempts_remaining > 0) {
/*
goto error_return;
}
- lock->lk = xcalloc(1, sizeof(struct lock_file));
-
lock->ref_name = xstrdup(refname);
- if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {
+ if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
last_errno = errno;
unable_to_lock_message(ref_file.buf, errno, err);
goto error_return;
strbuf_release(&err);
}
-static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r)
+/*
+ * Prune the loose versions of the references in the linked list
+ * `*refs_to_prune`, freeing the entries in the list as we go.
+ */
+static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
{
- while (r) {
+ while (*refs_to_prune) {
+ struct ref_to_prune *r = *refs_to_prune;
+ *refs_to_prune = r->next;
prune_ref(refs, r);
- r = r->next;
+ free(r);
}
}
int ok;
struct ref_to_prune *refs_to_prune = NULL;
struct strbuf err = STRBUF_INIT;
+ struct ref_transaction *transaction;
+
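+ /*
+ * Queue every reference that is to be packed in a single
+ * packed-refs transaction, which is committed once the
+ * iteration below is done.
+ */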
+ transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
+ if (!transaction)
+ return -1;
packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
continue;
/*
- * Create an entry in the packed-refs cache equivalent
- * to the one from the loose ref cache, except that
- * we don't copy the peeled status, because we want it
- * to be re-peeled.
+ * Add a reference creation for this reference to the
+ * packed-refs transaction:
*/
- add_packed_ref(refs->packed_ref_store, iter->refname, iter->oid);
+ if (ref_transaction_update(transaction, iter->refname,
+ iter->oid->hash, NULL,
+ REF_NODEREF, NULL, &err))
+ die("failure preparing to create packed reference %s: %s",
+ iter->refname, err.buf);
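+ /* (A NULL old_sha1 above means the old value is not checked.) */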
/* Schedule the loose reference for pruning if requested. */
if ((flags & PACK_REFS_PRUNE)) {
if (ok != ITER_DONE)
die("error while iterating over references");
- if (commit_packed_refs(refs->packed_ref_store, &err))
- die("unable to overwrite old ref-pack file: %s", err.buf);
+ if (ref_transaction_commit(transaction, &err))
+ die("unable to write new packed-refs: %s", err.buf);
+
+ ref_transaction_free(transaction);
+
packed_refs_unlock(refs->packed_ref_store);
- prune_refs(refs, refs_to_prune);
+ prune_refs(refs, &refs_to_prune);
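+ /* Pruning is safe now: packed-refs already holds these values. */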
strbuf_release(&err);
return 0;
}
if (packed_refs_lock(refs->packed_ref_store, 0, &err))
goto error;
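+ /* Delete any packed versions of the refs via the packed ref store. */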
- if (repack_without_refs(refs->packed_ref_store, refnames, &err)) {
+ if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
packed_refs_unlock(refs->packed_ref_store);
goto error;
}
return ret;
}
-static int close_ref(struct ref_lock *lock)
+static int close_ref_gently(struct ref_lock *lock)
{
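+ /*
+ * "Gently": failure to close is reported via the return value;
+ * the caller remains responsible for rolling back the lock.
+ */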
- if (close_lock_file(lock->lk))
+ if (close_lock_file_gently(&lock->lk))
return -1;
return 0;
}
static int commit_ref(struct ref_lock *lock)
{
- char *path = get_locked_file_path(lock->lk);
+ char *path = get_locked_file_path(&lock->lk);
struct stat st;
if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
free(path);
}
- if (commit_lock_file(lock->lk))
+ if (commit_lock_file(&lock->lk))
return -1;
return 0;
}
written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
free(logrec);
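+ /* write_in_full() writes everything or returns -1; no short counts. */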
- if (written != len)
+ if (written < 0)
return -1;
return 0;
unlock_ref(lock);
return -1;
}
- fd = get_lock_file_fd(lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_in_full(fd, &term, 1) != 1 ||
- close_ref(lock) < 0) {
+ fd = get_lock_file_fd(&lock->lk);
+ if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_in_full(fd, &term, 1) < 0 ||
+ close_ref_gently(lock) < 0) {
strbuf_addf(err,
- "couldn't write '%s'", get_lock_file_path(lock->lk));
+ "couldn't write '%s'", get_lock_file_path(&lock->lk));
unlock_ref(lock);
return -1;
}
* check with HEAD only which should cover 99% of all usage
* scenarios (even 100% of the default ones).
*/
- struct object_id head_oid;
int head_flag;
const char *head_ref;
head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
RESOLVE_REF_READING,
- head_oid.hash, &head_flag);
+ NULL, &head_flag);
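+ /* A NULL oid buffer is fine here: only the name and flags are needed. */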
if (head_ref && (head_flag & REF_ISSYMREF) &&
!strcmp(head_ref, lock->ref_name)) {
struct strbuf log_err = STRBUF_INIT;
{
int ret = -1;
#ifndef NO_SYMLINK_HEAD
- char *ref_path = get_locked_file_path(lock->lk);
+ char *ref_path = get_locked_file_path(&lock->lk);
unlink(ref_path);
ret = symlink(target, ref_path);
free(ref_path);
return 0;
}
- if (!fdopen_lock_file(lock->lk, "w"))
+ if (!fdopen_lock_file(&lock->lk, "w"))
return error("unable to fdopen %s: %s",
- lock->lk->tempfile.filename.buf, strerror(errno));
+ lock->lk.tempfile->filename.buf, strerror(errno));
update_symref_reflog(refs, lock, refname, target, logmsg);
/* no error check; commit_ref will check ferror */
- fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
+ fprintf(lock->lk.tempfile->fp, "ref: %s\n", target);
if (commit_ref(lock) < 0)
return error("unable to write symref for %s: %s", refname,
strerror(errno));
files_reflog_iterator_abort
};
-static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
+static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
+ const char *gitdir)
{
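+ /* Iterate over the reflogs found under "<gitdir>/logs". */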
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ,
- "reflog_iterator_begin");
struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
struct ref_iterator *ref_iterator = &iter->base;
struct strbuf sb = STRBUF_INIT;
base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
- files_reflog_path(refs, &sb, NULL);
+ strbuf_addf(&sb, "%s/logs", gitdir);
iter->dir_iterator = dir_iterator_begin(sb.buf);
iter->ref_store = ref_store;
strbuf_release(&sb);
+
return ref_iterator;
}
+static enum iterator_selection reflog_iterator_select(
+ struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data)
+{
+ if (iter_worktree) {
+ /*
+ * We're a bit loose here. We probably should ignore
+ * common refs if they are accidentally added as
+ * per-worktree refs.
+ */
+ return ITER_SELECT_0;
+ } else if (iter_common) {
+ if (ref_type(iter_common->refname) == REF_TYPE_NORMAL)
+ return ITER_SELECT_1;
+
+ /*
+ * The main ref store may contain the main worktree's
+ * per-worktree refs, which should be ignored.
+ */
+ return ITER_SKIP_1;
+ } else
+ return ITER_DONE;
+}
+
+static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_READ,
+ "reflog_iterator_begin");
+
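+ /*
+ * With a single worktree, gitdir and gitcommondir are the same
+ * directory, so one iterator is enough; otherwise the per-worktree
+ * and common reflog iterators are merged.
+ */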
+ if (!strcmp(refs->gitdir, refs->gitcommondir)) {
+ return reflog_iterator_begin(ref_store, refs->gitcommondir);
+ } else {
+ return merge_ref_iterator_begin(
+ reflog_iterator_begin(ref_store, refs->gitdir),
+ reflog_iterator_begin(ref_store, refs->gitcommondir),
+ reflog_iterator_select, refs);
+ }
+}
+
/*
* If update is a direct update of head_ref (the reference pointed to
* by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
* the lockfile is still open. Close it to
* free up the file descriptor:
*/
- if (close_ref(lock)) {
+ if (close_ref_gently(lock)) {
strbuf_addf(err, "couldn't close '%s.lock'",
update->refname);
ret = TRANSACTION_GENERIC_ERROR;
return ret;
}
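+/*
+ * Backend-private data attached to a files transaction: the companion
+ * packed-refs transaction (if any) and whether the packed-refs lock is
+ * currently held.
+ */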
+struct files_transaction_backend_data {
+ struct ref_transaction *packed_transaction;
+ int packed_refs_locked;
+};
+
/*
* Unlock any references in `transaction` that are still locked, and
* mark the transaction closed.
*/
-static void files_transaction_cleanup(struct ref_transaction *transaction)
+static void files_transaction_cleanup(struct files_ref_store *refs,
+ struct ref_transaction *transaction)
{
size_t i;
+ struct files_transaction_backend_data *backend_data =
+ transaction->backend_data;
+ struct strbuf err = STRBUF_INIT;
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
}
}
+ /*
+ * backend_data may be NULL, e.g. when an empty transaction is
+ * aborted after having been prepared.
+ */
+ if (backend_data) {
+ if (backend_data->packed_transaction &&
+ ref_transaction_abort(backend_data->packed_transaction, &err)) {
+ error("error aborting transaction: %s", err.buf);
+ strbuf_release(&err);
+ }
+
+ if (backend_data->packed_refs_locked)
+ packed_refs_unlock(refs->packed_ref_store);
+
+ free(backend_data);
+ }
+
transaction->state = REF_TRANSACTION_CLOSED;
}
char *head_ref = NULL;
int head_type;
struct object_id head_oid;
+ struct files_transaction_backend_data *backend_data;
+ struct ref_transaction *packed_transaction = NULL;
assert(err);
if (!transaction->nr)
goto cleanup;
+ backend_data = xcalloc(1, sizeof(*backend_data));
+ transaction->backend_data = backend_data;
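+ /* backend_data is torn down again in files_transaction_cleanup(). */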
+
/*
* Fail if a refname appears more than once in the
* transaction. (If we end up splitting up any updates using
head_ref, &affected_refnames, err);
if (ret)
break;
+
+ if (update->flags & REF_DELETING &&
+ !(update->flags & REF_LOG_ONLY) &&
+ !(update->flags & REF_ISPRUNING)) {
+ /*
+ * This reference has to be deleted from
+ * packed-refs if it exists there.
+ */
+ if (!packed_transaction) {
+ packed_transaction = ref_store_transaction_begin(
+ refs->packed_ref_store, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
+ backend_data->packed_transaction =
+ packed_transaction;
+ }
+
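+ /*
+ * REF_HAVE_OLD is cleared so that the packed-refs transaction
+ * does not verify the reference's old value there; any such
+ * check has already been done while locking the loose ref.
+ */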
+ ref_transaction_add_update(
+ packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ update->new_oid.hash, update->old_oid.hash,
+ NULL);
+ }
+ }
+
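+ /*
+ * Take the packed-refs lock now; it is held until
+ * files_transaction_cleanup() releases it, after the packed
+ * transaction has been committed or aborted.
+ */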
+ if (packed_transaction) {
+ if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ backend_data->packed_refs_locked = 1;
+ ret = ref_transaction_prepare(packed_transaction, err);
}
cleanup:
string_list_clear(&affected_refnames, 0);
if (ret)
- files_transaction_cleanup(transaction);
+ files_transaction_cleanup(refs, transaction);
else
transaction->state = REF_TRANSACTION_PREPARED;
files_downcast(ref_store, 0, "ref_transaction_finish");
size_t i;
int ret = 0;
- struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;
- struct string_list_item *ref_to_delete;
struct strbuf sb = STRBUF_INIT;
+ struct files_transaction_backend_data *backend_data;
+ struct ref_transaction *packed_transaction;
+
assert(err);
return 0;
}
+ backend_data = transaction->backend_data;
+ packed_transaction = backend_data->packed_transaction;
+
/* Perform updates first so live commits remain referenced */
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
}
}
}
- /* Perform deletes now that updates are safely completed */
+
+ /*
+ * Now that updates are safely completed, we can perform
+ * deletes. First delete the reflogs of any references that
+ * will be deleted, since (in the unexpected event of an
+ * error) leaving a reference without a reflog is less bad
+ * than leaving a reflog without a reference (the latter is a
+ * mildly invalid repository state):
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+ if (update->flags & REF_DELETING &&
+ !(update->flags & REF_LOG_ONLY) &&
+ !(update->flags & REF_ISPRUNING)) {
+ strbuf_reset(&sb);
+ files_reflog_path(refs, &sb, update->refname);
+ if (!unlink_or_warn(sb.buf))
+ try_remove_empty_parents(refs, update->refname,
+ REMOVE_EMPTY_PARENTS_REFLOG);
+ }
+ }
+
+ /*
+ * Perform deletes now that updates are safely completed.
+ *
+ * First delete any packed versions of the references, while
+ * retaining the packed-refs lock. Deleting the packed version
+ * before the loose one keeps the reference from appearing to
+ * revert to a stale packed value partway through:
+ */
+ if (packed_transaction) {
+ ret = ref_transaction_commit(packed_transaction, err);
+ ref_transaction_free(packed_transaction);
+ packed_transaction = NULL;
+ backend_data->packed_transaction = NULL;
+ if (ret)
+ goto cleanup;
+ }
+
+ /* Now delete the loose versions of the references: */
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
struct ref_lock *lock = update->backend_data;
}
update->flags |= REF_DELETED_LOOSE;
}
-
- if (!(update->flags & REF_ISPRUNING))
- string_list_append(&refs_to_delete,
- lock->ref_name);
}
}
- if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
- }
-
- if (repack_without_refs(refs->packed_ref_store, &refs_to_delete, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- packed_refs_unlock(refs->packed_ref_store);
- goto cleanup;
- }
-
- packed_refs_unlock(refs->packed_ref_store);
-
- /* Delete the reflogs of any references that were deleted: */
- for_each_string_list_item(ref_to_delete, &refs_to_delete) {
- strbuf_reset(&sb);
- files_reflog_path(refs, &sb, ref_to_delete->string);
- if (!unlink_or_warn(sb.buf))
- try_remove_empty_parents(refs, ref_to_delete->string,
- REMOVE_EMPTY_PARENTS_REFLOG);
- }
-
clear_loose_ref_cache(refs);
cleanup:
- files_transaction_cleanup(transaction);
+ files_transaction_cleanup(refs, transaction);
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
}
strbuf_release(&sb);
- string_list_clear(&refs_to_delete, 0);
return ret;
}
struct ref_transaction *transaction,
struct strbuf *err)
{
- files_transaction_cleanup(transaction);
+ struct files_ref_store *refs =
+ files_downcast(ref_store, 0, "ref_transaction_abort");
+
+ files_transaction_cleanup(refs, transaction);
return 0;
}
size_t i;
int ret = 0;
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct ref_transaction *packed_transaction = NULL;
assert(err);
&affected_refnames))
die("BUG: initial ref transaction called with existing refs");
+ packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
ret = TRANSACTION_NAME_CONFLICT;
goto cleanup;
}
+
+ /*
+ * Add a reference creation for this reference to the
+ * packed-refs transaction:
+ */
+ ref_transaction_add_update(packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ update->new_oid.hash, update->old_oid.hash,
+ NULL);
}
if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
goto cleanup;
}
- for (i = 0; i < transaction->nr; i++) {
- struct ref_update *update = transaction->updates[i];
-
- if ((update->flags & REF_HAVE_NEW) &&
- !is_null_oid(&update->new_oid))
- add_packed_ref(refs->packed_ref_store, update->refname,
- &update->new_oid);
- }
-
- if (commit_packed_refs(refs->packed_ref_store, err)) {
+ if (initial_ref_transaction_commit(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
cleanup:
+ if (packed_transaction)
+ ref_transaction_free(packed_transaction);
packed_refs_unlock(refs->packed_ref_store);
transaction->state = REF_TRANSACTION_CLOSED;
string_list_clear(&affected_refnames, 0);
!(type & REF_ISSYMREF) &&
!is_null_oid(&cb.last_kept_oid);
- if (close_lock_file(&reflog_lock)) {
+ if (close_lock_file_gently(&reflog_lock)) {
status |= error("couldn't write %s: %s", log_file,
strerror(errno));
+ rollback_lock_file(&reflog_lock);
} else if (update &&
- (write_in_full(get_lock_file_fd(lock->lk),
- oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
- close_ref(lock) < 0)) {
+ (write_in_full(get_lock_file_fd(&lock->lk),
+ oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
+ close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
- get_lock_file_path(lock->lk));
+ get_lock_file_path(&lock->lk));
rollback_lock_file(&reflog_lock);
} else if (commit_lock_file(&reflog_lock)) {
status |= error("unable to write reflog '%s' (%s)",