if (!refs_resolve_ref_unsafe(&refs->base,
refname.buf,
RESOLVE_REF_READING,
- oid.hash, &flag)) {
+ &oid, &flag)) {
oidclr(&oid);
flag |= REF_ISBROKEN;
} else if (is_null_oid(&oid)) {
}
static int files_read_raw_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1,
+ const char *refname, struct object_id *oid,
struct strbuf *referent, unsigned int *type)
{
struct files_ref_store *refs =
struct strbuf sb_path = STRBUF_INIT;
const char *path;
const char *buf;
+ const char *p;
struct stat st;
int fd;
int ret = -1;
if (errno != ENOENT)
goto out;
if (refs_read_raw_ref(refs->packed_ref_store, refname,
- sha1, referent, type)) {
+ oid, referent, type)) {
errno = ENOENT;
goto out;
}
* packed ref:
*/
if (refs_read_raw_ref(refs->packed_ref_store, refname,
- sha1, referent, type)) {
+ oid, referent, type)) {
errno = EISDIR;
goto out;
}
* Please note that FETCH_HEAD has additional
* data after the sha.
*/
- if (get_sha1_hex(buf, sha1) ||
- (buf[40] != '\0' && !isspace(buf[40]))) {
+ if (parse_oid_hex(buf, oid, &p) ||
+ (*p != '\0' && !isspace(*p))) {
*type |= REF_ISBROKEN;
errno = EINVAL;
goto out;
*/
if (files_read_raw_ref(&refs->base, refname,
- lock->old_oid.hash, referent, type)) {
+ &lock->old_oid, referent, type)) {
if (errno == ENOENT) {
if (mustexist) {
/* Garden variety missing reference. */
return ret;
}
-static int files_peel_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
- "peel_ref");
- int flag;
- unsigned char base[20];
-
- if (current_ref_iter && current_ref_iter->refname == refname) {
- struct object_id peeled;
-
- if (ref_iterator_peel(current_ref_iter, &peeled))
- return -1;
- hashcpy(sha1, peeled.hash);
- return 0;
- }
-
- if (refs_read_ref_full(ref_store, refname,
- RESOLVE_REF_READING, base, &flag))
- return -1;
-
- /*
- * If the reference is packed, read its ref_entry from the
- * cache in the hope that we already know its peeled value.
- * We only try this optimization on packed references because
- * (a) forcing the filling of the loose reference cache could
- * be expensive and (b) loose references anyway usually do not
- * have REF_KNOWS_PEELED.
- */
- if (flag & REF_ISPACKED &&
- !refs_peel_ref(refs->packed_ref_store, refname, sha1))
- return 0;
-
- return peel_object(base, sha1);
-}
-
struct files_ref_iterator {
struct ref_iterator base;
const char *prefix, unsigned int flags)
{
struct files_ref_store *refs;
- struct ref_iterator *loose_iter, *packed_iter;
+ struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
struct files_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
- iter = xcalloc(1, sizeof(*iter));
- ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
-
/*
* We must make sure that all loose refs are read before
* accessing the packed-refs file; this avoids a race
refs->packed_ref_store, prefix, 0,
DO_FOR_EACH_INCLUDE_BROKEN);
- iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+ overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
+
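+ /*
+ * Construct our own iterator only now, so that its "ordered"
+ * property can be taken over from the overlay iterator:
+ */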
+ iter = xcalloc(1, sizeof(*iter));
+ ref_iterator = &iter->base;
+ base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
+ overlay_iter->ordered);
+ iter->iter0 = overlay_iter;
iter->flags = flags;
return ref_iterator;
}
/*
- * Verify that the reference locked by lock has the value old_sha1.
- * Fail if the reference doesn't exist and mustexist is set. Return 0
- * on success. On error, write an error message to err, set errno, and
- * return a negative value.
+ * Verify that the reference locked by lock has the value old_oid
+ * (unless it is NULL). Fail if the reference doesn't exist and
+ * mustexist is set. Return 0 on success. On error, write an error
+ * message to err, set errno, and return a negative value.
*/
static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
- const unsigned char *old_sha1, int mustexist,
+ const struct object_id *old_oid, int mustexist,
struct strbuf *err)
{
assert(err);
if (refs_read_ref_full(ref_store, lock->ref_name,
mustexist ? RESOLVE_REF_READING : 0,
- lock->old_oid.hash, NULL)) {
- if (old_sha1) {
+ &lock->old_oid, NULL)) {
+ if (old_oid) {
int save_errno = errno;
strbuf_addf(err, "can't verify ref '%s'", lock->ref_name);
errno = save_errno;
return 0;
}
}
- if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) {
+ if (old_oid && oidcmp(&lock->old_oid, old_oid)) {
strbuf_addf(err, "ref '%s' is at %s but expected %s",
lock->ref_name,
oid_to_hex(&lock->old_oid),
- sha1_to_hex(old_sha1));
+ oid_to_hex(old_oid));
errno = EBUSY;
return -1;
}
* Locks a ref returning the lock on success and NULL on failure.
* On failure errno is set to something meaningful.
*/
-static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,
- const char *refname,
- const unsigned char *old_sha1,
- const struct string_list *extras,
- const struct string_list *skip,
- unsigned int flags, int *type,
- struct strbuf *err)
+static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
+ const char *refname,
+ const struct object_id *old_oid,
+ const struct string_list *extras,
+ const struct string_list *skip,
+ unsigned int flags, int *type,
+ struct strbuf *err)
{
struct strbuf ref_file = STRBUF_INIT;
struct ref_lock *lock;
int last_errno = 0;
- int mustexist = (old_sha1 && !is_null_sha1(old_sha1));
+ int mustexist = (old_oid && !is_null_oid(old_oid));
int resolve_flags = RESOLVE_REF_NO_RECURSE;
int resolved;
- files_assert_main_repository(refs, "lock_ref_sha1_basic");
+ files_assert_main_repository(refs, "lock_ref_oid_basic");
assert(err);
lock = xcalloc(1, sizeof(struct ref_lock));
files_ref_path(refs, &ref_file, refname);
resolved = !!refs_resolve_ref_unsafe(&refs->base,
refname, resolve_flags,
- lock->old_oid.hash, type);
+ &lock->old_oid, type);
if (!resolved && errno == EISDIR) {
/*
* we are trying to lock foo but we used to
}
resolved = !!refs_resolve_ref_unsafe(&refs->base,
refname, resolve_flags,
- lock->old_oid.hash, type);
+ &lock->old_oid, type);
}
if (!resolved) {
last_errno = errno;
goto error_return;
}
- if (verify_lock(&refs->base, lock, old_sha1, mustexist, err)) {
+ if (verify_lock(&refs->base, lock, old_oid, mustexist, err)) {
last_errno = errno;
goto error_return;
}
struct ref_to_prune {
struct ref_to_prune *next;
- unsigned char sha1[20];
+ struct object_id oid;
char name[FLEX_ARRAY];
};
transaction = ref_store_transaction_begin(&refs->base, &err);
if (!transaction ||
- ref_transaction_delete(transaction, r->name, r->sha1,
+ ref_transaction_delete(transaction, r->name, &r->oid,
REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
ref_transaction_commit(transaction, &err)) {
ref_transaction_free(transaction);
strbuf_release(&err);
}
-static void prune_refs(struct files_ref_store *refs, struct ref_to_prune *r)
+/*
+ * Prune the loose versions of the references in the linked list
+ * `*refs_to_prune`, freeing the entries in the list as we go.
+ */
+static void prune_refs(struct files_ref_store *refs, struct ref_to_prune **refs_to_prune)
{
- while (r) {
+ while (*refs_to_prune) {
+ struct ref_to_prune *r = *refs_to_prune;
+ *refs_to_prune = r->next;
prune_ref(refs, r);
- r = r->next;
+ free(r);
}
}
int ok;
struct ref_to_prune *refs_to_prune = NULL;
struct strbuf err = STRBUF_INIT;
+ struct ref_transaction *transaction;
+
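+ /*
+ * Collect the refs to be packed in a transaction against the
+ * packed ref store; it is committed after all loose refs have
+ * been enumerated:
+ */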
+ transaction = ref_store_transaction_begin(refs->packed_ref_store, &err);
+ if (!transaction)
+ return -1;
packed_refs_lock(refs->packed_ref_store, LOCK_DIE_ON_ERROR, &err);
continue;
/*
- * Create an entry in the packed-refs cache equivalent
- * to the one from the loose ref cache, except that
- * we don't copy the peeled status, because we want it
- * to be re-peeled.
+ * Add a reference creation for this reference to the
+ * packed-refs transaction:
*/
- add_packed_ref(refs->packed_ref_store, iter->refname, iter->oid);
+ if (ref_transaction_update(transaction, iter->refname,
+ iter->oid, NULL,
+ REF_NODEREF, NULL, &err))
+ die("failure preparing to create packed reference %s: %s",
+ iter->refname, err.buf);
/* Schedule the loose reference for pruning if requested. */
if ((flags & PACK_REFS_PRUNE)) {
struct ref_to_prune *n;
FLEX_ALLOC_STR(n, name, iter->refname);
- hashcpy(n->sha1, iter->oid->hash);
+ oidcpy(&n->oid, iter->oid);
n->next = refs_to_prune;
refs_to_prune = n;
}
if (ok != ITER_DONE)
die("error while iterating over references");
- if (commit_packed_refs(refs->packed_ref_store, &err))
- die("unable to overwrite old ref-pack file: %s", err.buf);
+ if (ref_transaction_commit(transaction, &err))
+ die("unable to write new packed-refs: %s", err.buf);
+
+ ref_transaction_free(transaction);
+
packed_refs_unlock(refs->packed_ref_store);
- prune_refs(refs, refs_to_prune);
+ prune_refs(refs, &refs_to_prune);
strbuf_release(&err);
return 0;
}
if (packed_refs_lock(refs->packed_ref_store, 0, &err))
goto error;
- if (repack_without_refs(refs->packed_ref_store, refnames, &err)) {
+ if (refs_delete_refs(refs->packed_ref_store, msg, refnames, flags)) {
packed_refs_unlock(refs->packed_ref_store);
goto error;
}
const struct object_id *oid, const char *logmsg,
struct strbuf *err);
-static int files_rename_ref(struct ref_store *ref_store,
+static int files_copy_or_rename_ref(struct ref_store *ref_store,
const char *oldrefname, const char *newrefname,
- const char *logmsg)
+ const char *logmsg, int copy)
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
- orig_oid.hash, &flag)) {
+ &orig_oid, &flag)) {
ret = error("refname %s not found", oldrefname);
goto out;
}
if (flag & REF_ISSYMREF) {
- ret = error("refname %s is a symbolic ref, renaming it is not supported",
- oldrefname);
+ if (copy)
+ ret = error("refname %s is a symbolic ref, copying it is not supported",
+ oldrefname);
+ else
+ ret = error("refname %s is a symbolic ref, renaming it is not supported",
+ oldrefname);
goto out;
}
if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
goto out;
}
- if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
+ if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
oldrefname, strerror(errno));
goto out;
}
- if (refs_delete_ref(&refs->base, logmsg, oldrefname,
- orig_oid.hash, REF_NODEREF)) {
+ if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
+ ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
+ oldrefname, strerror(errno));
+ goto out;
+ }
+
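+ /* When renaming (but not copying), delete the old reference: */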
+ if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
+ &orig_oid, REF_NODEREF)) {
error("unable to delete old %s", oldrefname);
goto rollback;
}
* the safety anyway; we want to delete the reference whatever
* its current value.
*/
- if (!refs_read_ref_full(&refs->base, newrefname,
+ if (!copy && !refs_read_ref_full(&refs->base, newrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
- oid.hash, NULL) &&
+ &oid, NULL) &&
refs_delete_ref(&refs->base, NULL, newrefname,
NULL, REF_NODEREF)) {
if (errno == EISDIR) {
logmoved = log;
- lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
- REF_NODEREF, NULL, &err);
+ lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL,
+ REF_NODEREF, NULL, &err);
if (!lock) {
- error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
+ if (copy)
+ error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
+ else
+ error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
strbuf_release(&err);
goto rollback;
}
goto out;
rollback:
- lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
- REF_NODEREF, NULL, &err);
+ lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL,
+ REF_NODEREF, NULL, &err);
if (!lock) {
error("unable to lock %s for rollback: %s", oldrefname, err.buf);
strbuf_release(&err);
return ret;
}
+static int files_rename_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ return files_copy_or_rename_ref(ref_store, oldrefname,
+ newrefname, logmsg, 0);
+}
+
+static int files_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ return files_copy_or_rename_ref(ref_store, oldrefname,
+ newrefname, logmsg, 1);
+}
+
static int close_ref_gently(struct ref_lock *lock)
{
if (close_lock_file_gently(&lock->lk))
written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
free(logrec);
- if (written != len)
+ if (written < 0)
return -1;
return 0;
return -1;
}
fd = get_lock_file_fd(&lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_in_full(fd, &term, 1) != 1 ||
+ if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_in_full(fd, &term, 1) < 0 ||
close_ref_gently(lock) < 0) {
strbuf_addf(err,
"couldn't write '%s'", get_lock_file_path(&lock->lk));
* check with HEAD only which should cover 99% of all usage
* scenarios (even 100% of the default ones).
*/
- struct object_id head_oid;
int head_flag;
const char *head_ref;
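+ /* We only need HEAD's target and flags, not its value, so pass NULL: */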
head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
RESOLVE_REF_READING,
- head_oid.hash, &head_flag);
+ NULL, &head_flag);
if (head_ref && (head_flag & REF_ISSYMREF) &&
!strcmp(head_ref, lock->ref_name)) {
struct strbuf log_err = STRBUF_INIT;
struct object_id new_oid;
if (logmsg &&
!refs_read_ref_full(&refs->base, target,
- RESOLVE_REF_READING, new_oid.hash, NULL) &&
+ RESOLVE_REF_READING, &new_oid, NULL) &&
files_log_ref_write(refs, refname, &lock->old_oid,
&new_oid, logmsg, 0, &err)) {
error("%s", err.buf);
struct ref_lock *lock;
int ret;
- lock = lock_ref_sha1_basic(refs, refname, NULL,
- NULL, NULL, REF_NODEREF, NULL,
- &err);
+ lock = lock_ref_oid_basic(refs, refname, NULL,
+ NULL, NULL, REF_NODEREF, NULL,
+ &err);
if (!lock) {
error("%s", err.buf);
strbuf_release(&err);
if (refs_read_ref_full(iter->ref_store,
diter->relative_path, 0,
- iter->oid.hash, &flags)) {
+ &iter->oid, &flags)) {
error("bad ref for %s", diter->path.buf);
continue;
}
struct ref_iterator *ref_iterator = &iter->base;
struct strbuf sb = STRBUF_INIT;
- base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
strbuf_addf(&sb, "%s/logs", gitdir);
iter->dir_iterator = dir_iterator_begin(sb.buf);
iter->ref_store = ref_store;
return reflog_iterator_begin(ref_store, refs->gitcommondir);
} else {
return merge_ref_iterator_begin(
+ 0,
reflog_iterator_begin(ref_store, refs->gitdir),
reflog_iterator_begin(ref_store, refs->gitcommondir),
reflog_iterator_select, refs);
new_update = ref_transaction_add_update(
transaction, "HEAD",
update->flags | REF_LOG_ONLY | REF_NODEREF,
- update->new_oid.hash, update->old_oid.hash,
+ &update->new_oid, &update->old_oid,
update->msg);
/*
new_update = ref_transaction_add_update(
transaction, referent, new_flags,
- update->new_oid.hash, update->old_oid.hash,
+ &update->new_oid, &update->old_oid,
update->msg);
new_update->parent_update = update;
*/
if (refs_read_ref_full(&refs->base,
referent.buf, 0,
- lock->old_oid.hash, NULL)) {
+ &lock->old_oid, NULL)) {
if (update->flags & REF_HAVE_OLD) {
strbuf_addf(err, "cannot lock ref '%s': "
"error reading reference",
return ret;
}
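+/*
+ * Data private to the files backend for a ref_transaction: the
+ * packed-refs transaction (if any) used to apply packed-refs
+ * updates, and whether this transaction currently holds the
+ * packed-refs lock.
+ */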
+struct files_transaction_backend_data {
+ struct ref_transaction *packed_transaction;
+ int packed_refs_locked;
+};
+
/*
* Unlock any references in `transaction` that are still locked, and
* mark the transaction closed.
*/
-static void files_transaction_cleanup(struct ref_transaction *transaction)
+static void files_transaction_cleanup(struct files_ref_store *refs,
+ struct ref_transaction *transaction)
{
size_t i;
+ struct files_transaction_backend_data *backend_data =
+ transaction->backend_data;
+ struct strbuf err = STRBUF_INIT;
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
}
}
+ /* backend_data is not allocated for an empty transaction: */
+ if (backend_data) {
+ if (backend_data->packed_transaction &&
+ ref_transaction_abort(backend_data->packed_transaction, &err)) {
+ error("error aborting transaction: %s", err.buf);
+ strbuf_release(&err);
+ }
+
+ if (backend_data->packed_refs_locked)
+ packed_refs_unlock(refs->packed_ref_store);
+
+ free(backend_data);
+ }
+
transaction->state = REF_TRANSACTION_CLOSED;
}
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
char *head_ref = NULL;
int head_type;
- struct object_id head_oid;
+ struct files_transaction_backend_data *backend_data;
+ struct ref_transaction *packed_transaction = NULL;
assert(err);
if (!transaction->nr)
goto cleanup;
+ backend_data = xcalloc(1, sizeof(*backend_data));
+ transaction->backend_data = backend_data;
+
/*
* Fail if a refname appears more than once in the
* transaction. (If we end up splitting up any updates using
*/
head_ref = refs_resolve_refdup(ref_store, "HEAD",
RESOLVE_REF_NO_RECURSE,
- head_oid.hash, &head_type);
+ NULL, &head_type);
if (head_ref && !(head_type & REF_ISSYMREF)) {
FREE_AND_NULL(head_ref);
ret = lock_ref_for_update(refs, update, transaction,
head_ref, &affected_refnames, err);
if (ret)
- break;
+ goto cleanup;
+
+ if (update->flags & REF_DELETING &&
+ !(update->flags & REF_LOG_ONLY) &&
+ !(update->flags & REF_ISPRUNING)) {
+ /*
+ * This reference has to be deleted from
+ * packed-refs if it exists there.
+ */
+ if (!packed_transaction) {
+ packed_transaction = ref_store_transaction_begin(
+ refs->packed_ref_store, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
+ backend_data->packed_transaction =
+ packed_transaction;
+ }
+
+ ref_transaction_add_update(
+ packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ &update->new_oid, &update->old_oid,
+ NULL);
+ }
+ }
+
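+ /*
+ * If any packed refs need to be deleted, lock the packed-refs
+ * store and prepare its transaction here in the prepare phase,
+ * so that such failures are reported before anything is
+ * committed:
+ */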
+ if (packed_transaction) {
+ if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ backend_data->packed_refs_locked = 1;
+ ret = ref_transaction_prepare(packed_transaction, err);
}
cleanup:
string_list_clear(&affected_refnames, 0);
if (ret)
- files_transaction_cleanup(transaction);
+ files_transaction_cleanup(refs, transaction);
else
transaction->state = REF_TRANSACTION_PREPARED;
files_downcast(ref_store, 0, "ref_transaction_finish");
size_t i;
int ret = 0;
- struct string_list refs_to_delete = STRING_LIST_INIT_NODUP;
- struct string_list_item *ref_to_delete;
struct strbuf sb = STRBUF_INIT;
+ struct files_transaction_backend_data *backend_data;
+ struct ref_transaction *packed_transaction;
+
assert(err);
return 0;
}
+ backend_data = transaction->backend_data;
+ packed_transaction = backend_data->packed_transaction;
+
/* Perform updates first so live commits remain referenced */
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
}
}
}
- /* Perform deletes now that updates are safely completed */
+
+ /*
+ * Now that updates are safely completed, we can perform
+ * deletes. First delete the reflogs of any references that
+ * will be deleted, since (in the unexpected event of an
+ * error) leaving a reference without a reflog is less bad
+ * than leaving a reflog without a reference (the latter is a
+ * mildly invalid repository state):
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+ if (update->flags & REF_DELETING &&
+ !(update->flags & REF_LOG_ONLY) &&
+ !(update->flags & REF_ISPRUNING)) {
+ strbuf_reset(&sb);
+ files_reflog_path(refs, &sb, update->refname);
+ if (!unlink_or_warn(sb.buf))
+ try_remove_empty_parents(refs, update->refname,
+ REMOVE_EMPTY_PARENTS_REFLOG);
+ }
+ }
+
+ /*
+ * Perform deletes now that updates are safely completed.
+ *
+ * First delete any packed versions of the references, while
+ * retaining the packed-refs lock:
+ */
+ if (packed_transaction) {
+ ret = ref_transaction_commit(packed_transaction, err);
+ ref_transaction_free(packed_transaction);
+ packed_transaction = NULL;
+ backend_data->packed_transaction = NULL;
+ if (ret)
+ goto cleanup;
+ }
+
+ /* Now delete the loose versions of the references: */
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
struct ref_lock *lock = update->backend_data;
}
update->flags |= REF_DELETED_LOOSE;
}
-
- if (!(update->flags & REF_ISPRUNING))
- string_list_append(&refs_to_delete,
- lock->ref_name);
}
}
- if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
- }
-
- if (repack_without_refs(refs->packed_ref_store, &refs_to_delete, err)) {
- ret = TRANSACTION_GENERIC_ERROR;
- packed_refs_unlock(refs->packed_ref_store);
- goto cleanup;
- }
-
- packed_refs_unlock(refs->packed_ref_store);
-
- /* Delete the reflogs of any references that were deleted: */
- for_each_string_list_item(ref_to_delete, &refs_to_delete) {
- strbuf_reset(&sb);
- files_reflog_path(refs, &sb, ref_to_delete->string);
- if (!unlink_or_warn(sb.buf))
- try_remove_empty_parents(refs, ref_to_delete->string,
- REMOVE_EMPTY_PARENTS_REFLOG);
- }
-
clear_loose_ref_cache(refs);
cleanup:
- files_transaction_cleanup(transaction);
+ files_transaction_cleanup(refs, transaction);
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
}
strbuf_release(&sb);
- string_list_clear(&refs_to_delete, 0);
return ret;
}
struct ref_transaction *transaction,
struct strbuf *err)
{
- files_transaction_cleanup(transaction);
+ struct files_ref_store *refs =
+ files_downcast(ref_store, 0, "ref_transaction_abort");
+
+ files_transaction_cleanup(refs, transaction);
return 0;
}
size_t i;
int ret = 0;
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
+ struct ref_transaction *packed_transaction = NULL;
assert(err);
&affected_refnames))
die("BUG: initial ref transaction called with existing refs");
+ packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
+ if (!packed_transaction) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
ret = TRANSACTION_NAME_CONFLICT;
goto cleanup;
}
+
+ /*
+ * Add a reference creation for this reference to the
+ * packed-refs transaction:
+ */
+ ref_transaction_add_update(packed_transaction, update->refname,
+ update->flags & ~REF_HAVE_OLD,
+ &update->new_oid, &update->old_oid,
+ NULL);
}
if (packed_refs_lock(refs->packed_ref_store, 0, err)) {
goto cleanup;
}
- for (i = 0; i < transaction->nr; i++) {
- struct ref_update *update = transaction->updates[i];
-
- if ((update->flags & REF_HAVE_NEW) &&
- !is_null_oid(&update->new_oid))
- add_packed_ref(refs->packed_ref_store, update->refname,
- &update->new_oid);
- }
-
- if (commit_packed_refs(refs->packed_ref_store, err)) {
+ if (initial_ref_transaction_commit(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
goto cleanup;
}
cleanup:
+ if (packed_transaction)
+ ref_transaction_free(packed_transaction);
packed_refs_unlock(refs->packed_ref_store);
transaction->state = REF_TRANSACTION_CLOSED;
string_list_clear(&affected_refnames, 0);
}
static int files_reflog_expire(struct ref_store *ref_store,
- const char *refname, const unsigned char *sha1,
+ const char *refname, const struct object_id *oid,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
int status = 0;
int type;
struct strbuf err = STRBUF_INIT;
- struct object_id oid;
memset(&cb, 0, sizeof(cb));
cb.flags = flags;
* reference itself, plus we might need to update the
* reference if --updateref was specified:
*/
- lock = lock_ref_sha1_basic(refs, refname, sha1,
- NULL, NULL, REF_NODEREF,
- &type, &err);
+ lock = lock_ref_oid_basic(refs, refname, oid,
+ NULL, NULL, REF_NODEREF,
+ &type, &err);
if (!lock) {
error("cannot lock ref '%s': %s", refname, err.buf);
strbuf_release(&err);
}
}
- hashcpy(oid.hash, sha1);
-
- (*prepare_fn)(refname, &oid, cb.policy_cb);
+ (*prepare_fn)(refname, oid, cb.policy_cb);
refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
(*cleanup_fn)(cb.policy_cb);
rollback_lock_file(&reflog_lock);
} else if (update &&
(write_in_full(get_lock_file_fd(&lock->lk),
- oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_str_in_full(get_lock_file_fd(&lock->lk), "\n") != 1 ||
+ oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
get_lock_file_path(&lock->lk));
files_initial_transaction_commit,
files_pack_refs,
- files_peel_ref,
files_create_symref,
files_delete_refs,
files_rename_ref,
+ files_copy_ref,
files_ref_iterator_begin,
files_read_raw_ref,