struct ref_lock {
char *ref_name;
- struct lock_file *lk;
+ struct lock_file lk;
struct object_id old_oid;
};
static void files_reflog_path(struct files_ref_store *refs,
			      struct strbuf *sb,
const char *refname)
{
- if (!refname) {
- /*
- * FIXME: of course this is wrong in multi worktree
- * setting. To be fixed real soon.
- */
- strbuf_addf(sb, "%s/logs", refs->gitcommondir);
- return;
- }
-
switch (ref_type(refname)) {
case REF_TYPE_PER_WORKTREE:
case REF_TYPE_PSEUDOREF:
if (!refs_resolve_ref_unsafe(&refs->base,
refname.buf,
RESOLVE_REF_READING,
- oid.hash, &flag)) {
+ &oid, &flag)) {
oidclr(&oid);
flag |= REF_ISBROKEN;
} else if (is_null_oid(&oid)) {
}
static int files_read_raw_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1,
+ const char *refname, struct object_id *oid,
struct strbuf *referent, unsigned int *type)
{
	struct files_ref_store *refs =
		files_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
struct strbuf sb_path = STRBUF_INIT;
const char *path;
const char *buf;
+ const char *p;
struct stat st;
int fd;
int ret = -1;
if (errno != ENOENT)
goto out;
if (refs_read_raw_ref(refs->packed_ref_store, refname,
- sha1, referent, type)) {
+ oid, referent, type)) {
errno = ENOENT;
goto out;
}
* packed ref:
*/
if (refs_read_raw_ref(refs->packed_ref_store, refname,
- sha1, referent, type)) {
+ oid, referent, type)) {
errno = EISDIR;
goto out;
}
* Please note that FETCH_HEAD has additional
* data after the sha.
*/
- if (get_sha1_hex(buf, sha1) ||
- (buf[40] != '\0' && !isspace(buf[40]))) {
+ if (parse_oid_hex(buf, oid, &p) ||
+ (*p != '\0' && !isspace(*p))) {
*type |= REF_ISBROKEN;
errno = EINVAL;
goto out;
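/*
 * Aside, not part of the patch: parse_oid_hex() is the
 * hash-size-agnostic replacement for get_sha1_hex(). It parses one
 * object ID and reports where parsing stopped, so callers can
 * validate trailing data via the end pointer instead of hard-coding
 * offset 40. A minimal sketch (hypothetical helper):
 */
static int oid_line_well_formed(const char *buf)
{
	struct object_id oid;
	const char *p;

	/* one hex oid, then NUL or whitespace (as with FETCH_HEAD) */
	return !parse_oid_hex(buf, &oid, &p) &&
	       (*p == '\0' || isspace(*p));
}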
static void unlock_ref(struct ref_lock *lock)
{
- /* Do not free lock->lk -- atexit() still looks at them */
- if (lock->lk)
- rollback_lock_file(lock->lk);
+ rollback_lock_file(&lock->lk);
free(lock->ref_name);
free(lock);
}
goto error_return;
}
- if (!lock->lk)
- lock->lk = xcalloc(1, sizeof(struct lock_file));
-
if (hold_lock_file_for_update_timeout(
- lock->lk, ref_file.buf, LOCK_NO_DEREF,
+ &lock->lk, ref_file.buf, LOCK_NO_DEREF,
get_files_ref_lock_timeout_ms()) < 0) {
if (errno == ENOENT && --attempts_remaining > 0) {
/*
*/
if (files_read_raw_ref(&refs->base, refname,
- lock->old_oid.hash, referent, type)) {
+ &lock->old_oid, referent, type)) {
if (errno == ENOENT) {
if (mustexist) {
/* Garden variety missing reference. */
return ret;
}
-static int files_peel_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
- "peel_ref");
- int flag;
- unsigned char base[20];
-
- if (current_ref_iter && current_ref_iter->refname == refname) {
- struct object_id peeled;
-
- if (ref_iterator_peel(current_ref_iter, &peeled))
- return -1;
- hashcpy(sha1, peeled.hash);
- return 0;
- }
-
- if (refs_read_ref_full(ref_store, refname,
- RESOLVE_REF_READING, base, &flag))
- return -1;
-
- /*
- * If the reference is packed, read its ref_entry from the
- * cache in the hope that we already know its peeled value.
- * We only try this optimization on packed references because
- * (a) forcing the filling of the loose reference cache could
- * be expensive and (b) loose references anyway usually do not
- * have REF_KNOWS_PEELED.
- */
- if (flag & REF_ISPACKED &&
- !refs_peel_ref(refs->packed_ref_store, refname, sha1))
- return 0;
-
- return peel_object(base, sha1);
-}
-
struct files_ref_iterator {
struct ref_iterator base;
static struct ref_iterator *files_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
struct files_ref_store *refs;
- struct ref_iterator *loose_iter, *packed_iter;
+ struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
struct files_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
- iter = xcalloc(1, sizeof(*iter));
- ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
-
/*
* We must make sure that all loose refs are read before
* accessing the packed-refs file; this avoids a race
	packed_iter = refs_ref_iterator_begin(
			refs->packed_ref_store, prefix, 0,
			DO_FOR_EACH_INCLUDE_BROKEN);
DO_FOR_EACH_INCLUDE_BROKEN);
- iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+ overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
+
+ iter = xcalloc(1, sizeof(*iter));
+ ref_iterator = &iter->base;
+ base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
+ overlay_iter->ordered);
+ iter->iter0 = overlay_iter;
iter->flags = flags;
return ref_iterator;
}
/*
- * Verify that the reference locked by lock has the value old_sha1.
- * Fail if the reference doesn't exist and mustexist is set. Return 0
- * on success. On error, write an error message to err, set errno, and
- * return a negative value.
+ * Verify that the reference locked by lock has the value old_oid
+ * (unless it is NULL). Fail if the reference doesn't exist and
+ * mustexist is set. Return 0 on success. On error, write an error
+ * message to err, set errno, and return a negative value.
*/
static int verify_lock(struct ref_store *ref_store, struct ref_lock *lock,
- const unsigned char *old_sha1, int mustexist,
+ const struct object_id *old_oid, int mustexist,
struct strbuf *err)
{
assert(err);
if (refs_read_ref_full(ref_store, lock->ref_name,
mustexist ? RESOLVE_REF_READING : 0,
- lock->old_oid.hash, NULL)) {
- if (old_sha1) {
+ &lock->old_oid, NULL)) {
+ if (old_oid) {
int save_errno = errno;
strbuf_addf(err, "can't verify ref '%s'", lock->ref_name);
errno = save_errno;
return 0;
}
}
- if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) {
+ if (old_oid && oidcmp(&lock->old_oid, old_oid)) {
strbuf_addf(err, "ref '%s' is at %s but expected %s",
lock->ref_name,
oid_to_hex(&lock->old_oid),
- sha1_to_hex(old_sha1));
+ oid_to_hex(old_oid));
errno = EBUSY;
return -1;
}
* Locks a ref returning the lock on success and NULL on failure.
* On failure errno is set to something meaningful.
*/
-static struct ref_lock *lock_ref_sha1_basic(struct files_ref_store *refs,
- const char *refname,
- const unsigned char *old_sha1,
- const struct string_list *extras,
- const struct string_list *skip,
- unsigned int flags, int *type,
- struct strbuf *err)
+static struct ref_lock *lock_ref_oid_basic(struct files_ref_store *refs,
+ const char *refname,
+ const struct object_id *old_oid,
+ const struct string_list *extras,
+ const struct string_list *skip,
+ unsigned int flags, int *type,
+ struct strbuf *err)
{
struct strbuf ref_file = STRBUF_INIT;
struct ref_lock *lock;
int last_errno = 0;
- int mustexist = (old_sha1 && !is_null_sha1(old_sha1));
+ int mustexist = (old_oid && !is_null_oid(old_oid));
int resolve_flags = RESOLVE_REF_NO_RECURSE;
int resolved;
- files_assert_main_repository(refs, "lock_ref_sha1_basic");
+ files_assert_main_repository(refs, "lock_ref_oid_basic");
assert(err);
lock = xcalloc(1, sizeof(struct ref_lock));
files_ref_path(refs, &ref_file, refname);
resolved = !!refs_resolve_ref_unsafe(&refs->base,
refname, resolve_flags,
- lock->old_oid.hash, type);
+ &lock->old_oid, type);
if (!resolved && errno == EISDIR) {
/*
* we are trying to lock foo but we used to
}
resolved = !!refs_resolve_ref_unsafe(&refs->base,
refname, resolve_flags,
- lock->old_oid.hash, type);
+ &lock->old_oid, type);
}
if (!resolved) {
last_errno = errno;
goto error_return;
}
- lock->lk = xcalloc(1, sizeof(struct lock_file));
-
lock->ref_name = xstrdup(refname);
- if (raceproof_create_file(ref_file.buf, create_reflock, lock->lk)) {
+ if (raceproof_create_file(ref_file.buf, create_reflock, &lock->lk)) {
last_errno = errno;
unable_to_lock_message(ref_file.buf, errno, err);
goto error_return;
}
- if (verify_lock(&refs->base, lock, old_sha1, mustexist, err)) {
+ if (verify_lock(&refs->base, lock, old_oid, mustexist, err)) {
last_errno = errno;
goto error_return;
}
struct ref_to_prune {
struct ref_to_prune *next;
- unsigned char sha1[20];
+ struct object_id oid;
char name[FLEX_ARRAY];
};
transaction = ref_store_transaction_begin(&refs->base, &err);
if (!transaction ||
- ref_transaction_delete(transaction, r->name, r->sha1,
+ ref_transaction_delete(transaction, r->name, &r->oid,
REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
ref_transaction_commit(transaction, &err)) {
ref_transaction_free(transaction);
* packed-refs transaction:
*/
if (ref_transaction_update(transaction, iter->refname,
- iter->oid->hash, NULL,
+ iter->oid, NULL,
REF_NODEREF, NULL, &err))
die("failure preparing to create packed reference %s: %s",
iter->refname, err.buf);
if ((flags & PACK_REFS_PRUNE)) {
struct ref_to_prune *n;
FLEX_ALLOC_STR(n, name, iter->refname);
- hashcpy(n->sha1, iter->oid->hash);
+ oidcpy(&n->oid, iter->oid);
n->next = refs_to_prune;
refs_to_prune = n;
}
const struct object_id *oid, const char *logmsg,
struct strbuf *err);
-static int files_rename_ref(struct ref_store *ref_store,
+static int files_copy_or_rename_ref(struct ref_store *ref_store,
const char *oldrefname, const char *newrefname,
- const char *logmsg)
+ const char *logmsg, int copy)
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
if (!refs_resolve_ref_unsafe(&refs->base, oldrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
- orig_oid.hash, &flag)) {
+ &orig_oid, &flag)) {
ret = error("refname %s not found", oldrefname);
goto out;
}
if (flag & REF_ISSYMREF) {
- ret = error("refname %s is a symbolic ref, renaming it is not supported",
- oldrefname);
+ if (copy)
+ ret = error("refname %s is a symbolic ref, copying it is not supported",
+ oldrefname);
+ else
+ ret = error("refname %s is a symbolic ref, renaming it is not supported",
+ oldrefname);
goto out;
}
if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
goto out;
}
- if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
+ if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
oldrefname, strerror(errno));
goto out;
}
- if (refs_delete_ref(&refs->base, logmsg, oldrefname,
- orig_oid.hash, REF_NODEREF)) {
+ if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
+ ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
+ oldrefname, strerror(errno));
+ goto out;
+ }
+
+ if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
+ &orig_oid, REF_NODEREF)) {
error("unable to delete old %s", oldrefname);
goto rollback;
}
* the safety anyway; we want to delete the reference whatever
* its current value.
*/
- if (!refs_read_ref_full(&refs->base, newrefname,
+ if (!copy && !refs_read_ref_full(&refs->base, newrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
- oid.hash, NULL) &&
+ &oid, NULL) &&
refs_delete_ref(&refs->base, NULL, newrefname,
NULL, REF_NODEREF)) {
if (errno == EISDIR) {
logmoved = log;
- lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
- REF_NODEREF, NULL, &err);
+ lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL,
+ REF_NODEREF, NULL, &err);
if (!lock) {
- error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
+ if (copy)
+ error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
+ else
+ error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
strbuf_release(&err);
goto rollback;
}
goto out;
rollback:
- lock = lock_ref_sha1_basic(refs, oldrefname, NULL, NULL, NULL,
- REF_NODEREF, NULL, &err);
+ lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL,
+ REF_NODEREF, NULL, &err);
if (!lock) {
error("unable to lock %s for rollback: %s", oldrefname, err.buf);
strbuf_release(&err);
return ret;
}
-static int close_ref(struct ref_lock *lock)
+static int files_rename_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
{
- if (close_lock_file(lock->lk))
+ return files_copy_or_rename_ref(ref_store, oldrefname,
+ newrefname, logmsg, 0);
+}
+
+static int files_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ return files_copy_or_rename_ref(ref_store, oldrefname,
+ newrefname, logmsg, 1);
+}
+
+static int close_ref_gently(struct ref_lock *lock)
+{
+ if (close_lock_file_gently(&lock->lk))
return -1;
return 0;
}
static int commit_ref(struct ref_lock *lock)
{
- char *path = get_locked_file_path(lock->lk);
+ char *path = get_locked_file_path(&lock->lk);
struct stat st;
if (!lstat(path, &st) && S_ISDIR(st.st_mode)) {
free(path);
}
- if (commit_lock_file(lock->lk))
+ if (commit_lock_file(&lock->lk))
return -1;
return 0;
}
written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
free(logrec);
- if (written != len)
+ if (written < 0)
return -1;
return 0;
unlock_ref(lock);
return -1;
}
- fd = get_lock_file_fd(lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_in_full(fd, &term, 1) != 1 ||
- close_ref(lock) < 0) {
+ fd = get_lock_file_fd(&lock->lk);
+ if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_in_full(fd, &term, 1) < 0 ||
+ close_ref_gently(lock) < 0) {
strbuf_addf(err,
- "couldn't write '%s'", get_lock_file_path(lock->lk));
+ "couldn't write '%s'", get_lock_file_path(&lock->lk));
unlock_ref(lock);
return -1;
}
* check with HEAD only which should cover 99% of all usage
* scenarios (even 100% of the default ones).
*/
- struct object_id head_oid;
int head_flag;
const char *head_ref;
head_ref = refs_resolve_ref_unsafe(&refs->base, "HEAD",
RESOLVE_REF_READING,
- head_oid.hash, &head_flag);
+ NULL, &head_flag);
if (head_ref && (head_flag & REF_ISSYMREF) &&
!strcmp(head_ref, lock->ref_name)) {
struct strbuf log_err = STRBUF_INIT;
{
int ret = -1;
#ifndef NO_SYMLINK_HEAD
- char *ref_path = get_locked_file_path(lock->lk);
+ char *ref_path = get_locked_file_path(&lock->lk);
unlink(ref_path);
ret = symlink(target, ref_path);
free(ref_path);
struct object_id new_oid;
if (logmsg &&
!refs_read_ref_full(&refs->base, target,
- RESOLVE_REF_READING, new_oid.hash, NULL) &&
+ RESOLVE_REF_READING, &new_oid, NULL) &&
files_log_ref_write(refs, refname, &lock->old_oid,
&new_oid, logmsg, 0, &err)) {
error("%s", err.buf);
return 0;
}
- if (!fdopen_lock_file(lock->lk, "w"))
+ if (!fdopen_lock_file(&lock->lk, "w"))
return error("unable to fdopen %s: %s",
- lock->lk->tempfile.filename.buf, strerror(errno));
+ lock->lk.tempfile->filename.buf, strerror(errno));
update_symref_reflog(refs, lock, refname, target, logmsg);
/* no error check; commit_ref will check ferror */
- fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
+ fprintf(lock->lk.tempfile->fp, "ref: %s\n", target);
if (commit_ref(lock) < 0)
return error("unable to write symref for %s: %s", refname,
strerror(errno));
struct ref_lock *lock;
int ret;
- lock = lock_ref_sha1_basic(refs, refname, NULL,
- NULL, NULL, REF_NODEREF, NULL,
- &err);
+ lock = lock_ref_oid_basic(refs, refname, NULL,
+ NULL, NULL, REF_NODEREF, NULL,
+ &err);
if (!lock) {
error("%s", err.buf);
strbuf_release(&err);
if (refs_read_ref_full(iter->ref_store,
diter->relative_path, 0,
- iter->oid.hash, &flags)) {
+ &iter->oid, &flags)) {
error("bad ref for %s", diter->path.buf);
continue;
}
files_reflog_iterator_abort
};
-static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
+static struct ref_iterator *reflog_iterator_begin(struct ref_store *ref_store,
+ const char *gitdir)
{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ,
- "reflog_iterator_begin");
struct files_reflog_iterator *iter = xcalloc(1, sizeof(*iter));
struct ref_iterator *ref_iterator = &iter->base;
struct strbuf sb = STRBUF_INIT;
- base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
- files_reflog_path(refs, &sb, NULL);
+ base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
+ strbuf_addf(&sb, "%s/logs", gitdir);
iter->dir_iterator = dir_iterator_begin(sb.buf);
iter->ref_store = ref_store;
strbuf_release(&sb);
+
return ref_iterator;
}
+static enum iterator_selection reflog_iterator_select(
+ struct ref_iterator *iter_worktree,
+ struct ref_iterator *iter_common,
+ void *cb_data)
+{
+ if (iter_worktree) {
+ /*
+ * We're a bit loose here. We probably should ignore
+ * common refs if they are accidentally added as
+ * per-worktree refs.
+ */
+ return ITER_SELECT_0;
+ } else if (iter_common) {
+ if (ref_type(iter_common->refname) == REF_TYPE_NORMAL)
+ return ITER_SELECT_1;
+
+ /*
+ * The main ref store may contain the main worktree's
+ * per-worktree refs, which should be ignored.
+ */
+ return ITER_SKIP_1;
+ } else
+ return ITER_DONE;
+}
+
+static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
+{
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_READ,
+ "reflog_iterator_begin");
+
+ if (!strcmp(refs->gitdir, refs->gitcommondir)) {
+ return reflog_iterator_begin(ref_store, refs->gitcommondir);
+ } else {
+ return merge_ref_iterator_begin(
+ 0,
+ reflog_iterator_begin(ref_store, refs->gitdir),
+ reflog_iterator_begin(ref_store, refs->gitcommondir),
+ reflog_iterator_select, refs);
+ }
+}
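/*
 * Illustration (an assumption about typical layouts, not part of the
 * patch): in a linked worktree the two sides of the merge see
 *
 *   $GIT_DIR/logs        -> HEAD, refs/bisect/...  (per-worktree)
 *   $GIT_COMMONDIR/logs  -> refs/heads/..., refs/tags/...  (shared)
 *
 * and reflog_iterator_select() above drops any per-worktree names
 * that show up on the common side.
 */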
+
/*
* If update is a direct update of head_ref (the reference pointed to
* by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
/*
* First make sure that HEAD is not already in the
- * transaction. This insertion is O(N) in the transaction
+ * transaction. This check is O(lg N) in the transaction
* size, but it happens at most once per transaction.
*/
- item = string_list_insert(affected_refnames, "HEAD");
- if (item->util) {
+ if (string_list_has_string(affected_refnames, "HEAD")) {
/* An entry already existed */
strbuf_addf(err,
"multiple updates for 'HEAD' (including one "
new_update = ref_transaction_add_update(
transaction, "HEAD",
update->flags | REF_LOG_ONLY | REF_NODEREF,
- update->new_oid.hash, update->old_oid.hash,
+ &update->new_oid, &update->old_oid,
update->msg);
+ /*
+ * Add "HEAD". This insertion is O(N) in the transaction
+ * size, but it happens at most once per transaction.
+ * Add new_update->refname instead of a literal "HEAD".
+ */
+ if (strcmp(new_update->refname, "HEAD"))
+ BUG("%s unexpectedly not 'HEAD'", new_update->refname);
+ item = string_list_insert(affected_refnames, new_update->refname);
item->util = new_update;
return 0;
/*
* First make sure that referent is not already in the
- * transaction. This insertion is O(N) in the transaction
+ * transaction. This check is O(lg N) in the transaction
* size, but it happens at most once per symref in a
* transaction.
*/
- item = string_list_insert(affected_refnames, referent);
- if (item->util) {
- /* An entry already existed */
+ if (string_list_has_string(affected_refnames, referent)) {
+ /* An entry already exists */
strbuf_addf(err,
"multiple updates for '%s' (including one "
"via symref '%s') are not allowed",
new_update = ref_transaction_add_update(
transaction, referent, new_flags,
- update->new_oid.hash, update->old_oid.hash,
+ &update->new_oid, &update->old_oid,
update->msg);
new_update->parent_update = update;
update->flags |= REF_LOG_ONLY | REF_NODEREF;
update->flags &= ~REF_HAVE_OLD;
+ /*
+ * Add the referent. This insertion is O(N) in the transaction
+ * size, but it happens at most once per symref in a
+ * transaction. Make sure to add new_update->refname, which will
+ * be valid as long as affected_refnames is in use, and NOT
+ * referent, which might soon be freed by our caller.
+ */
+ item = string_list_insert(affected_refnames, new_update->refname);
+ if (item->util)
+ BUG("%s unexpectedly found in affected_refnames",
+ new_update->refname);
item->util = new_update;
return 0;
struct strbuf referent = STRBUF_INIT;
int mustexist = (update->flags & REF_HAVE_OLD) &&
!is_null_oid(&update->old_oid);
- int ret;
+ int ret = 0;
struct ref_lock *lock;
files_assert_main_repository(refs, "lock_ref_for_update");
ret = split_head_update(update, transaction, head_ref,
affected_refnames, err);
if (ret)
- return ret;
+ goto out;
}
ret = lock_raw_ref(refs, update->refname, mustexist,
strbuf_addf(err, "cannot lock ref '%s': %s",
original_update_refname(update), reason);
free(reason);
- return ret;
+ goto out;
}
update->backend_data = lock;
*/
if (refs_read_ref_full(&refs->base,
referent.buf, 0,
- lock->old_oid.hash, NULL)) {
+ &lock->old_oid, NULL)) {
if (update->flags & REF_HAVE_OLD) {
strbuf_addf(err, "cannot lock ref '%s': "
"error reading reference",
original_update_refname(update));
- return -1;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
}
} else if (check_old_oid(update, &lock->old_oid, err)) {
- return TRANSACTION_GENERIC_ERROR;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
}
} else {
/*
referent.buf, transaction,
affected_refnames, err);
if (ret)
- return ret;
+ goto out;
}
} else {
struct ref_update *parent_update;
- if (check_old_oid(update, &lock->old_oid, err))
- return TRANSACTION_GENERIC_ERROR;
+ if (check_old_oid(update, &lock->old_oid, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
+ }
/*
* If this update is happening indirectly because of a
"cannot update ref '%s': %s",
update->refname, write_err);
free(write_err);
- return TRANSACTION_GENERIC_ERROR;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
} else {
update->flags |= REF_NEEDS_COMMIT;
}
* the lockfile is still open. Close it to
* free up the file descriptor:
*/
- if (close_ref(lock)) {
+ if (close_ref_gently(lock)) {
strbuf_addf(err, "couldn't close '%s.lock'",
update->refname);
- return TRANSACTION_GENERIC_ERROR;
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto out;
}
}
- return 0;
+
+out:
+ strbuf_release(&referent);
+ return ret;
}
struct files_transaction_backend_data {
struct string_list affected_refnames = STRING_LIST_INIT_NODUP;
char *head_ref = NULL;
int head_type;
- struct object_id head_oid;
struct files_transaction_backend_data *backend_data;
struct ref_transaction *packed_transaction = NULL;
*/
head_ref = refs_resolve_refdup(ref_store, "HEAD",
RESOLVE_REF_NO_RECURSE,
- head_oid.hash, &head_type);
+ NULL, &head_type);
if (head_ref && !(head_type & REF_ISSYMREF)) {
FREE_AND_NULL(head_ref);
ret = lock_ref_for_update(refs, update, transaction,
head_ref, &affected_refnames, err);
if (ret)
- break;
+ goto cleanup;
if (update->flags & REF_DELETING &&
!(update->flags & REF_LOG_ONLY) &&
ref_transaction_add_update(
packed_transaction, update->refname,
update->flags & ~REF_HAVE_OLD,
- update->new_oid.hash, update->old_oid.hash,
+ &update->new_oid, &update->old_oid,
NULL);
}
}
goto cleanup;
}
backend_data->packed_refs_locked = 1;
- ret = ref_transaction_prepare(packed_transaction, err);
+
+ if (is_packed_transaction_needed(refs->packed_ref_store,
+ packed_transaction)) {
+ ret = ref_transaction_prepare(packed_transaction, err);
+ } else {
+ /*
+ * We can skip rewriting the `packed-refs`
+ * file. But we do need to leave it locked, so
+ * that somebody else doesn't pack a reference
+ * that we are trying to delete.
+ */
+ if (ref_transaction_abort(packed_transaction, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ backend_data->packed_transaction = NULL;
+ }
}
cleanup:
*/
ref_transaction_add_update(packed_transaction, update->refname,
update->flags & ~REF_HAVE_OLD,
- update->new_oid.hash, update->old_oid.hash,
+ &update->new_oid, &update->old_oid,
NULL);
}
}
static int files_reflog_expire(struct ref_store *ref_store,
- const char *refname, const unsigned char *sha1,
+ const char *refname, const struct object_id *oid,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
int status = 0;
int type;
struct strbuf err = STRBUF_INIT;
- struct object_id oid;
memset(&cb, 0, sizeof(cb));
cb.flags = flags;
* reference itself, plus we might need to update the
* reference if --updateref was specified:
*/
- lock = lock_ref_sha1_basic(refs, refname, sha1,
- NULL, NULL, REF_NODEREF,
- &type, &err);
+ lock = lock_ref_oid_basic(refs, refname, oid,
+ NULL, NULL, REF_NODEREF,
+ &type, &err);
if (!lock) {
error("cannot lock ref '%s': %s", refname, err.buf);
strbuf_release(&err);
}
}
- hashcpy(oid.hash, sha1);
-
- (*prepare_fn)(refname, &oid, cb.policy_cb);
+ (*prepare_fn)(refname, oid, cb.policy_cb);
refs_for_each_reflog_ent(ref_store, refname, expire_reflog_ent, &cb);
(*cleanup_fn)(cb.policy_cb);
!(type & REF_ISSYMREF) &&
!is_null_oid(&cb.last_kept_oid);
- if (close_lock_file(&reflog_lock)) {
+ if (close_lock_file_gently(&reflog_lock)) {
status |= error("couldn't write %s: %s", log_file,
strerror(errno));
+ rollback_lock_file(&reflog_lock);
} else if (update &&
- (write_in_full(get_lock_file_fd(lock->lk),
- oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_str_in_full(get_lock_file_fd(lock->lk), "\n") != 1 ||
- close_ref(lock) < 0)) {
+ (write_in_full(get_lock_file_fd(&lock->lk),
+ oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
+ close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
- get_lock_file_path(lock->lk));
+ get_lock_file_path(&lock->lk));
rollback_lock_file(&reflog_lock);
} else if (commit_lock_file(&reflog_lock)) {
status |= error("unable to write reflog '%s' (%s)",
files_initial_transaction_commit,
files_pack_refs,
- files_peel_ref,
files_create_symref,
files_delete_refs,
files_rename_ref,
+ files_copy_ref,
files_ref_iterator_begin,
files_read_raw_ref,
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
-#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
-struct packed_ref_cache {
- struct ref_cache *cache;
+enum mmap_strategy {
+ /*
+ * Don't use mmap() at all for reading `packed-refs`.
+ */
+ MMAP_NONE,
/*
- * Count of references to the data structure in this instance,
- * including the pointer from files_ref_store::packed if any.
- * The data will not be freed as long as the reference count
- * is nonzero.
+ * Can use mmap() for reading `packed-refs`, but the file must
+ * not remain mmapped. This is the usual option on Windows,
+ * where you cannot rename a new version of a file onto a file
+ * that is currently mmapped.
*/
- unsigned int referrers;
+ MMAP_TEMPORARY,
- /* The metadata from when this packed-refs cache was read */
- struct stat_validity validity;
+ /*
+ * It is OK to leave the `packed-refs` file mmapped while
+ * arbitrary other code is running.
+ */
+ MMAP_OK
};
-/*
- * Increment the reference count of *packed_refs.
- */
-static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
-{
- packed_refs->referrers++;
-}
+#if defined(NO_MMAP)
+static enum mmap_strategy mmap_strategy = MMAP_NONE;
+#elif defined(MMAP_PREVENTS_DELETE)
+static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
+#else
+static enum mmap_strategy mmap_strategy = MMAP_OK;
+#endif
+
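/*
 * Consequence of MMAP_TEMPORARY (a sketch of the behavior
 * implemented in create_snapshot() below): the data is copied out of
 * the mapping and munmapped before the snapshot is returned, so a
 * later rename onto "packed-refs" is never blocked by a live mapping.
 */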
+struct packed_ref_store;
/*
- * Decrease the reference count of *packed_refs. If it goes to zero,
- * free *packed_refs and return true; otherwise return false.
+ * A `snapshot` represents one snapshot of a `packed-refs` file.
+ *
+ * Normally, this will be a mmapped view of the contents of the
+ * `packed-refs` file at the time the snapshot was created. However,
+ * if the `packed-refs` file was not sorted, this might point at heap
+ * memory holding the contents of the `packed-refs` file with its
+ * records sorted by refname.
+ *
+ * `snapshot` instances are reference counted (via
+ * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
+ * an instance from disappearing while an iterator is still iterating
+ * over it. Instances are garbage collected when their `referrers`
+ * count goes to zero.
+ *
+ * The most recent `snapshot`, if available, is referenced by the
+ * `packed_ref_store`. Its freshness is checked whenever
+ * `get_snapshot()` is called; if the existing snapshot is obsolete, a
+ * new snapshot is taken.
*/
-static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
-{
- if (!--packed_refs->referrers) {
- free_ref_cache(packed_refs->cache);
- stat_validity_clear(&packed_refs->validity);
- free(packed_refs);
- return 1;
- } else {
- return 0;
- }
-}
+struct snapshot {
+ /*
+ * A back-pointer to the packed_ref_store with which this
+ * snapshot is associated:
+ */
+ struct packed_ref_store *refs;
+
+ /* Is the `packed-refs` file currently mmapped? */
+ int mmapped;
+
+ /*
+ * The contents of the `packed-refs` file. If the file was
+ * already sorted, this points at the mmapped contents of the
+ * file. If not, this points at heap-allocated memory
+ * containing the contents, sorted. If there were no contents
+ * (e.g., because the file didn't exist), `buf` and `eof` are
+ * both NULL.
+ */
+ char *buf, *eof;
+
+ /* The size of the header line, if any; otherwise, 0: */
+ size_t header_len;
+
+ /*
+ * What is the peeled state of the `packed-refs` file that
+ * this snapshot represents? (This is usually determined from
+ * the file's header.)
+ */
+ enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;
+
+ /*
+ * Count of references to this instance, including the pointer
+ * from `packed_ref_store::snapshot`, if any. The instance
+ * will not be freed as long as the reference count is
+ * nonzero.
+ */
+ unsigned int referrers;
+
+ /*
+ * The metadata of the `packed-refs` file from which this
+ * snapshot was created, used to tell if the file has been
+ * replaced since we read it.
+ */
+ struct stat_validity validity;
+};
/*
- * A container for `packed-refs`-related data. It is not (yet) a
- * `ref_store`.
+ * A `ref_store` representing references stored in a `packed-refs`
+ * file. It implements the `ref_store` interface, though it has some
+ * limitations:
+ *
+ * - It cannot store symbolic references.
+ *
+ * - It cannot store reflogs.
+ *
+ * - It does not support reference renaming (though it could).
+ *
+ * On the other hand, it can be locked outside of a reference
+ * transaction. In that case, it remains locked even after the
+ * transaction is done and the new `packed-refs` file is activated.
*/
struct packed_ref_store {
struct ref_store base;
char *path;
/*
- * A cache of the values read from the `packed-refs` file, if
- * it might still be current; otherwise, NULL.
+ * A snapshot of the values read from the `packed-refs` file,
+ * if it might still be current; otherwise, NULL.
*/
- struct packed_ref_cache *cache;
+ struct snapshot *snapshot;
/*
 * Lock used for the "packed-refs" file. Note that this (and
 * thus the enclosing `packed_ref_store`) must not be freed.
 */
struct lock_file lock;
/*
 * Temporary file used when rewriting new contents to the
 * "packed-refs" file. Note that this (and thus the enclosing
 * `packed_ref_store`) must not be freed.
*/
- struct tempfile tempfile;
+ struct tempfile *tempfile;
};
+/*
+ * Increment the reference count of `*snapshot`.
+ */
+static void acquire_snapshot(struct snapshot *snapshot)
+{
+ snapshot->referrers++;
+}
+
+/*
+ * If the buffer in `snapshot` is active, then either munmap the
+ * memory and close the file, or free the memory. Then set the buffer
+ * pointers to NULL.
+ */
+static void clear_snapshot_buffer(struct snapshot *snapshot)
+{
+ if (snapshot->mmapped) {
+ if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
+ die_errno("error ummapping packed-refs file %s",
+ snapshot->refs->path);
+ snapshot->mmapped = 0;
+ } else {
+ free(snapshot->buf);
+ }
+ snapshot->buf = snapshot->eof = NULL;
+ snapshot->header_len = 0;
+}
+
+/*
+ * Decrease the reference count of `*snapshot`. If it goes to zero,
+ * free `*snapshot` and return true; otherwise return false.
+ */
+static int release_snapshot(struct snapshot *snapshot)
+{
+ if (!--snapshot->referrers) {
+ stat_validity_clear(&snapshot->validity);
+ clear_snapshot_buffer(snapshot);
+ free(snapshot);
+ return 1;
+ } else {
+ return 0;
+ }
+}
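/*
 * Lifetime sketch (hypothetical helper mirroring the iterator code
 * further down; get_snapshot() is defined below): a reader pins the
 * snapshot so that clear_snapshot() on the store cannot free it out
 * from under the reader.
 */
static struct snapshot *pin_current_snapshot(struct packed_ref_store *refs)
{
	struct snapshot *snapshot = get_snapshot(refs); /* borrowed */

	acquire_snapshot(snapshot); /* owned; pair with release_snapshot() */
	return snapshot;
}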
+
struct ref_store *packed_ref_store_create(const char *path,
unsigned int store_flags)
{
return refs;
}
-static void clear_packed_ref_cache(struct packed_ref_store *refs)
+static void clear_snapshot(struct packed_ref_store *refs)
+{
+ if (refs->snapshot) {
+ struct snapshot *snapshot = refs->snapshot;
+
+ refs->snapshot = NULL;
+ release_snapshot(snapshot);
+ }
+}
+
+static NORETURN void die_unterminated_line(const char *path,
+ const char *p, size_t len)
+{
+ if (len < 80)
+ die("unterminated line in %s: %.*s", path, (int)len, p);
+ else
+ die("unterminated line in %s: %.75s...", path, p);
+}
+
+static NORETURN void die_invalid_line(const char *path,
+ const char *p, size_t len)
{
- if (refs->cache) {
- struct packed_ref_cache *cache = refs->cache;
+ const char *eol = memchr(p, '\n', len);
- refs->cache = NULL;
- release_packed_ref_cache(cache);
+ if (!eol)
+ die_unterminated_line(path, p, len);
+ else if (eol - p < 80)
+ die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
+ else
+ die("unexpected line in %s: %.75s...", path, p);
+}
+
+struct snapshot_record {
+ const char *start;
+ size_t len;
+};
+
+static int cmp_packed_ref_records(const void *v1, const void *v2)
+{
+ const struct snapshot_record *e1 = v1, *e2 = v2;
+ const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
+ const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 == '\n' ? 0 : -1;
+ if (*r1 != *r2) {
+ if (*r2 == '\n')
+ return 1;
+ else
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ }
+ r1++;
+ r2++;
}
}
-/* The length of a peeled reference line in packed-refs, including EOL: */
-#define PEELED_LINE_LENGTH 42
+/*
+ * Compare a snapshot record at `rec` to the specified NUL-terminated
+ * refname.
+ */
+static int cmp_record_to_refname(const char *rec, const char *refname)
+{
+ const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
+ const char *r2 = refname;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 ? -1 : 0;
+ if (!*r2)
+ return 1;
+ if (*r1 != *r2)
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ r1++;
+ r2++;
+ }
+}
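/*
 * For orientation, not part of the patch: the records being compared
 * have this shape in the buffer,
 *
 *   <40-hex-oid> SP <refname> LF
 *   [ "^" <40-hex-oid> LF ]      (optional peeled line)
 *
 * so both comparators start GIT_SHA1_HEXSZ + 1 bytes in, i.e. at the
 * refname, and treat LF as the end of the key.
 */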
/*
- * Parse one line from a packed-refs file. Write the SHA1 to sha1.
- * Return a pointer to the refname within the line (null-terminated),
- * or NULL if there was a problem.
+ * `snapshot->buf` is not known to be sorted. Check whether it is, and
+ * if not, sort it into new memory and munmap/free the old storage.
*/
-static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
+static void sort_snapshot(struct snapshot *snapshot)
{
- const char *ref;
+ struct snapshot_record *records = NULL;
+ size_t alloc = 0, nr = 0;
+ int sorted = 1;
+ const char *pos, *eof, *eol;
+ size_t len, i;
+ char *new_buffer, *dst;
- if (parse_oid_hex(line->buf, oid, &ref) < 0)
- return NULL;
- if (!isspace(*ref++))
- return NULL;
+ pos = snapshot->buf + snapshot->header_len;
+ eof = snapshot->eof;
+ len = eof - pos;
- if (isspace(*ref))
- return NULL;
+ if (!len)
+ return;
- if (line->buf[line->len - 1] != '\n')
- return NULL;
- line->buf[--line->len] = 0;
+ /*
+ * Initialize records based on a crude estimate of the number
+ * of references in the file (we'll grow it below if needed):
+ */
+ ALLOC_GROW(records, len / 80 + 20, alloc);
+
+ while (pos < eof) {
+ eol = memchr(pos, '\n', eof - pos);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated line found in packed-refs");
+ if (eol - pos < GIT_SHA1_HEXSZ + 2)
+ die_invalid_line(snapshot->refs->path,
+ pos, eof - pos);
+ eol++;
+ if (eol < eof && *eol == '^') {
+ /*
+ * Keep any peeled line together with its
+ * reference:
+ */
+ const char *peeled_start = eol;
+
+ eol = memchr(peeled_start, '\n', eof - peeled_start);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated peeled line found in packed-refs");
+ eol++;
+ }
+
+ ALLOC_GROW(records, nr + 1, alloc);
+ records[nr].start = pos;
+ records[nr].len = eol - pos;
+ nr++;
+
+ if (sorted &&
+ nr > 1 &&
+ cmp_packed_ref_records(&records[nr - 2],
+ &records[nr - 1]) >= 0)
+ sorted = 0;
+
+ pos = eol;
+ }
+
+ if (sorted)
+ goto cleanup;
+
+ /* We need to sort the memory. First we sort the records array: */
+ QSORT(records, nr, cmp_packed_ref_records);
+
+ /*
+ * Allocate a new chunk of memory, and copy the old memory to
+ * the new in the order indicated by `records` (not bothering
+ * with the header line):
+ */
+ new_buffer = xmalloc(len);
+ for (dst = new_buffer, i = 0; i < nr; i++) {
+ memcpy(dst, records[i].start, records[i].len);
+ dst += records[i].len;
+ }
+
+ /*
+ * Now munmap the old buffer and use the sorted buffer in its
+ * place:
+ */
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = new_buffer;
+ snapshot->eof = new_buffer + len;
+ snapshot->header_len = 0;
+
+cleanup:
+ free(records);
+}
+
+/*
+ * Return a pointer to the start of the record that contains the
+ * character `*p` (which must be within the buffer). If no other
+ * record start is found, return `buf`.
+ */
+static const char *find_start_of_record(const char *buf, const char *p)
+{
+ while (p > buf && (p[-1] != '\n' || p[0] == '^'))
+ p--;
+ return p;
+}
- return ref;
+/*
+ * Return a pointer to the start of the record following the record
+ * that contains `*p`. If none is found before `end`, return `end`.
+ */
+static const char *find_end_of_record(const char *p, const char *end)
+{
+ while (++p < end && (p[-1] != '\n' || p[0] == '^'))
+ ;
+ return p;
}
/*
- * Read from `packed_refs_file` into a newly-allocated
- * `packed_ref_cache` and return it. The return value will already
- * have its reference count incremented.
+ * We want to be able to compare mmapped reference records quickly,
+ * without totally parsing them. We can do so because the records are
+ * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
+ * + 1) bytes past the beginning of the record.
+ *
+ * But what if the `packed-refs` file contains garbage? We're willing
+ * to tolerate not detecting the problem, as long as we don't produce
+ * totally garbled output (we can't afford to check the integrity of
+ * the whole file during every Git invocation). But we do want to be
+ * sure that we never read past the end of the buffer in memory and
+ * perform an illegal memory access.
+ *
+ * Guarantee that minimum level of safety by verifying that the last
+ * record in the file is LF-terminated, and that it has at least
+ * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
+ * these checks fails.
+ */
+static void verify_buffer_safe(struct snapshot *snapshot)
+{
+ const char *buf = snapshot->buf + snapshot->header_len;
+ const char *eof = snapshot->eof;
+ const char *last_line;
+
+ if (buf == eof)
+ return;
+
+ last_line = find_start_of_record(buf, eof - 1);
+ if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
+ die_invalid_line(snapshot->refs->path,
+ last_line, eof - last_line);
+}
+
+/*
+ * Depending on `mmap_strategy`, either mmap or read the contents of
+ * the `packed-refs` file into the snapshot. Return 1 if the file
+ * existed and was read, or 0 if the file was absent. Die on errors.
+ */
+static int load_contents(struct snapshot *snapshot)
+{
+ int fd;
+ struct stat st;
+ size_t size;
+ ssize_t bytes_read;
+
+ fd = open(snapshot->refs->path, O_RDONLY);
+ if (fd < 0) {
+ if (errno == ENOENT) {
+ /*
+ * This is OK; it just means that no
+ * "packed-refs" file has been written yet,
+ * which is equivalent to it being empty,
+ * which is its state when initialized with
+ * zeros.
+ */
+ return 0;
+ } else {
+ die_errno("couldn't read %s", snapshot->refs->path);
+ }
+ }
+
+ stat_validity_update(&snapshot->validity, fd);
+
+ if (fstat(fd, &st) < 0)
+ die_errno("couldn't stat %s", snapshot->refs->path);
+ size = xsize_t(st.st_size);
+
+ switch (mmap_strategy) {
+ case MMAP_NONE:
+ snapshot->buf = xmalloc(size);
+ bytes_read = read_in_full(fd, snapshot->buf, size);
+ if (bytes_read < 0 || bytes_read != size)
+ die_errno("couldn't read %s", snapshot->refs->path);
+ snapshot->eof = snapshot->buf + size;
+ snapshot->mmapped = 0;
+ break;
+ case MMAP_TEMPORARY:
+ case MMAP_OK:
+ snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ snapshot->eof = snapshot->buf + size;
+ snapshot->mmapped = 1;
+ break;
+ }
+ close(fd);
+
+ return 1;
+}
+
+/*
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted. In the latter mode, `refname` doesn't have to be a proper
+ * reference name; for example, one could search for "refs/replace/"
+ * to find the start of any replace references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+static const char *find_reference_location(struct snapshot *snapshot,
+ const char *refname, int mustexist)
+{
+ /*
+ * This is not *quite* a garden-variety binary search, because
+ * the data we're searching is made up of records, and we
+ * always need to find the beginning of a record to do a
+ * comparison. A "record" here is one line for the reference
+ * itself and zero or one peel lines that start with '^'. Our
+ * loop invariant is described in the next two comments.
+ */
+
+ /*
+ * A pointer to the character at the start of a record whose
+ * preceding records all have reference names that come
+ * *before* `refname`.
+ */
+ const char *lo = snapshot->buf + snapshot->header_len;
+
+ /*
+ * A pointer to the first character of a record whose
+ * reference name comes *after* `refname`.
+ */
+ const char *hi = snapshot->eof;
+
+ while (lo < hi) {
+ const char *mid, *rec;
+ int cmp;
+
+ mid = lo + (hi - lo) / 2;
+ rec = find_start_of_record(lo, mid);
+ cmp = cmp_record_to_refname(rec, refname);
+ if (cmp < 0) {
+ lo = find_end_of_record(mid, hi);
+ } else if (cmp > 0) {
+ hi = rec;
+ } else {
+ return rec;
+ }
+ }
+
+ if (mustexist)
+ return NULL;
+ else
+ return lo;
+}
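/*
 * Usage sketch (hypothetical caller): with mustexist=0 the return
 * value is the insertion point, which doubles as the start of
 * iteration for a prefix, as the iterator code below relies on:
 */
static const char *start_of_prefix(struct snapshot *snapshot,
				   const char *prefix)
{
	/* e.g. prefix = "refs/tags/" finds the first tag record */
	return find_reference_location(snapshot, prefix, 0);
}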
+
+/*
+ * Create a newly-allocated `snapshot` of the `packed-refs` file in
+ * its current state and return it. The return value will already have
+ * its reference count incremented.
*
* A comment line of the form "# pack-refs with: " may contain zero or
* more traits. We interpret the traits as follows:
*
- * No traits:
+ * Neither `peeled` nor `fully-peeled`:
*
* Probably no references are peeled. But if the file contains a
* peeled value for a reference, we will use it.
*
- * peeled:
+ * `peeled`:
*
* References under "refs/tags/", if they *can* be peeled, *are*
* peeled in this file. References outside of "refs/tags/" are
* probably not peeled even if they could have been, but if we find
* a peeled value for such a reference we will use it.
*
- * fully-peeled:
+ * `fully-peeled`:
*
* All references in the file that can be peeled are peeled.
* Inversely (and this is more important), any references in the
* trait should typically be written alongside "peeled" for
* compatibility with older clients, but we do not require it
* (i.e., "peeled" is a no-op if "fully-peeled" is set).
+ *
+ * `sorted`:
+ *
+ * The references in this file are known to be sorted by refname.
*/
-static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
+static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
- FILE *f;
- struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
- struct ref_entry *last = NULL;
- struct strbuf line = STRBUF_INIT;
- enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
- struct ref_dir *dir;
-
- acquire_packed_ref_cache(packed_refs);
- packed_refs->cache = create_ref_cache(NULL, NULL);
- packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
-
- f = fopen(packed_refs_file, "r");
- if (!f) {
- if (errno == ENOENT) {
- /*
- * This is OK; it just means that no
- * "packed-refs" file has been written yet,
- * which is equivalent to it being empty.
- */
- return packed_refs;
- } else {
- die_errno("couldn't read %s", packed_refs_file);
- }
- }
+ struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
+ int sorted = 0;
- stat_validity_update(&packed_refs->validity, fileno(f));
+ snapshot->refs = refs;
+ acquire_snapshot(snapshot);
+ snapshot->peeled = PEELED_NONE;
- dir = get_ref_dir(packed_refs->cache->root);
- while (strbuf_getwholeline(&line, f, '\n') != EOF) {
- struct object_id oid;
- const char *refname;
- const char *traits;
-
- if (!line.len || line.buf[line.len - 1] != '\n')
- die("unterminated line in %s: %s", packed_refs_file, line.buf);
-
- if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
- if (strstr(traits, " fully-peeled "))
- peeled = PEELED_FULLY;
- else if (strstr(traits, " peeled "))
- peeled = PEELED_TAGS;
- /* perhaps other traits later as well */
- continue;
- }
+ if (!load_contents(snapshot))
+ return snapshot;
- refname = parse_ref_line(&line, &oid);
- if (refname) {
- int flag = REF_ISPACKED;
+ /* If the file has a header line, process it: */
+ if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
+ struct strbuf tmp = STRBUF_INIT;
+ char *p;
+ const char *eol;
+ struct string_list traits = STRING_LIST_INIT_NODUP;
- if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
- if (!refname_is_safe(refname))
- die("packed refname is dangerous: %s", refname);
- oidclr(&oid);
- flag |= REF_BAD_NAME | REF_ISBROKEN;
- }
- last = create_ref_entry(refname, &oid, flag);
- if (peeled == PEELED_FULLY ||
- (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
- last->flag |= REF_KNOWS_PEELED;
- add_ref_entry(dir, last);
- } else if (last &&
- line.buf[0] == '^' &&
- line.len == PEELED_LINE_LENGTH &&
- line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
- !get_oid_hex(line.buf + 1, &oid)) {
- oidcpy(&last->u.value.peeled, &oid);
- /*
- * Regardless of what the file header said,
- * we definitely know the value of *this*
- * reference:
- */
- last->flag |= REF_KNOWS_PEELED;
- } else {
- strbuf_setlen(&line, line.len - 1);
- die("unexpected line in %s: %s", packed_refs_file, line.buf);
- }
+ eol = memchr(snapshot->buf, '\n',
+ snapshot->eof - snapshot->buf);
+ if (!eol)
+ die_unterminated_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
+
+ if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+ die_invalid_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ string_list_split_in_place(&traits, p, ' ', -1);
+
+ if (unsorted_string_list_has_string(&traits, "fully-peeled"))
+ snapshot->peeled = PEELED_FULLY;
+ else if (unsorted_string_list_has_string(&traits, "peeled"))
+ snapshot->peeled = PEELED_TAGS;
+
+ sorted = unsorted_string_list_has_string(&traits, "sorted");
+
+ /* perhaps other traits later as well */
+
+ /* The "+ 1" is for the LF character. */
+ snapshot->header_len = eol + 1 - snapshot->buf;
+
+ string_list_clear(&traits, 0);
+ strbuf_release(&tmp);
}
- fclose(f);
- strbuf_release(&line);
+ verify_buffer_safe(snapshot);
+
+ if (!sorted) {
+ sort_snapshot(snapshot);
+
+ /*
+ * Reordering the records might have moved a short one
+ * to the end of the buffer, so verify the buffer's
+ * safety again:
+ */
+ verify_buffer_safe(snapshot);
+ }
- return packed_refs;
+ if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
+ /*
+ * We don't want to leave the file mmapped, so we are
+ * forced to make a copy now:
+ */
+ size_t size = snapshot->eof -
+ (snapshot->buf + snapshot->header_len);
+ char *buf_copy = xmalloc(size);
+
+ memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = buf_copy;
+ snapshot->eof = buf_copy + size;
+ }
+
+ return snapshot;
}
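/*
 * Example of a header line accepted above (assuming the writer
 * described near PACKED_REFS_HEADER below; the spaces around the
 * traits are what the substring-based older parsers relied on):
 *
 *   # pack-refs with: peeled fully-peeled sorted
 */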
/*
- * Check that the packed refs cache (if any) still reflects the
- * contents of the file. If not, clear the cache.
+ * Check that `refs->snapshot` (if present) still reflects the
+ * contents of the `packed-refs` file. If not, clear the snapshot.
*/
-static void validate_packed_ref_cache(struct packed_ref_store *refs)
+static void validate_snapshot(struct packed_ref_store *refs)
{
- if (refs->cache &&
- !stat_validity_check(&refs->cache->validity, refs->path))
- clear_packed_ref_cache(refs);
+ if (refs->snapshot &&
+ !stat_validity_check(&refs->snapshot->validity, refs->path))
+ clear_snapshot(refs);
}
/*
- * Get the packed_ref_cache for the specified packed_ref_store,
- * creating and populating it if it hasn't been read before or if the
- * file has been changed (according to its `validity` field) since it
- * was last read. On the other hand, if we hold the lock, then assume
- * that the file hasn't been changed out from under us, so skip the
- * extra `stat()` call in `stat_validity_check()`.
+ * Get the `snapshot` for the specified packed_ref_store, creating and
+ * populating it if it hasn't been read before or if the file has been
+ * changed (according to its `validity` field) since it was last read.
+ * On the other hand, if we hold the lock, then assume that the file
+ * hasn't been changed out from under us, so skip the extra `stat()`
+ * call in `stat_validity_check()`. This function does *not* increase
+ * the snapshot's reference count on behalf of the caller.
*/
-static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
+static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
if (!is_lock_file_locked(&refs->lock))
- validate_packed_ref_cache(refs);
+ validate_snapshot(refs);
- if (!refs->cache)
- refs->cache = read_packed_refs(refs->path);
+ if (!refs->snapshot)
+ refs->snapshot = create_snapshot(refs);
- return refs->cache;
-}
-
-static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
-{
- return get_ref_dir(packed_ref_cache->cache->root);
-}
-
-static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
-{
- return get_packed_ref_dir(get_packed_ref_cache(refs));
-}
-
-/*
- * Return the ref_entry for the given refname from the packed
- * references. If it does not exist, return NULL.
- */
-static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
- const char *refname)
-{
- return find_ref_entry(get_packed_refs(refs), refname);
+ return refs->snapshot;
}
static int packed_read_raw_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1,
+ const char *refname, struct object_id *oid,
struct strbuf *referent, unsigned int *type)
{
struct packed_ref_store *refs =
packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
-
- struct ref_entry *entry;
+ struct snapshot *snapshot = get_snapshot(refs);
+ const char *rec;
*type = 0;
- entry = get_packed_ref(refs, refname);
- if (!entry) {
+ rec = find_reference_location(snapshot, refname, 1);
+
+ if (!rec) {
+ /* refname is not a packed reference. */
errno = ENOENT;
return -1;
}
- hashcpy(sha1, entry->u.value.oid.hash);
+ if (get_oid_hex(rec, oid))
+ die_invalid_line(refs->path, rec, snapshot->eof - rec);
+
*type = REF_ISPACKED;
return 0;
}
-static int packed_peel_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1)
-{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
- "peel_ref");
- struct ref_entry *r = get_packed_ref(refs, refname);
-
- if (!r || peel_entry(r, 0))
- return -1;
-
- hashcpy(sha1, r->u.value.peeled.hash);
- return 0;
-}
+/*
+ * This value is set in `base.flags` if the peeled value of the
+ * current reference is known. In that case, `peeled` contains the
+ * correct peeled value for the reference, which might be `null_sha1`
+ * if the reference is not a tag or if it is broken.
+ */
+#define REF_KNOWS_PEELED 0x40
+/*
+ * An iterator over a snapshot of a `packed-refs` file.
+ */
struct packed_ref_iterator {
struct ref_iterator base;
- struct packed_ref_cache *cache;
- struct ref_iterator *iter0;
+ struct snapshot *snapshot;
+
+ /* The current position in the snapshot's buffer: */
+ const char *pos;
+
+ /* The end of the part of the buffer that will be iterated over: */
+ const char *eof;
+
+ /* Scratch space for current values: */
+ struct object_id oid, peeled;
+ struct strbuf refname_buf;
+
unsigned int flags;
};
+/*
+ * Move the iterator to the next record in the snapshot, without
+ * respect for whether the record is actually required by the current
+ * iteration. Adjust the fields in `iter` and return `ITER_OK` or
+ * `ITER_DONE`. This function does not free the iterator in the case
+ * of `ITER_DONE`.
+ */
+static int next_record(struct packed_ref_iterator *iter)
+{
+ const char *p = iter->pos, *eol;
+
+ strbuf_reset(&iter->refname_buf);
+
+ if (iter->pos == iter->eof)
+ return ITER_DONE;
+
+ iter->base.flags = REF_ISPACKED;
+
+ if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
+ parse_oid_hex(p, &iter->oid, &p) ||
+ !isspace(*p++))
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ eol = memchr(p, '\n', iter->eof - p);
+ if (!eol)
+ die_unterminated_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ strbuf_add(&iter->refname_buf, p, eol - p);
+ iter->base.refname = iter->refname_buf.buf;
+
+ if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->base.refname))
+ die("packed refname is dangerous: %s",
+ iter->base.refname);
+ oidclr(&iter->oid);
+ iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+ if (iter->snapshot->peeled == PEELED_FULLY ||
+ (iter->snapshot->peeled == PEELED_TAGS &&
+ starts_with(iter->base.refname, "refs/tags/")))
+ iter->base.flags |= REF_KNOWS_PEELED;
+
+ iter->pos = eol + 1;
+
+ if (iter->pos < iter->eof && *iter->pos == '^') {
+ p = iter->pos + 1;
+ if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
+ parse_oid_hex(p, &iter->peeled, &p) ||
+ *p++ != '\n')
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+ iter->pos = p;
+
+ /*
+ * Regardless of what the file header said, we
+ * definitely know the value of *this* reference. But
+ * we suppress it if the reference is broken:
+ */
+ if ((iter->base.flags & REF_ISBROKEN)) {
+ oidclr(&iter->peeled);
+ iter->base.flags &= ~REF_KNOWS_PEELED;
+ } else {
+ iter->base.flags |= REF_KNOWS_PEELED;
+ }
+ } else {
+ oidclr(&iter->peeled);
+ }
+
+ return ITER_OK;
+}
+
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
int ok;
- while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
+ while ((ok = next_record(iter)) == ITER_OK) {
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
- ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
+ ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
continue;
if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
- !ref_resolves_to_object(iter->iter0->refname,
- iter->iter0->oid,
- iter->iter0->flags))
+ !ref_resolves_to_object(iter->base.refname, &iter->oid,
+ iter->flags))
continue;
- iter->base.refname = iter->iter0->refname;
- iter->base.oid = iter->iter0->oid;
- iter->base.flags = iter->iter0->flags;
return ITER_OK;
}
- iter->iter0 = NULL;
if (ref_iterator_abort(ref_iterator) != ITER_DONE)
ok = ITER_ERROR;
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- return ref_iterator_peel(iter->iter0, peeled);
+ if ((iter->base.flags & REF_KNOWS_PEELED)) {
+ oidcpy(peeled, &iter->peeled);
+ return is_null_oid(&iter->peeled) ? -1 : 0;
+ } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
+ return -1;
+ } else {
+ return !!peel_object(&iter->oid, peeled);
+ }
}
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
(struct packed_ref_iterator *)ref_iterator;
int ok = ITER_DONE;
- if (iter->iter0)
- ok = ref_iterator_abort(iter->iter0);
-
- release_packed_ref_cache(iter->cache);
+ strbuf_release(&iter->refname_buf);
+ release_snapshot(iter->snapshot);
base_ref_iterator_free(ref_iterator);
return ok;
}
static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
struct packed_ref_store *refs;
+ struct snapshot *snapshot;
+ const char *start;
struct packed_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
required_flags |= REF_STORE_ODB;
refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");
+ /*
+ * Note that `get_snapshot()` internally checks whether the
+ * snapshot is up to date with what is on disk, and re-reads
+ * it if not.
+ */
+ snapshot = get_snapshot(refs);
+
+ if (!snapshot->buf)
+ return empty_ref_iterator_begin();
+
iter = xcalloc(1, sizeof(*iter));
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
- /*
- * Note that get_packed_ref_cache() internally checks whether
- * the packed-ref cache is up to date with what is on disk,
- * and re-reads it if not.
- */
+ iter->snapshot = snapshot;
+ acquire_snapshot(snapshot);
- iter->cache = get_packed_ref_cache(refs);
- acquire_packed_ref_cache(iter->cache);
- iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);
+ if (prefix && *prefix)
+ start = find_reference_location(snapshot, prefix, 0);
+ else
+ start = snapshot->buf + snapshot->header_len;
+
+ iter->pos = start;
+ iter->eof = snapshot->eof;
+ strbuf_init(&iter->refname_buf, 0);
+
+ iter->base.oid = &iter->oid;
iter->flags = flags;
+ if (prefix && *prefix)
+ /* Stop iteration after we've gone *past* prefix: */
+ ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);
+
return ref_iterator;
}
return -1;
}
- if (close_lock_file(&refs->lock)) {
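+ /*
+ * Note: if the lockfile cannot be closed, it is still held, so roll
+ * it back explicitly before erroring out:
+ */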
+ if (close_lock_file_gently(&refs->lock)) {
strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
+ rollback_lock_file(&refs->lock);
return -1;
}
/*
* Now that we hold the `packed-refs` lock, make sure that our
- * cache matches the current version of the file. Normally
- * `get_packed_ref_cache()` does that for us, but that
- * function assumes that when the file is locked, any existing
- * cache is still valid. We've just locked the file, but it
- * might have changed the moment *before* we locked it.
+ * snapshot matches the current version of the file. Normally
+ * `get_snapshot()` does that for us, but that function
+ * assumes that when the file is locked, any existing snapshot
+ * is still valid. We've just locked the file, but it might
+ * have changed the moment *before* we locked it.
*/
- validate_packed_ref_cache(refs);
+ validate_snapshot(refs);
/*
* Now make sure that the packed-refs file as it exists in the
- * locked state is loaded into the cache:
+ * locked state is loaded into the snapshot:
*/
- get_packed_ref_cache(refs);
+ get_snapshot(refs);
return 0;
}
}
/*
- * The packed-refs header line that we write out. Perhaps other
- * traits will be added later. The trailing space is required.
+ * The packed-refs header line that we write out. Perhaps other traits
+ * will be added later.
+ *
+ * Note that earlier versions of Git used to parse these traits by
+ * looking for " trait " in the line. For this reason, the space after
+ * the colon and the trailing space are required.
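+ *
+ * Note: the `sorted` trait advertises that the entries in the file
+ * are sorted by refname, which is what allows readers to locate a
+ * reference (e.g., via binary search) without parsing the whole file.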
*/
static const char PACKED_REFS_HEADER[] =
- "# pack-refs with: peeled fully-peeled \n";
+ "# pack-refs with: peeled fully-peeled sorted \n";
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
}
/*
- * Write the packed-refs from the cache to the packed-refs tempfile,
- * incorporating any changes from `updates`. `updates` must be a
- * sorted string list whose keys are the refnames and whose util
+ * Write the packed refs from the current snapshot to the packed-refs
+ * tempfile, incorporating any changes from `updates`. `updates` must
+ * be a sorted string list whose keys are the refnames and whose util
* values are `struct ref_update *`. On error, roll back the tempfile,
* write an error message to `err`, and return a nonzero value.
*
packed_refs_path = get_locked_file_path(&refs->lock);
strbuf_addf(&sb, "%s.new", packed_refs_path);
free(packed_refs_path);
- if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
+ refs->tempfile = create_tempfile(sb.buf);
+ if (!refs->tempfile) {
strbuf_addf(err, "unable to create file %s: %s",
sb.buf, strerror(errno));
strbuf_release(&sb);
}
strbuf_release(&sb);
- out = fdopen_tempfile(&refs->tempfile, "w");
+ out = fdopen_tempfile(refs->tempfile, "w");
if (!out) {
strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
strerror(errno));
i++;
} else {
struct object_id peeled;
- int peel_error = peel_object(update->new_oid.hash,
- peeled.hash);
+ int peel_error = peel_object(&update->new_oid,
+ &peeled);
if (write_packed_entry(out, update->refname,
update->new_oid.hash,
}
if (ok != ITER_DONE) {
- strbuf_addf(err, "unable to write packed-refs file: "
- "error iterating over old contents");
+ strbuf_addstr(err, "unable to write packed-refs file: "
+ "error iterating over old contents");
goto error;
}
- if (close_tempfile(&refs->tempfile)) {
+ if (close_tempfile_gently(refs->tempfile)) {
strbuf_addf(err, "error closing file %s: %s",
- get_tempfile_path(&refs->tempfile),
+ get_tempfile_path(refs->tempfile),
strerror(errno));
strbuf_release(&sb);
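+ /*
+ * Note: `close_tempfile_gently()` leaves the tempfile active on
+ * failure, so delete it explicitly before erroring out:
+ */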
+ delete_tempfile(&refs->tempfile);
return -1;
}
write_error:
strbuf_addf(err, "error writing to %s: %s",
- get_tempfile_path(&refs->tempfile), strerror(errno));
+ get_tempfile_path(refs->tempfile), strerror(errno));
error:
if (iter)
return -1;
}
+ int is_packed_transaction_needed(struct ref_store *ref_store,
+ struct ref_transaction *transaction)
+ {
+ struct packed_ref_store *refs = packed_downcast(
+ ref_store,
+ REF_STORE_READ,
+ "is_packed_transaction_needed");
+ struct strbuf referent = STRBUF_INIT;
+ size_t i;
+ int ret;
+
+ if (!is_lock_file_locked(&refs->lock))
+ BUG("is_packed_transaction_needed() called while unlocked");
+
+ /*
+ * We're only going to bother returning false for the common,
+ * trivial case that references are only being deleted, their
+ * old values are not being checked, and the old `packed-refs`
+ * file doesn't contain any of those reference(s). This gives
+ * false positives for some other cases that could
+ * theoretically be optimized away:
+ *
+ * 1. It could be that the old value is being verified without
+ * setting a new value. In this case, we could verify the
+ * old value here and skip the update if it agrees. If it
+ * disagrees, we could either let the update go through
+ * (the actual commit would re-detect and report the
+ * problem), or come up with a way of reporting such an
+ * error to *our* caller.
+ *
+ * 2. It could be that a new value is being set, but that it
+ * is identical to the current packed value of the
+ * reference.
+ *
+ * Neither of these cases will come up in the current code,
+ * because the only caller of this function passes to it a
+ * transaction that only includes `delete` updates with no
+ * `old_id`. Even if that ever changes, false positives only
+ * cause an optimization to be missed; they do not affect
+ * correctness.
+ */
+
+ /*
+ * Start with the cheap checks that don't require old
+ * reference values to be read:
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+
+ if (update->flags & REF_HAVE_OLD)
+ /* Have to check the old value -> needed. */
+ return 1;
+
+ if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
+ /* Have to set a new value -> needed. */
+ return 1;
+ }
+
+ /*
+ * The transaction isn't checking any old values nor is it
+ * setting any nonzero new values, so it still might be able
+ * to be skipped. Now do the more expensive check: the update
+ * is needed if any of the updates is a delete, and the old
+ * `packed-refs` file contains a value for that reference.
+ */
+ ret = 0;
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+ unsigned int type;
+ struct object_id oid;
+
+ if (!(update->flags & REF_HAVE_NEW))
+ /*
+ * This reference isn't being deleted -> not
+ * needed.
+ */
+ continue;
+
+ if (!refs_read_raw_ref(ref_store, update->refname,
+ &oid, &referent, &type) ||
+ errno != ENOENT) {
+ /*
+ * We have to actually delete that reference
+ * -> this transaction is needed.
+ */
+ ret = 1;
+ break;
+ }
+ }
+
+ strbuf_release(&referent);
+ return ret;
+ }
+
struct packed_transaction_backend_data {
/* True iff the transaction owns the packed-refs lock. */
int own_lock;
if (data) {
string_list_clear(&data->updates, 0);
- if (is_tempfile_active(&refs->tempfile))
+ if (is_tempfile_active(refs->tempfile))
delete_tempfile(&refs->tempfile);
if (data->own_lock && is_lock_file_locked(&refs->lock)) {
/*
* Note that we *don't* skip transactions with zero updates,
* because such a transaction might be executed for the side
- * effect of ensuring that all of the references are peeled.
- * If the caller wants to optimize away empty transactions, it
- * should do so itself.
+ * effect of ensuring that all of the references are peeled or
+ * ensuring that the `packed-refs` file is sorted. If the
+ * caller wants to optimize away empty transactions, it should
+ * do so itself.
*/
data = xcalloc(1, sizeof(*data));
int ret = TRANSACTION_GENERIC_ERROR;
char *packed_refs_path;
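+ /*
+ * Note: the snapshot is now cleared *before* the rename (rather than
+ * after it, as the removed call below did), presumably because it may
+ * hold an mmap of the old `packed-refs` file, which on some platforms
+ * would prevent the file from being replaced:
+ */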
+ clear_snapshot(refs);
+
packed_refs_path = get_locked_file_path(&refs->lock);
if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
strbuf_addf(err, "error replacing %s: %s",
goto cleanup;
}
- clear_packed_ref_cache(refs);
ret = 0;
cleanup:
die("BUG: packed reference store does not support renaming references");
}
+static int packed_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ die("BUG: packed reference store does not support copying references");
+}
+
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
return empty_ref_iterator_begin();
}
static int packed_reflog_expire(struct ref_store *ref_store,
- const char *refname, const unsigned char *sha1,
+ const char *refname, const struct object_id *oid,
unsigned int flags,
reflog_expiry_prepare_fn prepare_fn,
reflog_expiry_should_prune_fn should_prune_fn,
packed_initial_transaction_commit,
packed_pack_refs,
- packed_peel_ref,
packed_create_symref,
packed_delete_refs,
packed_rename_ref,
+ packed_copy_ref,
packed_ref_iterator_begin,
packed_read_raw_ref,