 	return ret;
 }
 
-static int files_peel_ref(struct ref_store *ref_store,
-			  const char *refname, unsigned char *sha1)
-{
-	struct files_ref_store *refs =
-		files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
-			       "peel_ref");
-	int flag;
-	unsigned char base[20];
-
-	if (current_ref_iter && current_ref_iter->refname == refname) {
-		struct object_id peeled;
-
-		if (ref_iterator_peel(current_ref_iter, &peeled))
-			return -1;
-		hashcpy(sha1, peeled.hash);
-		return 0;
-	}
-
-	if (refs_read_ref_full(ref_store, refname,
-			       RESOLVE_REF_READING, base, &flag))
-		return -1;
-
-	/*
-	 * If the reference is packed, read its ref_entry from the
-	 * cache in the hope that we already know its peeled value.
-	 * We only try this optimization on packed references because
-	 * (a) forcing the filling of the loose reference cache could
-	 * be expensive and (b) loose references anyway usually do not
-	 * have REF_KNOWS_PEELED.
-	 */
-	if (flag & REF_ISPACKED &&
-	    !refs_peel_ref(refs->packed_ref_store, refname, sha1))
-		return 0;
-
-	return peel_object(base, sha1);
-}
-
 struct files_ref_iterator {
 	struct ref_iterator base;
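The deleted shortcut above relied on the packed-refs file's habit of caching peeled values. For reference, a "fully-peeled" packed-refs file records each annotated tag's peeled object on a `^` continuation line, which is why the packed store can often answer a peel query without consulting the object database. An illustrative excerpt, with placeholder names rather than real object IDs:

    # pack-refs with: peeled fully-peeled
    <sha1-of-tag-object> refs/tags/v1.0
    ^<sha1-of-object-the-tag-points-to>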
@@ ... @@ static struct ref_iterator *files_ref_iterator_begin(
 		const char *prefix, unsigned int flags)
 {
 	struct files_ref_store *refs;
-	struct ref_iterator *loose_iter, *packed_iter;
+	struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
 	struct files_ref_iterator *iter;
 	struct ref_iterator *ref_iterator;
 	unsigned int required_flags = REF_STORE_READ;
@@ ... @@
 	refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
 
-	iter = xcalloc(1, sizeof(*iter));
-	ref_iterator = &iter->base;
-	base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
-
 	/*
 	 * We must make sure that all loose refs are read before
 	 * accessing the packed-refs file; this avoids a race
@@ ... @@
 			refs->packed_ref_store, prefix, 0,
 			DO_FOR_EACH_INCLUDE_BROKEN);
 
-	iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+	overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
+
+	iter = xcalloc(1, sizeof(*iter));
+	ref_iterator = &iter->base;
+	base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
+			       overlay_iter->ordered);
+	iter->iter0 = overlay_iter;
 	iter->flags = flags;
 
 	return ref_iterator;
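The new third argument to `base_ref_iterator_init()` declares whether the iterator emits refs in sorted order. The loose and packed iterators are each ordered, and an overlay of two ordered streams stays ordered, so `overlay_iter->ordered` can be forwarded as-is. A minimal sketch of that invariant, using plain sorted arrays rather than git's iterator machinery (illustrative only, not the actual overlay_ref_iterator code):

#include <stdio.h>
#include <string.h>

/*
 * Merge two sorted refname lists, letting "loose" entries shadow
 * "packed" entries with the same name.  Because both inputs are
 * ordered, the overlaid output is ordered too, with no sorting pass.
 */
static void overlay(const char **loose, int nl, const char **packed, int np)
{
	int i = 0, j = 0;

	while (i < nl || j < np) {
		int cmp = (i == nl) ? 1 :
			  (j == np) ? -1 : strcmp(loose[i], packed[j]);

		if (cmp < 0)
			puts(loose[i++]);
		else if (cmp > 0)
			puts(packed[j++]);
		else {
			puts(loose[i++]);	/* loose wins */
			j++;			/* skip shadowed packed entry */
		}
	}
}

int main(void)
{
	const char *loose[] = { "refs/heads/main", "refs/tags/v2.0" };
	const char *packed[] = { "refs/heads/main", "refs/tags/v1.0" };

	overlay(loose, 2, packed, 2);	/* output remains sorted */
	return 0;
}

This comparison-driven advance is roughly what the real overlay iterator does one entry at a time, which is what makes it worth tracking orderedness in the first place.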
@@ ... @@ static struct ref_iterator *files_reflog_iterator_begin(
 	struct ref_iterator *ref_iterator = &iter->base;
 	struct strbuf sb = STRBUF_INIT;
 
-	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+	base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
 	files_reflog_path(refs, &sb, NULL);
 	iter->dir_iterator = dir_iterator_begin(sb.buf);
 	iter->ref_store = ref_store;
@@ ... @@
 		}
 	}
 
+	/*
+	 * Now that updates are safely completed, we can perform
+	 * deletes. First delete the reflogs of any references that
+	 * will be deleted, since (in the unexpected event of an
+	 * error) leaving a reference without a reflog is less bad
+	 * than leaving a reflog without a reference (the latter is a
+	 * mildly invalid repository state):
+	 */
+	for (i = 0; i < transaction->nr; i++) {
+		struct ref_update *update = transaction->updates[i];
+		if (update->flags & REF_DELETING &&
+		    !(update->flags & REF_LOG_ONLY) &&
+		    !(update->flags & REF_ISPRUNING)) {
+			strbuf_reset(&sb);
+			files_reflog_path(refs, &sb, update->refname);
+			if (!unlink_or_warn(sb.buf))
+				try_remove_empty_parents(refs, update->refname,
+							 REMOVE_EMPTY_PARENTS_REFLOG);
+		}
+	}
+
 	/*
 	 * Perform deletes now that updates are safely completed.
 	 *
@@ ... @@
 		}
 	}
 
-	/* Delete the reflogs of any references that were deleted: */
-	for (i = 0; i < transaction->nr; i++) {
-		struct ref_update *update = transaction->updates[i];
-		if (update->flags & REF_DELETING &&
-		    !(update->flags & REF_LOG_ONLY) &&
-		    !(update->flags & REF_ISPRUNING)) {
-			strbuf_reset(&sb);
-			files_reflog_path(refs, &sb, update->refname);
-			if (!unlink_or_warn(sb.buf))
-				try_remove_empty_parents(refs, update->refname,
-							 REMOVE_EMPTY_PARENTS_REFLOG);
-		}
-	}
-
 	clear_loose_ref_cache(refs);
 
 cleanup:
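Moving the reflog loop ahead of the ref-deletion loop is a crash-ordering fix: an interruption between the two phases can now leave at most a reference without its reflog, which is valid if unusual, instead of a reflog without its reference. A toy sketch of the same delete-the-dependent-first discipline, with a hypothetical helper and hard-coded paths rather than git's actual layout code:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical example, not git code: remove the "log" file before
 * the "ref" file it describes, so an interruption between the two
 * unlinks can only leave a ref without a log, never a dangling log.
 */
static int delete_ref_with_log(const char *ref_path, const char *log_path)
{
	/* A missing log is fine; any other failure aborts early... */
	if (unlink(log_path) && errno != ENOENT) {
		perror(log_path);
		return -1;	/* ...while the ref itself is still intact */
	}

	/* A crash here leaves a ref without a reflog: valid, recoverable. */

	if (unlink(ref_path)) {
		perror(ref_path);
		return -1;
	}
	return 0;
}

int main(void)
{
	return !!delete_ref_with_log(".git/refs/heads/topic",
				     ".git/logs/refs/heads/topic");
}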
@@ ... @@ struct ref_storage_be refs_be_files = {
 	files_initial_transaction_commit,
 	files_pack_refs,
-	files_peel_ref,
 	files_create_symref,
 	files_delete_refs,
 	files_rename_ref,