}
struct packed_ref_cache {
- struct ref_entry *root;
+ struct ref_cache *cache;
/*
* Count of references to the data structure in this instance,
char *gitcommondir;
char *packed_refs_path;
- struct ref_entry *loose;
+ struct ref_cache *loose;
struct packed_ref_cache *packed;
};
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
if (!--packed_refs->referrers) {
- free_ref_entry(packed_refs->root);
+ free_ref_cache(packed_refs->cache);
stat_validity_clear(&packed_refs->validity);
free(packed_refs);
return 1;
static void clear_loose_ref_cache(struct files_ref_store *refs)
{
if (refs->loose) {
- free_ref_entry(refs->loose);
+ free_ref_cache(refs->loose);
refs->loose = NULL;
}
}
refs->packed = xcalloc(1, sizeof(*refs->packed));
acquire_packed_ref_cache(refs->packed);
- refs->packed->root = create_dir_entry(refs, "", 0, 0);
+ refs->packed->cache = create_ref_cache(&refs->base, NULL);
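+ /*
+ * The packed-refs cache has no fill function; the packed-refs
+ * file is read in full just below, so mark the root entry
+ * complete:
+ */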
+ refs->packed->cache->root->flag &= ~REF_INCOMPLETE;
f = fopen(packed_refs_file, "r");
if (f) {
stat_validity_update(&refs->packed->validity, fileno(f));
- read_packed_refs(f, get_ref_dir(refs->packed->root));
+ read_packed_refs(f, get_ref_dir(refs->packed->cache->root));
fclose(f);
}
}
static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
- return get_ref_dir(packed_ref_cache->root);
+ return get_ref_dir(packed_ref_cache->cache->root);
}
static struct ref_dir *get_packed_refs(struct files_ref_store *refs)
* (without recursing). dirname must end with '/'. dir must be the
* directory entry corresponding to dirname.
*/
-void read_loose_refs(const char *dirname, struct ref_dir *dir)
+static void loose_fill_ref_dir(struct ref_store *ref_store,
+ struct ref_dir *dir, const char *dirname)
{
- struct files_ref_store *refs = dir->ref_store;
+ struct files_ref_store *refs =
+ files_downcast(ref_store, REF_STORE_READ, "fill_ref_dir");
DIR *d;
struct dirent *de;
int dirnamelen = strlen(dirname);
} else if (S_ISDIR(st.st_mode)) {
strbuf_addch(&refname, '/');
add_entry_to_dir(dir,
- create_dir_entry(refs, refname.buf,
+ create_dir_entry(dir->cache, refname.buf,
refname.len, 1));
} else {
if (!refs_resolve_ref_unsafe(&refs->base,
strbuf_release(&refname);
strbuf_release(&path);
closedir(d);
+
+ /*
+ * Manually add refs/bisect, which, being per-worktree, might
+ * not appear in the directory listing for refs/ in the main
+ * repo.
+ */
+ if (!strcmp(dirname, "refs/")) {
+ int pos = search_ref_dir(dir, "refs/bisect/", 12);
+
+ if (pos < 0) {
+ struct ref_entry *child_entry = create_dir_entry(
+ dir->cache, "refs/bisect/", 12, 1);
+ add_entry_to_dir(dir, child_entry);
+ }
+ }
}
-static struct ref_dir *get_loose_refs(struct files_ref_store *refs)
+static struct ref_cache *get_loose_ref_cache(struct files_ref_store *refs)
{
if (!refs->loose) {
/*
* are about to read the only subdirectory that can
* hold references:
*/
- refs->loose = create_dir_entry(refs, "", 0, 0);
+ refs->loose = create_ref_cache(&refs->base, loose_fill_ref_dir);
+
+ /* We're going to fill the top level ourselves: */
+ refs->loose->root->flag &= ~REF_INCOMPLETE;
+
/*
- * Create an incomplete entry for "refs/":
+ * Add an incomplete entry for "refs/" (to be filled
+ * lazily):
*/
- add_entry_to_dir(get_ref_dir(refs->loose),
- create_dir_entry(refs, "refs/", 5, 1));
+ add_entry_to_dir(get_ref_dir(refs->loose->root),
+ create_dir_entry(refs->loose, "refs/", 5, 1));
}
- return get_ref_dir(refs->loose);
+ return refs->loose;
+}
+
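+/* Return the ref_dir at the root of this ref_store's loose reference cache. */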
+static struct ref_dir *get_loose_ref_dir(struct files_ref_store *refs)
+{
+ return get_ref_dir(get_loose_ref_cache(refs)->root);
}
/*
const char *prefix, unsigned int flags)
{
struct files_ref_store *refs;
- struct ref_dir *loose_dir, *packed_dir;
struct ref_iterator *loose_iter, *packed_iter;
struct files_ref_iterator *iter;
struct ref_iterator *ref_iterator;
* condition if loose refs are migrated to the packed-refs
* file by a simultaneous process, but our in-memory view is
* from before the migration. We ensure this as follows:
- * First, we call prime_ref_dir(), which pre-reads the loose
- * references for the subtree into the cache. (If they've
- * already been read, that's OK; we only need to guarantee
- * that they're read before the packed refs, not *how much*
- * before.) After that, we call get_packed_ref_cache(), which
- * internally checks whether the packed-ref cache is up to
- * date with what is on disk, and re-reads it if not.
+ * First, we start the loose refs iteration with its
+ * `prime_ref` argument set to true. This causes the loose
+ * references in the subtree to be pre-read into the cache.
+ * (If they've already been read, that's OK; we only need to
+ * guarantee that they're read before the packed refs, not
+ * *how much* before.) After that, we call
+ * get_packed_ref_cache(), which internally checks whether the
+ * packed-ref cache is up to date with what is on disk, and
+ * re-reads it if not.
*/
- loose_dir = get_loose_refs(refs);
-
- if (prefix && *prefix)
- loose_dir = find_containing_dir(loose_dir, prefix, 0);
-
- if (loose_dir) {
- prime_ref_dir(loose_dir);
- loose_iter = cache_ref_iterator_begin(loose_dir);
- } else {
- /* There's nothing to iterate over. */
- loose_iter = empty_ref_iterator_begin();
- }
+ loose_iter = cache_ref_iterator_begin(get_loose_ref_cache(refs),
+ prefix, 1);
iter->packed_ref_cache = get_packed_ref_cache(refs);
acquire_packed_ref_cache(iter->packed_ref_cache);
- packed_dir = get_packed_ref_dir(iter->packed_ref_cache);
-
- if (prefix && *prefix)
- packed_dir = find_containing_dir(packed_dir, prefix, 0);
-
- if (packed_dir) {
- packed_iter = cache_ref_iterator_begin(packed_dir);
- } else {
- /* There's nothing to iterate over. */
- packed_iter = empty_ref_iterator_begin();
- }
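+ /*
+ * The packed-refs cache was read in full when it was loaded,
+ * so there is no need to prime it here:
+ */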
+ packed_iter = cache_ref_iterator_begin(iter->packed_ref_cache->cache,
+ prefix, 0);
iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
iter->flags = flags;
* Write an entry to the packed-refs file for the specified refname.
* If peeled is non-NULL, write it as the entry's peeled value.
*/
-static void write_packed_entry(FILE *fh, char *refname, unsigned char *sha1,
- unsigned char *peeled)
+static void write_packed_entry(FILE *fh, const char *refname,
+ const unsigned char *sha1,
+ const unsigned char *peeled)
{
fprintf_or_die(fh, "%s %s\n", sha1_to_hex(sha1), refname);
if (peeled)
fprintf_or_die(fh, "^%s\n", sha1_to_hex(peeled));
}
-/*
- * An each_ref_entry_fn that writes the entry to a packed-refs file.
- */
-static int write_packed_entry_fn(struct ref_entry *entry, void *cb_data)
-{
- enum peel_status peel_status = peel_entry(entry, 0);
-
- if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
- error("internal error: %s is not a valid packed reference!",
- entry->name);
- write_packed_entry(cb_data, entry->name, entry->u.value.oid.hash,
- peel_status == PEEL_PEELED ?
- entry->u.value.peeled.hash : NULL);
- return 0;
-}
-
/*
* Lock the packed-refs file for writing. Flags is passed to
* hold_lock_file_for_update(). Return 0 on success. On errors, set
{
struct packed_ref_cache *packed_ref_cache =
get_packed_ref_cache(refs);
- int error = 0;
+ int ok, error = 0;
int save_errno = 0;
FILE *out;
+ struct ref_iterator *iter;
files_assert_main_repository(refs, "commit_packed_refs");
die_errno("unable to fdopen packed-refs descriptor");
fprintf_or_die(out, "%s", PACKED_REFS_HEADER);
- do_for_each_entry_in_dir(get_packed_ref_dir(packed_ref_cache),
- 0, write_packed_entry_fn, out);
+
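+ /*
+ * Write out every entry from the packed-refs cache (a NULL
+ * prefix iterates over all references):
+ */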
+ iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
+ while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
+ struct object_id peeled;
+ int peel_error = ref_iterator_peel(iter, &peeled);
+
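+ /* Record the peeled value only if peeling succeeded: */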
+ write_packed_entry(out, iter->refname, iter->oid->hash,
+ peel_error ? NULL : peeled.hash);
+ }
+
+ if (ok != ITER_DONE)
+ die("error while iterating over references");
if (commit_lock_file(packed_ref_cache->lock)) {
save_errno = errno;
lock_packed_refs(refs, LOCK_DIE_ON_ERROR);
cbdata.packed_refs = get_packed_refs(refs);
- do_for_each_entry_in_dir(get_loose_refs(refs), 0,
+ do_for_each_entry_in_dir(get_loose_ref_dir(refs),
pack_if_possible_fn, &cbdata);
if (commit_packed_refs(refs))