add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
+void fill_stat_data(struct stat_data *sd, struct stat *st)
+{
+ sd->sd_ctime.sec = (unsigned int)st->st_ctime;
+ sd->sd_mtime.sec = (unsigned int)st->st_mtime;
+ sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
+ sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
+ sd->sd_dev = st->st_dev;
+ sd->sd_ino = st->st_ino;
+ sd->sd_uid = st->st_uid;
+ sd->sd_gid = st->st_gid;
+ sd->sd_size = st->st_size;
+}
+
+int match_stat_data(const struct stat_data *sd, struct stat *st)
+{
+ int changed = 0;
+
+ if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
+ changed |= MTIME_CHANGED;
+ if (trust_ctime && check_stat &&
+ sd->sd_ctime.sec != (unsigned int)st->st_ctime)
+ changed |= CTIME_CHANGED;
+
+#ifdef USE_NSEC
+ if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
+ changed |= MTIME_CHANGED;
+ if (trust_ctime && check_stat &&
+ sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
+ changed |= CTIME_CHANGED;
+#endif
+
+ if (check_stat) {
+ if (sd->sd_uid != (unsigned int) st->st_uid ||
+ sd->sd_gid != (unsigned int) st->st_gid)
+ changed |= OWNER_CHANGED;
+ if (sd->sd_ino != (unsigned int) st->st_ino)
+ changed |= INODE_CHANGED;
+ }
+
+#ifdef USE_STDEV
+ /*
+ * st_dev breaks on network filesystems where different
+ * clients will have different views of what "device"
+ * the filesystem is on
+ */
+ if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
+ changed |= INODE_CHANGED;
+#endif
+
+ if (sd->sd_size != (unsigned int) st->st_size)
+ changed |= DATA_CHANGED;
+
+ return changed;
+}
+
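
A minimal usage sketch of the new fill_stat_data()/match_stat_data() pair (the wrapper functions, the "path" argument, and the file-scope "saved" variable are illustrative, not part of the patch): record a file's stat information once, then compare it against a fresh lstat() later to decide whether the file may have changed.

static struct stat_data saved;

/* Remember the current stat information for a later comparison. */
static void remember_path(const char *path)
{
	struct stat st;

	if (!lstat(path, &st))
		fill_stat_data(&saved, &st);
}

/* Return nonzero if the file's metadata no longer matches what was saved. */
static int path_may_have_changed(const char *path)
{
	struct stat st;

	if (lstat(path, &st))
		return 1;	/* missing or unreadable: treat as changed */
	return match_stat_data(&saved, &st) != 0;
}
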
/*
* This only updates the "non-critical" parts of the directory
* cache, ie the parts that aren't tracked by GIT, and only used
* to validate the cache.
*/
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
- ce->ce_ctime.sec = (unsigned int)st->st_ctime;
- ce->ce_mtime.sec = (unsigned int)st->st_mtime;
- ce->ce_ctime.nsec = ST_CTIME_NSEC(*st);
- ce->ce_mtime.nsec = ST_MTIME_NSEC(*st);
- ce->ce_dev = st->st_dev;
- ce->ce_ino = st->st_ino;
- ce->ce_uid = st->st_uid;
- ce->ce_gid = st->st_gid;
- ce->ce_size = st->st_size;
+ fill_stat_data(&ce->ce_stat_data, st);
if (assume_unchanged)
ce->ce_flags |= CE_VALID;
default:
die("internal error: ce_mode is %o", ce->ce_mode);
}
- if (ce->ce_mtime.sec != (unsigned int)st->st_mtime)
- changed |= MTIME_CHANGED;
- if (trust_ctime && check_stat &&
- ce->ce_ctime.sec != (unsigned int)st->st_ctime)
- changed |= CTIME_CHANGED;
-
-#ifdef USE_NSEC
- if (check_stat && ce->ce_mtime.nsec != ST_MTIME_NSEC(*st))
- changed |= MTIME_CHANGED;
- if (trust_ctime && check_stat &&
- ce->ce_ctime.nsec != ST_CTIME_NSEC(*st))
- changed |= CTIME_CHANGED;
-#endif
-
- if (check_stat) {
- if (ce->ce_uid != (unsigned int) st->st_uid ||
- ce->ce_gid != (unsigned int) st->st_gid)
- changed |= OWNER_CHANGED;
- if (ce->ce_ino != (unsigned int) st->st_ino)
- changed |= INODE_CHANGED;
- }
-#ifdef USE_STDEV
- /*
- * st_dev breaks on network filesystems where different
- * clients will have different views of what "device"
- * the filesystem is on
- */
- if (check_stat && ce->ce_dev != (unsigned int) st->st_dev)
- changed |= INODE_CHANGED;
-#endif
-
- if (ce->ce_size != (unsigned int) st->st_size)
- changed |= DATA_CHANGED;
+ changed |= match_stat_data(&ce->ce_stat_data, st);
/* Racily smudged entry? */
- if (!ce->ce_size) {
+ if (!ce->ce_stat_data.sd_size) {
if (!is_empty_blob_sha1(ce->sha1))
changed |= DATA_CHANGED;
}
istate->timestamp.sec &&
#ifdef USE_NSEC
/* nanosecond timestamped files can also be racy! */
- (istate->timestamp.sec < ce->ce_mtime.sec ||
- (istate->timestamp.sec == ce->ce_mtime.sec &&
- istate->timestamp.nsec <= ce->ce_mtime.nsec))
+ (istate->timestamp.sec < ce->ce_stat_data.sd_mtime.sec ||
+ (istate->timestamp.sec == ce->ce_stat_data.sd_mtime.sec &&
+ istate->timestamp.nsec <= ce->ce_stat_data.sd_mtime.nsec))
#else
- istate->timestamp.sec <= ce->ce_mtime.sec
+ istate->timestamp.sec <= ce->ce_stat_data.sd_mtime.sec
#endif
);
}
* then we know it is.
*/
if ((changed & DATA_CHANGED) &&
- (S_ISGITLINK(ce->ce_mode) || ce->ce_size != 0))
+ (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
return changed;
changed_fs = ce_modified_check_fs(ce, st);
{
struct cache_entry *ce = xmalloc(cache_entry_size(len));
- ce->ce_ctime.sec = ntoh_l(ondisk->ctime.sec);
- ce->ce_mtime.sec = ntoh_l(ondisk->mtime.sec);
- ce->ce_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
- ce->ce_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
- ce->ce_dev = ntoh_l(ondisk->dev);
- ce->ce_ino = ntoh_l(ondisk->ino);
+ ce->ce_stat_data.sd_ctime.sec = ntoh_l(ondisk->ctime.sec);
+ ce->ce_stat_data.sd_mtime.sec = ntoh_l(ondisk->mtime.sec);
+ ce->ce_stat_data.sd_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
+ ce->ce_stat_data.sd_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
+ ce->ce_stat_data.sd_dev = ntoh_l(ondisk->dev);
+ ce->ce_stat_data.sd_ino = ntoh_l(ondisk->ino);
ce->ce_mode = ntoh_l(ondisk->mode);
- ce->ce_uid = ntoh_l(ondisk->uid);
- ce->ce_gid = ntoh_l(ondisk->gid);
- ce->ce_size = ntoh_l(ondisk->size);
+ ce->ce_stat_data.sd_uid = ntoh_l(ondisk->uid);
+ ce->ce_stat_data.sd_gid = ntoh_l(ondisk->gid);
+ ce->ce_stat_data.sd_size = ntoh_l(ondisk->size);
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
hashcpy(ce->sha1, ondisk->sha1);
* The only thing we care about in this function is to smudge the
* falsely clean entry due to touch-update-touch race, so we leave
* everything else as they are. We are called for entries whose
- * ce_mtime match the index file mtime.
+ * ce_stat_data.sd_mtime match the index file mtime.
*
* Note that this actually does not do much for gitlinks, for
* which ce_match_stat_basic() always goes to the actual
* file, and never calls us, so the cached size information
* for "frotz" stays 6 which does not match the filesystem.
*/
- ce->ce_size = 0;
+ ce->ce_stat_data.sd_size = 0;
}
}
{
short flags;
- ondisk->ctime.sec = htonl(ce->ce_ctime.sec);
- ondisk->mtime.sec = htonl(ce->ce_mtime.sec);
- ondisk->ctime.nsec = htonl(ce->ce_ctime.nsec);
- ondisk->mtime.nsec = htonl(ce->ce_mtime.nsec);
- ondisk->dev = htonl(ce->ce_dev);
- ondisk->ino = htonl(ce->ce_ino);
+ ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
+ ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
+ ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
+ ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
+ ondisk->dev = htonl(ce->ce_stat_data.sd_dev);
+ ondisk->ino = htonl(ce->ce_stat_data.sd_ino);
ondisk->mode = htonl(ce->ce_mode);
- ondisk->uid = htonl(ce->ce_uid);
- ondisk->gid = htonl(ce->ce_gid);
- ondisk->size = htonl(ce->ce_size);
+ ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
+ ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
+ ondisk->size = htonl(ce->ce_stat_data.sd_size);
hashcpy(ondisk->sha1, ce->sha1);
flags = ce->ce_flags;
*size = sz;
return data;
}
+
+void stat_validity_clear(struct stat_validity *sv)
+{
+ free(sv->sd);
+ sv->sd = NULL;
+}
+
+int stat_validity_check(struct stat_validity *sv, const char *path)
+{
+ struct stat st;
+
+ if (stat(path, &st) < 0)
+ return sv->sd == NULL;
+ if (!sv->sd)
+ return 0;
+ return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
+}
+
+void stat_validity_update(struct stat_validity *sv, int fd)
+{
+ struct stat st;
+
+ if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
+ stat_validity_clear(sv);
+ else {
+ if (!sv->sd)
+ sv->sd = xcalloc(1, sizeof(struct stat_data));
+ fill_stat_data(sv->sd, &st);
+ }
+}
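
A sketch of the intended stat_validity pattern, mirroring how get_packed_ref_cache() in refs.c uses it below (the function, the "path" argument, and the file-scope variable here are illustrative, not part of the patch): check validity before trusting a cache, and refresh the recorded stat data from the open file whenever the cache is rebuilt.

static struct stat_validity cache_validity;

static void reload_cache_if_changed(const char *path)
{
	FILE *f;

	if (stat_validity_check(&cache_validity, path))
		return;		/* file still matches what we last read */

	/* Discard the stale cache, then re-read the file if it exists. */
	stat_validity_clear(&cache_validity);
	f = fopen(path, "r");
	if (f) {
		stat_validity_update(&cache_validity, fileno(f));
		/* ... parse f into the in-memory cache ... */
		fclose(f);
	}
}
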
}
}
+/*
+ * Load all of the refs from the dir into our in-memory cache. The hard work
+ * of loading loose refs is done by get_ref_dir(), so we just need to recurse
+ * through all of the sub-directories. We do not even need to care about
+ * sorting, as traversal order does not matter to us.
+ */
+static void prime_ref_dir(struct ref_dir *dir)
+{
+ int i;
+ for (i = 0; i < dir->nr; i++) {
+ struct ref_entry *entry = dir->entries[i];
+ if (entry->flag & REF_DIR)
+ prime_ref_dir(get_ref_dir(entry));
+ }
+}
/*
* Return true iff refname1 and refname2 conflict with each other.
* Two reference names conflict if one of them exactly matches the
return 1;
}
+struct packed_ref_cache {
+ struct ref_entry *root;
+
+ /*
+ * Count of references to the data structure in this instance,
+ * including the pointer from ref_cache::packed if any. The
+ * data will not be freed as long as the reference count is
+ * nonzero.
+ */
+ unsigned int referrers;
+
+ /*
+ * Iff the packed-refs file associated with this instance is
+ * currently locked for writing, this points at the associated
+ * lock (which is owned by somebody else). The referrer count
+ * is also incremented when the file is locked and decremented
+ * when it is unlocked.
+ */
+ struct lock_file *lock;
+
+ /* The metadata from when this packed-refs cache was read */
+ struct stat_validity validity;
+};
+
/*
* Future: need to be in "struct repository"
* when doing a full libification.
static struct ref_cache {
struct ref_cache *next;
struct ref_entry *loose;
- struct ref_entry *packed;
+ struct packed_ref_cache *packed;
/*
* The submodule name, or "" for the main repo. We allocate
* length 1 rather than FLEX_ARRAY so that the main ref_cache
char name[1];
} ref_cache, *submodule_ref_caches;
+/* Lock used for the main packed-refs file: */
+static struct lock_file packlock;
+
+/*
+ * Increment the reference count of *packed_refs.
+ */
+static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
+{
+ packed_refs->referrers++;
+}
+
+/*
+ * Decrease the reference count of *packed_refs. If it goes to zero,
+ * free *packed_refs and return true; otherwise return false.
+ */
+static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
+{
+ if (!--packed_refs->referrers) {
+ free_ref_entry(packed_refs->root);
+ stat_validity_clear(&packed_refs->validity);
+ free(packed_refs);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
static void clear_packed_ref_cache(struct ref_cache *refs)
{
if (refs->packed) {
- free_ref_entry(refs->packed);
+ struct packed_ref_cache *packed_refs = refs->packed;
+
+ if (packed_refs->lock)
+ die("internal error: packed-ref cache cleared while locked");
refs->packed = NULL;
+ release_packed_ref_cache(packed_refs);
}
}
}
}
-static struct ref_dir *get_packed_refs(struct ref_cache *refs)
+/*
+ * Get the packed_ref_cache for the specified ref_cache, creating it
+ * if necessary.
+ */
+static struct packed_ref_cache *get_packed_ref_cache(struct ref_cache *refs)
{
+ const char *packed_refs_file;
+
+ if (*refs->name)
+ packed_refs_file = git_path_submodule(refs->name, "packed-refs");
+ else
+ packed_refs_file = git_path("packed-refs");
+
+ if (refs->packed &&
+ !stat_validity_check(&refs->packed->validity, packed_refs_file))
+ clear_packed_ref_cache(refs);
+
if (!refs->packed) {
- const char *packed_refs_file;
FILE *f;
- refs->packed = create_dir_entry(refs, "", 0, 0);
- if (*refs->name)
- packed_refs_file = git_path_submodule(refs->name, "packed-refs");
- else
- packed_refs_file = git_path("packed-refs");
+ refs->packed = xcalloc(1, sizeof(*refs->packed));
+ acquire_packed_ref_cache(refs->packed);
+ refs->packed->root = create_dir_entry(refs, "", 0, 0);
f = fopen(packed_refs_file, "r");
if (f) {
- read_packed_refs(f, get_ref_dir(refs->packed));
+ stat_validity_update(&refs->packed->validity, fileno(f));
+ read_packed_refs(f, get_ref_dir(refs->packed->root));
fclose(f);
}
}
- return get_ref_dir(refs->packed);
+ return refs->packed;
+}
+
+static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
+{
+ return get_ref_dir(packed_ref_cache->root);
+}
+
+static struct ref_dir *get_packed_refs(struct ref_cache *refs)
+{
+ return get_packed_ref_dir(get_packed_ref_cache(refs));
}
void add_packed_ref(const char *refname, const unsigned char *sha1)
{
- add_ref(get_packed_refs(&ref_cache),
+ struct packed_ref_cache *packed_ref_cache =
+ get_packed_ref_cache(&ref_cache);
+
+ if (!packed_ref_cache->lock)
+ die("internal error: packed refs not locked");
+ add_ref(get_packed_ref_dir(packed_ref_cache),
create_ref_entry(refname, sha1, REF_ISPACKED, 1));
}
static int do_for_each_entry(struct ref_cache *refs, const char *base,
each_ref_entry_fn fn, void *cb_data)
{
- struct ref_dir *packed_dir = get_packed_refs(refs);
- struct ref_dir *loose_dir = get_loose_refs(refs);
+ struct packed_ref_cache *packed_ref_cache;
+ struct ref_dir *loose_dir;
+ struct ref_dir *packed_dir;
int retval = 0;
+ /*
+ * We must make sure that all loose refs are read before accessing the
+ * packed-refs file; this avoids a race condition in which loose refs
+ * are migrated to the packed-refs file by a simultaneous process, but
+ * our in-memory view is from before the migration. get_packed_ref_cache()
+ * takes care of making sure our view is up to date with what is on
+ * disk.
+ */
+ loose_dir = get_loose_refs(refs);
if (base && *base) {
- packed_dir = find_containing_dir(packed_dir, base, 0);
loose_dir = find_containing_dir(loose_dir, base, 0);
}
+ if (loose_dir)
+ prime_ref_dir(loose_dir);
+
+ packed_ref_cache = get_packed_ref_cache(refs);
+ acquire_packed_ref_cache(packed_ref_cache);
+ packed_dir = get_packed_ref_dir(packed_ref_cache);
+ if (base && *base) {
+ packed_dir = find_containing_dir(packed_dir, base, 0);
+ }
if (packed_dir && loose_dir) {
sort_ref_dir(packed_dir);
loose_dir, 0, fn, cb_data);
}
+ release_packed_ref_cache(packed_ref_cache);
return retval;
}
}
}
+/*
+ * An each_ref_entry_fn that writes the entry to a packed-refs file.
+ */
+static int write_packed_entry_fn(struct ref_entry *entry, void *cb_data)
+{
+ int *fd = cb_data;
+ enum peel_status peel_status = peel_entry(entry, 0);
+
+ if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
+ error("internal error: %s is not a valid packed reference!",
+ entry->name);
+ write_packed_entry(*fd, entry->name, entry->u.value.sha1,
+ peel_status == PEEL_PEELED ?
+ entry->u.value.peeled : NULL);
+ return 0;
+}
+
+int lock_packed_refs(int flags)
+{
+ struct packed_ref_cache *packed_ref_cache;
+
+ /* Discard the old cache because it might be invalid: */
+ clear_packed_ref_cache(&ref_cache);
+ if (hold_lock_file_for_update(&packlock, git_path("packed-refs"), flags) < 0)
+ return -1;
+ /* Read the current packed-refs while holding the lock: */
+ packed_ref_cache = get_packed_ref_cache(&ref_cache);
+ packed_ref_cache->lock = &packlock;
+ /* Increment the reference count to prevent it from being freed: */
+ acquire_packed_ref_cache(packed_ref_cache);
+ return 0;
+}
+
+int commit_packed_refs(void)
+{
+ struct packed_ref_cache *packed_ref_cache =
+ get_packed_ref_cache(&ref_cache);
+ int error = 0;
+
+ if (!packed_ref_cache->lock)
+ die("internal error: packed-refs not locked");
+ write_or_die(packed_ref_cache->lock->fd,
+ PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
+
+ do_for_each_entry_in_dir(get_packed_ref_dir(packed_ref_cache),
+ 0, write_packed_entry_fn,
+ &packed_ref_cache->lock->fd);
+ if (commit_lock_file(packed_ref_cache->lock))
+ error = -1;
+ packed_ref_cache->lock = NULL;
+ release_packed_ref_cache(packed_ref_cache);
+ return error;
+}
+
+void rollback_packed_refs(void)
+{
+ struct packed_ref_cache *packed_ref_cache =
+ get_packed_ref_cache(&ref_cache);
+
+ if (!packed_ref_cache->lock)
+ die("internal error: packed-refs not locked");
+ rollback_lock_file(packed_ref_cache->lock);
+ packed_ref_cache->lock = NULL;
+ release_packed_ref_cache(packed_ref_cache);
+ clear_packed_ref_cache(&ref_cache);
+}
+
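
lock_packed_refs(), commit_packed_refs(), and rollback_packed_refs() together form a small begin/commit/abort API around the packed-refs file. A sketch of the expected calling sequence (the wrapper function and its error messages are illustrative, not part of the patch; note that add_packed_ref() above is only valid while the lock is held):

static int add_one_packed_ref(const char *refname, const unsigned char *sha1)
{
	if (lock_packed_refs(0))
		return error("could not lock packed-refs");

	/* Mutate the locked packed-ref cache ... */
	add_packed_ref(refname, sha1);

	/*
	 * ... then either write it out, or call rollback_packed_refs()
	 * instead to discard the pending update.
	 */
	if (commit_packed_refs())
		return error("could not commit packed-refs");
	return 0;
}
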
struct ref_to_prune {
struct ref_to_prune *next;
unsigned char sha1[20];
struct pack_refs_cb_data {
unsigned int flags;
+ struct ref_dir *packed_refs;
struct ref_to_prune *ref_to_prune;
- int fd;
};
-static int pack_one_ref(struct ref_entry *entry, void *cb_data)
+/*
+ * An each_ref_entry_fn that is run over loose references only. If
+ * the loose reference can be packed, add an entry in the packed ref
+ * cache. If the reference should be pruned, also add it to
+ * ref_to_prune in the pack_refs_cb_data.
+ */
+static int pack_if_possible_fn(struct ref_entry *entry, void *cb_data)
{
struct pack_refs_cb_data *cb = cb_data;
enum peel_status peel_status;
+ struct ref_entry *packed_entry;
int is_tag_ref = !prefixcmp(entry->name, "refs/tags/");
- /* ALWAYS pack refs that were already packed or are tags */
- if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref &&
- !(entry->flag & REF_ISPACKED))
+ /* ALWAYS pack tags */
+ if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref)
return 0;
/* Do not pack symbolic or broken refs: */
if ((entry->flag & REF_ISSYMREF) || !ref_resolves_to_object(entry))
return 0;
+ /* Add a packed ref cache entry equivalent to the loose entry. */
peel_status = peel_entry(entry, 1);
if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
die("internal error peeling reference %s (%s)",
entry->name, sha1_to_hex(entry->u.value.sha1));
- write_packed_entry(cb->fd, entry->name, entry->u.value.sha1,
- peel_status == PEEL_PEELED ?
- entry->u.value.peeled : NULL);
+ packed_entry = find_ref(cb->packed_refs, entry->name);
+ if (packed_entry) {
+ /* Overwrite existing packed entry with info from loose entry */
+ packed_entry->flag = REF_ISPACKED | REF_KNOWS_PEELED;
+ hashcpy(packed_entry->u.value.sha1, entry->u.value.sha1);
+ } else {
+ packed_entry = create_ref_entry(entry->name, entry->u.value.sha1,
+ REF_ISPACKED | REF_KNOWS_PEELED, 0);
+ add_ref(cb->packed_refs, packed_entry);
+ }
+ hashcpy(packed_entry->u.value.peeled, entry->u.value.peeled);
- /* If the ref was already packed, there is no need to prune it. */
- if ((cb->flags & PACK_REFS_PRUNE) && !(entry->flag & REF_ISPACKED)) {
+ /* Schedule the loose reference for pruning if requested. */
+ if ((cb->flags & PACK_REFS_PRUNE)) {
int namelen = strlen(entry->name) + 1;
struct ref_to_prune *n = xcalloc(1, sizeof(*n) + namelen);
hashcpy(n->sha1, entry->u.value.sha1);
}
}
-static struct lock_file packlock;
-
int pack_refs(unsigned int flags)
{
struct pack_refs_cb_data cbdata;
memset(&cbdata, 0, sizeof(cbdata));
cbdata.flags = flags;
- cbdata.fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"),
- LOCK_DIE_ON_ERROR);
+ lock_packed_refs(LOCK_DIE_ON_ERROR);
+ cbdata.packed_refs = get_packed_refs(&ref_cache);
- write_or_die(cbdata.fd, PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
+ do_for_each_entry_in_dir(get_loose_refs(&ref_cache), 0,
+ pack_if_possible_fn, &cbdata);
- do_for_each_entry(&ref_cache, "", pack_one_ref, &cbdata);
- if (commit_lock_file(&packlock) < 0)
+ if (commit_packed_refs())
die_errno("unable to overwrite old ref-pack file");
+
prune_refs(cbdata.ref_to_prune);
return 0;
}
-static int repack_ref_fn(struct ref_entry *entry, void *cb_data)
+/*
+ * If entry is no longer needed in packed-refs, add it to the string
+ * list pointed to by cb_data. Reasons for deleting entries:
+ *
+ * - Entry is broken.
+ * - Entry is overridden by a loose ref.
+ * - Entry does not point at a valid object.
+ *
+ * In the first and third cases, also emit an error message because these
+ * are indications of repository corruption.
+ */
+static int curate_packed_ref_fn(struct ref_entry *entry, void *cb_data)
{
- int *fd = cb_data;
- enum peel_status peel_status;
+ struct string_list *refs_to_delete = cb_data;
if (entry->flag & REF_ISBROKEN) {
/* This shouldn't happen to packed refs. */
error("%s is broken!", entry->name);
+ string_list_append(refs_to_delete, entry->name);
return 0;
}
if (!has_sha1_file(entry->u.value.sha1)) {
if (read_ref_full(entry->name, sha1, 0, &flags))
/* We should at least have found the packed ref. */
die("Internal error");
- if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED))
+ if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED)) {
/*
* This packed reference is overridden by a
* loose reference, so it is OK that its value
* collected. For this purpose we don't even
* care whether the loose reference itself is
* invalid, broken, symbolic, etc. Silently
- * omit the packed reference from the output.
+ * remove the packed reference.
*/
+ string_list_append(refs_to_delete, entry->name);
return 0;
+ }
/*
* There is no overriding loose reference, so the fact
* that this reference doesn't refer to a valid object
* indicates some kind of repository corruption.
* Report the problem, then omit the reference from
* the output.
*/
error("%s does not point to a valid object!", entry->name);
+ string_list_append(refs_to_delete, entry->name);
return 0;
}
- peel_status = peel_entry(entry, 0);
- write_packed_entry(*fd, entry->name, entry->u.value.sha1,
- peel_status == PEEL_PEELED ?
- entry->u.value.peeled : NULL);
-
return 0;
}
static int repack_without_ref(const char *refname)
{
- int fd;
struct ref_dir *packed;
+ struct string_list refs_to_delete = STRING_LIST_INIT_DUP;
+ struct string_list_item *ref_to_delete;
if (!get_packed_ref(refname))
return 0; /* refname does not exist in packed refs */
- fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"), 0);
- if (fd < 0) {
+ if (lock_packed_refs(0)) {
unable_to_lock_error(git_path("packed-refs"), errno);
return error("cannot delete '%s' from packed refs", refname);
}
- clear_packed_ref_cache(&ref_cache);
packed = get_packed_refs(&ref_cache);
- /* Remove refname from the cache. */
+
+ /* Remove refname from the cache: */
if (remove_entry(packed, refname) == -1) {
/*
* The packed entry disappeared while we were
* acquiring the lock.
*/
- rollback_lock_file(&packlock);
+ rollback_packed_refs();
return 0;
}
- write_or_die(fd, PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
- do_for_each_entry_in_dir(packed, 0, repack_ref_fn, &fd);
- return commit_lock_file(&packlock);
+
+ /* Remove any other accumulated cruft: */
+ do_for_each_entry_in_dir(packed, 0, curate_packed_ref_fn, &refs_to_delete);
+ for_each_string_list_item(ref_to_delete, &refs_to_delete) {
+ if (remove_entry(packed, ref_to_delete->string) == -1)
+ die("internal error");
+ }
+
+ /* Write what remains: */
+ return commit_packed_refs();
}
int delete_ref(const char *refname, const unsigned char *sha1, int delopt)