Merge branch 'mh/ref-races'
author: Junio C Hamano <gitster@pobox.com>
Sun, 30 Jun 2013 22:40:01 +0000 (15:40 -0700)
committer: Junio C Hamano <gitster@pobox.com>
Sun, 30 Jun 2013 22:40:05 +0000 (15:40 -0700)
"git pack-refs", when racing with new ref creation or deletion, has been
susceptible to loss of refs under the right conditions; this has been
tightened up.

* mh/ref-races:
for_each_ref: load all loose refs before packed refs
get_packed_ref_cache: reload packed-refs file when it changes
add a stat_validity struct
Extract a struct stat_data from cache_entry
packed_ref_cache: increment refcount when locked
do_for_each_entry(): increment the packed refs cache refcount
refs: manage lifetime of packed refs cache via reference counting
refs: implement simple transactions for the packed-refs file
refs: wrap the packed refs cache in a level of indirection
pack_refs(): split creation of packed refs and entry writing
repack_without_ref(): split list curation and entry writing

1  2 
builtin/ls-files.c
cache.h
read-cache.c
refs.c
diff --combined builtin/ls-files.c
index 87f3b331ca68715f074b2a5396dda77eea3c3154,6a0730f720ab53b996ebd9a4e1faec10c7afe1bd..3a410c35d99ae030a75c4d3065b7023467e0d1c6
@@@ -165,11 -165,13 +165,13 @@@ static void show_ce_entry(const char *t
        }
        write_name(ce->name, ce_namelen(ce));
        if (debug_mode) {
-               printf("  ctime: %d:%d\n", ce->ce_ctime.sec, ce->ce_ctime.nsec);
-               printf("  mtime: %d:%d\n", ce->ce_mtime.sec, ce->ce_mtime.nsec);
-               printf("  dev: %d\tino: %d\n", ce->ce_dev, ce->ce_ino);
-               printf("  uid: %d\tgid: %d\n", ce->ce_uid, ce->ce_gid);
-               printf("  size: %d\tflags: %x\n", ce->ce_size, ce->ce_flags);
+               struct stat_data *sd = &ce->ce_stat_data;
+               printf("  ctime: %d:%d\n", sd->sd_ctime.sec, sd->sd_ctime.nsec);
+               printf("  mtime: %d:%d\n", sd->sd_mtime.sec, sd->sd_mtime.nsec);
+               printf("  dev: %d\tino: %d\n", sd->sd_dev, sd->sd_ino);
+               printf("  uid: %d\tgid: %d\n", sd->sd_uid, sd->sd_gid);
+               printf("  size: %d\tflags: %x\n", sd->sd_size, ce->ce_flags);
        }
  }
  
@@@ -219,7 -221,7 +221,7 @@@ static void show_files(struct dir_struc
                if (show_killed)
                        show_killed_files(dir);
        }
 -      if (show_cached | show_stage) {
 +      if (show_cached || show_stage) {
                for (i = 0; i < active_nr; i++) {
                        struct cache_entry *ce = active_cache[i];
                        if ((dir->flags & DIR_SHOW_IGNORED) &&
                                (ce_skip_worktree(ce) ? tag_skip_worktree : tag_cached), ce);
                }
        }
 -      if (show_deleted | show_modified) {
 +      if (show_deleted || show_modified) {
                for (i = 0; i < active_nr; i++) {
                        struct cache_entry *ce = active_cache[i];
                        struct stat st;
@@@ -571,8 -573,8 +573,8 @@@ int cmd_ls_files(int argc, const char *
                die("ls-files --ignored needs some exclude pattern");
  
        /* With no flags, we default to showing the cached files */
 -      if (!(show_stage | show_deleted | show_others | show_unmerged |
 -            show_killed | show_modified | show_resolve_undo))
 +      if (!(show_stage || show_deleted || show_others || show_unmerged ||
 +            show_killed || show_modified || show_resolve_undo))
                show_cached = 1;
  
        if (max_prefix)
diff --combined cache.h
index ec8240f62a77cf718436d3de62f8ab7f66d4a322,50f33db2ccafceee047f2e387bb3bc03d4b23170..dd0fb33a1520edf2a243555474d1875818294598
+++ b/cache.h
@@@ -119,15 -119,19 +119,19 @@@ struct cache_time 
        unsigned int nsec;
  };
  
+ struct stat_data {
+       struct cache_time sd_ctime;
+       struct cache_time sd_mtime;
+       unsigned int sd_dev;
+       unsigned int sd_ino;
+       unsigned int sd_uid;
+       unsigned int sd_gid;
+       unsigned int sd_size;
+ };
  struct cache_entry {
-       struct cache_time ce_ctime;
-       struct cache_time ce_mtime;
-       unsigned int ce_dev;
-       unsigned int ce_ino;
+       struct stat_data ce_stat_data;
        unsigned int ce_mode;
-       unsigned int ce_uid;
-       unsigned int ce_gid;
-       unsigned int ce_size;
        unsigned int ce_flags;
        unsigned int ce_namelen;
        unsigned char sha1[20];
@@@ -511,6 -515,21 +515,21 @@@ extern int limit_pathspec_to_literal(vo
  #define HASH_FORMAT_CHECK 2
  extern int index_fd(unsigned char *sha1, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
  extern int index_path(unsigned char *sha1, const char *path, struct stat *st, unsigned flags);
+ /*
+  * Record to sd the data from st that we use to check whether a file
+  * might have changed.
+  */
+ extern void fill_stat_data(struct stat_data *sd, struct stat *st);
+ /*
+  * Return 0 if st is consistent with a file not having been changed
+  * since sd was filled.  If there are differences, return a
+  * combination of MTIME_CHANGED, CTIME_CHANGED, OWNER_CHANGED,
+  * INODE_CHANGED, and DATA_CHANGED.
+  */
+ extern int match_stat_data(const struct stat_data *sd, struct stat *st);
  extern void fill_stat_cache_info(struct cache_entry *ce, struct stat *st);
  
  #define REFRESH_REALLY                0x0001  /* ignore_valid */
@@@ -774,6 -793,9 +793,6 @@@ extern int parse_sha1_header(const cha
  /* global flag to enable extra checks when accessing packed objects */
  extern int do_check_packed_object_crc;
  
 -/* for development: log offset of pack access */
 -extern const char *log_pack_access;
 -
  extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type);
  
  extern int move_temp_to_file(const char *tmpfile, const char *filename);
@@@ -1338,4 -1360,31 +1357,31 @@@ int checkout_fast_forward(const unsigne
  
  int sane_execvp(const char *file, char *const argv[]);
  
+ /*
+  * A struct to encapsulate the concept of whether a file has changed
+  * since we last checked it. This uses criteria similar to those used
+  * for the index.
+  */
+ struct stat_validity {
+       struct stat_data *sd;
+ };
+ void stat_validity_clear(struct stat_validity *sv);
+ /*
+  * Returns 1 if the path is a regular file (or a symlink to a regular
+  * file) and matches the saved stat_validity, 0 otherwise.  A missing
+  * or inaccessible file is considered a match if the struct was just
+  * initialized, or if the previous update found an inaccessible file.
+  */
+ int stat_validity_check(struct stat_validity *sv, const char *path);
+ /*
+  * Update the stat_validity from a file opened at descriptor fd. If
+  * the file is missing, inaccessible, or not a regular file, then
+  * future calls to stat_validity_check will match iff one of those
+  * conditions continues to be true.
+  */
+ void stat_validity_update(struct stat_validity *sv, int fd);
  #endif /* CACHE_H */
diff --combined read-cache.c
index b297addb576dec45fa9b82ef4a0ffb350f9cfc6c,b15bc096ea42f8d70d475e3c20fa9e5d54486a1c..d5201f9b06c3f5ac53b8a6f5396fbf3392f1b3b2
@@@ -67,6 -67,61 +67,61 @@@ void rename_index_entry_at(struct index
        add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
  }
  
+ void fill_stat_data(struct stat_data *sd, struct stat *st)
+ {
+       sd->sd_ctime.sec = (unsigned int)st->st_ctime;
+       sd->sd_mtime.sec = (unsigned int)st->st_mtime;
+       sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
+       sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
+       sd->sd_dev = st->st_dev;
+       sd->sd_ino = st->st_ino;
+       sd->sd_uid = st->st_uid;
+       sd->sd_gid = st->st_gid;
+       sd->sd_size = st->st_size;
+ }
+ int match_stat_data(const struct stat_data *sd, struct stat *st)
+ {
+       int changed = 0;
+       if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
+               changed |= MTIME_CHANGED;
+       if (trust_ctime && check_stat &&
+           sd->sd_ctime.sec != (unsigned int)st->st_ctime)
+               changed |= CTIME_CHANGED;
+ #ifdef USE_NSEC
+       if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
+               changed |= MTIME_CHANGED;
+       if (trust_ctime && check_stat &&
+           sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
+               changed |= CTIME_CHANGED;
+ #endif
+       if (check_stat) {
+               if (sd->sd_uid != (unsigned int) st->st_uid ||
+                       sd->sd_gid != (unsigned int) st->st_gid)
+                       changed |= OWNER_CHANGED;
+               if (sd->sd_ino != (unsigned int) st->st_ino)
+                       changed |= INODE_CHANGED;
+       }
+ #ifdef USE_STDEV
+       /*
+        * st_dev breaks on network filesystems where different
+        * clients will have different views of what "device"
+        * the filesystem is on
+        */
+       if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
+                       changed |= INODE_CHANGED;
+ #endif
+       if (sd->sd_size != (unsigned int) st->st_size)
+               changed |= DATA_CHANGED;
+       return changed;
+ }
  /*
   * This only updates the "non-critical" parts of the directory
   * cache, ie the parts that aren't tracked by GIT, and only used
   */
  void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
  {
-       ce->ce_ctime.sec = (unsigned int)st->st_ctime;
-       ce->ce_mtime.sec = (unsigned int)st->st_mtime;
-       ce->ce_ctime.nsec = ST_CTIME_NSEC(*st);
-       ce->ce_mtime.nsec = ST_MTIME_NSEC(*st);
-       ce->ce_dev = st->st_dev;
-       ce->ce_ino = st->st_ino;
-       ce->ce_uid = st->st_uid;
-       ce->ce_gid = st->st_gid;
-       ce->ce_size = st->st_size;
+       fill_stat_data(&ce->ce_stat_data, st);
  
        if (assume_unchanged)
                ce->ce_flags |= CE_VALID;
@@@ -195,43 -242,11 +242,11 @@@ static int ce_match_stat_basic(const st
        default:
                die("internal error: ce_mode is %o", ce->ce_mode);
        }
-       if (ce->ce_mtime.sec != (unsigned int)st->st_mtime)
-               changed |= MTIME_CHANGED;
-       if (trust_ctime && check_stat &&
-           ce->ce_ctime.sec != (unsigned int)st->st_ctime)
-               changed |= CTIME_CHANGED;
- #ifdef USE_NSEC
-       if (check_stat && ce->ce_mtime.nsec != ST_MTIME_NSEC(*st))
-               changed |= MTIME_CHANGED;
-       if (trust_ctime && check_stat &&
-           ce->ce_ctime.nsec != ST_CTIME_NSEC(*st))
-               changed |= CTIME_CHANGED;
- #endif
-       if (check_stat) {
-               if (ce->ce_uid != (unsigned int) st->st_uid ||
-                       ce->ce_gid != (unsigned int) st->st_gid)
-                       changed |= OWNER_CHANGED;
-               if (ce->ce_ino != (unsigned int) st->st_ino)
-                       changed |= INODE_CHANGED;
-       }
  
- #ifdef USE_STDEV
-       /*
-        * st_dev breaks on network filesystems where different
-        * clients will have different views of what "device"
-        * the filesystem is on
-        */
-       if (check_stat && ce->ce_dev != (unsigned int) st->st_dev)
-                       changed |= INODE_CHANGED;
- #endif
-       if (ce->ce_size != (unsigned int) st->st_size)
-               changed |= DATA_CHANGED;
+       changed |= match_stat_data(&ce->ce_stat_data, st);
  
        /* Racily smudged entry? */
-       if (!ce->ce_size) {
+       if (!ce->ce_stat_data.sd_size) {
                if (!is_empty_blob_sha1(ce->sha1))
                        changed |= DATA_CHANGED;
        }
@@@ -246,11 -261,11 +261,11 @@@ static int is_racy_timestamp(const stru
                istate->timestamp.sec &&
  #ifdef USE_NSEC
                 /* nanosecond timestamped files can also be racy! */
-               (istate->timestamp.sec < ce->ce_mtime.sec ||
-                (istate->timestamp.sec == ce->ce_mtime.sec &&
-                 istate->timestamp.nsec <= ce->ce_mtime.nsec))
+               (istate->timestamp.sec < ce->ce_stat_data.sd_mtime.sec ||
+                (istate->timestamp.sec == ce->ce_stat_data.sd_mtime.sec &&
+                 istate->timestamp.nsec <= ce->ce_stat_data.sd_mtime.nsec))
  #else
-               istate->timestamp.sec <= ce->ce_mtime.sec
+               istate->timestamp.sec <= ce->ce_stat_data.sd_mtime.sec
  #endif
                 );
  }
@@@ -342,7 -357,7 +357,7 @@@ int ie_modified(const struct index_stat
         * then we know it is.
         */
        if ((changed & DATA_CHANGED) &&
-           (S_ISGITLINK(ce->ce_mode) || ce->ce_size != 0))
+           (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
                return changed;
  
        changed_fs = ce_modified_check_fs(ce, st);
@@@ -1324,16 -1339,16 +1339,16 @@@ static struct cache_entry *cache_entry_
  {
        struct cache_entry *ce = xmalloc(cache_entry_size(len));
  
-       ce->ce_ctime.sec = ntoh_l(ondisk->ctime.sec);
-       ce->ce_mtime.sec = ntoh_l(ondisk->mtime.sec);
-       ce->ce_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
-       ce->ce_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
-       ce->ce_dev   = ntoh_l(ondisk->dev);
-       ce->ce_ino   = ntoh_l(ondisk->ino);
+       ce->ce_stat_data.sd_ctime.sec = ntoh_l(ondisk->ctime.sec);
+       ce->ce_stat_data.sd_mtime.sec = ntoh_l(ondisk->mtime.sec);
+       ce->ce_stat_data.sd_ctime.nsec = ntoh_l(ondisk->ctime.nsec);
+       ce->ce_stat_data.sd_mtime.nsec = ntoh_l(ondisk->mtime.nsec);
+       ce->ce_stat_data.sd_dev   = ntoh_l(ondisk->dev);
+       ce->ce_stat_data.sd_ino   = ntoh_l(ondisk->ino);
        ce->ce_mode  = ntoh_l(ondisk->mode);
-       ce->ce_uid   = ntoh_l(ondisk->uid);
-       ce->ce_gid   = ntoh_l(ondisk->gid);
-       ce->ce_size  = ntoh_l(ondisk->size);
+       ce->ce_stat_data.sd_uid   = ntoh_l(ondisk->uid);
+       ce->ce_stat_data.sd_gid   = ntoh_l(ondisk->gid);
+       ce->ce_stat_data.sd_size  = ntoh_l(ondisk->size);
        ce->ce_flags = flags & ~CE_NAMEMASK;
        ce->ce_namelen = len;
        hashcpy(ce->sha1, ondisk->sha1);
@@@ -1520,9 -1535,8 +1535,9 @@@ int discard_index(struct index_state *i
        free_name_hash(istate);
        cache_tree_free(&(istate->cache_tree));
        istate->initialized = 0;
 -
 -      /* no need to throw away allocated active_cache */
 +      free(istate->cache);
 +      istate->cache = NULL;
 +      istate->cache_alloc = 0;
        return 0;
  }
  
@@@ -1611,7 -1625,7 +1626,7 @@@ static void ce_smudge_racily_clean_entr
         * The only thing we care about in this function is to smudge the
         * falsely clean entry due to touch-update-touch race, so we leave
         * everything else as they are.  We are called for entries whose
-        * ce_mtime match the index file mtime.
+        * ce_stat_data.sd_mtime match the index file mtime.
         *
         * Note that this actually does not do much for gitlinks, for
         * which ce_match_stat_basic() always goes to the actual
                 * file, and never calls us, so the cached size information
                 * for "frotz" stays 6 which does not match the filesystem.
                 */
-               ce->ce_size = 0;
+               ce->ce_stat_data.sd_size = 0;
        }
  }
  
@@@ -1660,16 -1674,16 +1675,16 @@@ static char *copy_cache_entry_to_ondisk
  {
        short flags;
  
-       ondisk->ctime.sec = htonl(ce->ce_ctime.sec);
-       ondisk->mtime.sec = htonl(ce->ce_mtime.sec);
-       ondisk->ctime.nsec = htonl(ce->ce_ctime.nsec);
-       ondisk->mtime.nsec = htonl(ce->ce_mtime.nsec);
-       ondisk->dev  = htonl(ce->ce_dev);
-       ondisk->ino  = htonl(ce->ce_ino);
+       ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
+       ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
+       ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
+       ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
+       ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
+       ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
        ondisk->mode = htonl(ce->ce_mode);
-       ondisk->uid  = htonl(ce->ce_uid);
-       ondisk->gid  = htonl(ce->ce_gid);
-       ondisk->size = htonl(ce->ce_size);
+       ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
+       ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
+       ondisk->size = htonl(ce->ce_stat_data.sd_size);
        hashcpy(ondisk->sha1, ce->sha1);
  
        flags = ce->ce_flags;
@@@ -1936,3 -1950,33 +1951,33 @@@ void *read_blob_data_from_index(struct 
                *size = sz;
        return data;
  }
+ void stat_validity_clear(struct stat_validity *sv)
+ {
+       free(sv->sd);
+       sv->sd = NULL;
+ }
+ int stat_validity_check(struct stat_validity *sv, const char *path)
+ {
+       struct stat st;
+       if (stat(path, &st) < 0)
+               return sv->sd == NULL;
+       if (!sv->sd)
+               return 0;
+       return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
+ }
+ void stat_validity_update(struct stat_validity *sv, int fd)
+ {
+       struct stat st;
+       if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
+               stat_validity_clear(sv);
+       else {
+               if (!sv->sd)
+                       sv->sd = xcalloc(1, sizeof(struct stat_data));
+               fill_stat_data(sv->sd, &st);
+       }
+ }
diff --combined refs.c
index 70b2c829fd1cc5403789b4c95e54983b2bde1e5b,787cc1c9bc6af558bceb18d933321e93b399f195..43022066499ffaf828ac31e82b77f53539bc232d
--- 1/refs.c
--- 2/refs.c
+++ b/refs.c
@@@ -749,6 -749,21 +749,21 @@@ static int do_for_each_entry_in_dirs(st
        }
  }
  
+ /*
+  * Load all of the refs from the dir into our in-memory cache. The hard work
+  * of loading loose refs is done by get_ref_dir(), so we just need to recurse
+  * through all of the sub-directories. We do not even need to care about
+  * sorting, as traversal order does not matter to us.
+  */
+ static void prime_ref_dir(struct ref_dir *dir)
+ {
+       int i;
+       for (i = 0; i < dir->nr; i++) {
+               struct ref_entry *entry = dir->entries[i];
+               if (entry->flag & REF_DIR)
+                       prime_ref_dir(get_ref_dir(entry));
+       }
+ }
  /*
   * Return true iff refname1 and refname2 conflict with each other.
   * Two reference names conflict if one of them exactly matches the
@@@ -806,6 -821,30 +821,30 @@@ static int is_refname_available(const c
        return 1;
  }
  
+ struct packed_ref_cache {
+       struct ref_entry *root;
+       /*
+        * Count of references to the data structure in this instance,
+        * including the pointer from ref_cache::packed if any.  The
+        * data will not be freed as long as the reference count is
+        * nonzero.
+        */
+       unsigned int referrers;
+       /*
+        * Iff the packed-refs file associated with this instance is
+        * currently locked for writing, this points at the associated
+        * lock (which is owned by somebody else).  The referrer count
+        * is also incremented when the file is locked and decremented
+        * when it is unlocked.
+        */
+       struct lock_file *lock;
+       /* The metadata from when this packed-refs cache was read */
+       struct stat_validity validity;
+ };
  /*
   * Future: need to be in "struct repository"
   * when doing a full libification.
  static struct ref_cache {
        struct ref_cache *next;
        struct ref_entry *loose;
-       struct ref_entry *packed;
+       struct packed_ref_cache *packed;
        /*
         * The submodule name, or "" for the main repo.  We allocate
         * length 1 rather than FLEX_ARRAY so that the main ref_cache
        char name[1];
  } ref_cache, *submodule_ref_caches;
  
+ /* Lock used for the main packed-refs file: */
+ static struct lock_file packlock;
+ /*
+  * Increment the reference count of *packed_refs.
+  */
+ static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
+ {
+       packed_refs->referrers++;
+ }
+ /*
+  * Decrease the reference count of *packed_refs.  If it goes to zero,
+  * free *packed_refs and return true; otherwise return false.
+  */
+ static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
+ {
+       if (!--packed_refs->referrers) {
+               free_ref_entry(packed_refs->root);
+               stat_validity_clear(&packed_refs->validity);
+               free(packed_refs);
+               return 1;
+       } else {
+               return 0;
+       }
+ }
  static void clear_packed_ref_cache(struct ref_cache *refs)
  {
        if (refs->packed) {
-               free_ref_entry(refs->packed);
+               struct packed_ref_cache *packed_refs = refs->packed;
+               if (packed_refs->lock)
+                       die("internal error: packed-ref cache cleared while locked");
                refs->packed = NULL;
+               release_packed_ref_cache(packed_refs);
        }
  }
  
@@@ -996,29 -1066,57 +1066,57 @@@ static void read_packed_refs(FILE *f, s
        }
  }
  
- static struct ref_dir *get_packed_refs(struct ref_cache *refs)
+ /*
+  * Get the packed_ref_cache for the specified ref_cache, creating it
+  * if necessary.
+  */
+ static struct packed_ref_cache *get_packed_ref_cache(struct ref_cache *refs)
  {
+       const char *packed_refs_file;
+       if (*refs->name)
+               packed_refs_file = git_path_submodule(refs->name, "packed-refs");
+       else
+               packed_refs_file = git_path("packed-refs");
+       if (refs->packed &&
+           !stat_validity_check(&refs->packed->validity, packed_refs_file))
+               clear_packed_ref_cache(refs);
        if (!refs->packed) {
-               const char *packed_refs_file;
                FILE *f;
  
-               refs->packed = create_dir_entry(refs, "", 0, 0);
-               if (*refs->name)
-                       packed_refs_file = git_path_submodule(refs->name, "packed-refs");
-               else
-                       packed_refs_file = git_path("packed-refs");
+               refs->packed = xcalloc(1, sizeof(*refs->packed));
+               acquire_packed_ref_cache(refs->packed);
+               refs->packed->root = create_dir_entry(refs, "", 0, 0);
                f = fopen(packed_refs_file, "r");
                if (f) {
-                       read_packed_refs(f, get_ref_dir(refs->packed));
+                       stat_validity_update(&refs->packed->validity, fileno(f));
+                       read_packed_refs(f, get_ref_dir(refs->packed->root));
                        fclose(f);
                }
        }
-       return get_ref_dir(refs->packed);
+       return refs->packed;
+ }
+ static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
+ {
+       return get_ref_dir(packed_ref_cache->root);
+ }
+ static struct ref_dir *get_packed_refs(struct ref_cache *refs)
+ {
+       return get_packed_ref_dir(get_packed_ref_cache(refs));
  }
  
  void add_packed_ref(const char *refname, const unsigned char *sha1)
  {
-       add_ref(get_packed_refs(&ref_cache),
+       struct packed_ref_cache *packed_ref_cache =
+               get_packed_ref_cache(&ref_cache);
+       if (!packed_ref_cache->lock)
+               die("internal error: packed refs not locked");
+       add_ref(get_packed_ref_dir(packed_ref_cache),
                create_ref_entry(refname, sha1, REF_ISPACKED, 1));
  }
  
@@@ -1197,37 -1295,6 +1295,37 @@@ static struct ref_entry *get_packed_ref
        return find_ref(get_packed_refs(&ref_cache), refname);
  }
  
 +/*
 + * A loose ref file doesn't exist; check for a packed ref.  The
 + * options are forwarded from resolve_safe_unsafe().
 + */
 +static const char *handle_missing_loose_ref(const char *refname,
 +                                          unsigned char *sha1,
 +                                          int reading,
 +                                          int *flag)
 +{
 +      struct ref_entry *entry;
 +
 +      /*
 +       * The loose reference file does not exist; check for a packed
 +       * reference.
 +       */
 +      entry = get_packed_ref(refname);
 +      if (entry) {
 +              hashcpy(sha1, entry->u.value.sha1);
 +              if (flag)
 +                      *flag |= REF_ISPACKED;
 +              return refname;
 +      }
 +      /* The reference is not a packed reference, either. */
 +      if (reading) {
 +              return NULL;
 +      } else {
 +              hashclr(sha1);
 +              return refname;
 +      }
 +}
 +
  const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int reading, int *flag)
  {
        int depth = MAXDEPTH;
  
                git_snpath(path, sizeof(path), "%s", refname);
  
 +              /*
 +               * We might have to loop back here to avoid a race
 +               * condition: first we lstat() the file, then we try
 +               * to read it as a link or as a file.  But if somebody
 +               * changes the type of the file (file <-> directory
 +               * <-> symlink) between the lstat() and reading, then
 +               * we don't want to report that as an error but rather
 +               * try again starting with the lstat().
 +               */
 +      stat_ref:
                if (lstat(path, &st) < 0) {
 -                      struct ref_entry *entry;
 -
 -                      if (errno != ENOENT)
 -                              return NULL;
 -                      /*
 -                       * The loose reference file does not exist;
 -                       * check for a packed reference.
 -                       */
 -                      entry = get_packed_ref(refname);
 -                      if (entry) {
 -                              hashcpy(sha1, entry->u.value.sha1);
 -                              if (flag)
 -                                      *flag |= REF_ISPACKED;
 -                              return refname;
 -                      }
 -                      /* The reference is not a packed reference, either. */
 -                      if (reading) {
 +                      if (errno == ENOENT)
 +                              return handle_missing_loose_ref(refname, sha1,
 +                                                              reading, flag);
 +                      else
                                return NULL;
 -                      } else {
 -                              hashclr(sha1);
 -                              return refname;
 -                      }
                }
  
                /* Follow "normalized" - ie "refs/.." symlinks by hand */
                if (S_ISLNK(st.st_mode)) {
                        len = readlink(path, buffer, sizeof(buffer)-1);
 -                      if (len < 0)
 -                              return NULL;
 +                      if (len < 0) {
 +                              if (errno == ENOENT || errno == EINVAL)
 +                                      /* inconsistent with lstat; retry */
 +                                      goto stat_ref;
 +                              else
 +                                      return NULL;
 +                      }
                        buffer[len] = 0;
                        if (!prefixcmp(buffer, "refs/") &&
                                        !check_refname_format(buffer, 0)) {
                 * a ref
                 */
                fd = open(path, O_RDONLY);
 -              if (fd < 0)
 -                      return NULL;
 +              if (fd < 0) {
 +                      if (errno == ENOENT)
 +                              /* inconsistent with lstat; retry */
 +                              goto stat_ref;
 +                      else
 +                              return NULL;
 +              }
                len = read_in_full(fd, buffer, sizeof(buffer)-1);
                close(fd);
                if (len < 0)
                /*
                 * Is it a symbolic ref?
                 */
 -              if (prefixcmp(buffer, "ref:"))
 -                      break;
 +              if (prefixcmp(buffer, "ref:")) {
 +                      /*
 +                       * Please note that FETCH_HEAD has a second
 +                       * line containing other data.
 +                       */
 +                      if (get_sha1_hex(buffer, sha1) ||
 +                          (buffer[40] != '\0' && !isspace(buffer[40]))) {
 +                              if (flag)
 +                                      *flag |= REF_ISBROKEN;
 +                              return NULL;
 +                      }
 +                      return refname;
 +              }
                if (flag)
                        *flag |= REF_ISSYMREF;
                buf = buffer + 4;
                }
                refname = strcpy(refname_buffer, buf);
        }
 -      /* Please note that FETCH_HEAD has a second line containing other data. */
 -      if (get_sha1_hex(buffer, sha1) || (buffer[40] != '\0' && !isspace(buffer[40]))) {
 -              if (flag)
 -                      *flag |= REF_ISBROKEN;
 -              return NULL;
 -      }
 -      return refname;
  }
  
  char *resolve_refdup(const char *ref, unsigned char *sha1, int reading, int *flag)
@@@ -1558,14 -1618,32 +1656,32 @@@ void warn_dangling_symref(FILE *fp, con
  static int do_for_each_entry(struct ref_cache *refs, const char *base,
                             each_ref_entry_fn fn, void *cb_data)
  {
-       struct ref_dir *packed_dir = get_packed_refs(refs);
-       struct ref_dir *loose_dir = get_loose_refs(refs);
+       struct packed_ref_cache *packed_ref_cache;
+       struct ref_dir *loose_dir;
+       struct ref_dir *packed_dir;
        int retval = 0;
  
+       /*
+        * We must make sure that all loose refs are read before accessing the
+        * packed-refs file; this avoids a race condition in which loose refs
+        * are migrated to the packed-refs file by a simultaneous process, but
+        * our in-memory view is from before the migration. get_packed_ref_cache()
+        * takes care of making sure our view is up to date with what is on
+        * disk.
+        */
+       loose_dir = get_loose_refs(refs);
        if (base && *base) {
-               packed_dir = find_containing_dir(packed_dir, base, 0);
                loose_dir = find_containing_dir(loose_dir, base, 0);
        }
+       if (loose_dir)
+               prime_ref_dir(loose_dir);
+       packed_ref_cache = get_packed_ref_cache(refs);
+       acquire_packed_ref_cache(packed_ref_cache);
+       packed_dir = get_packed_ref_dir(packed_ref_cache);
+       if (base && *base) {
+               packed_dir = find_containing_dir(packed_dir, base, 0);
+       }
  
        if (packed_dir && loose_dir) {
                sort_ref_dir(packed_dir);
                                loose_dir, 0, fn, cb_data);
        }
  
+       release_packed_ref_cache(packed_ref_cache);
        return retval;
  }
  
@@@ -2036,6 -2115,73 +2153,73 @@@ static void write_packed_entry(int fd, 
        }
  }
  
+ /*
+  * An each_ref_entry_fn that writes the entry to a packed-refs file.
+  */
+ static int write_packed_entry_fn(struct ref_entry *entry, void *cb_data)
+ {
+       int *fd = cb_data;
+       enum peel_status peel_status = peel_entry(entry, 0);
+       if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
+               error("internal error: %s is not a valid packed reference!",
+                     entry->name);
+       write_packed_entry(*fd, entry->name, entry->u.value.sha1,
+                          peel_status == PEEL_PEELED ?
+                          entry->u.value.peeled : NULL);
+       return 0;
+ }
+ int lock_packed_refs(int flags)
+ {
+       struct packed_ref_cache *packed_ref_cache;
+       /* Discard the old cache because it might be invalid: */
+       clear_packed_ref_cache(&ref_cache);
+       if (hold_lock_file_for_update(&packlock, git_path("packed-refs"), flags) < 0)
+               return -1;
+       /* Read the current packed-refs while holding the lock: */
+       packed_ref_cache = get_packed_ref_cache(&ref_cache);
+       packed_ref_cache->lock = &packlock;
+       /* Increment the reference count to prevent it from being freed: */
+       acquire_packed_ref_cache(packed_ref_cache);
+       return 0;
+ }
+ int commit_packed_refs(void)
+ {
+       struct packed_ref_cache *packed_ref_cache =
+               get_packed_ref_cache(&ref_cache);
+       int error = 0;
+       if (!packed_ref_cache->lock)
+               die("internal error: packed-refs not locked");
+       write_or_die(packed_ref_cache->lock->fd,
+                    PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
+       do_for_each_entry_in_dir(get_packed_ref_dir(packed_ref_cache),
+                                0, write_packed_entry_fn,
+                                &packed_ref_cache->lock->fd);
+       if (commit_lock_file(packed_ref_cache->lock))
+               error = -1;
+       packed_ref_cache->lock = NULL;
+       release_packed_ref_cache(packed_ref_cache);
+       return error;
+ }
+ void rollback_packed_refs(void)
+ {
+       struct packed_ref_cache *packed_ref_cache =
+               get_packed_ref_cache(&ref_cache);
+       if (!packed_ref_cache->lock)
+               die("internal error: packed-refs not locked");
+       rollback_lock_file(packed_ref_cache->lock);
+       packed_ref_cache->lock = NULL;
+       release_packed_ref_cache(packed_ref_cache);
+       clear_packed_ref_cache(&ref_cache);
+ }
  struct ref_to_prune {
        struct ref_to_prune *next;
        unsigned char sha1[20];
  
  struct pack_refs_cb_data {
        unsigned int flags;
+       struct ref_dir *packed_refs;
        struct ref_to_prune *ref_to_prune;
-       int fd;
  };
  
- static int pack_one_ref(struct ref_entry *entry, void *cb_data)
+ /*
+  * An each_ref_entry_fn that is run over loose references only.  If
+  * the loose reference can be packed, add an entry in the packed ref
+  * cache.  If the reference should be pruned, also add it to
+  * ref_to_prune in the pack_refs_cb_data.
+  */
+ static int pack_if_possible_fn(struct ref_entry *entry, void *cb_data)
  {
        struct pack_refs_cb_data *cb = cb_data;
        enum peel_status peel_status;
+       struct ref_entry *packed_entry;
        int is_tag_ref = !prefixcmp(entry->name, "refs/tags/");
  
-       /* ALWAYS pack refs that were already packed or are tags */
-       if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref &&
-           !(entry->flag & REF_ISPACKED))
+       /* ALWAYS pack tags */
+       if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref)
                return 0;
  
        /* Do not pack symbolic or broken refs: */
        if ((entry->flag & REF_ISSYMREF) || !ref_resolves_to_object(entry))
                return 0;
  
+       /* Add a packed ref cache entry equivalent to the loose entry. */
        peel_status = peel_entry(entry, 1);
        if (peel_status != PEEL_PEELED && peel_status != PEEL_NON_TAG)
                die("internal error peeling reference %s (%s)",
                    entry->name, sha1_to_hex(entry->u.value.sha1));
-       write_packed_entry(cb->fd, entry->name, entry->u.value.sha1,
-                          peel_status == PEEL_PEELED ?
-                          entry->u.value.peeled : NULL);
+       packed_entry = find_ref(cb->packed_refs, entry->name);
+       if (packed_entry) {
+               /* Overwrite existing packed entry with info from loose entry */
+               packed_entry->flag = REF_ISPACKED | REF_KNOWS_PEELED;
+               hashcpy(packed_entry->u.value.sha1, entry->u.value.sha1);
+       } else {
+               packed_entry = create_ref_entry(entry->name, entry->u.value.sha1,
+                                               REF_ISPACKED | REF_KNOWS_PEELED, 0);
+               add_ref(cb->packed_refs, packed_entry);
+       }
+       hashcpy(packed_entry->u.value.peeled, entry->u.value.peeled);
  
-       /* If the ref was already packed, there is no need to prune it. */
-       if ((cb->flags & PACK_REFS_PRUNE) && !(entry->flag & REF_ISPACKED)) {
+       /* Schedule the loose reference for pruning if requested. */
+       if ((cb->flags & PACK_REFS_PRUNE)) {
                int namelen = strlen(entry->name) + 1;
                struct ref_to_prune *n = xcalloc(1, sizeof(*n) + namelen);
                hashcpy(n->sha1, entry->u.value.sha1);
@@@ -2134,8 -2295,6 +2333,6 @@@ static void prune_refs(struct ref_to_pr
        }
  }
  
- static struct lock_file packlock;
  int pack_refs(unsigned int flags)
  {
        struct pack_refs_cb_data cbdata;
        memset(&cbdata, 0, sizeof(cbdata));
        cbdata.flags = flags;
  
-       cbdata.fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"),
-                                             LOCK_DIE_ON_ERROR);
+       lock_packed_refs(LOCK_DIE_ON_ERROR);
+       cbdata.packed_refs = get_packed_refs(&ref_cache);
  
-       write_or_die(cbdata.fd, PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
+       do_for_each_entry_in_dir(get_loose_refs(&ref_cache), 0,
+                                pack_if_possible_fn, &cbdata);
  
-       do_for_each_entry(&ref_cache, "", pack_one_ref, &cbdata);
-       if (commit_lock_file(&packlock) < 0)
+       if (commit_packed_refs())
                die_errno("unable to overwrite old ref-pack file");
        prune_refs(cbdata.ref_to_prune);
        return 0;
  }
  
- static int repack_ref_fn(struct ref_entry *entry, void *cb_data)
+ /*
+  * If entry is no longer needed in packed-refs, add it to the string
+  * list pointed to by cb_data.  Reasons for deleting entries:
+  *
+  * - Entry is broken.
+  * - Entry is overridden by a loose ref.
+  * - Entry does not point at a valid object.
+  *
+  * In the first and third cases, also emit an error message because these
+  * are indications of repository corruption.
+  */
+ static int curate_packed_ref_fn(struct ref_entry *entry, void *cb_data)
  {
-       int *fd = cb_data;
-       enum peel_status peel_status;
+       struct string_list *refs_to_delete = cb_data;
  
        if (entry->flag & REF_ISBROKEN) {
                /* This shouldn't happen to packed refs. */
                error("%s is broken!", entry->name);
+               string_list_append(refs_to_delete, entry->name);
                return 0;
        }
        if (!has_sha1_file(entry->u.value.sha1)) {
                if (read_ref_full(entry->name, sha1, 0, &flags))
                        /* We should at least have found the packed ref. */
                        die("Internal error");
-               if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED))
+               if ((flags & REF_ISSYMREF) || !(flags & REF_ISPACKED)) {
                        /*
                         * This packed reference is overridden by a
                         * loose reference, so it is OK that its value
                         * collected.  For this purpose we don't even
                         * care whether the loose reference itself is
                         * invalid, broken, symbolic, etc.  Silently
-                        * omit the packed reference from the output.
+                        * remove the packed reference.
                         */
+                       string_list_append(refs_to_delete, entry->name);
                        return 0;
+               }
                /*
                 * There is no overriding loose reference, so the fact
                 * that this reference doesn't refer to a valid object
                 * the output.
                 */
                error("%s does not point to a valid object!", entry->name);
+               string_list_append(refs_to_delete, entry->name);
                return 0;
        }
  
-       peel_status = peel_entry(entry, 0);
-       write_packed_entry(*fd, entry->name, entry->u.value.sha1,
-                          peel_status == PEEL_PEELED ?
-                          entry->u.value.peeled : NULL);
        return 0;
  }
  
  static int repack_without_ref(const char *refname)
  {
-       int fd;
        struct ref_dir *packed;
+       struct string_list refs_to_delete = STRING_LIST_INIT_DUP;
+       struct string_list_item *ref_to_delete;
  
        if (!get_packed_ref(refname))
                return 0; /* refname does not exist in packed refs */
  
-       fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"), 0);
-       if (fd < 0) {
+       if (lock_packed_refs(0)) {
                unable_to_lock_error(git_path("packed-refs"), errno);
                return error("cannot delete '%s' from packed refs", refname);
        }
-       clear_packed_ref_cache(&ref_cache);
        packed = get_packed_refs(&ref_cache);
-       /* Remove refname from the cache. */
+       /* Remove refname from the cache: */
        if (remove_entry(packed, refname) == -1) {
                /*
                 * The packed entry disappeared while we were
                 * acquiring the lock.
                 */
-               rollback_lock_file(&packlock);
+               rollback_packed_refs();
                return 0;
        }
-       write_or_die(fd, PACKED_REFS_HEADER, strlen(PACKED_REFS_HEADER));
-       do_for_each_entry_in_dir(packed, 0, repack_ref_fn, &fd);
-       return commit_lock_file(&packlock);
+       /* Remove any other accumulated cruft: */
+       do_for_each_entry_in_dir(packed, 0, curate_packed_ref_fn, &refs_to_delete);
+       for_each_string_list_item(ref_to_delete, &refs_to_delete) {
+               if (remove_entry(packed, ref_to_delete->string) == -1)
+                       die("internal error");
+       }
+       /* Write what remains: */
+       return commit_packed_refs();
  }
  
  int delete_ref(const char *refname, const unsigned char *sha1, int delopt)