Merge branch 'tb/char-may-be-unsigned' into maint
diff --git a/read-cache.c b/read-cache.c
index 8cef7b671548e5fdc3aa1daa1bff02e3c4efdb47..8f644f68b432e173dc31d5739944bbdfe9c4ad27 100644
--- a/read-cache.c
+++ b/read-cache.c
@@ -6,11 +6,14 @@
 #define NO_THE_INDEX_COMPATIBILITY_MACROS
 #include "cache.h"
 #include "config.h"
+#include "diff.h"
+#include "diffcore.h"
 #include "tempfile.h"
 #include "lockfile.h"
 #include "cache-tree.h"
 #include "refs.h"
 #include "dir.h"
+#include "object-store.h"
 #include "tree.h"
 #include "commit.h"
 #include "blob.h"
@@ -19,6 +22,7 @@
 #include "varint.h"
 #include "split-index.h"
 #include "utf8.h"
+#include "fsmonitor.h"
 
 /* Mask for the name length in ce_flags in the on-disk index */
 
 #define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
 #define CACHE_EXT_LINK 0x6c696e6b        /* "link" */
 #define CACHE_EXT_UNTRACKED 0x554E5452   /* "UNTR" */
+#define CACHE_EXT_FSMONITOR 0x46534D4E   /* "FSMN" */
 
 /* changes that can be kept in $GIT_DIR/index (basically all extensions) */
 #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
                 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
-                SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED)
+                SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
+
+
+/*
+ * This is an estimate of the pathname length in the index.  We use
+ * this for V4 index files to guess the un-deltafied size of the index
+ * in memory because of pathname deltafication.  This is not required
+ * for V2/V3 index formats because their pathnames are not compressed.
+ * If the initial amount of memory set aside is not sufficient, the
+ * mem pool will allocate extra memory.
+ */
+#define CACHE_ENTRY_PATH_LENGTH 80
+
+static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
+{
+       struct cache_entry *ce;
+       ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
+       ce->mem_pool_allocated = 1;
+       return ce;
+}
+
+static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
+{
+       struct cache_entry *ce;
+       ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
+       ce->mem_pool_allocated = 1;
+       return ce;
+}
+
+static struct mem_pool *find_mem_pool(struct index_state *istate)
+{
+       struct mem_pool **pool_ptr;
+
+       if (istate->split_index && istate->split_index->base)
+               pool_ptr = &istate->split_index->base->ce_mem_pool;
+       else
+               pool_ptr = &istate->ce_mem_pool;
+
+       if (!*pool_ptr)
+               mem_pool_init(pool_ptr, 0);
+
+       return *pool_ptr;
+}
 
 struct index_state the_index;
 static const char *alternate_index_output;
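(Editor's aside, not part of the patch: the pool-backed allocators above are thin, file-local helpers; ordinary callers go through make_empty_cache_entry(), which this diff introduces further down. A minimal sketch of the intended usage, with `istate` and `path` assumed from the caller's context:

    size_t len = strlen(path);
    struct cache_entry *ce = make_empty_cache_entry(istate, len);

    memcpy(ce->name, path, len);
    ce->ce_namelen = len;
    /*
     * No matching free() here: pool-backed entries live and die with the
     * index's mem_pool; discard_cache_entry() only free()s transient,
     * heap-allocated entries.
     */
)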
@@ -59,28 +106,30 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
 
        replace_index_entry_in_base(istate, old, ce);
        remove_name_hash(istate, old);
-       free(old);
+       discard_cache_entry(old);
+       ce->ce_flags &= ~CE_HASHED;
        set_index_entry(istate, nr, ce);
        ce->ce_flags |= CE_UPDATE_IN_BASE;
+       mark_fsmonitor_invalid(istate, ce);
        istate->cache_changed |= CE_ENTRY_CHANGED;
 }
 
 void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
 {
-       struct cache_entry *old = istate->cache[nr], *new;
+       struct cache_entry *old_entry = istate->cache[nr], *new_entry;
        int namelen = strlen(new_name);
 
-       new = xmalloc(cache_entry_size(namelen));
-       copy_cache_entry(new, old);
-       new->ce_flags &= ~CE_HASHED;
-       new->ce_namelen = namelen;
-       new->index = 0;
-       memcpy(new->name, new_name, namelen + 1);
+       new_entry = make_empty_cache_entry(istate, namelen);
+       copy_cache_entry(new_entry, old_entry);
+       new_entry->ce_flags &= ~CE_HASHED;
+       new_entry->ce_namelen = namelen;
+       new_entry->index = 0;
+       memcpy(new_entry->name, new_name, namelen + 1);
 
-       cache_tree_invalidate_path(istate, old->name);
-       untracked_cache_remove_from_index(istate, old->name);
+       cache_tree_invalidate_path(istate, old_entry->name);
+       untracked_cache_remove_from_index(istate, old_entry->name);
        remove_index_entry_at(istate, nr);
-       add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+       add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
 }
 
 void fill_stat_data(struct stat_data *sd, struct stat *st)
@@ -150,8 +199,10 @@ void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
        if (assume_unchanged)
                ce->ce_flags |= CE_VALID;
 
-       if (S_ISREG(st->st_mode))
+       if (S_ISREG(st->st_mode)) {
                ce_mark_uptodate(ce);
+               mark_fsmonitor_valid(ce);
+       }
 }
 
 static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
@@ -179,7 +230,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
        if (strbuf_readlink(&sb, ce->name, expected_size))
                return -1;
 
-       buffer = read_sha1_file(ce->oid.hash, &type, &size);
+       buffer = read_object_file(&ce->oid, &type, &size);
        if (buffer) {
                if (size == sb.len)
                        match = memcmp(buffer, sb.buf, size);
@@ -191,7 +242,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
 
 static int ce_compare_gitlink(const struct cache_entry *ce)
 {
-       unsigned char sha1[20];
+       struct object_id oid;
 
        /*
         * We don't actually require that the .git directory
@@ -201,9 +252,9 @@ static int ce_compare_gitlink(const struct cache_entry *ce)
         *
         * If so, we consider it always to match.
         */
-       if (resolve_gitlink_ref(ce->name, "HEAD", sha1) < 0)
+       if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
                return 0;
-       return hashcmp(sha1, ce->oid.hash);
+       return oidcmp(&oid, &ce->oid);
 }
 
 static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st)
@@ -286,7 +337,7 @@ static int is_racy_stat(const struct index_state *istate,
                );
 }
 
-static int is_racy_timestamp(const struct index_state *istate,
+int is_racy_timestamp(const struct index_state *istate,
                             const struct cache_entry *ce)
 {
        return (!S_ISGITLINK(ce->ce_mode) &&
@@ -301,7 +352,7 @@ int match_stat_data_racy(const struct index_state *istate,
        return match_stat_data(sd, st);
 }
 
-int ie_match_stat(const struct index_state *istate,
+int ie_match_stat(struct index_state *istate,
                  const struct cache_entry *ce, struct stat *st,
                  unsigned int options)
 {
@@ -309,7 +360,10 @@ int ie_match_stat(const struct index_state *istate,
        int ignore_valid = options & CE_MATCH_IGNORE_VALID;
        int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
        int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
+       int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
 
+       if (!ignore_fsmonitor)
+               refresh_fsmonitor(istate);
        /*
         * If it's marked as always valid in the index, it's
         * valid whatever the checked-out copy says.
@@ -320,6 +374,8 @@ int ie_match_stat(const struct index_state *istate,
                return 0;
        if (!ignore_valid && (ce->ce_flags & CE_VALID))
                return 0;
+       if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
+               return 0;
 
        /*
         * Intent-to-add entries have not been added, so the index entry
@@ -357,7 +413,7 @@ int ie_match_stat(const struct index_state *istate,
        return changed;
 }
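(Editor's aside, not from the patch: a hedged illustration of the new CE_MATCH_IGNORE_FSMONITOR bit. A caller that wants an unconditional lstat()-based comparison, ignoring any cached fsmonitor verdict, passes the flag in much the same way CE_MATCH_IGNORE_VALID bypasses the assume-unchanged bit:

    changed = ie_match_stat(istate, ce, &st,
                            options | CE_MATCH_IGNORE_FSMONITOR);
)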
 
-int ie_modified(const struct index_state *istate,
+int ie_modified(struct index_state *istate,
                const struct cache_entry *ce,
                struct stat *st, unsigned int options)
 {
@@ -605,39 +661,43 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
                                           struct cache_entry *alias)
 {
        int len;
-       struct cache_entry *new;
+       struct cache_entry *new_entry;
 
        if (alias->ce_flags & CE_ADDED)
                die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
 
        /* Ok, create the new entry using the name of the existing alias */
        len = ce_namelen(alias);
-       new = xcalloc(1, cache_entry_size(len));
-       memcpy(new->name, alias->name, len);
-       copy_cache_entry(new, ce);
+       new_entry = make_empty_cache_entry(istate, len);
+       memcpy(new_entry->name, alias->name, len);
+       copy_cache_entry(new_entry, ce);
        save_or_free_index_entry(istate, ce);
-       return new;
+       return new_entry;
 }
 
 void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
 {
-       unsigned char sha1[20];
-       if (write_sha1_file("", 0, blob_type, sha1))
+       struct object_id oid;
+       if (write_object_file("", 0, blob_type, &oid))
                die("cannot create an empty blob in the object database");
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, &oid);
 }
 
 int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
 {
-       int size, namelen, was_same;
+       int namelen, was_same;
        mode_t st_mode = st->st_mode;
-       struct cache_entry *ce, *alias;
+       struct cache_entry *ce, *alias = NULL;
        unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
        int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
        int pretend = flags & ADD_CACHE_PRETEND;
        int intent_only = flags & ADD_CACHE_INTENT;
        int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
                          (intent_only ? ADD_CACHE_NEW_ONLY : 0));
+       int newflags = HASH_WRITE_OBJECT;
+
+       if (flags & HASH_RENORMALIZE)
+               newflags |= HASH_RENORMALIZE;
 
        if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
                return error("%s: can only add regular files, symbolic links or git-directories", path);
@@ -647,8 +707,7 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
                while (namelen && path[namelen-1] == '/')
                        namelen--;
        }
-       size = cache_entry_size(namelen);
-       ce = xcalloc(1, size);
+       ce = make_empty_cache_entry(istate, namelen);
        memcpy(ce->name, path, namelen);
        ce->ce_namelen = namelen;
        if (!intent_only)
@@ -678,20 +737,24 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
        if (ignore_case) {
                adjust_dirname_case(istate, ce->name);
        }
+       if (!(flags & HASH_RENORMALIZE)) {
+               alias = index_file_exists(istate, ce->name,
+                                         ce_namelen(ce), ignore_case);
+               if (alias &&
+                   !ce_stage(alias) &&
+                   !ie_match_stat(istate, alias, st, ce_option)) {
+                       /* Nothing changed, really */
+                       if (!S_ISGITLINK(alias->ce_mode))
+                               ce_mark_uptodate(alias);
+                       alias->ce_flags |= CE_ADDED;
 
-       alias = index_file_exists(istate, ce->name, ce_namelen(ce), ignore_case);
-       if (alias && !ce_stage(alias) && !ie_match_stat(istate, alias, st, ce_option)) {
-               /* Nothing changed, really */
-               if (!S_ISGITLINK(alias->ce_mode))
-                       ce_mark_uptodate(alias);
-               alias->ce_flags |= CE_ADDED;
-
-               free(ce);
-               return 0;
+                       discard_cache_entry(ce);
+                       return 0;
+               }
        }
        if (!intent_only) {
-               if (index_path(&ce->oid, path, st, HASH_WRITE_OBJECT)) {
-                       free(ce);
+               if (index_path(&ce->oid, path, st, newflags)) {
+                       discard_cache_entry(ce);
                        return error("unable to index file %s", path);
                }
        } else
@@ -708,9 +771,9 @@ int add_to_index(struct index_state *istate, const char *path, struct stat *st,
                    ce->ce_mode == alias->ce_mode);
 
        if (pretend)
-               free(ce);
+               discard_cache_entry(ce);
        else if (add_index_entry(istate, ce, add_option)) {
-               free(ce);
+               discard_cache_entry(ce);
                return error("unable to add %s to index", path);
        }
        if (verbose && !was_same)
@@ -726,12 +789,25 @@ int add_file_to_index(struct index_state *istate, const char *path, int flags)
        return add_to_index(istate, path, &st, flags);
 }
 
-struct cache_entry *make_cache_entry(unsigned int mode,
-               const unsigned char *sha1, const char *path, int stage,
-               unsigned int refresh_options)
+struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
+{
+       return mem_pool__ce_calloc(find_mem_pool(istate), len);
+}
+
+struct cache_entry *make_empty_transient_cache_entry(size_t len)
+{
+       return xcalloc(1, cache_entry_size(len));
+}
+
+struct cache_entry *make_cache_entry(struct index_state *istate,
+                                    unsigned int mode,
+                                    const struct object_id *oid,
+                                    const char *path,
+                                    int stage,
+                                    unsigned int refresh_options)
 {
-       int size, len;
        struct cache_entry *ce, *ret;
+       int len;
 
        if (!verify_path(path, mode)) {
                error("Invalid path '%s'", path);
@@ -739,21 +815,43 @@ struct cache_entry *make_cache_entry(unsigned int mode,
        }
 
        len = strlen(path);
-       size = cache_entry_size(len);
-       ce = xcalloc(1, size);
+       ce = make_empty_cache_entry(istate, len);
 
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        memcpy(ce->name, path, len);
        ce->ce_flags = create_ce_flags(stage);
        ce->ce_namelen = len;
        ce->ce_mode = create_ce_mode(mode);
 
-       ret = refresh_cache_entry(ce, refresh_options);
+       ret = refresh_cache_entry(&the_index, ce, refresh_options);
        if (ret != ce)
-               free(ce);
+               discard_cache_entry(ce);
        return ret;
 }
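(Editor's aside: for context, a call site converted to the new make_cache_entry() signature might look like the following sketch. It is not quoted from the patch; `mode`, `oid` and `path` are assumed to be in scope, and the error message is made up:

    struct cache_entry *ce;

    ce = make_cache_entry(&the_index, mode, &oid, path, 0, 0);
    if (!ce)
        return error("could not create a cache entry for '%s'", path);
    return add_index_entry(&the_index, ce,
                           ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
)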
 
+struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid,
+                                              const char *path, int stage)
+{
+       struct cache_entry *ce;
+       int len;
+
+       if (!verify_path(path, mode)) {
+               error("Invalid path '%s'", path);
+               return NULL;
+       }
+
+       len = strlen(path);
+       ce = make_empty_transient_cache_entry(len);
+
+       oidcpy(&ce->oid, oid);
+       memcpy(ce->name, path, len);
+       ce->ce_flags = create_ce_flags(stage);
+       ce->ce_namelen = len;
+       ce->ce_mode = create_ce_mode(mode);
+
+       return ce;
+}
+
 /*
  * Chmod an index entry with either +x or -x.
  *
@@ -778,6 +876,7 @@ int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
        }
        cache_tree_invalidate_path(istate, ce->name);
        ce->ce_flags |= CE_UPDATE_IN_BASE;
+       mark_fsmonitor_invalid(istate, ce);
        istate->cache_changed |= CE_ENTRY_CHANGED;
 
        return 0;
@@ -1223,9 +1322,8 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti
        /* Add it in.. */
        istate->cache_nr++;
        if (istate->cache_nr > pos + 1)
-               memmove(istate->cache + pos + 1,
-                       istate->cache + pos,
-                       (istate->cache_nr - pos - 1) * sizeof(ce));
+               MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+                          istate->cache_nr - pos - 1);
        set_index_entry(istate, pos, ce);
        istate->cache_changed |= CE_ENTRY_ADDED;
        return 0;
@@ -1249,15 +1347,18 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
 {
        struct stat st;
        struct cache_entry *updated;
-       int changed, size;
+       int changed;
        int refresh = options & CE_MATCH_REFRESH;
        int ignore_valid = options & CE_MATCH_IGNORE_VALID;
        int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
        int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
+       int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
 
        if (!refresh || ce_uptodate(ce))
                return ce;
 
+       if (!ignore_fsmonitor)
+               refresh_fsmonitor(istate);
        /*
         * CE_VALID or CE_SKIP_WORKTREE means the user promised us
         * that the change to the work tree does not matter and told
@@ -1271,6 +1372,10 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
                ce_mark_uptodate(ce);
                return ce;
        }
+       if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
+               ce_mark_uptodate(ce);
+               return ce;
+       }
 
        if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
                if (ignore_missing)
@@ -1308,8 +1413,10 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
                         * because CE_UPTODATE flag is in-core only;
                         * we are not going to write this change out.
                         */
-                       if (!S_ISGITLINK(ce->ce_mode))
+                       if (!S_ISGITLINK(ce->ce_mode)) {
                                ce_mark_uptodate(ce);
+                               mark_fsmonitor_valid(ce);
+                       }
                        return ce;
                }
        }
@@ -1320,9 +1427,9 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
                return NULL;
        }
 
-       size = ce_size(ce);
-       updated = xmalloc(size);
-       memcpy(updated, ce, size);
+       updated = make_empty_cache_entry(istate, ce_namelen(ce));
+       copy_cache_entry(updated, ce);
+       memcpy(updated->name, ce->name, ce->ce_namelen + 1);
        fill_stat_cache_info(updated, &st);
        /*
         * If ignore_valid is not set, we should leave CE_VALID bit
@@ -1369,6 +1476,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        const char *typechange_fmt;
        const char *added_fmt;
        const char *unmerged_fmt;
+       uint64_t start = getnanotime();
 
        modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
        deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
@@ -1376,7 +1484,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
        unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
        for (i = 0; i < istate->cache_nr; i++) {
-               struct cache_entry *ce, *new;
+               struct cache_entry *ce, *new_entry;
                int cache_errno = 0;
                int changed = 0;
                int filtered = 0;
@@ -1385,7 +1493,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
                        continue;
 
-               if (pathspec && !ce_path_match(ce, pathspec, seen))
+               if (pathspec && !ce_path_match(&the_index, ce, pathspec, seen))
                        filtered = 1;
 
                if (ce_stage(ce)) {
@@ -1405,10 +1513,10 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                if (filtered)
                        continue;
 
-               new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
-               if (new == ce)
+               new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
+               if (new_entry == ce)
                        continue;
-               if (!new) {
+               if (!new_entry) {
                        const char *fmt;
 
                        if (really && cache_errno == EINVAL) {
@@ -1417,6 +1525,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                                 */
                                ce->ce_flags &= ~CE_VALID;
                                ce->ce_flags |= CE_UPDATE_IN_BASE;
+                               mark_fsmonitor_invalid(istate, ce);
                                istate->cache_changed |= CE_ENTRY_CHANGED;
                        }
                        if (quiet)
@@ -1436,15 +1545,17 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                        continue;
                }
 
-               replace_index_entry(istate, i, new);
+               replace_index_entry(istate, i, new_entry);
        }
+       trace_performance_since(start, "refresh index");
        return has_errors;
 }
 
-struct cache_entry *refresh_cache_entry(struct cache_entry *ce,
-                                              unsigned int options)
+struct cache_entry *refresh_cache_entry(struct index_state *istate,
+                                       struct cache_entry *ce,
+                                       unsigned int options)
 {
-       return refresh_cache_ent(&the_index, ce, options, NULL, NULL);
+       return refresh_cache_ent(istate, ce, options, NULL, NULL);
 }
 
 
@@ -1536,10 +1647,13 @@ struct ondisk_cache_entry_extended {
 /* Allow fsck to force verification of the index checksum. */
 int verify_index_checksum;
 
+/* Allow fsck to force verification of the cache entry order. */
+int verify_ce_order;
+
 static int verify_hdr(struct cache_header *hdr, unsigned long size)
 {
-       git_SHA_CTX c;
-       unsigned char sha1[20];
+       git_hash_ctx c;
+       unsigned char hash[GIT_MAX_RAWSZ];
        int hdr_version;
 
        if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
@@ -1551,10 +1665,10 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size)
        if (!verify_index_checksum)
                return 0;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, size - 20);
-       git_SHA1_Final(sha1, &c);
-       if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+       the_hash_algo->final_fn(hash, &c);
+       if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
                return error("bad index file sha1 signature");
        return 0;
 }
@@ -1576,6 +1690,9 @@ static int read_index_extension(struct index_state *istate,
        case CACHE_EXT_UNTRACKED:
                istate->untracked = read_untracked_extension(data, sz);
                break;
+       case CACHE_EXT_FSMONITOR:
+               read_fsmonitor_extension(istate, data, sz);
+               break;
        default:
                if (*ext < 'A' || 'Z' < *ext)
                        return error("index uses %.4s extension, which we do not understand",
@@ -1593,15 +1710,16 @@ int hold_locked_index(struct lock_file *lk, int lock_flags)
 
 int read_index(struct index_state *istate)
 {
-       return read_index_from(istate, get_index_file());
+       return read_index_from(istate, get_index_file(), get_git_dir());
 }
 
-static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
+static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool,
+                                                  struct ondisk_cache_entry *ondisk,
                                                   unsigned int flags,
                                                   const char *name,
                                                   size_t len)
 {
-       struct cache_entry *ce = xmalloc(cache_entry_size(len));
+       struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len);
 
        ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
        ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
@@ -1643,7 +1761,8 @@ static unsigned long expand_name_field(struct strbuf *name, const char *cp_)
        return (const char *)ep + 1 - cp_;
 }
 
-static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
+static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
+                                           struct ondisk_cache_entry *ondisk,
                                            unsigned long *ent_size,
                                            struct strbuf *previous_name)
 {
@@ -1674,13 +1793,13 @@ static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
                /* v3 and earlier */
                if (len == CE_NAMEMASK)
                        len = strlen(name);
-               ce = cache_entry_from_ondisk(ondisk, flags, name, len);
+               ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len);
 
                *ent_size = ondisk_ce_size(ce);
        } else {
                unsigned long consumed;
                consumed = expand_name_field(previous_name, name);
-               ce = cache_entry_from_ondisk(ondisk, flags,
+               ce = cache_entry_from_ondisk(mem_pool, ondisk, flags,
                                             previous_name->buf,
                                             previous_name->len);
 
@@ -1693,6 +1812,9 @@ static void check_ce_order(struct index_state *istate)
 {
        unsigned int i;
 
+       if (!verify_ce_order)
+               return;
+
        for (i = 1; i < istate->cache_nr; i++) {
                struct cache_entry *ce = istate->cache[i - 1];
                struct cache_entry *next_ce = istate->cache[i];
@@ -1748,6 +1870,23 @@ static void post_read_index_from(struct index_state *istate)
        check_ce_order(istate);
        tweak_untracked_cache(istate);
        tweak_split_index(istate);
+       tweak_fsmonitor(istate);
+}
+
+static size_t estimate_cache_size_from_compressed(unsigned int entries)
+{
+       return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
+}
+
+static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
+{
+       long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
+
+       /*
+        * Account for potential alignment differences.
+        */
+       per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
+       return ondisk_size + entries * per_entry;
 }
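(Editor's aside, to make the sizing arithmetic concrete; the numbers are purely illustrative, not measured. If sizeof(struct cache_entry) were 104 and sizeof(struct ondisk_cache_entry) were 64, each entry would need roughly 40 extra in-memory bytes on top of its on-disk footprint, plus alignment padding, so a 1 MiB V2/V3 index holding 10,000 entries would seed its pool with about 1 MiB + 10,000 * 40 ≈ 1.4 MiB. For V4, where pathnames are delta-compressed on disk, estimate_cache_size_from_compressed() instead budgets CACHE_ENTRY_PATH_LENGTH (80) bytes of pathname per entry. Both are only seeds: as the comment above CACHE_ENTRY_PATH_LENGTH notes, the mem pool grows on demand if the guess turns out to be too small.)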
 
 /* remember to discard_cache() before reading a different cache! */
@@ -1777,7 +1916,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
                die_errno("cannot stat the open index");
 
        mmap_size = xsize_t(st.st_size);
-       if (mmap_size < sizeof(struct cache_header) + 20)
+       if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
                die("index file smaller than expected");
 
        mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -1789,17 +1928,22 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
        if (verify_hdr(hdr, mmap_size) < 0)
                goto unmap;
 
-       hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+       hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
        istate->version = ntohl(hdr->hdr_version);
        istate->cache_nr = ntohl(hdr->hdr_entries);
        istate->cache_alloc = alloc_nr(istate->cache_nr);
        istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
        istate->initialized = 1;
 
-       if (istate->version == 4)
+       if (istate->version == 4) {
                previous_name = &previous_name_buf;
-       else
+               mem_pool_init(&istate->ce_mem_pool,
+                             estimate_cache_size_from_compressed(istate->cache_nr));
+       } else {
                previous_name = NULL;
+               mem_pool_init(&istate->ce_mem_pool,
+                             estimate_cache_size(mmap_size, istate->cache_nr));
+       }
 
        src_offset = sizeof(*hdr);
        for (i = 0; i < istate->cache_nr; i++) {
@@ -1808,7 +1952,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
                unsigned long consumed;
 
                disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
-               ce = create_from_disk(disk_ce, &consumed, previous_name);
+               ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name);
                set_index_entry(istate, i, ce);
 
                src_offset += consumed;
@@ -1817,7 +1961,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
        istate->timestamp.sec = st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
 
-       while (src_offset <= mmap_size - 20 - 8) {
+       while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
                /* After an array of active_nr index entries,
                 * there can be arbitrary number of extended
                 * sections, each of which is prefixed with
@@ -1849,29 +1993,30 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
  * This way, shared indexes can be removed if they have not been used
  * for some time.
  */
-static void freshen_shared_index(char *base_sha1_hex, int warn)
+static void freshen_shared_index(const char *shared_index, int warn)
 {
-       char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex);
        if (!check_and_freshen_file(shared_index, 1) && warn)
                warning("could not freshen shared index '%s'", shared_index);
-       free(shared_index);
 }
 
-int read_index_from(struct index_state *istate, const char *path)
+int read_index_from(struct index_state *istate, const char *path,
+                   const char *gitdir)
 {
+       uint64_t start = getnanotime();
        struct split_index *split_index;
        int ret;
-       char *base_sha1_hex;
-       const char *base_path;
+       char *base_oid_hex;
+       char *base_path;
 
        /* istate->initialized covers both .git/index and .git/sharedindex.xxx */
        if (istate->initialized)
                return istate->cache_nr;
 
        ret = do_read_index(istate, path, 0);
+       trace_performance_since(start, "read cache %s", path);
 
        split_index = istate->split_index;
-       if (!split_index || is_null_sha1(split_index->base_sha1)) {
+       if (!split_index || is_null_oid(&split_index->base_oid)) {
                post_read_index_from(istate);
                return ret;
        }
@@ -1881,17 +2026,19 @@ int read_index_from(struct index_state *istate, const char *path)
        else
                split_index->base = xcalloc(1, sizeof(*split_index->base));
 
-       base_sha1_hex = sha1_to_hex(split_index->base_sha1);
-       base_path = git_path("sharedindex.%s", base_sha1_hex);
+       base_oid_hex = oid_to_hex(&split_index->base_oid);
+       base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
        ret = do_read_index(split_index->base, base_path, 1);
-       if (hashcmp(split_index->base_sha1, split_index->base->sha1))
+       if (oidcmp(&split_index->base_oid, &split_index->base->oid))
                die("broken index, expect %s in %s, got %s",
-                   base_sha1_hex, base_path,
-                   sha1_to_hex(split_index->base->sha1));
+                   base_oid_hex, base_path,
+                   oid_to_hex(&split_index->base->oid));
 
-       freshen_shared_index(base_sha1_hex, 0);
+       freshen_shared_index(base_path, 0);
        merge_base_index(istate);
        post_read_index_from(istate);
+       trace_performance_since(start, "read cache %s", base_path);
+       free(base_path);
        return ret;
 }
 
@@ -1902,17 +2049,15 @@ int is_index_unborn(struct index_state *istate)
 
 int discard_index(struct index_state *istate)
 {
-       int i;
+       /*
+        * Cache entries in istate->cache[] should have been allocated
+        * from the memory pool associated with this index, or from an
+        * associated split_index. There is no need to free individual
+        * cache entries. validate_cache_entries can detect when this
+        * assertion does not hold.
+        */
+       validate_cache_entries(istate);
 
-       for (i = 0; i < istate->cache_nr; i++) {
-               if (istate->cache[i]->index &&
-                   istate->split_index &&
-                   istate->split_index->base &&
-                   istate->cache[i]->index <= istate->split_index->base->cache_nr &&
-                   istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1])
-                       continue;
-               free(istate->cache[i]);
-       }
        resolve_undo_clear_index(istate);
        istate->cache_nr = 0;
        istate->cache_changed = 0;
@@ -1926,9 +2071,47 @@ int discard_index(struct index_state *istate)
        discard_split_index(istate);
        free_untracked_cache(istate->untracked);
        istate->untracked = NULL;
+
+       if (istate->ce_mem_pool) {
+               mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
+               istate->ce_mem_pool = NULL;
+       }
+
        return 0;
 }
 
+/*
+ * Validate the cache entries of this index.
+ * All cache entries associated with this index
+ * should have been allocated by the memory pool
+ * associated with this index, or by a referenced
+ * split index.
+ */
+void validate_cache_entries(const struct index_state *istate)
+{
+       int i;
+
+       if (!should_validate_cache_entries() || !istate || !istate->initialized)
+               return;
+
+       for (i = 0; i < istate->cache_nr; i++) {
+               if (!istate) {
+                       die("internal error: cache entry is not allocated from expected memory pool");
+               } else if (!istate->ce_mem_pool ||
+                       !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
+                       if (!istate->split_index ||
+                               !istate->split_index->base ||
+                               !istate->split_index->base->ce_mem_pool ||
+                               !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
+                               die("internal error: cache entry is not allocated from expected memory pool");
+                       }
+               }
+       }
+
+       if (istate->split_index)
+               validate_cache_entries(istate->split_index->base);
+}
+
 int unmerged_index(const struct index_state *istate)
 {
        int i;
@@ -1939,15 +2122,53 @@ int unmerged_index(const struct index_state *istate)
        return 0;
 }
 
+int index_has_changes(const struct index_state *istate,
+                     struct tree *tree,
+                     struct strbuf *sb)
+{
+       struct object_id cmp;
+       int i;
+
+       if (istate != &the_index) {
+               BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
+       }
+       if (tree)
+               cmp = tree->object.oid;
+       if (tree || !get_oid_tree("HEAD", &cmp)) {
+               struct diff_options opt;
+
+               diff_setup(&opt);
+               opt.flags.exit_with_status = 1;
+               if (!sb)
+                       opt.flags.quick = 1;
+               do_diff_cache(&cmp, &opt);
+               diffcore_std(&opt);
+               for (i = 0; sb && i < diff_queued_diff.nr; i++) {
+                       if (i)
+                               strbuf_addch(sb, ' ');
+                       strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
+               }
+               diff_flush(&opt);
+               return opt.flags.has_changes != 0;
+       } else {
+               for (i = 0; sb && i < istate->cache_nr; i++) {
+                       if (i)
+                               strbuf_addch(sb, ' ');
+                       strbuf_addstr(sb, istate->cache[i]->name);
+               }
+               return !!istate->cache_nr;
+       }
+}
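(Editor's aside, a hedged usage sketch that is not part of the patch: a caller that only needs a yes/no answer can pass NULL for sb, which enables the "quick" diff; passing a strbuf additionally collects the changed paths, space-separated. The warning message below is made up:

    struct strbuf sb = STRBUF_INIT;

    if (index_has_changes(&the_index, NULL, &sb))
        warning("uncommitted index changes: %s", sb.buf);
    strbuf_release(&sb);
)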
+
 #define WRITE_BUFFER_SIZE 8192
 static unsigned char write_buffer[WRITE_BUFFER_SIZE];
 static unsigned long write_buffer_len;
 
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
 {
        unsigned int buffered = write_buffer_len;
        if (buffered) {
-               git_SHA1_Update(context, write_buffer, buffered);
+               the_hash_algo->update_fn(context, write_buffer, buffered);
                if (write_in_full(fd, write_buffer, buffered) < 0)
                        return -1;
                write_buffer_len = 0;
@@ -1955,7 +2176,7 @@ static int ce_write_flush(git_SHA_CTX *context, int fd)
        return 0;
 }
 
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
 {
        while (len) {
                unsigned int buffered = write_buffer_len;
@@ -1977,7 +2198,7 @@ static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
        return 0;
 }
 
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
                                  unsigned int ext, unsigned int sz)
 {
        ext = htonl(ext);
@@ -1986,26 +2207,26 @@ static int write_index_ext_header(git_SHA_CTX *context, int fd,
                (ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
 }
 
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
 {
        unsigned int left = write_buffer_len;
 
        if (left) {
                write_buffer_len = 0;
-               git_SHA1_Update(context, write_buffer, left);
+               the_hash_algo->update_fn(context, write_buffer, left);
        }
 
-       /* Flush first if not enough space for SHA1 signature */
-       if (left + 20 > WRITE_BUFFER_SIZE) {
+       /* Flush first if not enough space for hash signature */
+       if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
                if (write_in_full(fd, write_buffer, left) < 0)
                        return -1;
                left = 0;
        }
 
-       /* Append the SHA1 signature at the end */
-       git_SHA1_Final(write_buffer + left, context);
-       hashcpy(sha1, write_buffer + left);
-       left += 20;
+       /* Append the hash signature at the end */
+       the_hash_algo->final_fn(write_buffer + left, context);
+       hashcpy(hash, write_buffer + left);
+       left += the_hash_algo->rawsz;
        return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
 }
 
@@ -2086,17 +2307,19 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
        }
 }
 
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
                          struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
 {
        int size;
-       int saved_namelen = saved_namelen; /* compiler workaround */
        int result;
+       unsigned int saved_namelen;
+       int stripped_name = 0;
        static unsigned char padding[8] = { 0x00 };
 
        if (ce->ce_flags & CE_STRIP_NAME) {
                saved_namelen = ce_namelen(ce);
                ce->ce_namelen = 0;
+               stripped_name = 1;
        }
 
        if (ce->ce_flags & CE_EXTENDED)
@@ -2136,7 +2359,7 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
                strbuf_splice(previous_name, common, to_remove,
                              ce->name + common, ce_namelen(ce) - common);
        }
-       if (ce->ce_flags & CE_STRIP_NAME) {
+       if (stripped_name) {
                ce->ce_namelen = saved_namelen;
                ce->ce_flags &= ~CE_STRIP_NAME;
        }
@@ -2153,7 +2376,7 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        int fd;
        ssize_t n;
        struct stat st;
-       unsigned char sha1[20];
+       unsigned char hash[GIT_MAX_RAWSZ];
 
        if (!istate->initialized)
                return 0;
@@ -2165,14 +2388,14 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        if (fstat(fd, &st))
                goto out;
 
-       if (st.st_size < sizeof(struct cache_header) + 20)
+       if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
                goto out;
 
-       n = pread_in_full(fd, sha1, 20, st.st_size - 20);
-       if (n != 20)
+       n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+       if (n != the_hash_algo->rawsz)
                goto out;
 
-       if (hashcmp(istate->sha1, sha1))
+       if (hashcmp(istate->oid.hash, hash))
                goto out;
 
        close(fd);
@@ -2201,22 +2424,28 @@ static int has_racy_timestamp(struct index_state *istate)
        return 0;
 }
 
-/*
- * Opportunistically update the index but do not complain if we can't
- */
 void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
 {
        if ((istate->cache_changed || has_racy_timestamp(istate)) &&
-           verify_index(istate) &&
-           write_locked_index(istate, lockfile, COMMIT_LOCK))
+           verify_index(istate))
+               write_locked_index(istate, lockfile, COMMIT_LOCK);
+       else
                rollback_lock_file(lockfile);
 }
 
+/*
+ * On success, `tempfile` is closed. If it is the temporary file
+ * of a `struct lock_file`, we will therefore effectively perform
+ * a `close_lock_file_gently()`. Since that is an implementation
+ * detail of lockfiles, callers of `do_write_index()` should not
+ * rely on it.
+ */
 static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
                          int strip_extensions)
 {
+       uint64_t start = getnanotime();
        int newfd = tempfile->fd;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        struct cache_header hdr;
        int i, err = 0, removed, extended, hdr_version;
        struct cache_entry **cache = istate->cache;
@@ -2224,7 +2453,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
        struct stat st;
        struct ondisk_cache_entry_extended ondisk;
        struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
-       int drop_cache_tree = 0;
+       int drop_cache_tree = istate->drop_cache_tree;
 
        for (i = removed = extended = 0; i < entries; i++) {
                if (cache[i]->ce_flags & CE_REMOVE)
@@ -2240,7 +2469,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
 
        if (!istate->version) {
                istate->version = get_index_format_default();
-               if (getenv("GIT_TEST_SPLIT_INDEX"))
+               if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
                        init_split_index(istate);
        }
 
@@ -2254,7 +2483,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
        hdr.hdr_version = htonl(hdr_version);
        hdr.hdr_entries = htonl(entries - removed);
 
-       git_SHA1_Init(&c);
+       the_hash_algo->init_fn(&c);
        if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
                return -1;
 
@@ -2334,18 +2563,28 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
                if (err)
                        return -1;
        }
+       if (!strip_extensions && istate->fsmonitor_last_update) {
+               struct strbuf sb = STRBUF_INIT;
 
-       if (ce_flush(&c, newfd, istate->sha1))
+               write_fsmonitor_extension(&sb, istate);
+               err = write_index_ext_header(&c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
+                       || ce_write(&c, newfd, sb.buf, sb.len) < 0;
+               strbuf_release(&sb);
+               if (err)
+                       return -1;
+       }
+
+       if (ce_flush(&c, newfd, istate->oid.hash))
                return -1;
        if (close_tempfile_gently(tempfile)) {
                error(_("could not close '%s'"), tempfile->filename.buf);
-               delete_tempfile(&tempfile);
                return -1;
        }
        if (stat(tempfile->filename.buf, &st))
                return -1;
        istate->timestamp.sec = (unsigned int)st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
+       trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
        return 0;
 }
 
@@ -2368,14 +2607,9 @@ static int do_write_locked_index(struct index_state *istate, struct lock_file *l
        int ret = do_write_index(istate, lock->tempfile, 0);
        if (ret)
                return ret;
-       assert((flags & (COMMIT_LOCK | CLOSE_LOCK)) !=
-              (COMMIT_LOCK | CLOSE_LOCK));
        if (flags & COMMIT_LOCK)
                return commit_locked_index(lock);
-       else if (flags & CLOSE_LOCK)
-               return close_lock_file_gently(lock);
-       else
-               return ret;
+       return close_lock_file_gently(lock);
 }
 
 static int write_split_index(struct index_state *istate,
@@ -2449,36 +2683,25 @@ static int clean_shared_index_files(const char *current_hex)
 }
 
 static int write_shared_index(struct index_state *istate,
-                             struct lock_file *lock, unsigned flags)
+                             struct tempfile **temp)
 {
-       struct tempfile *temp;
        struct split_index *si = istate->split_index;
        int ret;
 
-       temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
-       if (!temp) {
-               hashclr(si->base_sha1);
-               return do_write_locked_index(istate, lock, flags);
-       }
        move_cache_to_base_index(istate);
-       ret = do_write_index(si->base, temp, 1);
-       if (ret) {
-               delete_tempfile(&temp);
+       ret = do_write_index(si->base, *temp, 1);
+       if (ret)
                return ret;
-       }
-       ret = adjust_shared_perm(get_tempfile_path(temp));
+       ret = adjust_shared_perm(get_tempfile_path(*temp));
        if (ret) {
-               int save_errno = errno;
-               error("cannot fix permission bits on %s", get_tempfile_path(temp));
-               delete_tempfile(&temp);
-               errno = save_errno;
+               error("cannot fix permission bits on %s", get_tempfile_path(*temp));
                return ret;
        }
-       ret = rename_tempfile(&temp,
-                             git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
+       ret = rename_tempfile(temp,
+                             git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
        if (!ret) {
-               hashcpy(si->base_sha1, si->base->sha1);
-               clean_shared_index_files(sha1_to_hex(si->base->sha1));
+               oidcpy(&si->base_oid, &si->base->oid);
+               clean_shared_index_files(oid_to_hex(&si->base->oid));
        }
 
        return ret;
@@ -2520,15 +2743,25 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
        int new_shared_index, ret;
        struct split_index *si = istate->split_index;
 
+       if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
+               if (flags & COMMIT_LOCK)
+                       rollback_lock_file(lock);
+               return 0;
+       }
+
+       if (istate->fsmonitor_last_update)
+               fill_fsmonitor_bitmap(istate);
+
        if (!si || alternate_index_output ||
            (istate->cache_changed & ~EXTMASK)) {
                if (si)
-                       hashclr(si->base_sha1);
-               return do_write_locked_index(istate, lock, flags);
+                       oidclr(&si->base_oid);
+               ret = do_write_locked_index(istate, lock, flags);
+               goto out;
        }
 
-       if (getenv("GIT_TEST_SPLIT_INDEX")) {
-               int v = si->base_sha1[0];
+       if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
+               int v = si->base_oid.hash[0];
                if ((v & 15) < 6)
                        istate->cache_changed |= SPLIT_INDEX_ORDERED;
        }
@@ -2538,26 +2771,50 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
        new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
 
        if (new_shared_index) {
-               ret = write_shared_index(istate, lock, flags);
+               struct tempfile *temp;
+               int saved_errno;
+
+               temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+               if (!temp) {
+                       oidclr(&si->base_oid);
+                       ret = do_write_locked_index(istate, lock, flags);
+                       goto out;
+               }
+               ret = write_shared_index(istate, &temp);
+
+               saved_errno = errno;
+               if (is_tempfile_active(temp))
+                       delete_tempfile(&temp);
+               errno = saved_errno;
+
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        ret = write_split_index(istate, lock, flags);
 
        /* Freshen the shared index only if the split-index was written */
-       if (!ret && !new_shared_index)
-               freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+       if (!ret && !new_shared_index) {
+               const char *shared_index = git_path("sharedindex.%s",
+                                                   oid_to_hex(&si->base_oid));
+               freshen_shared_index(shared_index, 1);
+       }
 
+out:
+       if (flags & COMMIT_LOCK)
+               rollback_lock_file(lock);
        return ret;
 }
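(Editor's aside, a sketch of how a writer might use the new SKIP_IF_UNCHANGED flag; illustrative only, `lock` is assumed to have been obtained with hold_locked_index() and the error message is invented:

    if (write_locked_index(&the_index, &lock, COMMIT_LOCK | SKIP_IF_UNCHANGED))
        die("unable to write the index file");

With SKIP_IF_UNCHANGED, an index whose cache_changed mask is clear is not rewritten at all; when COMMIT_LOCK is also set, the lock is simply rolled back, as the early return at the top of write_locked_index() shows.)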
 
 /*
  * Read the index file that is potentially unmerged into given
- * index_state, dropping any unmerged entries.  Returns true if
- * the index is unmerged.  Callers who want to refuse to work
- * from an unmerged state can call this and check its return value,
- * instead of calling read_cache().
+ * index_state, dropping any unmerged entries to stage #0 (potentially
+ * resulting in a path appearing as both a file and a directory in the
+ * index; the caller is responsible for clearing out the extra entries
+ * before writing the index to a tree).  Returns true if the index is
+ * unmerged.  Callers who want to refuse to work from an unmerged
+ * state can call this and check its return value, instead of calling
+ * read_cache().
  */
 int read_index_unmerged(struct index_state *istate)
 {
@@ -2568,19 +2825,18 @@ int read_index_unmerged(struct index_state *istate)
        for (i = 0; i < istate->cache_nr; i++) {
                struct cache_entry *ce = istate->cache[i];
                struct cache_entry *new_ce;
-               int size, len;
+               int len;
 
                if (!ce_stage(ce))
                        continue;
                unmerged = 1;
                len = ce_namelen(ce);
-               size = cache_entry_size(len);
-               new_ce = xcalloc(1, size);
+               new_ce = make_empty_cache_entry(istate, len);
                memcpy(new_ce->name, ce->name, len);
                new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
                new_ce->ce_namelen = len;
                new_ce->ce_mode = ce->ce_mode;
-               if (add_index_entry(istate, new_ce, 0))
+               if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
                        return error("%s: cannot drop to stage #0",
                                     new_ce->name);
        }
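(Editor's aside, a typical consumer sketched for illustration; the message is invented, not quoted from git:

    if (read_index_unmerged(&the_index))
        die("cannot proceed: the index contains unresolved merge entries");

The file/directory ambiguity described in the comment above the function is also why the downgraded entries are added with ADD_CACHE_SKIP_DFCHECK here.)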
@@ -2640,7 +2896,7 @@ void *read_blob_data_from_index(const struct index_state *istate,
        }
        if (pos < 0)
                return NULL;
-       data = read_sha1_file(istate->cache[pos]->oid.hash, &type, &sz);
+       data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return NULL;
@@ -2684,3 +2940,41 @@ void move_index_extensions(struct index_state *dst, struct index_state *src)
        dst->untracked = src->untracked;
        src->untracked = NULL;
 }
+
+struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
+                                   struct index_state *istate)
+{
+       unsigned int size = ce_size(ce);
+       int mem_pool_allocated;
+       struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
+       mem_pool_allocated = new_entry->mem_pool_allocated;
+
+       memcpy(new_entry, ce, size);
+       new_entry->mem_pool_allocated = mem_pool_allocated;
+       return new_entry;
+}
+
+void discard_cache_entry(struct cache_entry *ce)
+{
+       if (ce && should_validate_cache_entries())
+               memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
+
+       if (ce && ce->mem_pool_allocated)
+               return;
+
+       free(ce);
+}
+
+int should_validate_cache_entries(void)
+{
+       static int validate_index_cache_entries = -1;
+
+       if (validate_index_cache_entries < 0) {
+               if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
+                       validate_index_cache_entries = 1;
+               else
+                       validate_index_cache_entries = 0;
+       }
+
+       return validate_index_cache_entries;
+}
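(Editor's closing aside, illustrative and not part of the patch; `mode`, `oid` and `path` are assumed from the caller. Transient entries are the one kind that still needs explicit disposal, and discard_cache_entry() does the right thing for both kinds:

    struct cache_entry *ce = make_transient_cache_entry(mode, &oid, path, 0);

    if (ce) {
        /* ... use ce without ever adding it to an index ... */
        discard_cache_entry(ce); /* heap-backed, so this actually free()s it */
    }

Setting GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES in the environment additionally makes discard_cache_entry() poison discarded entries with 0xCD and makes discard_index() check that every entry came from the expected memory pool, which is meant to catch stray heap/pool mix-ups during testing.)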