);
}
-static int is_racy_timestamp(const struct index_state *istate,
+int is_racy_timestamp(const struct index_state *istate,
const struct cache_entry *ce)
{
return (!S_ISGITLINK(ce->ce_mode) &&
typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
+ /*
+ * Use the multi-threaded preload_index() to refresh most of the
+ * cache entries quickly; then, in the single-threaded loop below,
+ * we only have to handle the special cases that remain.
+ */
+ preload_index(istate, pathspec, 0);
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce, *new_entry;
int cache_errno = 0;
size_t len;
const char *name;
unsigned int flags;
- size_t copy_len;
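+ /* number of name bytes reused from the previous entry (index v4 prefix compression) */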
+ size_t copy_len = 0;
/*
* Adjacent cache entries tend to share the leading paths, so it makes
* sense to only store the differences in later entries. In the v4
die(_("malformed name field in the index, near path '%s'"),
previous_ce->name);
copy_len = previous_len - strip_len;
- } else {
- copy_len = 0;
}
name = (const char *)cp;
}
struct index_entry_offset entries[FLEX_ARRAY];
};
-#ifndef NO_PTHREADS
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);
-#endif
static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
struct load_index_extensions
{
-#ifndef NO_PTHREADS
pthread_t pthread;
-#endif
struct index_state *istate;
const char *mmap;
size_t mmap_size;
return consumed;
}
-#ifndef NO_PTHREADS
-
/*
* Mostly randomly chosen maximum thread counts: we
* cap the parallelism to online_cpus() threads, and we want
return consumed;
}
-#endif
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
size_t mmap_size;
struct load_index_extensions p;
size_t extension_offset = 0;
-#ifndef NO_PTHREADS
int nr_threads, cpus;
struct index_entry_offset_table *ieot = NULL;
-#endif
if (istate->initialized)
return istate->cache_nr;
src_offset = sizeof(*hdr);
-#ifndef NO_PTHREADS
- nr_threads = git_config_get_index_threads();
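+ /* a nonzero return means index.threads is not set; default to one thread */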
+ if (git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;
/* TODO: does creating more threads than cores help? */
if (!nr_threads) {
nr_threads = cpus;
}
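+ /* builds without thread support always load the index single-threaded */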
+ if (!HAVE_THREADS)
+ nr_threads = 1;
+
if (nr_threads > 1) {
extension_offset = read_eoie_extension(mmap, mmap_size);
if (extension_offset) {
} else {
src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
}
-#else
- src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
-#endif
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
/* if we created a thread, join it; otherwise load the extensions on the primary thread */
-#ifndef NO_PTHREADS
if (extension_offset) {
int ret = pthread_join(p.pthread, NULL);
if (ret)
die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
- }
-#endif
- if (!extension_offset) {
+ } else {
p.src_offset = src_offset;
load_index_extensions(&p);
}
freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
- free(base_path);
trace_performance_leave("read cache %s", base_path);
+ free(base_path);
return ret;
}
rollback_lock_file(lockfile);
}
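+/*
+ * Decide whether to write the End of Index Entry (EOIE) extension.
+ * An explicit index.recordEndOfIndexEntries setting wins; otherwise
+ * fall through to the threading configuration below.
+ */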
+static int record_eoie(void)
+{
+ int val;
+
+ if (!git_config_get_bool("index.recordendofindexentries", &val))
+ return val;
+
+ /*
+ * As a convenience, the end of index entries extension
+ * used for threading is written by default if the user
+ * explicitly requested threaded index reads.
+ */
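+ /* any value other than 1 (including 0 for "auto") implies threaded reads */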
+ return !git_config_get_index_threads(&val) && val != 1;
+}
+
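+/*
+ * Same decision for the Index Entry Offset Table (IEOT) extension:
+ * an explicit index.recordOffsetTable setting wins; otherwise mirror
+ * the threading configuration, as above.
+ */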
+static int record_ieot(void)
+{
+ int val;
+
+ if (!git_config_get_bool("index.recordoffsettable", &val))
+ return val;
+
+ /*
+ * As a convenience, the offset table used for threading is
+ * written by default if the user explicitly requested
+ * threaded index reads.
+ */
+ return !git_config_get_index_threads(&val) && val != 1;
+}
+
/*
* On success, `tempfile` is closed. If it is the temporary file
* of a `struct lock_file`, we will therefore effectively perform
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
-#ifndef NO_PTHREADS
- nr_threads = git_config_get_index_threads();
- if (nr_threads != 1) {
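+ /* default to a single thread if index.threads is unset or threads are unavailable */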
+ if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;
+
+ if (nr_threads != 1 && record_ieot()) {
int ieot_blocks, cpus;
/*
ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
}
}
-#endif
offset = lseek(newfd, 0, SEEK_CUR);
if (offset < 0) {
* strip_extensions parameter as we need it when loading the shared
* index.
*/
-#ifndef NO_PTHREADS
if (ieot) {
struct strbuf sb = STRBUF_INIT;
if (err)
return -1;
}
-#endif
if (!strip_extensions && istate->split_index) {
struct strbuf sb = STRBUF_INIT;
* read. Write it out regardless of the strip_extensions parameter as we need it
* when loading the shared index.
*/
- if (offset) {
+ if (offset && record_eoie()) {
struct strbuf sb = STRBUF_INIT;
write_eoie_extension(&sb, &eoie_c, offset);
struct tempfile *temp;
int saved_errno;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ /* Same initial permissions as the main .git/index file */
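+ /* (0666 is narrowed by the umask, as for any open(2) with O_CREAT) */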
+ temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
if (!temp) {
oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
strbuf_add(sb, hash, the_hash_algo->rawsz);
}
-#ifndef NO_PTHREADS
#define IEOT_VERSION (1)
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
strbuf_add(sb, &buffer, sizeof(uint32_t));
}
}
-#endif