#include "utf8.h"
#include "fsmonitor.h"
#include "thread-utils.h"
+#include "progress.h"
/* Mask for the name length in ce_flags in the on-disk index */
}
}
-static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
+static int ce_compare_data(struct index_state *istate,
+ const struct cache_entry *ce,
+ struct stat *st)
{
int match = -1;
int fd = git_open_cloexec(ce->name, O_RDONLY);
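+ /* if the file cannot be opened, match stays -1 and is reported as changed */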
if (fd >= 0) {
struct object_id oid;
- if (!index_fd(&oid, fd, st, OBJ_BLOB, ce->name, 0))
+ if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
match = !oideq(&oid, &ce->oid);
/* index_fd() closed the file descriptor already */
}
return match;
}
-static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st)
+static int ce_modified_check_fs(struct index_state *istate,
+ const struct cache_entry *ce,
+ struct stat *st)
{
switch (st->st_mode & S_IFMT) {
case S_IFREG:
- if (ce_compare_data(ce, st))
+ if (ce_compare_data(istate, ce, st))
return DATA_CHANGED;
break;
case S_IFLNK:
);
}
-static int is_racy_timestamp(const struct index_state *istate,
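+/*
+ * A cache entry whose mtime is not older than the index file's own
+ * timestamp may have been modified after the index was written, so
+ * its cached stat data cannot prove the file is unchanged.
+ */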
+int is_racy_timestamp(const struct index_state *istate,
const struct cache_entry *ce)
{
return (!S_ISGITLINK(ce->ce_mode) &&
if (assume_racy_is_modified)
changed |= DATA_CHANGED;
else
- changed |= ce_modified_check_fs(ce, st);
+ changed |= ce_modified_check_fs(istate, ce, st);
}
return changed;
(S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
return changed;
- changed_fs = ce_modified_check_fs(ce, st);
+ changed_fs = ce_modified_check_fs(istate, ce, st);
if (changed_fs)
return changed | changed_fs;
return 0;
}
}
if (!intent_only) {
- if (index_path(&ce->oid, path, st, newflags)) {
+ if (index_path(istate, &ce->oid, path, st, newflags)) {
discard_cache_entry(ce);
return error("unable to index file %s", path);
}
ce->ce_namelen = len;
ce->ce_mode = create_ce_mode(mode);
- ret = refresh_cache_entry(&the_index, ce, refresh_options);
+ ret = refresh_cache_entry(istate, ce, refresh_options);
if (ret != ce)
discard_cache_entry(ce);
return ret;
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
+ struct progress *progress = NULL;
+
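+ /*
+ * Show a progress meter only when stderr is a terminal;
+ * start_delayed_progress() keeps it invisible unless the
+ * refresh runs long enough to be worth reporting.
+ */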
+ if ((flags & REFRESH_PROGRESS) && isatty(2))
+ progress = start_delayed_progress(_("Refresh index"),
+ istate->cache_nr);
trace_performance_enter();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s: needs update\n");
added_fmt = (in_porcelain ? "A\t%s\n" : "%s: needs update\n");
unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
+ /*
+ * Use the multi-threaded preload_index() to refresh most of the
+ * cache entries quickly; then, in the single-threaded loop below,
+ * we only have to handle the special cases that are left.
+ */
+ preload_index(istate, pathspec, 0);
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce, *new_entry;
int cache_errno = 0;
if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
continue;
- if (pathspec && !ce_path_match(&the_index, ce, pathspec, seen))
+ if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
filtered = 1;
if (ce_stage(ce)) {
new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
if (new_entry == ce)
continue;
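+ /* advance the delayed progress meter to the current entry */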
+ if (progress)
+ display_progress(progress, i);
if (!new_entry) {
const char *fmt;
replace_index_entry(istate, i, new_entry);
}
+ if (progress) {
+ display_progress(progress, istate->cache_nr);
+ stop_progress(&progress);
+ }
trace_performance_leave("refresh index");
return has_errors;
}
size_t len;
const char *name;
unsigned int flags;
- size_t copy_len;
+ size_t copy_len = 0;
/*
* Adjacent cache entries tend to share the leading paths, so it makes
* sense to only store the differences in later entries. In the v4
die(_("malformed name field in the index, near path '%s'"),
previous_ce->name);
copy_len = previous_len - strip_len;
- } else {
- copy_len = 0;
}
name = (const char *)cp;
}
struct index_entry_offset entries[FLEX_ARRAY];
};
-#ifndef NO_PTHREADS
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);
-#endif
static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
struct load_index_extensions
{
-#ifndef NO_PTHREADS
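+ /* thread-utils.h supplies a dummy pthread_t when threads are unavailable */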
pthread_t pthread;
-#endif
struct index_state *istate;
const char *mmap;
size_t mmap_size;
return consumed;
}
-#ifndef NO_PTHREADS
-
/*
* Mostly randomly chosen maximum thread counts: we
* cap the parallelism to online_cpus() threads, and we want
return consumed;
}
-#endif
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
size_t mmap_size;
struct load_index_extensions p;
size_t extension_offset = 0;
-#ifndef NO_PTHREADS
int nr_threads, cpus;
struct index_entry_offset_table *ieot = NULL;
-#endif
if (istate->initialized)
return istate->cache_nr;
src_offset = sizeof(*hdr);
-#ifndef NO_PTHREADS
- nr_threads = git_config_get_index_threads();
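+ /* if "index.threads" is not configured, default to a single thread */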
+ if (git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;
/* TODO: does creating more threads than cores help? */
if (!nr_threads) {
nr_threads = istate->cache_nr / THREAD_COST;
cpus = online_cpus();
if (nr_threads > cpus)
nr_threads = cpus;
}
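+ /* builds without thread support must take the sequential path */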
+ if (!HAVE_THREADS)
+ nr_threads = 1;
+
if (nr_threads > 1) {
extension_offset = read_eoie_extension(mmap, mmap_size);
if (extension_offset) {
} else {
src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
}
-#else
- src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
-#endif
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
/* if we created a thread, join it; otherwise load the extensions on the primary thread */
-#ifndef NO_PTHREADS
if (extension_offset) {
int ret = pthread_join(p.pthread, NULL);
if (ret)
die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
- }
-#endif
- if (!extension_offset) {
+ } else {
p.src_offset = src_offset;
load_index_extensions(&p);
}
freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
- free(base_path);
trace_performance_leave("read cache %s", base_path);
+ free(base_path);
return ret;
}
return 0;
}
-int index_has_changes(const struct index_state *istate,
+int index_has_changes(struct index_state *istate,
struct tree *tree,
struct strbuf *sb)
{
if (tree || !get_oid_tree("HEAD", &cmp)) {
struct diff_options opt;
- diff_setup(&opt);
+ repo_diff_setup(the_repository, &opt);
opt.flags.exit_with_status = 1;
if (!sb)
opt.flags.quick = 1;
return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
-static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
+static void ce_smudge_racily_clean_entry(struct index_state *istate,
+ struct cache_entry *ce)
{
/*
* The only thing we care about in this function is to smudge the
return;
if (ce_match_stat_basic(ce, &st))
return;
- if (ce_modified_check_fs(ce, &st)) {
+ if (ce_modified_check_fs(istate, ce, &st)) {
/* This is "racily clean"; smudge it. Note that this
* is tricky code. At first glance, it may appear
* that it can break with this sequence:
rollback_lock_file(lockfile);
}
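+/* should the end-of-index-entries (EOIE) extension be written out? */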
+static int record_eoie(void)
+{
+ int val;
+
+ if (!git_config_get_bool("index.recordendofindexentries", &val))
+ return val;
+
+ /*
+ * In the absence of that config, the end of index entries
+ * extension used for threading is written only when the user
+ * has explicitly enabled threaded index reads.
+ */
+ return !git_config_get_index_threads(&val) && val != 1;
+}
+
+static int record_ieot(void)
+{
+ int val;
+
+ if (!git_config_get_bool("index.recordoffsettable", &val))
+ return val;
+
+ /*
+ * In the absence of that config, the offset table used for
+ * threading is written only when the user has explicitly
+ * enabled threaded index reads.
+ */
+ return !git_config_get_index_threads(&val) && val != 1;
+}
+
/*
* On success, `tempfile` is closed. If it is the temporary file
* of a `struct lock_file`, we will therefore effectively perform
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
-#ifndef NO_PTHREADS
- nr_threads = git_config_get_index_threads();
- if (nr_threads != 1) {
+ if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;
+
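+ /* skip computing the offset table when it will not be written */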
+ if (nr_threads != 1 && record_ieot()) {
int ieot_blocks, cpus;
/*
ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
}
}
-#endif
offset = lseek(newfd, 0, SEEK_CUR);
if (offset < 0) {
if (ce->ce_flags & CE_REMOVE)
continue;
if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
- ce_smudge_racily_clean_entry(ce);
+ ce_smudge_racily_clean_entry(istate, ce);
if (is_null_oid(&ce->oid)) {
static const char msg[] = "cache entry has null sha1: %s";
static int allow = -1;
* strip_extensions parameter as we need it when loading the shared
* index.
*/
-#ifndef NO_PTHREADS
if (ieot) {
struct strbuf sb = STRBUF_INIT;
if (err)
return -1;
}
-#endif
if (!strip_extensions && istate->split_index) {
struct strbuf sb = STRBUF_INIT;
* read. Write it out regardless of the strip_extensions parameter as we need it
* when loading the shared index.
*/
- if (offset) {
+ if (offset && record_eoie()) {
struct strbuf sb = STRBUF_INIT;
write_eoie_extension(&sb, &eoie_c, offset);
struct tempfile *temp;
int saved_errno;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ /* Same initial permissions as the main .git/index file */
+ temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
if (!temp) {
oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
strbuf_add(sb, hash, the_hash_algo->rawsz);
}
-#ifndef NO_PTHREADS
#define IEOT_VERSION (1)
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
strbuf_add(sb, &buffer, sizeof(uint32_t));
}
}
-#endif