* cache, ie the parts that aren't tracked by GIT, and only used
* to validate the cache.
*/
-void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
+void fill_stat_cache_info(struct index_state *istate, struct cache_entry *ce, struct stat *st)
{
fill_stat_data(&ce->ce_stat_data, st);
if (S_ISREG(st->st_mode)) {
ce_mark_uptodate(ce);
- mark_fsmonitor_valid(ce);
+ mark_fsmonitor_valid(istate, ce);
}
}
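The point of the new parameter: marking an entry up to date may also flip CE_FSMONITOR_VALID, and doing that has to flag the owning in-memory index as changed so the bit is actually written back out. A hedged sketch of the shape mark_fsmonitor_valid() takes with this change, not the verbatim fsmonitor.h code:

	static inline void mark_fsmonitor_valid(struct index_state *istate,
						struct cache_entry *ce)
	{
		if (core_fsmonitor && !(ce->ce_flags & CE_FSMONITOR_VALID)) {
			istate->cache_changed = 1;	/* ensure the flipped bit reaches disk */
			ce->ce_flags |= CE_FSMONITOR_VALID;
		}
	}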
memcpy(ce->name, path, namelen);
ce->ce_namelen = namelen;
if (!intent_only)
- fill_stat_cache_info(ce, st);
+ fill_stat_cache_info(istate, ce, st);
else
ce->ce_flags |= CE_INTENT_TO_ADD;
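For callers the conversion is mechanical: any site that stats a path and records the result now needs the owning index in scope, as the hunks above show. A minimal sketch of the new call shape; record_stat() is a hypothetical helper used only for illustration, not part of this patch:

	static void record_stat(struct index_state *istate,
				struct cache_entry *ce, const char *path)
	{
		struct stat st;

		if (!lstat(path, &st))
			fill_stat_cache_info(istate, ce, &st);
	}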
*/
if (!S_ISGITLINK(ce->ce_mode)) {
ce_mark_uptodate(ce);
- mark_fsmonitor_valid(ce);
+ mark_fsmonitor_valid(istate, ce);
}
return ce;
}
updated = make_empty_cache_entry(istate, ce_namelen(ce));
copy_cache_entry(updated, ce);
memcpy(updated->name, ce->name, ce->ce_namelen + 1);
- fill_stat_cache_info(updated, &st);
+ fill_stat_cache_info(istate, updated, &st);
/*
* If ignore_valid is not set, we should leave CE_VALID bit
* alone. Otherwise, paths marked with --no-assume-unchanged
}
static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
- unsigned long src_offset, int nr_threads, struct index_entry_offset_table *ieot)
+ int nr_threads, struct index_entry_offset_table *ieot)
{
int i, offset, ieot_blocks, ieot_start, err;
struct load_cache_entries_thread_data *data;
ieot = read_ieot_extension(mmap, mmap_size, extension_offset);
if (ieot) {
- src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, src_offset, nr_threads, ieot);
+ src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
free(ieot);
} else {
src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
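The src_offset argument can be dropped because the threaded loader never read it: each worker positions itself using the byte offsets recorded in the Index Entry Offset Table (IEOT) extension rather than the caller's running offset. Roughly, per assigned block (a sketch; the field names assume the index_entry_offset_table layout used elsewhere in read-cache.c):

	/* sketch: the start position for block j comes from the extension itself */
	offset = ieot->entries[j].offset;	/* j = first IEOT block handed to this worker */

The single-threaded fallback still passes src_offset through, since load_all_cache_entries() does parse sequentially from that position.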
free_name_hash(istate);
cache_tree_free(&(istate->cache_tree));
istate->initialized = 0;
+ istate->fsmonitor_has_run_once = 0;
FREE_AND_NULL(istate->cache);
istate->cache_alloc = 0;
discard_split_index(istate);
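fsmonitor_has_run_once exists so that refresh_fsmonitor() queries the fsmonitor hook at most once per in-memory index; resetting it here means an index_state that is discarded and later re-read will ask the hook again instead of trusting state from its previous life. The guard it feeds looks roughly like this (hedged sketch, not the verbatim fsmonitor.c):

	void refresh_fsmonitor(struct index_state *istate)
	{
		if (!core_fsmonitor || istate->fsmonitor_has_run_once)
			return;
		istate->fsmonitor_has_run_once = 1;
		/* ... invoke the hook and update CE_FSMONITOR_VALID bits ... */
	}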