first = 0;
last = istate->cache_nr;
while (last > first) {
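/* Midpoint taken as first + ((last - first) >> 1): the old (last + first) >> 1 can overflow when first and last are both large. */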
- int next = (last + first) >> 1;
+ int next = first + ((last - first) >> 1);
struct cache_entry *ce = istate->cache[next];
int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
if (!cmp)
return next;
if (cmp < 0)
last = next;
else
first = next + 1;
}
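/* Load cache entries on nr_threads worker threads, dividing the work with the index entry offset table (IEOT); returns the number of bytes of cache entry data consumed. */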
static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
- unsigned long src_offset, int nr_threads, struct index_entry_offset_table *ieot)
+ int nr_threads, struct index_entry_offset_table *ieot)
{
int i, offset, ieot_blocks, ieot_start, err;
struct load_cache_entries_thread_data *data;
if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
die(_("%s: index file smaller than expected"), path);
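/* xmmap() dies itself on failure; xmmap_gently() returns MAP_FAILED instead, so the error can be reported here with the index path. */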
- mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ mmap = xmmap_gently(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (mmap == MAP_FAILED)
die_errno(_("%s: unable to map index file"), path);
close(fd);
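/* If the index has an IEOT extension, load cache entries on multiple threads; otherwise fall back to the single-threaded path. */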
ieot = read_ieot_extension(mmap, mmap_size, extension_offset);
if (ieot) {
- src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, src_offset, nr_threads, ieot);
+ src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, nr_threads, ieot);
free(ieot);
} else {
src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);