struct index_state the_index;
+static unsigned int hash_name(const char *name, int namelen)
+{
+ unsigned int hash = 0x123;
+
+ do {
+ unsigned char c = *name++;
+ hash = hash*101 + c;
+ } while (--namelen);
+ return hash;
+}
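
Note the do/while loop above: the body always runs at least once, so callers must never pass namelen == 0. A minimal standalone sketch of the same multiplicative hash, runnable on its own (the main() scaffolding is illustrative only, not part of the patch):

	#include <stdio.h>

	static unsigned int hash_name(const char *name, int namelen)
	{
		unsigned int hash = 0x123;

		do {
			unsigned char c = *name++;
			hash = hash*101 + c;
		} while (--namelen);
		return hash;
	}

	int main(void)
	{
		/* Similar names still land in different buckets. */
		printf("%08x\n", hash_name("Makefile", 8));
		printf("%08x\n", hash_name("makefile", 8));
		return 0;
	}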
+
+static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
+{
+ void **pos;
+ unsigned int hash = hash_name(ce->name, ce_namelen(ce));
+
+ pos = insert_hash(hash, ce, &istate->name_hash);
+ if (pos) {
+ ce->next = *pos;
+ *pos = ce;
+ }
+}
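
hash_name() can map different paths to the same value, and the code above relies on insert_hash() handing that case back to the caller: it returns NULL when the hash was not yet in the table, and a pointer to the occupied slot when an entry with the same hash already exists. The two lines under "if (pos)" then prepend the new entry to a per-hash singly linked list threaded through ce->next. A self-contained sketch of that prepend, with a stripped-down entry type:

	struct entry {
		struct entry *next;
		/* payload omitted */
	};

	/* Prepend e to the chain rooted at *slot; O(1) per collision. */
	static void chain_prepend(struct entry **slot, struct entry *e)
	{
		e->next = *slot;	/* old head becomes our successor */
		*slot = e;		/* e becomes the new head */
	}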
+
+static void lazy_init_name_hash(struct index_state *istate)
+{
+ int nr;
+
+ if (istate->name_hash_initialized)
+ return;
+ for (nr = 0; nr < istate->cache_nr; nr++)
+ hash_index_entry(istate, istate->cache[nr]);
+ istate->name_hash_initialized = 1;
+}
+
+static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
+{
+ istate->cache[nr] = ce;
+ if (istate->name_hash_initialized)
+ hash_index_entry(istate, ce);
+}
+
+/*
+ * We don't actually *remove* it, we can just mark it invalid so that
+ * we won't find it in lookups.
+ *
+ * To actually remove it we would not only have to search the
+ * collision chains (simple enough), but also rehash entries in other
+ * hash buckets whenever the removal leaves this bucket empty (a
+ * common case). So it's much better to just mark it.
+ */
+static void remove_hash_entry(struct index_state *istate, struct cache_entry *ce)
+{
+ ce->ce_flags |= CE_UNHASHED;
+}
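
For contrast with the comment above, here is roughly what eager removal would involve even before any rehashing: walking the chain to unlink the victim. A hypothetical helper, not part of the patch:

	struct entry {
		struct entry *next;
	};

	/*
	 * Hypothetical eager removal: unlink victim from the chain rooted
	 * at *slot.  The patch sidesteps this walk, and the harder problem
	 * of rehashing when a bucket empties, by setting CE_UNHASHED.
	 */
	static void chain_remove(struct entry **slot, struct entry *victim)
	{
		while (*slot) {
			if (*slot == victim) {
				*slot = victim->next;
				return;
			}
			slot = &(*slot)->next;
		}
	}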
+
+static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
+{
+ struct cache_entry *old = istate->cache[nr];
+
+ if (ce != old) {
+ remove_hash_entry(istate, old);
+ set_index_entry(istate, nr, ce);
+ }
+ istate->cache_changed = 1;
+}
+
+int index_name_exists(struct index_state *istate, const char *name, int namelen)
+{
+ unsigned int hash = hash_name(name, namelen);
+ struct cache_entry *ce;
+
+ lazy_init_name_hash(istate);
+ ce = lookup_hash(hash, &istate->name_hash);
+
+ while (ce) {
+ if (!(ce->ce_flags & CE_UNHASHED)) {
+ if (!cache_name_compare(name, namelen, ce->name, ce->ce_flags))
+ return 1;
+ }
+ ce = ce->next;
+ }
+ return 0;
+}
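
A hypothetical caller sketch (path and its surrounding context are assumed for illustration): the hash lookup replaces a binary search over the sorted cache array, and stale CE_UNHASHED entries are filtered out on the way.

	/* Hypothetical usage: refuse to re-add a path already in the index. */
	if (index_name_exists(&the_index, path, strlen(path)))
		return error("'%s' already exists in the index", path);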
+
/*
* This only updates the "non-critical" parts of the directory
* cache, ie the parts that aren't tracked by GIT, and only used
if (assume_unchanged)
ce->ce_flags |= CE_VALID;
+
+ if (S_ISREG(st->st_mode))
+ ce_mark_uptodate(ce);
}
static int ce_compare_data(struct cache_entry *ce, struct stat *st)
return changed;
}
+static int is_racy_timestamp(struct index_state *istate, struct cache_entry *ce)
+{
+ return (istate->timestamp &&
+ ((unsigned int)istate->timestamp) <= ce->ce_mtime);
+}
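
istate->timestamp holds the mtime of the index file itself, so the predicate flags any entry whose own mtime is not older than the moment the index was written: such a path could have been modified in the same second, after its stat data was recorded. A worked example with made-up times:

	#include <stdio.h>

	int main(void)
	{
		unsigned int index_written = 1200000000;  /* .git/index mtime (hypothetical) */
		unsigned int entry_mtime   = 1200000000;  /* path touched in the same second */

		/* Same test as is_racy_timestamp() */
		if (index_written && index_written <= entry_mtime)
			printf("racily clean: contents must be re-verified\n");
		return 0;
	}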
+
int ie_match_stat(struct index_state *istate,
struct cache_entry *ce, struct stat *st,
unsigned int options)
* whose mtime are the same as the index file timestamp more
* carefully than others.
*/
- if (!changed &&
- istate->timestamp &&
- istate->timestamp <= ce->ce_mtime) {
+ if (!changed && is_racy_timestamp(istate, ce)) {
if (assume_racy_is_modified)
changed |= DATA_CHANGED;
else
/* Remove entry, return true if there are more entries to go.. */
int remove_index_entry_at(struct index_state *istate, int pos)
{
+ struct cache_entry *ce = istate->cache[pos];
+
+ remove_hash_entry(istate, ce);
istate->cache_changed = 1;
istate->cache_nr--;
if (pos >= istate->cache_nr)
!ie_match_stat(istate, istate->cache[pos], &st, ce_option)) {
/* Nothing changed, really */
free(ce);
+ ce_mark_uptodate(istate->cache[pos]);
return 0;
}
* it is Ok to have a directory at the same
* path.
*/
- if (stage || istate->cache[pos]->ce_mode) {
+ if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
retval = -1;
if (!ok_to_replace)
break;
(p->name[len] != '/') ||
memcmp(p->name, name, len))
break; /* not our subdirectory */
- if (ce_stage(p) == stage && (stage || p->ce_mode))
- /* p is at the same stage as our entry, and
+ if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
+ /*
+ * p is at the same stage as our entry, and
* is a subdirectory of what we are looking
* at, so we cannot have conflicts at our
* level or anything shorter.
/* existing match? Just replace it. */
if (pos >= 0) {
- istate->cache_changed = 1;
- istate->cache[pos] = ce;
+ replace_index_entry(istate, pos, ce);
return 0;
}
pos = -pos-1;
memmove(istate->cache + pos + 1,
istate->cache + pos,
(istate->cache_nr - pos - 1) * sizeof(ce));
- istate->cache[pos] = ce;
+ set_index_entry(istate, pos, ce);
istate->cache_changed = 1;
return 0;
}
int changed, size;
int ignore_valid = options & CE_MATCH_IGNORE_VALID;
+ if (ce_uptodate(ce))
+ return ce;
+
if (lstat(ce->name, &st) < 0) {
if (err)
*err = errno;
if (ignore_valid && assume_unchanged &&
!(ce->ce_flags & CE_VALID))
; /* mark this one VALID again */
- else
+ else {
+ /*
+ * We do not mark the index itself "modified"
+ * because CE_UPTODATE flag is in-core only;
+ * we are not going to write this change out.
+ */
+ ce_mark_uptodate(ce);
return ce;
+ }
}
if (ie_modified(istate, ce, &st, options)) {
has_errors = 1;
continue;
}
- istate->cache_changed = 1;
- /* You can NOT just free istate->cache[i] here, since it
- * might not be necessarily malloc()ed but can also come
- * from mmap(). */
- istate->cache[i] = new;
+
+ replace_index_entry(istate, i, new);
}
return has_errors;
}
static void convert_from_disk(struct ondisk_cache_entry *ondisk, struct cache_entry *ce)
{
+ size_t len;
+
ce->ce_ctime = ntohl(ondisk->ctime.sec);
ce->ce_mtime = ntohl(ondisk->mtime.sec);
ce->ce_dev = ntohl(ondisk->dev);
/* On-disk flags are just 16 bits */
ce->ce_flags = ntohs(ondisk->flags);
hashcpy(ce->sha1, ondisk->sha1);
- memcpy(ce->name, ondisk->name, ce_namelen(ce)+1);
+
+ len = ce->ce_flags & CE_NAMEMASK;
+ if (len == CE_NAMEMASK)
+ len = strlen(ondisk->name);
+ /*
+ * NEEDSWORK: If the original index is crafted, this length is
+ * never validated and the memcpy() could read past the end of
+ * the mapped file.
+ */
+ memcpy(ce->name, ondisk->name, len + 1);
+}
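
One possible shape for the check the NEEDSWORK comment asks for (hypothetical; the patch itself performs no validation): make sure the name, including its terminating NUL, stays within the bytes still available in the mapped file.

	#include <stddef.h>

	/*
	 * Hypothetical bounds check: 'avail' is the number of mapped bytes
	 * remaining at ondisk->name.  Not part of the patch.
	 */
	static int name_fits(const char *name, size_t len, size_t avail)
	{
		return len < avail && name[len] == '\0';
	}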
+
+static inline size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
+{
+ long per_entry;
+
+ per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
+
+ /*
+ * Alignment can cause differences. This should be "alignof", but
+ * since that's a gcc'ism, just use the size of a pointer.
+ */
+ per_entry += sizeof(void *);
+ return ondisk_size + entries*per_entry;
}
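
To make the estimate concrete with hypothetical sizes: if sizeof(struct cache_entry) were 96, sizeof(struct ondisk_cache_entry) 64, and pointers 8 bytes, per_entry would come to 96 - 64 + 8 = 40, so a 1 MiB on-disk index holding 10,000 entries would be allocated 1 MiB + 400 kB, which is intended to stay at or above the actual in-core requirement once alignment padding is accounted for.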
/* remember to discard_cache() before reading a different cache! */
* has room for a few more flags, we can allocate using the same
* index size
*/
- istate->alloc = xmalloc(mmap_size);
+ istate->alloc = xmalloc(estimate_cache_size(mmap_size, istate->cache_nr));
src_offset = sizeof(*hdr);
dst_offset = 0;
disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
ce = (struct cache_entry *)((char *)istate->alloc + dst_offset);
convert_from_disk(disk_ce, ce);
- istate->cache[i] = ce;
+ set_index_entry(istate, i, ce);
src_offset += ondisk_ce_size(ce);
dst_offset += ce_size(ce);
istate->cache_nr = 0;
istate->cache_changed = 0;
istate->timestamp = 0;
+ free_hash(&istate->name_hash);
cache_tree_free(&(istate->cache_tree));
free(istate->alloc);
istate->alloc = NULL;
return 0;
}
+int unmerged_index(struct index_state *istate)
+{
+ int i;
+ for (i = 0; i < istate->cache_nr; i++) {
+ if (ce_stage(istate->cache[i]))
+ return 1;
+ }
+ return 0;
+}
+
#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
struct cache_entry *ce = cache[i];
if (ce->ce_flags & CE_REMOVE)
continue;
- if (istate->timestamp &&
- istate->timestamp <= ce->ce_mtime)
+ if (is_racy_timestamp(istate, ce))
ce_smudge_racily_clean_entry(ce);
if (ce_write_entry(&c, newfd, ce) < 0)
return -1;