#define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */
#define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */
#define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */
+#define ADD_CACHE_RENORMALIZE 64 /* Pass along HASH_RENORMALIZE */
extern int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
extern void rename_index_entry_at(struct index_state *, int pos, const char *new_name);
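/*
 * Illustrative sketch, not part of the patch: the ADD_CACHE_* bits are
 * OR-ed together into add_index_entry()'s "option" argument. The helper
 * name below is invented for illustration, and ADD_CACHE_OK_TO_ADD is
 * assumed to be the pre-existing "may create a new entry" bit defined
 * earlier in this header.
 */
static int add_renormalized_entry(struct index_state *istate,
				  struct cache_entry *ce)
{
	return add_index_entry(istate, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_RENORMALIZE);
}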
#define CE_MATCH_REFRESH 0x10
/* don't refresh_fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
#define CE_MATCH_IGNORE_FSMONITOR 0x20
+extern int is_racy_timestamp(const struct index_state *istate,
+			     const struct cache_entry *ce);
extern int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
extern int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
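/*
 * Illustrative sketch, not part of the patch: a hypothetical caller that
 * flags an entry for a closer look when its cached stat data no longer
 * matches the file, or when its timestamp is racy (the same as or newer
 * than the index's own timestamp, so the stat data cannot be trusted).
 * The helper name is invented for illustration.
 */
static int entry_needs_recheck(struct index_state *istate,
			       const struct cache_entry *ce,
			       struct stat *st)
{
	if (ie_match_stat(istate, ce, st, CE_MATCH_REFRESH))
		return 1;
	return is_racy_timestamp(istate, ce);
}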
static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
{
+	/*
+	 * This is a temporary optimization hack. By asserting the size here,
+	 * we let the compiler know that it's always going to be 20, which lets
+	 * it turn this fixed-size memcmp into a few inline instructions.
+	 *
+	 * This will need to be extended or ripped out when we learn about
+	 * hashes of different sizes.
+	 */
+	if (the_hash_algo->rawsz != 20)
+		BUG("hash size not yet supported by hashcmp");
	return memcmp(sha1, sha2, the_hash_algo->rawsz);
}
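/*
 * Illustrative sketch, not part of the patch: call sites typically only
 * care about equality, i.e. "hashcmp() returned zero". A thin wrapper
 * like this (name invented here) keeps that intent readable.
 */
static inline int hashes_match_sketch(const unsigned char *a,
				      const unsigned char *b)
{
	return !hashcmp(a, b);
}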