*
* Copyright (C) Linus Torvalds, 2005
*/
-#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
#include "diff.h"
#include "commit.h"
#include "blob.h"
#include "resolve-undo.h"
+#include "run-command.h"
#include "strbuf.h"
#include "varint.h"
#include "split-index.h"
return *pool_ptr;
}
-struct index_state the_index;
static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
* CE_REMOVE is set in ce_flags. This is much more effective than
* calling remove_index_entry_at() for each entry to be removed.
*/
-void remove_marked_cache_entries(struct index_state *istate)
+void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
struct cache_entry **ce_array = istate->cache;
unsigned int i, j;
for (i = j = 0; i < istate->cache_nr; i++) {
if (ce_array[i]->ce_flags & CE_REMOVE) {
+ if (invalidate) {
+ cache_tree_invalidate_path(istate,
+ ce_array[i]->name);
+ untracked_cache_remove_from_index(istate,
+ ce_array[i]->name);
+ }
remove_name_hash(istate, ce_array[i]);
save_or_free_index_entry(istate, ce_array[i]);
}
int intent_only = flags & ADD_CACHE_INTENT;
int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
(intent_only ? ADD_CACHE_NEW_ONLY : 0));
- int newflags = HASH_WRITE_OBJECT;
+ int hash_flags = HASH_WRITE_OBJECT;
+ struct object_id oid;
- if (flags & HASH_RENORMALIZE)
- newflags |= HASH_RENORMALIZE;
+ if (flags & ADD_CACHE_RENORMALIZE)
+ hash_flags |= HASH_RENORMALIZE;
if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
return error(_("%s: can only add regular files, symbolic links or git-directories"), path);
namelen = strlen(path);
if (S_ISDIR(st_mode)) {
+ if (resolve_gitlink_ref(path, "HEAD", &oid) < 0)
+ return error(_("'%s' does not have a commit checked out"), path);
while (namelen && path[namelen-1] == '/')
namelen--;
}
if (ignore_case) {
adjust_dirname_case(istate, ce->name);
}
- if (!(flags & HASH_RENORMALIZE)) {
+ if (!(flags & ADD_CACHE_RENORMALIZE)) {
alias = index_file_exists(istate, ce->name,
ce_namelen(ce), ignore_case);
if (alias &&
}
}
if (!intent_only) {
- if (index_path(istate, &ce->oid, path, st, newflags)) {
+ if (index_path(istate, &ce->oid, path, st, hash_flags)) {
discard_cache_entry(ce);
return error(_("unable to index file '%s'"), path);
}
uint32_t uid;
uint32_t gid;
uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- char name[FLEX_ARRAY]; /* more */
-};
-
-/*
- * This struct is used when CE_EXTENDED bit is 1
- * The struct must match ondisk_cache_entry exactly from
- * ctime till flags
- */
-struct ondisk_cache_entry_extended {
- struct cache_time ctime;
- struct cache_time mtime;
- uint32_t dev;
- uint32_t ino;
- uint32_t mode;
- uint32_t uid;
- uint32_t gid;
- uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- uint16_t flags2;
- char name[FLEX_ARRAY]; /* more */
+ /*
+ * unsigned char hash[hashsz];
+ * uint16_t flags;
+ * if (flags & CE_EXTENDED)
+ * uint16_t flags2;
+ */
+ unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
+ char name[FLEX_ARRAY];
};
/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
-#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
+#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
-#define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
-#define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
- ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
- ondisk_cache_entry_size(ce_namelen(ce)))
+#define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
+ ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
+#define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
+#define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
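+/*
+ * For example, with SHA-1 (rawsz == 20) a non-extended entry stores
+ * 20 hash bytes plus one 16-bit flags word in "data", so
+ * ondisk_data_size() is 22 + namelen; for index formats v3 and older,
+ * ondisk_ce_size() then rounds the stat header plus that payload up to
+ * a multiple of 8, leaving 1-8 NUL bytes of padding after the name.
+ */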
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;
return 0;
}
-int read_index(struct index_state *istate)
-{
- return read_index_from(istate, get_index_file(), get_git_dir());
-}
-
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
unsigned int version,
struct ondisk_cache_entry *ondisk,
struct cache_entry *ce;
size_t len;
const char *name;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
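+ /*
+ * In the combined on-disk layout, the 16-bit flags word (and, for
+ * extended entries, flags2) sits immediately after the hash inside
+ * "data"; the entry name follows the flags.
+ */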
unsigned int flags;
size_t copy_len = 0;
/*
int expand_name_field = version == 4;
/* On-disk flags are just 16 bits */
- flags = get_be16(&ondisk->flags);
+ flags = get_be16(flagsp);
len = flags & CE_NAMEMASK;
if (flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
int extended_flags;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- extended_flags = get_be16(&ondisk2->flags2) << 16;
+ extended_flags = get_be16(flagsp + 1) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
- name = ondisk2->name;
+ name = (const char *)(flagsp + 2);
}
else
- name = ondisk->name;
+ name = (const char *)(flagsp + 1);
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
- hashcpy(ce->oid.hash, ondisk->sha1);
+ hashcpy(ce->oid.hash, ondisk->data);
+ memcpy(ce->name, name, len);
+ ce->name[len] = '\0';
if (expand_name_field) {
if (copy_len)
load_index_extensions(&p);
}
munmap((void *)mmap, mmap_size);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "read/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr",
+ istate->cache_nr);
+
return istate->cache_nr;
unmap:
if (istate->initialized)
return istate->cache_nr;
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_read_index", the_repository,
+ "%s", path);
trace_performance_enter();
ret = do_read_index(istate, path, 0);
trace_performance_leave("read cache %s", path);
+ trace2_region_leave_printf("index", "do_read_index", the_repository,
+ "%s", path);
split_index = istate->split_index;
if (!split_index || is_null_oid(&split_index->base_oid)) {
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
ret = do_read_index(split_index->base, base_path, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
if (!oideq(&split_index->base_oid, &split_index->base->oid))
die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
return 0;
}
-int index_has_changes(struct index_state *istate,
- struct tree *tree,
- struct strbuf *sb)
+int repo_index_has_changes(struct repository *repo,
+ struct tree *tree,
+ struct strbuf *sb)
{
+ struct index_state *istate = repo->index;
struct object_id cmp;
int i;
- if (istate != &the_index) {
- BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
- }
if (tree)
cmp = tree->object.oid;
if (tree || !get_oid_tree("HEAD", &cmp)) {
struct diff_options opt;
- repo_diff_setup(the_repository, &opt);
+ repo_diff_setup(repo, &opt);
opt.flags.exit_with_status = 1;
if (!sb)
opt.flags.quick = 1;
struct cache_entry *ce)
{
short flags;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);
ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
- hashcpy(ondisk->sha1, ce->oid.hash);
+ hashcpy(ondisk->data, ce->oid.hash);
flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
- ondisk->flags = htons(flags);
+ flagsp[0] = htons(flags);
if (ce->ce_flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
+ flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
}
}
stripped_name = 1;
}
- if (ce->ce_flags & CE_EXTENDED)
- size = offsetof(struct ondisk_cache_entry_extended, name);
- else
- size = offsetof(struct ondisk_cache_entry, name);
+ size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);
if (!previous_name) {
int len = ce_namelen(ce);
return 0;
}
-static int verify_index(const struct index_state *istate)
+static int repo_verify_index(struct repository *repo)
{
- return verify_index_from(istate, get_index_file());
+ return verify_index_from(repo->index, repo->index_file);
}
static int has_racy_timestamp(struct index_state *istate)
return 0;
}
-void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
+void repo_update_index_if_able(struct repository *repo,
+ struct lock_file *lockfile)
{
- if ((istate->cache_changed || has_racy_timestamp(istate)) &&
- verify_index(istate))
- write_locked_index(istate, lockfile, COMMIT_LOCK);
+ if ((repo->index->cache_changed ||
+ has_racy_timestamp(repo->index)) &&
+ repo_verify_index(repo))
+ write_locked_index(repo->index, lockfile, COMMIT_LOCK);
else
rollback_lock_file(lockfile);
}
struct cache_entry **cache = istate->cache;
int entries = istate->cache_nr;
struct stat st;
- struct ondisk_cache_entry_extended ondisk;
+ struct ondisk_cache_entry ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
return -1;
}
- if (!strip_extensions && istate->split_index) {
+ if (!strip_extensions && istate->split_index &&
+ !is_null_oid(&istate->split_index->base_oid)) {
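+ /* write the "link" extension only if there is a shared index to link to */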
struct strbuf sb = STRBUF_INIT;
err = write_link_extension(&sb, istate) < 0 ||
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "write/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "write/cache_nr",
+ istate->cache_nr);
+
return 0;
}
static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int ret = do_write_index(istate, lock->tempfile, 0);
+ int ret;
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+ ret = do_write_index(istate, lock->tempfile, 0);
+ trace2_region_leave_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+
if (ret)
return ret;
if (flags & COMMIT_LOCK)
- return commit_locked_index(lock);
- return close_lock_file_gently(lock);
+ ret = commit_locked_index(lock);
+ else
+ ret = close_lock_file_gently(lock);
+
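+ /*
+ * Let the post-index-change hook know whether this index write was
+ * accompanied by working-directory updates and/or changes to
+ * skip-worktree bits, then reset the flags for the next write.
+ */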
+ run_hook_le(NULL, "post-index-change",
+ istate->updated_workdir ? "1" : "0",
+ istate->updated_skipworktree ? "1" : "0", NULL);
+ istate->updated_workdir = 0;
+ istate->updated_skipworktree = 0;
+
+ return ret;
}
static int write_split_index(struct index_state *istate,
int ret;
move_cache_to_base_index(istate);
+
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
ret = do_write_index(si->base, *temp, 1);
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
+
if (ret)
return ret;
ret = adjust_shared_perm(get_tempfile_path(*temp));
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index) {
+ if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
const char *shared_index = git_path("sharedindex.%s",
oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
* state can call this and check its return value, instead of calling
* read_cache().
*/
-int read_index_unmerged(struct index_state *istate)
+int repo_read_index_unmerged(struct repository *repo)
{
+ struct index_state *istate;
int i;
int unmerged = 0;
- read_index(istate);
+ repo_read_index(repo);
+ istate = repo->index;
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
struct cache_entry *new_ce;
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
- const char *index = NULL;
- uint32_t extsize, ext_version;
- struct index_entry_offset_table *ieot;
- int i, nr;
-
- /* find the IEOT extension */
- if (!offset)
- return NULL;
- while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
- extsize = get_be32(mmap + offset + 4);
- if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
- index = mmap + offset + 4 + 4;
- break;
- }
- offset += 8;
- offset += extsize;
- }
- if (!index)
- return NULL;
-
- /* validate the version is IEOT_VERSION */
- ext_version = get_be32(index);
- if (ext_version != IEOT_VERSION) {
- error("invalid IEOT version %d", ext_version);
- return NULL;
- }
- index += sizeof(uint32_t);
-
- /* extension size - version bytes / bytes per entry */
- nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
- if (!nr) {
- error("invalid number of IEOT entries %d", nr);
- return NULL;
- }
- ieot = xmalloc(sizeof(struct index_entry_offset_table)
- + (nr * sizeof(struct index_entry_offset)));
- ieot->nr = nr;
- for (i = 0; i < nr; i++) {
- ieot->entries[i].offset = get_be32(index);
- index += sizeof(uint32_t);
- ieot->entries[i].nr = get_be32(index);
- index += sizeof(uint32_t);
- }
-
- return ieot;
+ const char *index = NULL;
+ uint32_t extsize, ext_version;
+ struct index_entry_offset_table *ieot;
+ int i, nr;
+
+ /* find the IEOT extension */
+ if (!offset)
+ return NULL;
+ while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
+ extsize = get_be32(mmap + offset + 4);
+ if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
+ index = mmap + offset + 4 + 4;
+ break;
+ }
+ offset += 8;
+ offset += extsize;
+ }
+ if (!index)
+ return NULL;
+
+ /* validate the version is IEOT_VERSION */
+ ext_version = get_be32(index);
+ if (ext_version != IEOT_VERSION) {
+ error("invalid IEOT version %d", ext_version);
+ return NULL;
+ }
+ index += sizeof(uint32_t);
+
+ /* extension size - version bytes / bytes per entry */
+ nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
+ if (!nr) {
+ error("invalid number of IEOT entries %d", nr);
+ return NULL;
+ }
+ ieot = xmalloc(sizeof(struct index_entry_offset_table)
+ + (nr * sizeof(struct index_entry_offset)));
+ ieot->nr = nr;
+ for (i = 0; i < nr; i++) {
+ ieot->entries[i].offset = get_be32(index);
+ index += sizeof(uint32_t);
+ ieot->entries[i].nr = get_be32(index);
+ index += sizeof(uint32_t);
+ }
+
+ return ieot;
}
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
- uint32_t buffer;
- int i;
+ uint32_t buffer;
+ int i;
- /* version */
- put_be32(&buffer, IEOT_VERSION);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* version */
+ put_be32(&buffer, IEOT_VERSION);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* ieot */
- for (i = 0; i < ieot->nr; i++) {
+ /* ieot */
+ for (i = 0; i < ieot->nr; i++) {
- /* offset */
- put_be32(&buffer, ieot->entries[i].offset);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* offset */
+ put_be32(&buffer, ieot->entries[i].offset);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* count */
- put_be32(&buffer, ieot->entries[i].nr);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
- }
+ /* count */
+ put_be32(&buffer, ieot->entries[i].nr);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
+ }
}