*
* Copyright (C) Linus Torvalds, 2005
*/
-#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
#include "diff.h"
return *pool_ptr;
}
-struct index_state the_index;
static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
* CE_REMOVE is set in ce_flags. This is much more effective than
* calling remove_index_entry_at() for each entry to be removed.
*/
-void remove_marked_cache_entries(struct index_state *istate)
+void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
struct cache_entry **ce_array = istate->cache;
unsigned int i, j;
for (i = j = 0; i < istate->cache_nr; i++) {
if (ce_array[i]->ce_flags & CE_REMOVE) {
+ if (invalidate) {
+ cache_tree_invalidate_path(istate,
+ ce_array[i]->name);
+ untracked_cache_remove_from_index(istate,
+ ce_array[i]->name);
+ }
remove_name_hash(istate, ce_array[i]);
save_or_free_index_entry(istate, ce_array[i]);
}
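
A hedged usage sketch (not part of the patch): callers are expected to flag the doomed entries with CE_REMOVE first and then sweep them in a single pass. The helper below is hypothetical and assumes the usual cache.h API.

	static void drop_entries_with_prefix(struct index_state *istate,
					     const char *prefix)
	{
		unsigned int i;

		/* Mark every entry under "prefix"; nothing is freed yet. */
		for (i = 0; i < istate->cache_nr; i++)
			if (starts_with(istate->cache[i]->name, prefix))
				istate->cache[i]->ce_flags |= CE_REMOVE;

		/*
		 * One sweep drops all marked entries; invalidate=1 also
		 * invalidates the cache-tree and untracked cache for the
		 * removed paths, matching the new signature above.
		 */
		remove_marked_cache_entries(istate, 1);
	}
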
int intent_only = flags & ADD_CACHE_INTENT;
int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
(intent_only ? ADD_CACHE_NEW_ONLY : 0));
- int newflags = HASH_WRITE_OBJECT;
+ int hash_flags = HASH_WRITE_OBJECT;
- if (flags & HASH_RENORMALIZE)
- newflags |= HASH_RENORMALIZE;
+ if (flags & ADD_CACHE_RENORMALIZE)
+ hash_flags |= HASH_RENORMALIZE;
if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
return error(_("%s: can only add regular files, symbolic links or git-directories"), path);
if (ignore_case) {
adjust_dirname_case(istate, ce->name);
}
- if (!(flags & HASH_RENORMALIZE)) {
+ if (!(flags & ADD_CACHE_RENORMALIZE)) {
alias = index_file_exists(istate, ce->name,
ce_namelen(ce), ignore_case);
if (alias &&
}
}
if (!intent_only) {
- if (index_path(istate, &ce->oid, path, st, newflags)) {
+ if (index_path(istate, &ce->oid, path, st, hash_flags)) {
discard_cache_entry(ce);
return error(_("unable to index file '%s'"), path);
}
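
The renormalize request now stays in the ADD_CACHE_* namespace at the API boundary and is translated into HASH_RENORMALIZE only for the index_path() call. A hedged sketch of a renormalizing caller (illustrative helper, not from the patch):

	static int renormalize_one(struct index_state *istate, const char *path)
	{
		struct stat st;

		if (lstat(path, &st))
			return error_errno(_("unable to stat '%s'"), path);
		/* add_to_index() maps ADD_CACHE_RENORMALIZE to HASH_RENORMALIZE */
		return add_to_index(istate, path, &st, ADD_CACHE_RENORMALIZE);
	}
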
load_index_extensions(&p);
}
munmap((void *)mmap, mmap_size);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "read/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr",
+ istate->cache_nr);
+
return istate->cache_nr;
unmap:
if (istate->initialized)
return istate->cache_nr;
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_read_index", the_repository,
+ "%s", path);
trace_performance_enter();
ret = do_read_index(istate, path, 0);
trace_performance_leave("read cache %s", path);
+ trace2_region_leave_printf("index", "do_read_index", the_repository,
+ "%s", path);
split_index = istate->split_index;
if (!split_index || is_null_oid(&split_index->base_oid)) {
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
ret = do_read_index(split_index->base, base_path, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
if (!oideq(&split_index->base_oid, &split_index->base->oid))
die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
return 0;
}
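
The trace2 regions above follow a strict enter/leave pairing; a minimal sketch of the discipline (do_some_index_work() is a hypothetical placeholder):

	trace2_region_enter_printf("index", "example", the_repository,
				   "%s", path);
	ret = do_some_index_work(istate, path);
	trace2_region_leave_printf("index", "example", the_repository,
				   "%s", path);
	/* every enter needs a matching leave on each exit path */
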
-int index_has_changes(struct index_state *istate,
- struct tree *tree,
- struct strbuf *sb)
+int repo_index_has_changes(struct repository *repo,
+ struct tree *tree,
+ struct strbuf *sb)
{
+ struct index_state *istate = repo->index;
struct object_id cmp;
int i;
- if (istate != &the_index) {
- BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
- }
if (tree)
cmp = tree->object.oid;
if (tree || !get_oid_tree("HEAD", &cmp)) {
struct diff_options opt;
- repo_diff_setup(the_repository, &opt);
+ repo_diff_setup(repo, &opt);
opt.flags.exit_with_status = 1;
if (!sb)
opt.flags.quick = 1;
return -1;
}
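
Callers migrate from the old the_index-only entry point to the repository-scoped one; a hedged sketch of a converted call site (message text is illustrative):

	struct strbuf changed = STRBUF_INIT;

	/* was: index_has_changes(&the_index, NULL, &changed) */
	if (repo_index_has_changes(the_repository, NULL, &changed))
		/* a NULL tree means "compare the index against HEAD" */
		warning(_("you have uncommitted changes: %s"), changed.buf);
	strbuf_release(&changed);
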
- if (!strip_extensions && istate->split_index) {
+ if (!strip_extensions && istate->split_index &&
+ !is_null_oid(&istate->split_index->base_oid)) {
struct strbuf sb = STRBUF_INIT;
err = write_link_extension(&sb, istate) < 0 ||
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "write/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "write/cache_nr",
+ istate->cache_nr);
+
return 0;
}
static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int ret = do_write_index(istate, lock->tempfile, 0);
+ int ret;
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+ ret = do_write_index(istate, lock->tempfile, 0);
+ trace2_region_leave_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+
if (ret)
return ret;
if (flags & COMMIT_LOCK)
int ret;
move_cache_to_base_index(istate);
+
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
ret = do_write_index(si->base, *temp, 1);
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
+
if (ret)
return ret;
ret = adjust_shared_perm(get_tempfile_path(*temp));
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index) {
+ if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
const char *shared_index = git_path("sharedindex.%s",
oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
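
The added is_null_oid() check matters because a split index whose base has never been written leaves base_oid all zeroes; without the guard the code would try to freshen a sharedindex file named after the null OID, which does not exist. A hedged restatement of the guarded call:

	/* only freshen a shared index that actually exists on disk */
	if (!is_null_oid(&si->base_oid))
		freshen_shared_index(git_path("sharedindex.%s",
					      oid_to_hex(&si->base_oid)), 1);
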
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
- const char *index = NULL;
- uint32_t extsize, ext_version;
- struct index_entry_offset_table *ieot;
- int i, nr;
-
- /* find the IEOT extension */
- if (!offset)
- return NULL;
- while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
- extsize = get_be32(mmap + offset + 4);
- if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
- index = mmap + offset + 4 + 4;
- break;
- }
- offset += 8;
- offset += extsize;
- }
- if (!index)
- return NULL;
-
- /* validate the version is IEOT_VERSION */
- ext_version = get_be32(index);
- if (ext_version != IEOT_VERSION) {
- error("invalid IEOT version %d", ext_version);
- return NULL;
- }
- index += sizeof(uint32_t);
-
- /* extension size - version bytes / bytes per entry */
- nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
- if (!nr) {
- error("invalid number of IEOT entries %d", nr);
- return NULL;
- }
- ieot = xmalloc(sizeof(struct index_entry_offset_table)
- + (nr * sizeof(struct index_entry_offset)));
- ieot->nr = nr;
- for (i = 0; i < nr; i++) {
- ieot->entries[i].offset = get_be32(index);
- index += sizeof(uint32_t);
- ieot->entries[i].nr = get_be32(index);
- index += sizeof(uint32_t);
- }
-
- return ieot;
+ const char *index = NULL;
+ uint32_t extsize, ext_version;
+ struct index_entry_offset_table *ieot;
+ int i, nr;
+
+ /* find the IEOT extension */
+ if (!offset)
+ return NULL;
+ while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
+ extsize = get_be32(mmap + offset + 4);
+ if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
+ index = mmap + offset + 4 + 4;
+ break;
+ }
+ offset += 8;
+ offset += extsize;
+ }
+ if (!index)
+ return NULL;
+
+ /* validate the version is IEOT_VERSION */
+ ext_version = get_be32(index);
+ if (ext_version != IEOT_VERSION) {
+ error("invalid IEOT version %d", ext_version);
+ return NULL;
+ }
+ index += sizeof(uint32_t);
+
+ /* extension size - version bytes / bytes per entry */
+ nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
+ if (!nr) {
+ error("invalid number of IEOT entries %d", nr);
+ return NULL;
+ }
+ ieot = xmalloc(sizeof(struct index_entry_offset_table)
+ + (nr * sizeof(struct index_entry_offset)));
+ ieot->nr = nr;
+ for (i = 0; i < nr; i++) {
+ ieot->entries[i].offset = get_be32(index);
+ index += sizeof(uint32_t);
+ ieot->entries[i].nr = get_be32(index);
+ index += sizeof(uint32_t);
+ }
+
+ return ieot;
}
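
Derived only from the reads above, the extension payload is a version word followed by fixed-size pairs; a sketch of the apparent on-disk layout (all fields 32-bit big-endian):

	/*
	 * 4 bytes  extension signature (CACHE_EXT_INDEXENTRYOFFSETTABLE)
	 * 4 bytes  extension size in bytes (extsize)
	 * payload:
	 *   4 bytes  version, must equal IEOT_VERSION
	 *   (extsize - 4) / 8 entries of:
	 *     4 bytes  offset of a block of cache entries in the index file
	 *     4 bytes  number of cache entries in that block
	 */
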
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
- uint32_t buffer;
- int i;
+ uint32_t buffer;
+ int i;
- /* version */
- put_be32(&buffer, IEOT_VERSION);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* version */
+ put_be32(&buffer, IEOT_VERSION);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* ieot */
- for (i = 0; i < ieot->nr; i++) {
+ /* ieot */
+ for (i = 0; i < ieot->nr; i++) {
- /* offset */
- put_be32(&buffer, ieot->entries[i].offset);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* offset */
+ put_be32(&buffer, ieot->entries[i].offset);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* count */
- put_be32(&buffer, ieot->entries[i].nr);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
- }
+ /* count */
+ put_be32(&buffer, ieot->entries[i].nr);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
+ }
}
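
A hedged round-trip sketch of the writer: build a small table, serialize its payload with write_ieot_extension(), and let the caller emit the extension header (offsets and counts below are made up):

	struct index_entry_offset_table *ieot;
	struct strbuf sb = STRBUF_INIT;

	ieot = xmalloc(sizeof(*ieot) + 2 * sizeof(struct index_entry_offset));
	ieot->nr = 2;
	ieot->entries[0].offset = 0x2000;	/* first block of entries */
	ieot->entries[0].nr = 100;
	ieot->entries[1].offset = 0x5400;	/* second block */
	ieot->entries[1].nr = 100;

	write_ieot_extension(&sb, ieot);	/* version + offset/count pairs */
	/* ... caller then writes the extension header, sb.len and sb.buf ... */

	strbuf_release(&sb);
	free(ieot);
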