*
* Copyright (C) Linus Torvalds, 2005
*/
-#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
#include "diff.h"
return *pool_ptr;
}
-struct index_state the_index;
static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
changed |= DATA_CHANGED;
return changed;
default:
- die("internal error: ce_mode is %o", ce->ce_mode);
+ BUG("unsupported ce_mode: %o", ce->ce_mode);
}
changed |= match_stat_data(&ce->ce_stat_data, st);
* CE_REMOVE is set in ce_flags. This is much more effective than
* calling remove_index_entry_at() for each entry to be removed.
*/
-void remove_marked_cache_entries(struct index_state *istate)
+void remove_marked_cache_entries(struct index_state *istate, int invalidate)
{
struct cache_entry **ce_array = istate->cache;
unsigned int i, j;
for (i = j = 0; i < istate->cache_nr; i++) {
if (ce_array[i]->ce_flags & CE_REMOVE) {
+ if (invalidate) {
+ cache_tree_invalidate_path(istate,
+ ce_array[i]->name);
+ untracked_cache_remove_from_index(istate,
+ ce_array[i]->name);
+ }
remove_name_hash(istate, ce_array[i]);
save_or_free_index_entry(istate, ce_array[i]);
}
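The hunk above teaches remove_marked_cache_entries() to take an "invalidate" flag, so callers no longer have to invalidate cache-tree and untracked-cache data for each removed path themselves. A minimal sketch of the intended calling pattern, assuming a hypothetical should_drop() predicate over cache entries:

	unsigned int i;

	for (i = 0; i < istate->cache_nr; i++)
		if (should_drop(istate->cache[i]))	/* hypothetical predicate */
			istate->cache[i]->ce_flags |= CE_REMOVE;
	/* one sweep over the array; pass 1 to also invalidate the affected paths */
	remove_marked_cache_entries(istate, 1);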
struct cache_entry *new_entry;
if (alias->ce_flags & CE_ADDED)
- die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
+ die(_("will not add file alias '%s' ('%s' already exists in index)"),
+ ce->name, alias->name);
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
{
struct object_id oid;
if (write_object_file("", 0, blob_type, &oid))
- die("cannot create an empty blob in the object database");
+ die(_("cannot create an empty blob in the object database"));
oidcpy(&ce->oid, &oid);
}
int intent_only = flags & ADD_CACHE_INTENT;
int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
(intent_only ? ADD_CACHE_NEW_ONLY : 0));
- int newflags = HASH_WRITE_OBJECT;
+ int hash_flags = HASH_WRITE_OBJECT;
- if (flags & HASH_RENORMALIZE)
- newflags |= HASH_RENORMALIZE;
+ if (flags & ADD_CACHE_RENORMALIZE)
+ hash_flags |= HASH_RENORMALIZE;
if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
- return error("%s: can only add regular files, symbolic links or git-directories", path);
+ return error(_("%s: can only add regular files, symbolic links or git-directories"), path);
namelen = strlen(path);
if (S_ISDIR(st_mode)) {
if (ignore_case) {
adjust_dirname_case(istate, ce->name);
}
- if (!(flags & HASH_RENORMALIZE)) {
+ if (!(flags & ADD_CACHE_RENORMALIZE)) {
alias = index_file_exists(istate, ce->name,
ce_namelen(ce), ignore_case);
if (alias &&
}
}
if (!intent_only) {
- if (index_path(istate, &ce->oid, path, st, newflags)) {
+ if (index_path(istate, &ce->oid, path, st, hash_flags)) {
discard_cache_entry(ce);
- return error("unable to index file %s", path);
+ return error(_("unable to index file '%s'"), path);
}
} else
set_object_name_for_intent_to_add_entry(ce);
discard_cache_entry(ce);
else if (add_index_entry(istate, ce, add_option)) {
discard_cache_entry(ce);
- return error("unable to add %s to index", path);
+ return error(_("unable to add '%s' to index"), path);
}
if (verbose && !was_same)
printf("add '%s'\n", path);
{
struct stat st;
if (lstat(path, &st))
- die_errno("unable to stat '%s'", path);
+ die_errno(_("unable to stat '%s'"), path);
return add_to_index(istate, path, &st, flags);
}
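add_file_to_index() above is the thin lstat() wrapper most call sites go through; a hedged sketch of typical use (the path and flag choice are illustrative, and error handling is simplified):

	/* stage one path into the given index; prints "add '<path>'" when verbose */
	if (add_file_to_index(istate, "COPYING", ADD_CACHE_VERBOSE))
		die(_("unable to add '%s' to the index"), "COPYING");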
int len;
if (!verify_path(path, mode)) {
- error("Invalid path '%s'", path);
+ error(_("invalid path '%s'"), path);
return NULL;
}
int len;
if (!verify_path(path, mode)) {
- error("Invalid path '%s'", path);
+ error(_("invalid path '%s'"), path);
return NULL;
}
if (!ok_to_add)
return -1;
if (!verify_path(ce->name, ce->ce_mode))
- return error("Invalid path '%s'", ce->name);
+ return error(_("invalid path '%s'"), ce->name);
if (!skip_df_check &&
check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
if (!ok_to_replace)
- return error("'%s' appears as both a file and as a directory",
+ return error(_("'%s' appears as both a file and as a directory"),
ce->name);
pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
pos = -pos-1;
istate->cache_nr);
trace_performance_enter();
- modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
- deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
- typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
- added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
- unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
+ modified_fmt = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
+ deleted_fmt = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
+ typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
+ added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
+ unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
/*
* Use the multi-threaded preload_index() to refresh most of the
* cache entries quickly then in the single threaded loop below,
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
- return error("bad signature");
+ return error(_("bad signature 0x%08x"), hdr->hdr_signature);
hdr_version = ntohl(hdr->hdr_version);
if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
- return error("bad index version %d", hdr_version);
+ return error(_("bad index version %d"), hdr_version);
if (!verify_index_checksum)
return 0;
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
- return error("bad index file sha1 signature");
+ return error(_("bad index file sha1 signature"));
return 0;
}
break;
default:
if (*ext < 'A' || 'Z' < *ext)
- return error("index uses %.4s extension, which we do not understand",
+ return error(_("index uses %.4s extension, which we do not understand"),
ext);
- fprintf(stderr, "ignoring %.4s extension\n", ext);
+ fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
break;
}
return 0;
}
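The two wrappers deleted above have repository-scoped replacements elsewhere in this series (repo_read_index() already appears further down; repo_hold_locked_index() is assumed here). Under that assumption, call sites migrate roughly like this:

	struct lock_file lock = LOCK_INIT;

	/* was: read_index(&the_index); */
	repo_read_index(the_repository);

	/* was: hold_locked_index(&lock, LOCK_DIE_ON_ERROR); assumed helper */
	repo_hold_locked_index(the_repository, &lock, LOCK_DIE_ON_ERROR);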
-int hold_locked_index(struct lock_file *lk, int lock_flags)
-{
- return hold_lock_file_for_update(lk, get_index_file(), lock_flags);
-}
-
-int read_index(struct index_state *istate)
-{
- return read_index_from(istate, get_index_file(), get_git_dir());
-}
-
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
unsigned int version,
struct ondisk_cache_entry *ondisk,
extended_flags = get_be16(&ondisk2->flags2) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
- die("Unknown index entry format %08x", extended_flags);
+ die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
name = ondisk2->name;
}
int name_compare = strcmp(ce->name, next_ce->name);
if (0 < name_compare)
- die("unordered stage entries in index");
+ die(_("unordered stage entries in index"));
if (!name_compare) {
if (!ce_stage(ce))
- die("multiple stage entries for merged file '%s'",
+ die(_("multiple stage entries for merged file '%s'"),
ce->name);
if (ce_stage(ce) > ce_stage(next_ce))
- die("unordered stage entries for '%s'",
+ die(_("unordered stage entries for '%s'"),
ce->name);
}
}
if (fd < 0) {
if (!must_exist && errno == ENOENT)
return 0;
- die_errno("%s: index file open failed", path);
+ die_errno(_("%s: index file open failed"), path);
}
if (fstat(fd, &st))
- die_errno("cannot stat the open index");
+ die_errno(_("%s: cannot stat the open index"), path);
mmap_size = xsize_t(st.st_size);
if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
- die("index file smaller than expected");
+ die(_("%s: index file smaller than expected"), path);
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (mmap == MAP_FAILED)
- die_errno("unable to map index file");
+ die_errno(_("%s: unable to map index file"), path);
close(fd);
hdr = (const struct cache_header *)mmap;
load_index_extensions(&p);
}
munmap((void *)mmap, mmap_size);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "read/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "read/cache_nr",
+ istate->cache_nr);
+
return istate->cache_nr;
unmap:
munmap((void *)mmap, mmap_size);
- die("index file corrupt");
+ die(_("index file corrupt"));
}
/*
static void freshen_shared_index(const char *shared_index, int warn)
{
if (!check_and_freshen_file(shared_index, 1) && warn)
- warning("could not freshen shared index '%s'", shared_index);
+ warning(_("could not freshen shared index '%s'"), shared_index);
}
int read_index_from(struct index_state *istate, const char *path,
if (istate->initialized)
return istate->cache_nr;
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_read_index", the_repository,
+ "%s", path);
trace_performance_enter();
ret = do_read_index(istate, path, 0);
trace_performance_leave("read cache %s", path);
+ trace2_region_leave_printf("index", "do_read_index", the_repository,
+ "%s", path);
split_index = istate->split_index;
if (!split_index || is_null_oid(&split_index->base_oid)) {
base_oid_hex = oid_to_hex(&split_index->base_oid);
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
+ trace2_region_enter_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
ret = do_read_index(split_index->base, base_path, 1);
+ trace2_region_leave_printf("index", "shared/do_read_index",
+ the_repository, "%s", base_path);
if (!oideq(&split_index->base_oid, &split_index->base->oid))
- die("broken index, expect %s in %s, got %s",
+ die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
oid_to_hex(&split_index->base->oid));
for (i = 0; i < istate->cache_nr; i++) {
if (!istate) {
- die("internal error: cache entry is not allocated from expected memory pool");
+ BUG("cache entry is not allocated from expected memory pool");
} else if (!istate->ce_mem_pool ||
!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
if (!istate->split_index ||
!istate->split_index->base ||
!istate->split_index->base->ce_mem_pool ||
!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
- die("internal error: cache entry is not allocated from expected memory pool");
+ BUG("cache entry is not allocated from expected memory pool");
}
}
}
return 0;
}
-int index_has_changes(struct index_state *istate,
- struct tree *tree,
- struct strbuf *sb)
+int repo_index_has_changes(struct repository *repo,
+ struct tree *tree,
+ struct strbuf *sb)
{
+ struct index_state *istate = repo->index;
struct object_id cmp;
int i;
- if (istate != &the_index) {
- BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
- }
if (tree)
cmp = tree->object.oid;
if (tree || !get_oid_tree("HEAD", &cmp)) {
struct diff_options opt;
- repo_diff_setup(the_repository, &opt);
+ repo_diff_setup(repo, &opt);
opt.flags.exit_with_status = 1;
if (!sb)
opt.flags.quick = 1;
return 0;
}
-static int verify_index(const struct index_state *istate)
+static int repo_verify_index(struct repository *repo)
{
- return verify_index_from(istate, get_index_file());
+ return verify_index_from(repo->index, repo->index_file);
}
static int has_racy_timestamp(struct index_state *istate)
return 0;
}
-void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
+void repo_update_index_if_able(struct repository *repo,
+ struct lock_file *lockfile)
{
- if ((istate->cache_changed || has_racy_timestamp(istate)) &&
- verify_index(istate))
- write_locked_index(istate, lockfile, COMMIT_LOCK);
+ if ((repo->index->cache_changed ||
+ has_racy_timestamp(repo->index)) &&
+ repo_verify_index(repo))
+ write_locked_index(repo->index, lockfile, COMMIT_LOCK);
else
rollback_lock_file(lockfile);
}
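repo_update_index_if_able() above implements the opportunistic "write back refreshed stat data only if the index still verifies" behaviour. A sketch of the usual calling pattern for some struct repository *repo, loosely modelled on what status-like commands do (flag values are illustrative):

	struct lock_file lock = LOCK_INIT;

	if (repo_hold_locked_index(repo, &lock, 0) >= 0) {
		repo_read_index(repo);
		refresh_index(repo->index, REFRESH_QUIET, NULL, NULL, NULL);
		/* commits the lock only when it is safe; otherwise rolls it back */
		repo_update_index_if_able(repo, &lock);
	}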
return -1;
}
- if (!strip_extensions && istate->split_index) {
+ if (!strip_extensions && istate->split_index &&
+ !is_null_oid(&istate->split_index->base_oid)) {
struct strbuf sb = STRBUF_INIT;
err = write_link_extension(&sb, istate) < 0 ||
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_data_intmax("index", the_repository, "write/version",
+ istate->version);
+ trace2_data_intmax("index", the_repository, "write/cache_nr",
+ istate->cache_nr);
+
return 0;
}
static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
unsigned flags)
{
- int ret = do_write_index(istate, lock->tempfile, 0);
+ int ret;
+
+ /*
+ * TODO trace2: replace "the_repository" with the actual repo instance
+ * that is associated with the given "istate".
+ */
+ trace2_region_enter_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+ ret = do_write_index(istate, lock->tempfile, 0);
+ trace2_region_leave_printf("index", "do_write_index", the_repository,
+ "%s", lock->tempfile->filename.buf);
+
if (ret)
return ret;
if (flags & COMMIT_LOCK)
int ret;
move_cache_to_base_index(istate);
+
+ trace2_region_enter_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
ret = do_write_index(si->base, *temp, 1);
+ trace2_region_leave_printf("index", "shared/do_write_index",
+ the_repository, "%s", (*temp)->filename.buf);
+
if (ret)
return ret;
ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- error("cannot fix permission bits on %s", get_tempfile_path(*temp));
+ error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
return ret;
}
ret = rename_tempfile(temp,
struct split_index *si = istate->split_index;
if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
- cache_tree_verify(istate);
+ cache_tree_verify(the_repository, istate);
if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
if (flags & COMMIT_LOCK)
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index) {
+ if (!ret && !new_shared_index && !is_null_oid(&si->base_oid)) {
const char *shared_index = git_path("sharedindex.%s",
oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
* state can call this and check its return value, instead of calling
* read_cache().
*/
-int read_index_unmerged(struct index_state *istate)
+int repo_read_index_unmerged(struct repository *repo)
{
+ struct index_state *istate;
int i;
int unmerged = 0;
- read_index(istate);
+ repo_read_index(repo);
+ istate = repo->index;
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
struct cache_entry *new_ce;
new_ce->ce_namelen = len;
new_ce->ce_mode = ce->ce_mode;
if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
- return error("%s: cannot drop to stage #0",
+ return error(_("%s: cannot drop to stage #0"),
new_ce->name);
}
return unmerged;
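The comment above the function spells out the intended use: callers that refuse to work from an unmerged state check the return value instead of reading the index directly. A sketch (the error message and repo variable are illustrative):

	if (repo_read_index_unmerged(repo))
		die(_("you need to resolve your current index first"));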
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
- const char *index = NULL;
- uint32_t extsize, ext_version;
- struct index_entry_offset_table *ieot;
- int i, nr;
-
- /* find the IEOT extension */
- if (!offset)
- return NULL;
- while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
- extsize = get_be32(mmap + offset + 4);
- if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
- index = mmap + offset + 4 + 4;
- break;
- }
- offset += 8;
- offset += extsize;
- }
- if (!index)
- return NULL;
-
- /* validate the version is IEOT_VERSION */
- ext_version = get_be32(index);
- if (ext_version != IEOT_VERSION) {
- error("invalid IEOT version %d", ext_version);
- return NULL;
- }
- index += sizeof(uint32_t);
-
- /* extension size - version bytes / bytes per entry */
- nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
- if (!nr) {
- error("invalid number of IEOT entries %d", nr);
- return NULL;
- }
- ieot = xmalloc(sizeof(struct index_entry_offset_table)
- + (nr * sizeof(struct index_entry_offset)));
- ieot->nr = nr;
- for (i = 0; i < nr; i++) {
- ieot->entries[i].offset = get_be32(index);
- index += sizeof(uint32_t);
- ieot->entries[i].nr = get_be32(index);
- index += sizeof(uint32_t);
- }
-
- return ieot;
+ const char *index = NULL;
+ uint32_t extsize, ext_version;
+ struct index_entry_offset_table *ieot;
+ int i, nr;
+
+ /* find the IEOT extension */
+ if (!offset)
+ return NULL;
+ while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
+ extsize = get_be32(mmap + offset + 4);
+ if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
+ index = mmap + offset + 4 + 4;
+ break;
+ }
+ offset += 8;
+ offset += extsize;
+ }
+ if (!index)
+ return NULL;
+
+ /* validate the version is IEOT_VERSION */
+ ext_version = get_be32(index);
+ if (ext_version != IEOT_VERSION) {
+ error("invalid IEOT version %d", ext_version);
+ return NULL;
+ }
+ index += sizeof(uint32_t);
+
+ /* extension size - version bytes / bytes per entry */
+ nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
+ if (!nr) {
+ error("invalid number of IEOT entries %d", nr);
+ return NULL;
+ }
+ ieot = xmalloc(sizeof(struct index_entry_offset_table)
+ + (nr * sizeof(struct index_entry_offset)));
+ ieot->nr = nr;
+ for (i = 0; i < nr; i++) {
+ ieot->entries[i].offset = get_be32(index);
+ index += sizeof(uint32_t);
+ ieot->entries[i].nr = get_be32(index);
+ index += sizeof(uint32_t);
+ }
+
+ return ieot;
}
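For readers following read_ieot_extension() above, the payload it walks has roughly this shape (reconstructed from the parsing code, not quoted from the index-format documentation):

	/*
	 * IEOT extension body, as decoded above:
	 *
	 *   32-bit version                  (must equal IEOT_VERSION)
	 *   N entries, each consisting of:
	 *     32-bit offset                 (offset of the first cache entry in the block)
	 *     32-bit nr                     (number of cache entries in the block)
	 *
	 * where N = (extension size - 4) / 8, matching the computation of "nr".
	 */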
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
- uint32_t buffer;
- int i;
+ uint32_t buffer;
+ int i;
- /* version */
- put_be32(&buffer, IEOT_VERSION);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* version */
+ put_be32(&buffer, IEOT_VERSION);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* ieot */
- for (i = 0; i < ieot->nr; i++) {
+ /* ieot */
+ for (i = 0; i < ieot->nr; i++) {
- /* offset */
- put_be32(&buffer, ieot->entries[i].offset);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
+ /* offset */
+ put_be32(&buffer, ieot->entries[i].offset);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
- /* count */
- put_be32(&buffer, ieot->entries[i].nr);
- strbuf_add(sb, &buffer, sizeof(uint32_t));
- }
+ /* count */
+ put_be32(&buffer, ieot->entries[i].nr);
+ strbuf_add(sb, &buffer, sizeof(uint32_t));
+ }
}