/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = sha1fd(pack_fd, p->pack_name);
+ pack_file = hashfd(pack_fd, p->pack_name);
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(2);
hdr.hdr_entries = 0;
- sha1write(pack_file, &hdr, sizeof(hdr));
+ hashwrite(pack_file, &hdr, sizeof(hdr));
pack_data = p;
pack_size = sizeof(hdr);
struct tag *t;
close_pack_windows(pack_data);
- sha1close(pack_file, cur_pack_oid.hash, 0);
+ hashclose(pack_file, cur_pack_oid.hash, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
- typename(type), (unsigned long)dat->len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
- git_SHA1_Update(&c, dat->buf, dat->len);
- git_SHA1_Final(oid.hash, &c);
+ type_name(type), (unsigned long)dat->len) + 1;
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
return 1;
}
- if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ if (last && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
- &deltalen, dat->len - 20);
+ &deltalen, dat->len - the_hash_algo->rawsz);
} else
delta = NULL;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
- sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
- sha1write(pack_file, out, s.total_out);
+ hashwrite(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
return 0;
}
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
- if (sha1file_truncate(pack_file, checkpoint))
+ if (hashfile_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
pack_size = checkpoint->offset;
}
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- sha1file_checkpoint(pack_file, &checkpoint);
+ hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, out_buf, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
crc32_begin(pack_file);
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- git_SHA1_Update(&c, in_buf, n);
+ the_hash_algo->update_fn(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
- sha1write(pack_file, out_buf, n);
+ hashwrite(pack_file, out_buf, n);
pack_size += n;
s.next_out = out_buf;
s.avail_out = out_sz;
}
}
git_deflate_end(&s);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
/* The object is stored in the packfile we are writing to
* and we have modified it since the last time we scanned
* back to read a previously written object. If an old
- * window covered [p->pack_size, p->pack_size + 20) its
+ * window covered [p->pack_size, p->pack_size + rawsz) its
* data is stale and is not valid. Closing all windows
* and updating the packfile length ensures we can read
* the newly written data.
*/
close_pack_windows(p);
- sha1flush(pack_file);
+ hashflush(pack_file);
- /* We have to offer 20 bytes additional on the end of
+ /* We have to offer rawsz bytes additional on the end of
* the packfile as the core unpacker code assumes the
* footer is present at the file end and must promise
- * at least 20 bytes within any window it maps. But
+ * at least rawsz bytes within any window it maps. But
* we don't actually create the footer here.
*/
- p->pack_size = pack_size + 20;
+ p->pack_size = pack_size + the_hash_algo->rawsz;
}
return unpack_entry(p, oe->idx.offset, &type, sizep);
}
die("Can't load tree %s", oid_to_hex(oid));
} else {
enum object_type type;
- buf = read_sha1_file(oid->hash, &type, &size);
+ buf = read_object_file(oid, &type, &size);
if (!buf || type != OBJ_TREE)
die("Can't load tree %s", oid_to_hex(oid));
}
die("corrupt mark line: %s", line);
e = find_object(&oid);
if (!e) {
- enum object_type type = sha1_object_info(oid.hash, NULL);
+ enum object_type type = oid_object_info(&oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(&oid));
e = insert_object(&oid);
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- if (fanout >= 20)
+ if (fanout >= the_hash_algo->rawsz)
die("Too large fanout (%u)", fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
path[i++] = '/';
fanout--;
}
- memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
- path[i + GIT_SHA1_HEXSZ - j] = '\0';
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
}
static uintmax_t do_change_note_fanout(
else if (oe) {
if (oe->type != OBJ_COMMIT)
die("Not a commit (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
}
/*
* Accept the sha1 without checking; it expected to be in
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
enum object_type type = oe ? oe->type :
- sha1_object_info(oid.hash, NULL);
+ oid_object_info(&oid, NULL);
if (type < 0)
die("%s not found: %s",
S_ISDIR(mode) ? "Tree" : "Blob",
command_buf.buf);
if (type != expected)
die("Not a %s (actually a %s): %s",
- typename(expected), typename(type),
+ type_name(expected), type_name(type),
command_buf.buf);
}
oidcpy(&commit_oid, &commit_oe->idx.oid);
} else if (!get_oid(p, &commit_oid)) {
unsigned long size;
- char *buf = read_object_with_reference(commit_oid.hash,
- commit_type, &size, commit_oid.hash);
+ char *buf = read_object_with_reference(&commit_oid,
+ commit_type, &size,
+ &commit_oid);
if (!buf || size < 46)
die("Not a valid commit: %s", p);
free(buf);
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
- enum object_type type = sha1_object_info(oid.hash, NULL);
+ enum object_type type = oid_object_info(&oid, NULL);
if (type < 0)
die("Blob not found: %s", command_buf.buf);
if (type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(type), command_buf.buf);
+ type_name(type), command_buf.buf);
}
construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
unsigned long size;
char *buf;
- buf = read_object_with_reference(b->oid.hash,
- commit_type, &size,
- b->oid.hash);
+ buf = read_object_with_reference(&b->oid, commit_type, &size,
+ &b->oid);
parse_from_commit(b, buf, size);
free(buf);
}
oidcpy(&n->oid, &oe->idx.oid);
} else if (!get_oid(from, &n->oid)) {
unsigned long size;
- char *buf = read_object_with_reference(n->oid.hash,
- commit_type, &size, n->oid.hash);
+ char *buf = read_object_with_reference(&n->oid,
+ commit_type,
+ &size, &n->oid);
if (!buf || size < 46)
die("Not a valid commit: %s", from);
free(buf);
} else if (!get_oid(from, &oid)) {
struct object_entry *oe = find_object(&oid);
if (!oe) {
- type = sha1_object_info(oid.hash, NULL);
+ type = oid_object_info(&oid, NULL);
if (type < 0)
die("Not a valid object: %s", from);
} else
"object %s\n"
"type %s\n"
"tag %s\n",
- oid_to_hex(&oid), typename(type), t->name);
+ oid_to_hex(&oid), type_name(type), t->name);
if (tagger)
strbuf_addf(&new_data,
"tagger %s\n", tagger);
char *buf;
if (!oe || oe->pack_id == MAX_PACK_ID) {
- buf = read_sha1_file(oid->hash, &type, &size);
+ buf = read_object_file(oid, &type, &size);
} else {
type = oe->type;
buf = gfi_unpack_entry(oe, &size);
die("Can't read object %s", oid_to_hex(oid));
if (type != OBJ_BLOB)
die("Object %s is a %s but a blob was expected.",
- oid_to_hex(oid), typename(type));
+ oid_to_hex(oid), type_name(type));
strbuf_reset(&line);
strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid),
- typename(type), size);
+ type_name(type), size);
cat_blob_write(line.buf, line.len);
strbuf_release(&line);
cat_blob_write(buf, size);
static void parse_get_mark(const char *p)
{
- struct object_entry *oe = oe;
+ struct object_entry *oe;
char output[GIT_MAX_HEXSZ + 2];
/* get-mark SP <object> LF */
static void parse_cat_blob(const char *p)
{
- struct object_entry *oe = oe;
+ struct object_entry *oe;
struct object_id oid;
/* cat-blob SP <object> LF */
unsigned long size;
char *buf = NULL;
if (!oe) {
- enum object_type type = sha1_object_info(oid->hash, NULL);
+ enum object_type type = oid_object_info(oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(oid));
/* cache it! */
buf = gfi_unpack_entry(oe, &size);
} else {
enum object_type unused;
- buf = read_sha1_file(oid->hash, &unused, &size);
+ buf = read_object_file(oid, &unused, &size);
}
if (!buf)
die("Can't load object %s", oid_to_hex(oid));
return ignore_case ? strihash(path) : strhash(path);
}
+static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
+ char *dir)
+{
+ struct dir_rename_entry key;
+
+ if (dir == NULL)
+ return NULL;
+ hashmap_entry_init(&key, strhash(dir));
+ key.dir = dir;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int dir_rename_cmp(const void *unused_cmp_data,
+ const void *entry,
+ const void *entry_or_key,
+ const void *unused_keydata)
+{
+ const struct dir_rename_entry *e1 = entry;
+ const struct dir_rename_entry *e2 = entry_or_key;
+
+ return strcmp(e1->dir, e2->dir);
+}
+
+static void dir_rename_init(struct hashmap *map)
+{
+ hashmap_init(map, dir_rename_cmp, NULL, 0);
+}
+
+static void dir_rename_entry_init(struct dir_rename_entry *entry,
+ char *directory)
+{
+ hashmap_entry_init(entry, strhash(directory));
+ entry->dir = directory;
+ entry->non_unique_new_dir = 0;
+ strbuf_init(&entry->new_dir, 0);
+ string_list_init(&entry->possible_new_dirs, 0);
+}
+
+static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
+ char *target_file)
+{
+ struct collision_entry key;
+
+ hashmap_entry_init(&key, strhash(target_file));
+ key.target_file = target_file;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int collision_cmp(void *unused_cmp_data,
+ const struct collision_entry *e1,
+ const struct collision_entry *e2,
+ const void *unused_keydata)
+{
+ return strcmp(e1->target_file, e2->target_file);
+}
+
+static void collision_init(struct hashmap *map)
+{
+ hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+}
+
static void flush_output(struct merge_options *o)
{
if (o->buffer_output < 2 && o->obuf.len) {
enum rename_type {
RENAME_NORMAL = 0,
+ RENAME_DIR,
RENAME_DELETE,
RENAME_ONE_FILE_TO_ONE,
RENAME_ONE_FILE_TO_TWO,
strbuf_addf(&o->obuf, "virtual %s\n",
merge_remote_util(commit)->name);
else {
- strbuf_add_unique_abbrev(&o->obuf, commit->object.oid.hash,
+ strbuf_add_unique_abbrev(&o->obuf, &commit->object.oid,
DEFAULT_ABBREV);
strbuf_addch(&o->obuf, ' ');
if (parse_commit(commit) != 0)
init_tree_desc(desc, tree->buffer, tree->size);
}
-static int git_merge_trees(int index_only,
+static int git_merge_trees(struct merge_options *o,
struct tree *common,
struct tree *head,
struct tree *merge)
{
int rc;
struct tree_desc t[3];
- struct unpack_trees_options opts;
- memset(&opts, 0, sizeof(opts));
- if (index_only)
- opts.index_only = 1;
+ memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
+ if (o->call_depth)
+ o->unpack_opts.index_only = 1;
else
- opts.update = 1;
- opts.merge = 1;
- opts.head_idx = 2;
- opts.fn = threeway_merge;
- opts.src_index = &the_index;
- opts.dst_index = &the_index;
- setup_unpack_trees_porcelain(&opts, "merge");
+ o->unpack_opts.update = 1;
+ o->unpack_opts.merge = 1;
+ o->unpack_opts.head_idx = 2;
+ o->unpack_opts.fn = threeway_merge;
+ o->unpack_opts.src_index = &the_index;
+ o->unpack_opts.dst_index = &the_index;
+ setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
init_tree_desc_from_tree(t+0, common);
init_tree_desc_from_tree(t+1, head);
init_tree_desc_from_tree(t+2, merge);
- rc = unpack_trees(3, t, &opts);
+ rc = unpack_trees(3, t, &o->unpack_opts);
+ /*
+	 * unpack_trees NULLifies src_index, but it's used in verify_uptodate,
+	 * so set it to the new index, which will usually have modification
+	 * timestamp info copied over.
+ */
+ o->unpack_opts.src_index = &the_index;
cache_tree_free(&active_cache_tree);
return rc;
}
return result;
}
-static int save_files_dirs(const unsigned char *sha1,
+static int save_files_dirs(const struct object_id *oid,
struct strbuf *base, const char *path,
unsigned int mode, int stage, void *context)
{
read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
}
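+/*
+ * Like get_tree_entry(), but if the entry at path is a directory, report
+ * it as absent (null oid, mode 0) so that callers only ever see blobs.
+ */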
+static int get_tree_entry_if_blob(struct tree *tree,
+ const char *path,
+ struct object_id *hashy,
+ unsigned int *mode_o)
+{
+ int ret;
+
+ ret = get_tree_entry(&tree->object.oid, path, hashy, mode_o);
+ if (S_ISDIR(*mode_o)) {
+ oidcpy(hashy, &null_oid);
+ *mode_o = 0;
+ }
+ return ret;
+}
+
/*
* Returns an index_entry instance which doesn't have to correspond to
* a real cache entry in Git's index.
{
struct string_list_item *item;
struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
- get_tree_entry(o->object.oid.hash, path,
- e->stages[1].oid.hash, &e->stages[1].mode);
- get_tree_entry(a->object.oid.hash, path,
- e->stages[2].oid.hash, &e->stages[2].mode);
- get_tree_entry(b->object.oid.hash, path,
- e->stages[3].oid.hash, &e->stages[3].mode);
+ get_tree_entry_if_blob(o, path,
+ &e->stages[1].oid, &e->stages[1].mode);
+ get_tree_entry_if_blob(a, path,
+ &e->stages[2].oid, &e->stages[2].mode);
+ get_tree_entry_if_blob(b, path,
+ &e->stages[3].oid, &e->stages[3].mode);
item = string_list_insert(entries, path);
item->util = e;
return e;
struct rename {
struct diff_filepair *pair;
+ /*
+ * Purpose of src_entry and dst_entry:
+ *
+ * If 'before' is renamed to 'after' then src_entry will contain
+ * the versions of 'before' from the merge_base, HEAD, and MERGE in
+ * stages 1, 2, and 3; dst_entry will contain the respective
+ * versions of 'after' in corresponding locations. Thus, we have a
+ * total of six modes and oids, though some will be null. (Stage 0
+ * is ignored; we're interested in handling conflicts.)
+ *
+	 * Since we don't turn on break-rewrites by default, neither
+	 * src_entry nor dst_entry can have non-null oids in all three of
+	 * their stages, meaning at most four of the six will be non-null.
+ * Also, since this is a rename, both src_entry and dst_entry will
+ * have at least one non-null oid, meaning at least two will be
+ * non-null. Of the six oids, a typical rename will have three be
+ * non-null. Only two implies a rename/delete, and four implies a
+ * rename/add.
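+	 *
+	 * For instance (a hypothetical example): if HEAD renames 'before' to
+	 * 'after' while MERGE leaves 'before' alone, then src_entry has
+	 * non-null oids in stages 1 and 3, dst_entry has one in stage 2,
+	 * and the remaining three are null.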
+ */
struct stage_data *src_entry;
struct stage_data *dst_entry;
+ unsigned add_turned_into_rename:1;
unsigned processed:1;
};
-/*
- * Get information of all renames which occurred between 'o_tree' and
- * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and
- * 'b_tree') to be able to associate the correct cache entries with
- * the rename information. 'tree' is always equal to either a_tree or b_tree.
- */
-static struct string_list *get_renames(struct merge_options *o,
- struct tree *tree,
- struct tree *o_tree,
- struct tree *a_tree,
- struct tree *b_tree,
- struct string_list *entries)
-{
- int i;
- struct string_list *renames;
- struct diff_options opts;
-
- renames = xcalloc(1, sizeof(struct string_list));
- if (!o->detect_rename)
- return renames;
-
- diff_setup(&opts);
- opts.flags.recursive = 1;
- opts.flags.rename_empty = 0;
- opts.detect_rename = DIFF_DETECT_RENAME;
- opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
- o->diff_rename_limit >= 0 ? o->diff_rename_limit :
- 1000;
- opts.rename_score = o->rename_score;
- opts.show_rename_progress = o->show_rename_progress;
- opts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_setup_done(&opts);
- diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
- diffcore_std(&opts);
- if (opts.needed_rename_limit > o->needed_rename_limit)
- o->needed_rename_limit = opts.needed_rename_limit;
- for (i = 0; i < diff_queued_diff.nr; ++i) {
- struct string_list_item *item;
- struct rename *re;
- struct diff_filepair *pair = diff_queued_diff.queue[i];
- if (pair->status != 'R') {
- diff_free_filepair(pair);
- continue;
- }
- re = xmalloc(sizeof(*re));
- re->processed = 0;
- re->pair = pair;
- item = string_list_lookup(entries, re->pair->one->path);
- if (!item)
- re->src_entry = insert_stage_data(re->pair->one->path,
- o_tree, a_tree, b_tree, entries);
- else
- re->src_entry = item->util;
-
- item = string_list_lookup(entries, re->pair->two->path);
- if (!item)
- re->dst_entry = insert_stage_data(re->pair->two->path,
- o_tree, a_tree, b_tree, entries);
- else
- re->dst_entry = item->util;
- item = string_list_insert(renames, pair->one->path);
- item->util = re;
- }
- opts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_queued_diff.nr = 0;
- diff_flush(&opts);
- return renames;
-}
-
static int update_stages(struct merge_options *opt, const char *path,
const struct diff_filespec *o,
const struct diff_filespec *a,
return 0;
}
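+/*
+ * Fill the index stages for path from the modes/oids recorded in
+ * stage_data, treating a null oid as "no entry" for that stage.
+ */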
+static int update_stages_for_stage_data(struct merge_options *opt,
+ const char *path,
+ const struct stage_data *stage_data)
+{
+ struct diff_filespec o, a, b;
+
+ o.mode = stage_data->stages[1].mode;
+ oidcpy(&o.oid, &stage_data->stages[1].oid);
+
+ a.mode = stage_data->stages[2].mode;
+ oidcpy(&a.oid, &stage_data->stages[2].oid);
+
+ b.mode = stage_data->stages[3].mode;
+ oidcpy(&b.oid, &stage_data->stages[3].oid);
+
+ return update_stages(opt, path,
+ is_null_oid(&o.oid) ? NULL : &o,
+ is_null_oid(&a.oid) ? NULL : &a,
+ is_null_oid(&b.oid) ? NULL : &b);
+}
+
static void update_entry(struct stage_data *entry,
struct diff_filespec *o,
struct diff_filespec *a,
return !was_tracked(path) && file_exists(path);
}
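+/*
+ * Determine whether the working tree file at path is modified relative to
+ * its index entry; only meaningful on the outermost merge (call_depth == 0).
+ */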
+static int was_dirty(struct merge_options *o, const char *path)
+{
+ struct cache_entry *ce;
+ int dirty = 1;
+
+ if (o->call_depth || !was_tracked(path))
+ return !dirty;
+
+ ce = cache_file_exists(path, strlen(path), ignore_case);
+ dirty = (ce->ce_stat_data.sd_mtime.sec > 0 &&
+ verify_uptodate(ce, &o->unpack_opts) != 0);
+ return dirty;
+}
+
static int make_room_for_path(struct merge_options *o, const char *path)
{
int status, i;
goto update_index;
}
- buf = read_sha1_file(oid->hash, &type, &size);
+ buf = read_object_file(oid, &type, &size);
if (!buf)
return err(o, _("cannot read object %s '%s'"), oid_to_hex(oid), path);
if (type != OBJ_BLOB) {
if ((merge_status < 0) || !result_buf.ptr)
ret = err(o, _("Failed to execute internal merge"));
- if (!ret && write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, result->oid.hash))
+ if (!ret &&
+ write_object_file(result_buf.ptr, result_buf.size,
+ blob_type, &result->oid))
ret = err(o, _("Unable to add %s to database"),
a->path);
return merge_file_1(o, &one, &a, &b, branch1, branch2, mfi);
}
+static int conflict_rename_dir(struct merge_options *o,
+ struct diff_filepair *pair,
+ const char *rename_branch,
+ const char *other_branch)
+{
+ const struct diff_filespec *dest = pair->two;
+
+ if (!o->call_depth && would_lose_untracked(dest->path)) {
+ char *alt_path = unique_path(o, dest->path, rename_branch);
+
+ output(o, 1, _("Error: Refusing to lose untracked file at %s; "
+ "writing to %s instead."),
+ dest->path, alt_path);
+ /*
+ * Write the file in worktree at alt_path, but not in the
+ * index. Instead, write to dest->path for the index but
+ * only at the higher appropriate stage.
+ */
+ if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+ return -1;
+ free(alt_path);
+ return update_stages(o, dest->path, NULL,
+ rename_branch == o->branch1 ? dest : NULL,
+ rename_branch == o->branch1 ? NULL : dest);
+ }
+
+ /* Update dest->path both in index and in worktree */
+ if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
+ return -1;
+ return 0;
+}
+
static int handle_change_delete(struct merge_options *o,
const char *path, const char *old_path,
const struct object_id *o_oid, int o_mode,
const char *update_path = path;
int ret = 0;
- if (dir_in_way(path, !o->call_depth, 0)) {
+ if (dir_in_way(path, !o->call_depth, 0) ||
+ (!o->call_depth && would_lose_untracked(path))) {
update_path = alt_path = unique_path(o, path, change_branch);
}
add = filespec_from_entry(&other, dst_entry, stage ^ 1);
if (add) {
+ int ren_src_was_dirty = was_dirty(o, rename->path);
char *add_name = unique_path(o, rename->path, other_branch);
if (update_file(o, 0, &add->oid, add->mode, add_name))
return -1;
- remove_file(o, 0, rename->path, 0);
+ if (ren_src_was_dirty) {
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ rename->path);
+ }
+ /*
+ * Because the double negatives somehow keep confusing me...
+ * 1) update_wd iff !ren_src_was_dirty.
+ * 2) no_wd iff !update_wd
+ * 3) so, no_wd == !!ren_src_was_dirty == ren_src_was_dirty
+ */
+ remove_file(o, 0, rename->path, ren_src_was_dirty);
dst_name = unique_path(o, rename->path, cur_branch);
} else {
if (dir_in_way(rename->path, !o->call_depth, 0)) {
dst_name = unique_path(o, rename->path, cur_branch);
output(o, 1, _("%s is a directory in %s adding as %s instead"),
rename->path, other_branch, dst_name);
+ } else if (!o->call_depth &&
+ would_lose_untracked(rename->path)) {
+ dst_name = unique_path(o, rename->path, cur_branch);
+ output(o, 1, _("Refusing to lose untracked file at %s; "
+ "adding as %s instead"),
+ rename->path, dst_name);
}
}
if ((ret = update_file(o, 0, &rename->oid, rename->mode, dst_name)))
char *new_path2 = unique_path(o, path, ci->branch2);
output(o, 1, _("Renaming %s to %s and %s to %s instead"),
a->path, new_path1, b->path, new_path2);
- remove_file(o, 0, path, 0);
+ if (was_dirty(o, path))
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ path);
+ else if (would_lose_untracked(path))
+ /*
+ * Only way we get here is if both renames were from
+ * a directory rename AND user had an untracked file
+ * at the location where both files end up after the
+ * two directory renames. See testcase 10d of t6043.
+ */
+ output(o, 1, _("Refusing to lose untracked file at "
+ "%s, even though it's in the way."),
+ path);
+ else
+ remove_file(o, 0, path, 0);
ret = update_file(o, 0, &mfi_c1.oid, mfi_c1.mode, new_path1);
if (!ret)
ret = update_file(o, 0, &mfi_c2.oid, mfi_c2.mode,
new_path2);
+ /*
+ * unpack_trees() actually populates the index for us for
+	 * "normal" rename/rename(2to1) situations so that the
+ * correct entries are at the higher stages, which would
+ * make the call below to update_stages_for_stage_data
+ * unnecessary. However, if either of the renames came
+ * from a directory rename, then unpack_trees() will not
+ * have gotten the right data loaded into the index, so we
+ * need to do so now. (While it'd be tempting to move this
+ * call to update_stages_for_stage_data() to
+ * apply_directory_rename_modifications(), that would break
+ * our intermediate calls to would_lose_untracked() since
+ * those rely on the current in-memory index. See also the
+ * big "NOTE" in update_stages()).
+ */
+ if (update_stages_for_stage_data(o, path, ci->dst_entry1))
+ ret = -1;
+
free(new_path2);
free(new_path1);
}
return ret;
}
+/*
+ * Get the diff_filepairs changed between o_tree and tree.
+ */
+static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+ struct tree *o_tree,
+ struct tree *tree)
+{
+ struct diff_queue_struct *ret;
+ struct diff_options opts;
+
+ diff_setup(&opts);
+ opts.flags.recursive = 1;
+ opts.flags.rename_empty = 0;
+ opts.detect_rename = DIFF_DETECT_RENAME;
+ opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
+ o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+ 1000;
+ opts.rename_score = o->rename_score;
+ opts.show_rename_progress = o->show_rename_progress;
+ opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_setup_done(&opts);
+ diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
+ diffcore_std(&opts);
+ if (opts.needed_rename_limit > o->needed_rename_limit)
+ o->needed_rename_limit = opts.needed_rename_limit;
+
+ ret = xmalloc(sizeof(*ret));
+ *ret = diff_queued_diff;
+
+ opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_queued_diff.nr = 0;
+ diff_queued_diff.queue = NULL;
+ diff_flush(&opts);
+ return ret;
+}
+
+static int tree_has_path(struct tree *tree, const char *path)
+{
+ struct object_id hashy;
+ unsigned int mode_o;
+
+ return !get_tree_entry(&tree->object.oid, path,
+ &hashy, &mode_o);
+}
+
+/*
+ * Return a new string that replaces the beginning portion (which matches
+ * entry->dir) with entry->new_dir. In perl-speak:
+ * new_path_name = (old_path =~ s/entry->dir/entry->new_dir/);
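+ *
+ * For example (hypothetical values): with entry->dir == "a/b/c" and
+ * entry->new_dir == "d/e", an old_path of "a/b/c/f/file.c" is mapped
+ * to "d/e/f/file.c".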
+ * NOTE:
+ * Caller must ensure that old_path starts with entry->dir + '/'.
+ */
+static char *apply_dir_rename(struct dir_rename_entry *entry,
+ const char *old_path)
+{
+ struct strbuf new_path = STRBUF_INIT;
+ int oldlen, newlen;
+
+ if (entry->non_unique_new_dir)
+ return NULL;
+
+ oldlen = strlen(entry->dir);
+ newlen = entry->new_dir.len + (strlen(old_path) - oldlen) + 1;
+ strbuf_grow(&new_path, newlen);
+ strbuf_addbuf(&new_path, &entry->new_dir);
+ strbuf_addstr(&new_path, &old_path[oldlen]);
+
+ return strbuf_detach(&new_path, NULL);
+}
+
+static void get_renamed_dir_portion(const char *old_path, const char *new_path,
+ char **old_dir, char **new_dir)
+{
+ char *end_of_old, *end_of_new;
+ int old_len, new_len;
+
+ *old_dir = NULL;
+ *new_dir = NULL;
+
+ /*
+ * For
+ * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+ * the "e/foo.c" part is the same, we just want to know that
+ * "a/b/c/d" was renamed to "a/b/some/thing/else"
+ * so, for this example, this function returns "a/b/c/d" in
+ * *old_dir and "a/b/some/thing/else" in *new_dir.
+ *
+ * Also, if the basename of the file changed, we don't care. We
+ * want to know which portion of the directory, if any, changed.
+ */
+ end_of_old = strrchr(old_path, '/');
+ end_of_new = strrchr(new_path, '/');
+
+ if (end_of_old == NULL || end_of_new == NULL)
+ return;
+ while (*--end_of_new == *--end_of_old &&
+ end_of_old != old_path &&
+ end_of_new != new_path)
+ ; /* Do nothing; all in the while loop */
+ /*
+ * We've found the first non-matching character in the directory
+ * paths. That means the current directory we were comparing
+ * represents the rename. Move end_of_old and end_of_new back
+ * to the full directory name.
+ */
+ if (*end_of_old == '/')
+ end_of_old++;
+ if (*end_of_old != '/')
+ end_of_new++;
+ end_of_old = strchr(end_of_old, '/');
+ end_of_new = strchr(end_of_new, '/');
+
+ /*
+ * It may have been the case that old_path and new_path were the same
+ * directory all along. Don't claim a rename if they're the same.
+ */
+ old_len = end_of_old - old_path;
+ new_len = end_of_new - new_path;
+
+ if (old_len != new_len || strncmp(old_path, new_path, old_len)) {
+ *old_dir = xstrndup(old_path, old_len);
+ *new_dir = xstrndup(new_path, new_len);
+ }
+}
+
+static void remove_hashmap_entries(struct hashmap *dir_renames,
+ struct string_list *items_to_remove)
+{
+ int i;
+ struct dir_rename_entry *entry;
+
+ for (i = 0; i < items_to_remove->nr; i++) {
+ entry = items_to_remove->items[i].util;
+ hashmap_remove(dir_renames, entry, NULL);
+ }
+ string_list_clear(items_to_remove, 0);
+}
+
+/*
+ * See if there is a directory rename for path, and if there are any file
+ * level conflicts for the renamed location. If there is a rename and
+ * there are no conflicts, return the new name. Otherwise, return NULL.
+ */
+static char *handle_path_level_conflicts(struct merge_options *o,
+ const char *path,
+ struct dir_rename_entry *entry,
+ struct hashmap *collisions,
+ struct tree *tree)
+{
+ char *new_path = NULL;
+ struct collision_entry *collision_ent;
+ int clean = 1;
+ struct strbuf collision_paths = STRBUF_INIT;
+
+ /*
+ * entry has the mapping of old directory name to new directory name
+ * that we want to apply to path.
+ */
+ new_path = apply_dir_rename(entry, path);
+
+ if (!new_path) {
+		/* This should only happen when entry->non_unique_new_dir is set */
+		if (!entry->non_unique_new_dir)
+			BUG("entry->non_unique_new_dir not set and !new_path");
+ output(o, 1, _("CONFLICT (directory rename split): "
+ "Unclear where to place %s because directory "
+ "%s was renamed to multiple other directories, "
+ "with no destination getting a majority of the "
+ "files."),
+ path, entry->dir);
+ clean = 0;
+ return NULL;
+ }
+
+ /*
+ * The caller needs to have ensured that it has pre-populated
+ * collisions with all paths that map to new_path. Do a quick check
+ * to ensure that's the case.
+ */
+ collision_ent = collision_find_entry(collisions, new_path);
+ if (collision_ent == NULL)
+ BUG("collision_ent is NULL");
+
+ /*
+ * Check for one-sided add/add/.../add conflicts, i.e.
+ * where implicit renames from the other side doing
+ * directory rename(s) can affect this side of history
+ * to put multiple paths into the same location. Warn
+ * and bail on directory renames for such paths.
+ */
+ if (collision_ent->reported_already) {
+ clean = 0;
+ } else if (tree_has_path(tree, new_path)) {
+ collision_ent->reported_already = 1;
+ strbuf_add_separated_string_list(&collision_paths, ", ",
+ &collision_ent->source_files);
+ output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+ "file/dir at %s in the way of implicit "
+ "directory rename(s) putting the following "
+ "path(s) there: %s."),
+ new_path, collision_paths.buf);
+ clean = 0;
+ } else if (collision_ent->source_files.nr > 1) {
+ collision_ent->reported_already = 1;
+ strbuf_add_separated_string_list(&collision_paths, ", ",
+ &collision_ent->source_files);
+ output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+ "more than one path to %s; implicit directory "
+ "renames tried to put these paths there: %s"),
+ new_path, collision_paths.buf);
+ clean = 0;
+ }
+
+ /* Free memory we no longer need */
+ strbuf_release(&collision_paths);
+ if (!clean && new_path) {
+ free(new_path);
+ return NULL;
+ }
+
+ return new_path;
+}
+
+/*
+ * There are a couple things we want to do at the directory level:
+ * 1. Check for both sides renaming to the same thing, in order to avoid
+ * implicit renaming of files that should be left in place. (See
+ * testcase 6b in t6043 for details.)
+ *   2. Prune directory renames if there are still files left in the
+ *      original directory.  These represent a partial directory rename,
+ * i.e. a rename where only some of the files within the directory
+ * were renamed elsewhere. (Technically, this could be done earlier
+ * in get_directory_renames(), except that would prevent us from
+ * doing the previous check and thus failing testcase 6b.)
+ * 3. Check for rename/rename(1to2) conflicts (at the directory level).
+ * In the future, we could potentially record this info as well and
+ * omit reporting rename/rename(1to2) conflicts for each path within
+ * the affected directories, thus cleaning up the merge output.
+ * NOTE: We do NOT check for rename/rename(2to1) conflicts at the
+ * directory level, because merging directories is fine. If it
+ * causes conflicts for files within those merged directories, then
+ * that should be detected at the individual path level.
+ */
+static void handle_directory_level_conflicts(struct merge_options *o,
+ struct hashmap *dir_re_head,
+ struct tree *head,
+ struct hashmap *dir_re_merge,
+ struct tree *merge)
+{
+ struct hashmap_iter iter;
+ struct dir_rename_entry *head_ent;
+ struct dir_rename_entry *merge_ent;
+
+ struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
+ struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
+
+ hashmap_iter_init(dir_re_head, &iter);
+ while ((head_ent = hashmap_iter_next(&iter))) {
+ merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
+ if (merge_ent &&
+ !head_ent->non_unique_new_dir &&
+ !merge_ent->non_unique_new_dir &&
+ !strbuf_cmp(&head_ent->new_dir, &merge_ent->new_dir)) {
+ /* 1. Renamed identically; remove it from both sides */
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ strbuf_release(&merge_ent->new_dir);
+ } else if (tree_has_path(head, head_ent->dir)) {
+ /* 2. This wasn't a directory rename after all */
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ }
+ }
+
+ remove_hashmap_entries(dir_re_head, &remove_from_head);
+ remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+
+ hashmap_iter_init(dir_re_merge, &iter);
+ while ((merge_ent = hashmap_iter_next(&iter))) {
+ head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
+ if (tree_has_path(merge, merge_ent->dir)) {
+ /* 2. This wasn't a directory rename after all */
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ } else if (head_ent &&
+ !head_ent->non_unique_new_dir &&
+ !merge_ent->non_unique_new_dir) {
+ /* 3. rename/rename(1to2) */
+ /*
+ * We can assume it's not rename/rename(1to1) because
+ * that was case (1), already checked above. So we
+ * know that head_ent->new_dir and merge_ent->new_dir
+ * are different strings.
+ */
+ output(o, 1, _("CONFLICT (rename/rename): "
+ "Rename directory %s->%s in %s. "
+ "Rename directory %s->%s in %s"),
+ head_ent->dir, head_ent->new_dir.buf, o->branch1,
+ head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ strbuf_release(&merge_ent->new_dir);
+ }
+ }
+
+ remove_hashmap_entries(dir_re_head, &remove_from_head);
+ remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+}
+
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
+ struct tree *tree)
+{
+ struct hashmap *dir_renames;
+ struct hashmap_iter iter;
+ struct dir_rename_entry *entry;
+ int i;
+
+ /*
+ * Typically, we think of a directory rename as all files from a
+ * certain directory being moved to a target directory. However,
+ * what if someone first moved two files from the original
+ * directory in one commit, and then renamed the directory
+ * somewhere else in a later commit? At merge time, we just know
+ * that files from the original directory went to two different
+ * places, and that the bulk of them ended up in the same place.
+ * We want each directory rename to represent where the bulk of the
+ * files from that directory end up; this function exists to find
+ * where the bulk of the files went.
+ *
+ * The first loop below simply iterates through the list of file
+ * renames, finding out how often each directory rename pair
+ * possibility occurs.
+ */
+ dir_renames = xmalloc(sizeof(struct hashmap));
+ dir_rename_init(dir_renames);
+ for (i = 0; i < pairs->nr; ++i) {
+ struct string_list_item *item;
+ int *count;
+ struct diff_filepair *pair = pairs->queue[i];
+ char *old_dir, *new_dir;
+
+ /* File not part of directory rename if it wasn't renamed */
+ if (pair->status != 'R')
+ continue;
+
+ get_renamed_dir_portion(pair->one->path, pair->two->path,
+ &old_dir, &new_dir);
+ if (!old_dir)
+ /* Directory didn't change at all; ignore this one. */
+ continue;
+
+ entry = dir_rename_find_entry(dir_renames, old_dir);
+ if (!entry) {
+ entry = xmalloc(sizeof(struct dir_rename_entry));
+ dir_rename_entry_init(entry, old_dir);
+ hashmap_put(dir_renames, entry);
+ } else {
+ free(old_dir);
+ }
+ item = string_list_lookup(&entry->possible_new_dirs, new_dir);
+ if (!item) {
+ item = string_list_insert(&entry->possible_new_dirs,
+ new_dir);
+ item->util = xcalloc(1, sizeof(int));
+ } else {
+ free(new_dir);
+ }
+ count = item->util;
+ *count += 1;
+ }
+
+ /*
+ * For each directory with files moved out of it, we find out which
+ * target directory received the most files so we can declare it to
+ * be the "winning" target location for the directory rename. This
+ * winner gets recorded in new_dir. If there is no winner
+ * (multiple target directories received the same number of files),
+ * we set non_unique_new_dir. Once we've determined the winner (or
+ * that there is no winner), we no longer need possible_new_dirs.
+ */
+ hashmap_iter_init(dir_renames, &iter);
+ while ((entry = hashmap_iter_next(&iter))) {
+ int max = 0;
+ int bad_max = 0;
+ char *best = NULL;
+
+ for (i = 0; i < entry->possible_new_dirs.nr; i++) {
+ int *count = entry->possible_new_dirs.items[i].util;
+
+ if (*count == max)
+ bad_max = max;
+ else if (*count > max) {
+ max = *count;
+ best = entry->possible_new_dirs.items[i].string;
+ }
+ }
+ if (bad_max == max)
+ entry->non_unique_new_dir = 1;
+ else {
+ assert(entry->new_dir.len == 0);
+ strbuf_addstr(&entry->new_dir, best);
+ }
+ /*
+		 * The relevant directory sub-portions of the original full
+		 * filepaths were xstrndup'ed before inserting into
+ * possible_new_dirs, and instead of manually iterating the
+ * list and free'ing each, just lie and tell
+ * possible_new_dirs that it did the strdup'ing so that it
+ * will free them for us.
+ */
+ entry->possible_new_dirs.strdup_strings = 1;
+ string_list_clear(&entry->possible_new_dirs, 1);
+ }
+
+ return dir_renames;
+}
+
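+/*
+ * Check whether path lies under a renamed directory, trying its deepest
+ * containing directory first and then walking up toward the root.
+ */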
+static struct dir_rename_entry *check_dir_renamed(const char *path,
+ struct hashmap *dir_renames)
+{
+	char *temp = xstrdup(path);
+	char *end;
+	struct dir_rename_entry *entry = NULL;
+
+	while ((end = strrchr(temp, '/'))) {
+		*end = '\0';
+		entry = dir_rename_find_entry(dir_renames, temp);
+		if (entry)
+			break;
+	}
+	free(temp);
+	return entry;
+}
+
+static void compute_collisions(struct hashmap *collisions,
+ struct hashmap *dir_renames,
+ struct diff_queue_struct *pairs)
+{
+ int i;
+
+ /*
+ * Multiple files can be mapped to the same path due to directory
+ * renames done by the other side of history. Since that other
+ * side of history could have merged multiple directories into one,
+ * if our side of history added the same file basename to each of
+ * those directories, then all N of them would get implicitly
+ * renamed by the directory rename detection into the same path,
+	 * and we'd get an add/add/.../add conflict, with all of those adds
+	 * coming from *this* side of history.  This is not representable in the
+ * index, and users aren't going to easily be able to make sense of
+ * it. So we need to provide a good warning about what's
+ * happening, and fall back to no-directory-rename detection
+ * behavior for those paths.
+ *
+ * See testcases 9e and all of section 5 from t6043 for examples.
+ */
+ collision_init(collisions);
+
+ for (i = 0; i < pairs->nr; ++i) {
+ struct dir_rename_entry *dir_rename_ent;
+ struct collision_entry *collision_ent;
+ char *new_path;
+ struct diff_filepair *pair = pairs->queue[i];
+
+ if (pair->status != 'A' && pair->status != 'R')
+ continue;
+ dir_rename_ent = check_dir_renamed(pair->two->path,
+ dir_renames);
+ if (!dir_rename_ent)
+ continue;
+
+ new_path = apply_dir_rename(dir_rename_ent, pair->two->path);
+ if (!new_path)
+ /*
+			 * dir_rename_ent->non_unique_new_dir is true, which
+ * means there is no directory rename for us to use,
+ * which means it won't cause us any additional
+ * collisions.
+ */
+ continue;
+ collision_ent = collision_find_entry(collisions, new_path);
+ if (!collision_ent) {
+ collision_ent = xcalloc(1,
+ sizeof(struct collision_entry));
+ hashmap_entry_init(collision_ent, strhash(new_path));
+ hashmap_put(collisions, collision_ent);
+ collision_ent->target_file = new_path;
+ } else {
+ free(new_path);
+ }
+ string_list_insert(&collision_ent->source_files,
+ pair->two->path);
+ }
+}
+
+static char *check_for_directory_rename(struct merge_options *o,
+ const char *path,
+ struct tree *tree,
+ struct hashmap *dir_renames,
+ struct hashmap *dir_rename_exclusions,
+ struct hashmap *collisions,
+ int *clean_merge)
+{
+ char *new_path = NULL;
+ struct dir_rename_entry *entry = check_dir_renamed(path, dir_renames);
+ struct dir_rename_entry *oentry = NULL;
+
+ if (!entry)
+ return new_path;
+
+ /*
+ * This next part is a little weird. We do not want to do an
+ * implicit rename into a directory we renamed on our side, because
+ * that will result in a spurious rename/rename(1to2) conflict. An
+ * example:
+ * Base commit: dumbdir/afile, otherdir/bfile
+ * Side 1: smrtdir/afile, otherdir/bfile
+ * Side 2: dumbdir/afile, dumbdir/bfile
+ * Here, while working on Side 1, we could notice that otherdir was
+ * renamed/merged to dumbdir, and change the diff_filepair for
+ * otherdir/bfile into a rename into dumbdir/bfile. However, Side
+ * 2 will notice the rename from dumbdir to smrtdir, and do the
+ * transitive rename to move it from dumbdir/bfile to
+ * smrtdir/bfile. That gives us bfile in dumbdir vs being in
+ * smrtdir, a rename/rename(1to2) conflict. We really just want
+ * the file to end up in smrtdir. And the way to achieve that is
+ * to not let Side1 do the rename to dumbdir, since we know that is
+ * the source of one of our directory renames.
+ *
+	 * That's why oentry and dir_rename_exclusions are here.
+ *
+ * As it turns out, this also prevents N-way transient rename
+ * confusion; See testcases 9c and 9d of t6043.
+ */
+ oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
+ if (oentry) {
+ output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+ "to %s, because %s itself was renamed."),
+ entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
+ } else {
+ new_path = handle_path_level_conflicts(o, path, entry,
+ collisions, tree);
+ *clean_merge &= (new_path != NULL);
+ }
+
+ return new_path;
+}
+
+static void apply_directory_rename_modifications(struct merge_options *o,
+ struct diff_filepair *pair,
+ char *new_path,
+ struct rename *re,
+ struct tree *tree,
+ struct tree *o_tree,
+ struct tree *a_tree,
+ struct tree *b_tree,
+ struct string_list *entries,
+ int *clean)
+{
+ struct string_list_item *item;
+ int stage = (tree == a_tree ? 2 : 3);
+ int update_wd;
+
+ /*
+ * In all cases where we can do directory rename detection,
+ * unpack_trees() will have read pair->two->path into the
+ * index and the working copy. We need to remove it so that
+ * we can instead place it at new_path. It is guaranteed to
+ * not be untracked (unpack_trees() would have errored out
+ * saying the file would have been overwritten), but it might
+	 * be dirty.
+ */
+ update_wd = !was_dirty(o, pair->two->path);
+ if (!update_wd)
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ pair->two->path);
+ remove_file(o, 1, pair->two->path, !update_wd);
+
+ /* Find or create a new re->dst_entry */
+ item = string_list_lookup(entries, new_path);
+ if (item) {
+ /*
+ * Since we're renaming on this side of history, and it's
+ * due to a directory rename on the other side of history
+ * (which we only allow when the directory in question no
+ * longer exists on the other side of history), the
+ * original entry for re->dst_entry is no longer
+ * necessary...
+ */
+ re->dst_entry->processed = 1;
+
+ /*
+ * ...because we'll be using this new one.
+ */
+ re->dst_entry = item->util;
+ } else {
+ /*
+ * re->dst_entry is for the before-dir-rename path, and we
+ * need it to hold information for the after-dir-rename
+ * path. Before creating a new entry, we need to mark the
+ * old one as unnecessary (...unless it is shared by
+ * src_entry, i.e. this didn't use to be a rename, in which
+ * case we can just allow the normal processing to happen
+ * for it).
+ */
+ if (pair->status == 'R')
+ re->dst_entry->processed = 1;
+
+ re->dst_entry = insert_stage_data(new_path,
+ o_tree, a_tree, b_tree,
+ entries);
+ item = string_list_insert(entries, new_path);
+ item->util = re->dst_entry;
+ }
+
+ /*
+ * Update the stage_data with the information about the path we are
+ * moving into place. That slot will be empty and available for us
+ * to write to because of the collision checks in
+ * handle_path_level_conflicts(). In other words,
+ * re->dst_entry->stages[stage].oid will be the null_oid, so it's
+ * open for us to write to.
+ *
+ * It may be tempting to actually update the index at this point as
+ * well, using update_stages_for_stage_data(), but as per the big
+ * "NOTE" in update_stages(), doing so will modify the current
+ * in-memory index which will break calls to would_lose_untracked()
+ * that we need to make. Instead, we need to just make sure that
+ * the various conflict_rename_*() functions update the index
+ * explicitly rather than relying on unpack_trees() to have done it.
+ */
+ get_tree_entry(&tree->object.oid,
+ pair->two->path,
+ &re->dst_entry->stages[stage].oid,
+ &re->dst_entry->stages[stage].mode);
+
+ /* Update pair status */
+ if (pair->status == 'A') {
+ /*
+ * Recording rename information for this add makes it look
+ * like a rename/delete conflict. Make sure we can
+ * correctly handle this as an add that was moved to a new
+ * directory instead of reporting a rename/delete conflict.
+ */
+ re->add_turned_into_rename = 1;
+ }
+ /*
+ * We don't actually look at pair->status again, but it seems
+ * pedagogically correct to adjust it.
+ */
+ pair->status = 'R';
+
+ /*
+ * Finally, record the new location.
+ */
+ pair->two->path = new_path;
+}
+
+/*
+ * Get information of all renames which occurred in 'pairs', making use of
+ * any implicit directory renames inferred from the other side of history.
+ * We need the three trees in the merge ('o_tree', 'a_tree' and 'b_tree')
+ * to be able to associate the correct cache entries with the rename
+ * information; tree is always equal to either a_tree or b_tree.
+ */
+static struct string_list *get_renames(struct merge_options *o,
+ struct diff_queue_struct *pairs,
+ struct hashmap *dir_renames,
+ struct hashmap *dir_rename_exclusions,
+ struct tree *tree,
+ struct tree *o_tree,
+ struct tree *a_tree,
+ struct tree *b_tree,
+ struct string_list *entries,
+ int *clean_merge)
+{
+ int i;
+ struct hashmap collisions;
+ struct hashmap_iter iter;
+ struct collision_entry *e;
+ struct string_list *renames;
+
+ compute_collisions(&collisions, dir_renames, pairs);
+ renames = xcalloc(1, sizeof(struct string_list));
+
+ for (i = 0; i < pairs->nr; ++i) {
+ struct string_list_item *item;
+ struct rename *re;
+ struct diff_filepair *pair = pairs->queue[i];
+ char *new_path; /* non-NULL only with directory renames */
+
+ if (pair->status != 'A' && pair->status != 'R') {
+ diff_free_filepair(pair);
+ continue;
+ }
+ new_path = check_for_directory_rename(o, pair->two->path, tree,
+ dir_renames,
+ dir_rename_exclusions,
+ &collisions,
+ clean_merge);
+ if (pair->status != 'R' && !new_path) {
+ diff_free_filepair(pair);
+ continue;
+ }
+
+ re = xmalloc(sizeof(*re));
+ re->processed = 0;
+ re->add_turned_into_rename = 0;
+ re->pair = pair;
+ item = string_list_lookup(entries, re->pair->one->path);
+ if (!item)
+ re->src_entry = insert_stage_data(re->pair->one->path,
+ o_tree, a_tree, b_tree, entries);
+ else
+ re->src_entry = item->util;
+
+ item = string_list_lookup(entries, re->pair->two->path);
+ if (!item)
+ re->dst_entry = insert_stage_data(re->pair->two->path,
+ o_tree, a_tree, b_tree, entries);
+ else
+ re->dst_entry = item->util;
+ item = string_list_insert(renames, pair->one->path);
+ item->util = re;
+ if (new_path)
+ apply_directory_rename_modifications(o, pair, new_path,
+ re, tree, o_tree,
+ a_tree, b_tree,
+ entries,
+ clean_merge);
+ }
+
+ hashmap_iter_init(&collisions, &iter);
+ while ((e = hashmap_iter_next(&iter))) {
+ free(e->target_file);
+ string_list_clear(&e->source_files, 0);
+ }
+ hashmap_free(&collisions, 1);
+ return renames;
+}
+
static int process_renames(struct merge_options *o,
struct string_list *a_renames,
struct string_list *b_renames)
dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
try_merge = 0;
- if (oid_eq(&src_other.oid, &null_oid)) {
+ if (oid_eq(&src_other.oid, &null_oid) &&
+ ren1->add_turned_into_rename) {
+ setup_rename_conflict_info(RENAME_DIR,
+ ren1->pair,
+ NULL,
+ branch1,
+ branch2,
+ ren1->dst_entry,
+ NULL,
+ o,
+ NULL,
+ NULL);
+ } else if (oid_eq(&src_other.oid, &null_oid)) {
setup_rename_conflict_info(RENAME_DELETE,
ren1->pair,
NULL,
return clean_merge;
}
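+/*
+ * Renames detected on each side of the merge, as computed by
+ * handle_renames() and released by final_cleanup_renames().
+ */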
+struct rename_info {
+ struct string_list *head_renames;
+ struct string_list *merge_renames;
+};
+
+static void initial_cleanup_rename(struct diff_queue_struct *pairs,
+ struct hashmap *dir_renames)
+{
+ struct hashmap_iter iter;
+ struct dir_rename_entry *e;
+
+ hashmap_iter_init(dir_renames, &iter);
+ while ((e = hashmap_iter_next(&iter))) {
+ free(e->dir);
+ strbuf_release(&e->new_dir);
+ /* possible_new_dirs already cleared in get_directory_renames */
+ }
+ hashmap_free(dir_renames, 1);
+ free(dir_renames);
+
+ free(pairs->queue);
+ free(pairs);
+}
+
+static int handle_renames(struct merge_options *o,
+ struct tree *common,
+ struct tree *head,
+ struct tree *merge,
+ struct string_list *entries,
+ struct rename_info *ri)
+{
+ struct diff_queue_struct *head_pairs, *merge_pairs;
+ struct hashmap *dir_re_head, *dir_re_merge;
+ int clean = 1;
+
+ ri->head_renames = NULL;
+ ri->merge_renames = NULL;
+
+ if (!o->detect_rename)
+ return 1;
+
+ head_pairs = get_diffpairs(o, common, head);
+ merge_pairs = get_diffpairs(o, common, merge);
+
+ dir_re_head = get_directory_renames(head_pairs, head);
+ dir_re_merge = get_directory_renames(merge_pairs, merge);
+
+ handle_directory_level_conflicts(o,
+ dir_re_head, head,
+ dir_re_merge, merge);
+
+ ri->head_renames = get_renames(o, head_pairs,
+ dir_re_merge, dir_re_head, head,
+ common, head, merge, entries,
+ &clean);
+ if (clean < 0)
+ goto cleanup;
+ ri->merge_renames = get_renames(o, merge_pairs,
+ dir_re_head, dir_re_merge, merge,
+ common, head, merge, entries,
+ &clean);
+ if (clean < 0)
+ goto cleanup;
+ clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+
+cleanup:
+ /*
+	 * Some cleanup is deferred until final_cleanup_renames() because the
+ * data structures are still needed and referenced in
+ * process_entry(). But there are a few things we can free now.
+ */
+ initial_cleanup_rename(head_pairs, dir_re_head);
+ initial_cleanup_rename(merge_pairs, dir_re_merge);
+
+ return clean;
+}
+
+static void final_cleanup_rename(struct string_list *rename)
+{
+ const struct rename *re;
+ int i;
+
+ if (rename == NULL)
+ return;
+
+ for (i = 0; i < rename->nr; i++) {
+ re = rename->items[i].util;
+ diff_free_filepair(re->pair);
+ }
+ string_list_clear(rename, 1);
+ free(rename);
+}
+
+static void final_cleanup_renames(struct rename_info *re_info)
+{
+ final_cleanup_rename(re_info->head_renames);
+ final_cleanup_rename(re_info->merge_renames);
+}
+
static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
{
return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
void *buf;
enum object_type type;
unsigned long size;
- buf = read_sha1_file(oid->hash, &type, &size);
+ buf = read_object_file(oid, &type, &size);
if (!buf)
return err(o, _("cannot read object %s"), oid_to_hex(oid));
if (type != OBJ_BLOB) {
static int merge_content(struct merge_options *o,
const char *path,
+ int file_in_way,
struct object_id *o_oid, int o_mode,
struct object_id *a_oid, int a_mode,
struct object_id *b_oid, int b_mode,
if (mfi.clean && !df_conflict_remains &&
oid_eq(&mfi.oid, a_oid) && mfi.mode == a_mode) {
- int path_renamed_outside_HEAD;
output(o, 3, _("Skipped %s (merged same as existing)"), path);
/*
* The content merge resulted in the same file contents we
* are recorded at the correct path (which may not be true
* if the merge involves a rename).
*/
- path_renamed_outside_HEAD = !path2 || !strcmp(path, path2);
- if (!path_renamed_outside_HEAD) {
+ if (was_tracked(path)) {
add_cacheinfo(o, mfi.mode, &mfi.oid, path,
0, (!o->call_depth), 0);
return mfi.clean;
return -1;
}
- if (df_conflict_remains) {
+ if (df_conflict_remains || file_in_way) {
char *new_path;
if (o->call_depth) {
remove_file_from_cache(path);
return mfi.clean;
}
+static int conflict_rename_normal(struct merge_options *o,
+ const char *path,
+ struct object_id *o_oid, unsigned int o_mode,
+ struct object_id *a_oid, unsigned int a_mode,
+ struct object_id *b_oid, unsigned int b_mode,
+ struct rename_conflict_info *ci)
+{
+ int clean_merge;
+ int file_in_the_way = 0;
+
+ if (was_dirty(o, path)) {
+ file_in_the_way = 1;
+ output(o, 1, _("Refusing to lose dirty file at %s"), path);
+ }
+
+ /* Merge the content and write it out */
+ clean_merge = merge_content(o, path, file_in_the_way,
+ o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+ ci);
+ if (clean_merge > 0 && file_in_the_way)
+ clean_merge = 0;
+ return clean_merge;
+}
+
/* Per entry merge function */
static int process_entry(struct merge_options *o,
const char *path, struct stage_data *entry)
switch (conflict_info->rename_type) {
case RENAME_NORMAL:
case RENAME_ONE_FILE_TO_ONE:
- clean_merge = merge_content(o, path,
- o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
- conflict_info);
+ clean_merge = conflict_rename_normal(o,
+ path,
+ o_oid, o_mode,
+ a_oid, a_mode,
+ b_oid, b_mode,
+ conflict_info);
+ break;
+ case RENAME_DIR:
+ clean_merge = 1;
+ if (conflict_rename_dir(o,
+ conflict_info->pair1,
+ conflict_info->branch1,
+ conflict_info->branch2))
+ clean_merge = -1;
break;
case RENAME_DELETE:
clean_merge = 0;
} else if (a_oid && b_oid) {
/* Case C: Added in both (check for same permissions) and */
/* case D: Modified in both, but differently. */
- clean_merge = merge_content(o, path,
+ clean_merge = merge_content(o, path, 0 /* file_in_way */,
o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
NULL);
} else if (!o_oid && !a_oid && !b_oid) {
return 1;
}
- code = git_merge_trees(o->call_depth, common, head, merge);
+ code = git_merge_trees(o, common, head, merge);
if (code != 0) {
if (show(o, 4) || o->call_depth)
}
if (unmerged_cache()) {
- struct string_list *entries, *re_head, *re_merge;
+ struct string_list *entries;
+ struct rename_info re_info;
int i;
/*
* Only need the hashmap while processing entries, so
get_files_dirs(o, merge);
entries = get_unmerged();
+ clean = handle_renames(o, common, head, merge, entries,
+ &re_info);
record_df_conflict_files(o, entries);
- re_head = get_renames(o, head, common, head, merge, entries);
- re_merge = get_renames(o, merge, common, head, merge, entries);
- clean = process_renames(o, re_head, re_merge);
if (clean < 0)
goto cleanup;
for (i = entries->nr-1; 0 <= i; i--) {
}
cleanup:
- string_list_clear(re_merge, 0);
- string_list_clear(re_head, 0);
+ final_cleanup_renames(&re_info);
+
string_list_clear(entries, 1);
+ free(entries);
hashmap_free(&o->current_file_dir_set, 1);
- free(re_merge);
- free(re_head);
- free(entries);
-
if (clean < 0)
return clean;
}
{
struct commit_list *iter;
struct commit *merged_common_ancestors;
- struct tree *mrtree = mrtree;
+ struct tree *mrtree;
int clean;
if (show(o, 4)) {
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
clean = merge_recursive(o, head_commit, next_commit, ca,
result);
- if (clean < 0)
+ if (clean < 0) {
+ rollback_lock_file(&lock);
return clean;
+ }
- if (active_cache_changed &&
- write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
return err(o, _("Unable to write index."));
return clean ? 0 : 1;
replace_index_entry_in_base(istate, old, ce);
remove_name_hash(istate, old);
free(old);
+ ce->ce_flags &= ~CE_HASHED;
set_index_entry(istate, nr, ce);
ce->ce_flags |= CE_UPDATE_IN_BASE;
mark_fsmonitor_invalid(istate, ce);
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
- struct cache_entry *old = istate->cache[nr], *new;
+ struct cache_entry *old_entry = istate->cache[nr], *new_entry;
int namelen = strlen(new_name);
- new = xmalloc(cache_entry_size(namelen));
- copy_cache_entry(new, old);
- new->ce_flags &= ~CE_HASHED;
- new->ce_namelen = namelen;
- new->index = 0;
- memcpy(new->name, new_name, namelen + 1);
+ new_entry = xmalloc(cache_entry_size(namelen));
+ copy_cache_entry(new_entry, old_entry);
+ new_entry->ce_flags &= ~CE_HASHED;
+ new_entry->ce_namelen = namelen;
+ new_entry->index = 0;
+ memcpy(new_entry->name, new_name, namelen + 1);
- cache_tree_invalidate_path(istate, old->name);
- untracked_cache_remove_from_index(istate, old->name);
+ cache_tree_invalidate_path(istate, old_entry->name);
+ untracked_cache_remove_from_index(istate, old_entry->name);
remove_index_entry_at(istate, nr);
- add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+ add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
if (strbuf_readlink(&sb, ce->name, expected_size))
return -1;
- buffer = read_sha1_file(ce->oid.hash, &type, &size);
+ buffer = read_object_file(&ce->oid, &type, &size);
if (buffer) {
if (size == sb.len)
match = memcmp(buffer, sb.buf, size);
struct cache_entry *alias)
{
int len;
- struct cache_entry *new;
+ struct cache_entry *new_entry;
if (alias->ce_flags & CE_ADDED)
die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
- new = xcalloc(1, cache_entry_size(len));
- memcpy(new->name, alias->name, len);
- copy_cache_entry(new, ce);
+ new_entry = xcalloc(1, cache_entry_size(len));
+ memcpy(new_entry->name, alias->name, len);
+ copy_cache_entry(new_entry, ce);
save_or_free_index_entry(istate, ce);
- return new;
+ return new_entry;
}
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
- unsigned char sha1[20];
- if (write_sha1_file("", 0, blob_type, sha1))
+ struct object_id oid;
+ if (write_object_file("", 0, blob_type, &oid))
die("cannot create an empty blob in the object database");
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, &oid);
}
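
Fixed-size "unsigned char sha1[20]" buffers keep giving way to struct object_id, which is sized for the largest hash the build supports and is copied and compared through helpers rather than raw memcpy/memcmp on bare arrays. A toy standalone version of that idea (MAX_RAWSZ and the helper names are made up for the sketch):

#include <stdio.h>
#include <string.h>

/* Illustrative: one struct big enough for a 20- or 32-byte hash. */
#define MAX_RAWSZ 32

struct obj_id {
	unsigned char hash[MAX_RAWSZ];
};

static void oid_copy(struct obj_id *dst, const struct obj_id *src)
{
	memcpy(dst->hash, src->hash, MAX_RAWSZ);
}

static int oid_equal(const struct obj_id *a, const struct obj_id *b)
{
	return !memcmp(a->hash, b->hash, MAX_RAWSZ);
}

int main(void)
{
	struct obj_id a = { { 0 } }, b;

	oid_copy(&b, &a);
	printf("%s\n", oid_equal(&a, &b) ? "equal" : "different");
	return 0;
}
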
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
/* Add it in.. */
istate->cache_nr++;
if (istate->cache_nr > pos + 1)
- memmove(istate->cache + pos + 1,
- istate->cache + pos,
- (istate->cache_nr - pos - 1) * sizeof(ce));
+ MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+ istate->cache_nr - pos - 1);
set_index_entry(istate, pos, ce);
istate->cache_changed |= CE_ENTRY_ADDED;
return 0;
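
MOVE_ARRAY() replaces the open-coded memmove() so the element count, not a byte count, appears at the call site. Below is a compilable sketch of such a macro; Git's real version additionally build-asserts that source and destination elements have the same size.

#include <stdio.h>
#include <string.h>

/*
 * Sketch of a MOVE_ARRAY-style helper: callers pass an element count
 * and the byte size is derived from the pointed-to type, so the classic
 * sizeof(pointer)-instead-of-sizeof(element) mistake cannot happen.
 */
#define MOVE_ARRAY(dst, src, n) \
	memmove((dst), (src), (n) * sizeof(*(dst)))

int main(void)
{
	int a[5] = { 1, 2, 3, 4, 5 };

	MOVE_ARRAY(a + 1, a, 4);	/* shift four elements up one slot */
	a[0] = 0;
	for (int i = 0; i < 5; i++)
		printf("%d ", a[i]);	/* prints: 0 1 2 3 4 */
	printf("\n");
	return 0;
}
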
size = ce_size(ce);
updated = xmalloc(size);
- memcpy(updated, ce, size);
+ copy_cache_entry(updated, ce);
+ memcpy(updated->name, ce->name, ce->ce_namelen + 1);
fill_stat_cache_info(updated, &st);
/*
* If ignore_valid is not set, we should leave CE_VALID bit
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
+ uint64_t start = getnanotime();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
for (i = 0; i < istate->cache_nr; i++) {
- struct cache_entry *ce, *new;
+ struct cache_entry *ce, *new_entry;
int cache_errno = 0;
int changed = 0;
int filtered = 0;
if (filtered)
continue;
- new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
- if (new == ce)
+ new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
+ if (new_entry == ce)
continue;
- if (!new) {
+ if (!new_entry) {
const char *fmt;
if (really && cache_errno == EINVAL) {
continue;
}
- replace_index_entry(istate, i, new);
+ replace_index_entry(istate, i, new_entry);
}
+ trace_performance_since(start, "refresh index");
return has_errors;
}
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
- git_SHA_CTX c;
- unsigned char sha1[20];
+ git_hash_ctx c;
+ unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
if (!verify_index_checksum)
return 0;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, size - 20);
- git_SHA1_Final(sha1, &c);
- if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+ the_hash_algo->final_fn(hash, &c);
+ if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
return error("bad index file sha1 signature");
return 0;
}
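
verify_hdr() now reaches the hash through the_hash_algo's function pointers instead of calling git_SHA1_* directly, so the same code can checksum with whatever algorithm the repository uses. The standalone sketch below shows that indirection with a deliberately toy 4-byte checksum standing in for SHA-1/SHA-256; the struct layout is illustrative, not Git's struct git_hash_algo.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_hash_ctx { uint32_t sum; };

/* Callers only see rawsz plus init/update/final. */
struct hash_algo {
	size_t rawsz;
	void (*init_fn)(struct toy_hash_ctx *);
	void (*update_fn)(struct toy_hash_ctx *, const void *, size_t);
	void (*final_fn)(unsigned char *, struct toy_hash_ctx *);
};

static void toy_init(struct toy_hash_ctx *c) { c->sum = 0; }
static void toy_update(struct toy_hash_ctx *c, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	while (len--)
		c->sum += *p++;
}
static void toy_final(unsigned char *out, struct toy_hash_ctx *c)
{
	memcpy(out, &c->sum, sizeof(c->sum));
}

static const struct hash_algo toy_algo = { 4, toy_init, toy_update, toy_final };
static const struct hash_algo *the_algo = &toy_algo;

int main(void)
{
	struct toy_hash_ctx c;
	unsigned char hash[32];	/* big enough for any supported rawsz */

	the_algo->init_fn(&c);
	the_algo->update_fn(&c, "hello", 5);
	the_algo->final_fn(hash, &c);
	printf("rawsz=%zu first byte=%02x\n", the_algo->rawsz, hash[0]);
	return 0;
}
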
int read_index(struct index_state *istate)
{
- return read_index_from(istate, get_index_file());
+ return read_index_from(istate, get_index_file(), get_git_dir());
}
static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
die_errno("cannot stat the open index");
mmap_size = xsize_t(st.st_size);
- if (mmap_size < sizeof(struct cache_header) + 20)
+ if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
die("index file smaller than expected");
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+ hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
- while (src_offset <= mmap_size - 20 - 8) {
+ while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
/* After an array of active_nr index entries,
* there can be arbitrary number of extended
* sections, each of which is prefixed with
* This way, shared index can be removed if they have not been used
* for some time.
*/
-static void freshen_shared_index(char *base_sha1_hex, int warn)
+static void freshen_shared_index(const char *shared_index, int warn)
{
- char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex);
if (!check_and_freshen_file(shared_index, 1) && warn)
warning("could not freshen shared index '%s'", shared_index);
- free(shared_index);
}
-int read_index_from(struct index_state *istate, const char *path)
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
{
+ uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
char *base_sha1_hex;
- const char *base_path;
+ char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
if (istate->initialized)
return istate->cache_nr;
ret = do_read_index(istate, path, 0);
+ trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
if (!split_index || is_null_sha1(split_index->base_sha1)) {
split_index->base = xcalloc(1, sizeof(*split_index->base));
base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = git_path("sharedindex.%s", base_sha1_hex);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
ret = do_read_index(split_index->base, base_path, 1);
if (hashcmp(split_index->base_sha1, split_index->base->sha1))
die("broken index, expect %s in %s, got %s",
base_sha1_hex, base_path,
sha1_to_hex(split_index->base->sha1));
- freshen_shared_index(base_sha1_hex, 0);
+ freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
+ trace_performance_since(start, "read cache %s", base_path);
+ free(base_path);
return ret;
}
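
read_index_from() now records getnanotime() at entry and reports the elapsed time through trace_performance_since() for both the main index and the shared base. Here is a self-contained approximation of that timing pattern using clock_gettime(CLOCK_MONOTONIC); the output format is made up, since Git routes this through its trace API.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Rough analogue of getnanotime(): monotonic time in nanoseconds. */
static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t start = now_ns();
	volatile unsigned long x = 0;

	for (unsigned long i = 0; i < 10000000ul; i++)
		x += i;		/* stand-in for reading the index */
	/* Git would emit this line via trace_performance_since(). */
	fprintf(stderr, "performance: %.6f s: demo loop\n",
		(now_ns() - start) / 1e9);
	return 0;
}
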
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
{
unsigned int buffered = write_buffer_len;
if (buffered) {
- git_SHA1_Update(context, write_buffer, buffered);
+ the_hash_algo->update_fn(context, write_buffer, buffered);
if (write_in_full(fd, write_buffer, buffered) < 0)
return -1;
write_buffer_len = 0;
return 0;
}
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
while (len) {
unsigned int buffered = write_buffer_len;
return 0;
}
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
unsigned int ext, unsigned int sz)
{
ext = htonl(ext);
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
unsigned int left = write_buffer_len;
if (left) {
write_buffer_len = 0;
- git_SHA1_Update(context, write_buffer, left);
+ the_hash_algo->update_fn(context, write_buffer, left);
}
- /* Flush first if not enough space for SHA1 signature */
- if (left + 20 > WRITE_BUFFER_SIZE) {
+ /* Flush first if not enough space for hash signature */
+ if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
if (write_in_full(fd, write_buffer, left) < 0)
return -1;
left = 0;
}
- /* Append the SHA1 signature at the end */
- git_SHA1_Final(write_buffer + left, context);
- hashcpy(sha1, write_buffer + left);
- left += 20;
+ /* Append the hash signature at the end */
+ the_hash_algo->final_fn(write_buffer + left, context);
+ hashcpy(hash, write_buffer + left);
+ left += the_hash_algo->rawsz;
return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
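
ce_flush() keeps hashing everything it has written and then appends the digest itself as the last rawsz bytes of the file, which is exactly what verify_hdr() checks on the read side. A toy standalone illustration of that trailer layout, with a 4-byte sum standing in for the real hash:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RAWSZ 4	/* toy digest size; the index uses the_hash_algo->rawsz */

static void toy_digest(const unsigned char *buf, size_t len,
		       unsigned char out[RAWSZ])
{
	uint32_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum += buf[i];
	memcpy(out, &sum, RAWSZ);
}

int main(void)
{
	unsigned char file[64 + RAWSZ];
	size_t payload = 64;
	unsigned char check[RAWSZ];

	memset(file, 0xab, payload);
	toy_digest(file, payload, file + payload);	/* append trailer */

	/* verify_hdr()-style check: hash size - rawsz bytes, compare trailer */
	toy_digest(file, sizeof(file) - RAWSZ, check);
	puts(memcmp(check, file + sizeof(file) - RAWSZ, RAWSZ)
	     ? "bad signature" : "signature ok");
	return 0;
}
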
}
}
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
int size;
- int saved_namelen = saved_namelen; /* compiler workaround */
int result;
+ unsigned int saved_namelen;
+ int stripped_name = 0;
static unsigned char padding[8] = { 0x00 };
if (ce->ce_flags & CE_STRIP_NAME) {
saved_namelen = ce_namelen(ce);
ce->ce_namelen = 0;
+ stripped_name = 1;
}
if (ce->ce_flags & CE_EXTENDED)
strbuf_splice(previous_name, common, to_remove,
ce->name + common, ce_namelen(ce) - common);
}
- if (ce->ce_flags & CE_STRIP_NAME) {
+ if (stripped_name) {
ce->ce_namelen = saved_namelen;
ce->ce_flags &= ~CE_STRIP_NAME;
}
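
The old "int saved_namelen = saved_namelen;" self-initialization only silenced a maybe-uninitialized warning; the rewrite records explicitly whether the name was stripped and restores it only in that case. A trivial standalone sketch of that save/restore-guarded-by-flag shape:

#include <stdio.h>

int main(void)
{
	int namelen = 11;
	int saved_namelen;
	int stripped = 0;
	int strip = 1;		/* e.g. CE_STRIP_NAME was set */

	if (strip) {
		saved_namelen = namelen;
		namelen = 0;
		stripped = 1;
	}
	/* ... write the entry using the possibly-stripped length ... */
	if (stripped)
		namelen = saved_namelen;	/* restore only if changed */
	printf("namelen=%d\n", namelen);
	return 0;
}
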
int fd;
ssize_t n;
struct stat st;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
if (!istate->initialized)
return 0;
if (fstat(fd, &st))
goto out;
- if (st.st_size < sizeof(struct cache_header) + 20)
+ if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
goto out;
- n = pread_in_full(fd, sha1, 20, st.st_size - 20);
- if (n != 20)
+ n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+ if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, sha1))
+ if (hashcmp(istate->sha1, hash))
goto out;
close(fd);
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int strip_extensions)
{
+ uint64_t start = getnanotime();
int newfd = tempfile->fd;
- git_SHA_CTX c;
+ git_hash_ctx c;
struct cache_header hdr;
int i, err = 0, removed, extended, hdr_version;
struct cache_entry **cache = istate->cache;
struct stat st;
struct ondisk_cache_entry_extended ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
- int drop_cache_tree = 0;
+ int drop_cache_tree = istate->drop_cache_tree;
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
hdr.hdr_version = htonl(hdr_version);
hdr.hdr_entries = htonl(entries - removed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
return -1;
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
+ trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
return 0;
}
}
static int write_shared_index(struct index_state *istate,
- struct lock_file *lock, unsigned flags)
+ struct tempfile **temp)
{
- struct tempfile *temp;
struct split_index *si = istate->split_index;
int ret;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
- if (!temp) {
- hashclr(si->base_sha1);
- return do_write_locked_index(istate, lock, flags);
- }
move_cache_to_base_index(istate);
- ret = do_write_index(si->base, temp, 1);
- if (ret) {
- delete_tempfile(&temp);
+ ret = do_write_index(si->base, *temp, 1);
+ if (ret)
return ret;
- }
- ret = adjust_shared_perm(get_tempfile_path(temp));
+ ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- int save_errno = errno;
- error("cannot fix permission bits on %s", get_tempfile_path(temp));
- delete_tempfile(&temp);
- errno = save_errno;
+ error("cannot fix permission bits on %s", get_tempfile_path(*temp));
return ret;
}
- ret = rename_tempfile(&temp,
+ ret = rename_tempfile(temp,
git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
if (!ret) {
hashcpy(si->base_sha1, si->base->sha1);
int new_shared_index, ret;
struct split_index *si = istate->split_index;
+ if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
+ if (flags & COMMIT_LOCK)
+ rollback_lock_file(lock);
+ return 0;
+ }
+
if (istate->fsmonitor_last_update)
fill_fsmonitor_bitmap(istate);
new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
if (new_shared_index) {
- ret = write_shared_index(istate, lock, flags);
+ struct tempfile *temp;
+ int saved_errno;
+
+ temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ if (!temp) {
+ hashclr(si->base_sha1);
+ ret = do_write_locked_index(istate, lock, flags);
+ goto out;
+ }
+ ret = write_shared_index(istate, &temp);
+
+ saved_errno = errno;
+ if (is_tempfile_active(temp))
+ delete_tempfile(&temp);
+ errno = saved_errno;
+
if (ret)
goto out;
}
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index)
- freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+ if (!ret && !new_shared_index) {
+ const char *shared_index = git_path("sharedindex.%s",
+ sha1_to_hex(si->base_sha1));
+ freshen_shared_index(shared_index, 1);
+ }
out:
if (flags & COMMIT_LOCK)
}
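
With the tempfile hoisted into the caller, write_locked_index() now deletes a still-active temporary itself and preserves errno across that cleanup so the eventual error message reflects the original failure. A small standalone sketch of that errno discipline (file names and helpers are illustrative, not Git's tempfile API):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_then_rename(const char *tmp, const char *final_path)
{
	int fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0666);
	int ret, saved_errno;

	if (fd < 0)
		return -1;
	ret = write(fd, "data\n", 5) == 5 ? 0 : -1;
	if (!ret)
		ret = rename(tmp, final_path);

	saved_errno = errno;
	if (ret)
		unlink(tmp);	/* cleanup may clobber errno ... */
	close(fd);
	errno = saved_errno;	/* ... so restore it for the caller */
	return ret;
}

int main(void)
{
	if (write_then_rename("demo.tmp", "/no/such/dir/demo"))
		fprintf(stderr, "failed: %s\n", strerror(errno));
	return 0;
}
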
if (pos < 0)
return NULL;
- data = read_sha1_file(istate->cache[pos]->oid.hash, &type, &sz);
+ data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
if (!data || type != OBJ_BLOB) {
free(data);
return NULL;