if (postlen
? postlen < new_buf - postimage->buf
: postimage->len < new_buf - postimage->buf)
- die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d",
+ BUG("caller miscounted postlen: asked %d, orig = %d, used = %d",
(int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf));
/* Fix the length of the whole thing */
unsigned mode = patch->new_mode;
if (!patch->is_new)
- die("BUG: patch to %s is not a creation", patch->old_name);
+ BUG("patch to %s is not a creation", patch->old_name);
pos = cache_name_pos(name, strlen(name));
if (pos < 0)
if (!patch->is_delete)
new_name = patch->new_name;
- if (old_name && !verify_path(old_name))
+ if (old_name && !verify_path(old_name, patch->old_mode))
return error(_("invalid path '%s'"), old_name);
- if (new_name && !verify_path(new_name))
+ if (new_name && !verify_path(new_name, patch->new_mode))
return error(_("invalid path '%s'"), new_name);
return 0;
}
{
struct patch *patch;
struct index_state result = { NULL };
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
int res;
/* Once we start supporting the reverse patch, it may be
memcpy(header.name, path, pathlen);
if (S_ISREG(mode) && !args->convert &&
- oid_object_info(oid, &size) == OBJ_BLOB &&
+ oid_object_info(the_repository, oid, &size) == OBJ_BLOB &&
size > big_file_threshold)
buffer = NULL;
else if (S_ISLNK(mode) || S_ISREG(mode)) {
int r;
if (!ar->data)
- die("BUG: tar-filter archiver called with no filter defined");
+ BUG("tar-filter archiver called with no filter defined");
strbuf_addstr(&cmd, ar->data);
if (args->compression_level >= 0)
unsigned mode;
if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
- oid_object_info(&blob_oid, NULL) == OBJ_BLOB)
+ oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
return;
}
return 0;
if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
goto error_out;
- if (oid_object_info(&origin->blob_oid, NULL) != OBJ_BLOB)
+ if (oid_object_info(the_repository, &origin->blob_oid, NULL) != OBJ_BLOB)
goto error_out;
return 0;
error_out:
diff_setup_done(&diff_opts);
if (is_null_oid(&origin->commit->object.oid))
- do_diff_cache(&parent->tree->object.oid, &diff_opts);
+ do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
else
- diff_tree_oid(&parent->tree->object.oid,
- &origin->commit->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(origin->commit),
"", &diff_opts);
diffcore_std(&diff_opts);
diff_setup_done(&diff_opts);
if (is_null_oid(&origin->commit->object.oid))
- do_diff_cache(&parent->tree->object.oid, &diff_opts);
+ do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
else
- diff_tree_oid(&parent->tree->object.oid,
- &origin->commit->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(origin->commit),
"", &diff_opts);
diffcore_std(&diff_opts);
diff_opts.flags.find_copies_harder = 1;
if (is_null_oid(&target->commit->object.oid))
- do_diff_cache(&parent->tree->object.oid, &diff_opts);
+ do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
else
- diff_tree_oid(&parent->tree->object.oid,
- &target->commit->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(target->commit),
"", &diff_opts);
if (!diff_opts.flags.find_copies_harder)
l->item = c;
if (add_decoration(&sb->revs->children,
&c->parents->item->object, l))
- die("BUG: not unique item in first-parent chain");
+ BUG("not unique item in first-parent chain");
c = c->parents->item;
}
struct ref_array array;
int maxwidth = 0;
const char *remote_prefix = "";
- struct strbuf out = STRBUF_INIT;
char *to_free = NULL;
/*
ref_array_sort(sorting, &array);
for (i = 0; i < array.nr; i++) {
- format_ref_array_item(array.items[i], format, &out);
+ struct strbuf out = STRBUF_INIT;
+ struct strbuf err = STRBUF_INIT;
+ if (format_ref_array_item(array.items[i], format, &out, &err))
+ die("%s", err.buf);
if (column_active(colopts)) {
assert(!filter->verbose && "--column and --verbose are incompatible");
/* format to a string_list to let print_columns() do its job */
fwrite(out.buf, 1, out.len, stdout);
putchar('\n');
}
+ strbuf_release(&err);
strbuf_release(&out);
}
if (!skip_prefix(oldref.buf, "refs/heads/", &interpreted_oldname) ||
!skip_prefix(newref.buf, "refs/heads/", &interpreted_newname)) {
- die("BUG: expected prefix missing for refs");
+ BUG("expected prefix missing for refs");
}
if (copy)
switch (opt) {
case 't':
oi.type_name = &sb;
- if (oid_object_info_extended(&oid, &oi, flags) < 0)
+ if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
die("git cat-file: could not get object info");
if (sb.len) {
printf("%s\n", sb.buf);
case 's':
oi.sizep = &size;
- if (oid_object_info_extended(&oid, &oi, flags) < 0)
+ if (oid_object_info_extended(the_repository, &oid, &oi, flags) < 0)
die("git cat-file: could not get object info");
printf("%lu\n", size);
return 0;
/* else fallthrough */
case 'p':
- type = oid_object_info(&oid, NULL);
+ type = oid_object_info(the_repository, &oid, NULL);
if (type < 0)
die("Not a valid object name %s", obj_name);
case 0:
if (type_from_string(exp_type) == OBJ_BLOB) {
struct object_id blob_oid;
- if (oid_object_info(&oid, NULL) == OBJ_TAG) {
+ if (oid_object_info(the_repository, &oid, NULL) == OBJ_TAG) {
char *buffer = read_object_file(&oid, &type,
&size);
const char *target;
} else
oidcpy(&blob_oid, &oid);
- if (oid_object_info(&blob_oid, NULL) == OBJ_BLOB)
+ if (oid_object_info(the_repository, &blob_oid, NULL) == OBJ_BLOB)
return stream_blob_to_fd(1, &blob_oid, NULL, 0);
/*
* we attempted to dereference a tag to a blob
die("could not convert '%s' %s",
oid_to_hex(oid), data->rest);
} else
- die("BUG: invalid cmdmode: %c", opt->cmdmode);
+ BUG("invalid cmdmode: %c", opt->cmdmode);
batch_write(opt, contents, size);
free(contents);
} else if (stream_blob_to_fd(1, oid, NULL, 0) < 0)
struct strbuf buf = STRBUF_INIT;
if (!data->skip_object_info &&
- oid_object_info_extended(&data->oid, &data->info,
+ oid_object_info_extended(the_repository, &data->oid, &data->info,
OBJECT_INFO_LOOKUP_REPLACE) < 0) {
printf("%s missing\n",
obj_name ? obj_name : oid_to_hex(&data->oid));
(uintmax_t)strlen(obj_name), obj_name);
break;
default:
- die("BUG: unknown get_sha1_with_context result %d\n",
+ BUG("unknown get_sha1_with_context result %d\n",
result);
break;
}
} else if (remote_head_points_at) {
const char *head = remote_head_points_at->name;
if (!skip_prefix(head, "refs/heads/", &head))
- die("BUG: remote HEAD points at non-head?");
+ BUG("remote HEAD points at non-head?");
strbuf_addf(&value, "+%s:%s%s", remote_head_points_at->name,
branch_top->buf, head);
if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
- refs = transport_get_remote_refs(transport);
+ refs = transport_get_remote_refs(transport, NULL);
if (refs) {
mapped_refs = wanted_peer_refs(refs, refspec);
static void status_init_config(struct wt_status *s, config_fn_t fn)
{
wt_status_prepare(s);
+ init_diff_ui_defaults();
git_config(fn, s);
determine_whence(s);
- init_diff_ui_defaults();
s->hints = advice_status_hints; /* must come after git_config() */
}
static void assert_split_ident(struct ident_split *id, const struct strbuf *buf)
{
if (split_ident_line(id, buf->buf, buf->len) || !id->date_begin)
- die("BUG: unable to parse our own ident: %s", buf->buf);
+ BUG("unable to parse our own ident: %s", buf->buf);
}
static void export_one(const char *var, const char *s, const char *e, int hack)
static int use_global_config, use_system_config, use_local_config;
static struct git_config_source given_config_source;
-static int actions, types;
+static int actions, type;
+static char *default_value;
static int end_null;
static int respect_includes_opt = -1;
static struct config_options config_options;
#define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \
ACTION_GET_REGEXP | ACTION_GET_URLMATCH)
-#define TYPE_BOOL (1<<0)
-#define TYPE_INT (1<<1)
-#define TYPE_BOOL_OR_INT (1<<2)
-#define TYPE_PATH (1<<3)
-#define TYPE_EXPIRY_DATE (1<<4)
+#define TYPE_BOOL 1
+#define TYPE_INT 2
+#define TYPE_BOOL_OR_INT 3
+#define TYPE_PATH 4
+#define TYPE_EXPIRY_DATE 5
+#define TYPE_COLOR 6
+
+#define OPT_CALLBACK_VALUE(s, l, v, h, i) \
+ { OPTION_CALLBACK, (s), (l), (v), NULL, (h), PARSE_OPT_NOARG | \
+ PARSE_OPT_NONEG, option_parse_type, (i) }
+
+static struct option builtin_config_options[];
+
+static int option_parse_type(const struct option *opt, const char *arg,
+ int unset)
+{
+ int new_type, *to_type;
+
+ if (unset) {
+ *((int *) opt->value) = 0;
+ return 0;
+ }
+
+ /*
+ * To support '--<type>' style flags, begin with new_type equal to
+ * opt->defval.
+ */
+ new_type = opt->defval;
+ if (!new_type) {
+ if (!strcmp(arg, "bool"))
+ new_type = TYPE_BOOL;
+ else if (!strcmp(arg, "int"))
+ new_type = TYPE_INT;
+ else if (!strcmp(arg, "bool-or-int"))
+ new_type = TYPE_BOOL_OR_INT;
+ else if (!strcmp(arg, "path"))
+ new_type = TYPE_PATH;
+ else if (!strcmp(arg, "expiry-date"))
+ new_type = TYPE_EXPIRY_DATE;
+ else if (!strcmp(arg, "color"))
+ new_type = TYPE_COLOR;
+ else
+ die(_("unrecognized --type argument, %s"), arg);
+ }
+
+ to_type = opt->value;
+ if (*to_type && *to_type != new_type) {
+ /*
+ * Complain when there is a new type not equal to the old type.
+ * This allows for combinations like '--int --type=int' and
+ * '--type=int --type=int', but disallows ones like '--type=bool
+	 * --int' and '--type=bool --type=int'.
+ */
+ error("only one type at a time.");
+ usage_with_options(builtin_config_usage,
+ builtin_config_options);
+ }
+ *to_type = new_type;
+
+ return 0;
+}
static struct option builtin_config_options[] = {
OPT_GROUP(N_("Config file location")),
OPT_BIT(0, "get-color", &actions, N_("find the color configured: slot [default]"), ACTION_GET_COLOR),
OPT_BIT(0, "get-colorbool", &actions, N_("find the color setting: slot [stdout-is-tty]"), ACTION_GET_COLORBOOL),
OPT_GROUP(N_("Type")),
- OPT_BIT(0, "bool", &types, N_("value is \"true\" or \"false\""), TYPE_BOOL),
- OPT_BIT(0, "int", &types, N_("value is decimal number"), TYPE_INT),
- OPT_BIT(0, "bool-or-int", &types, N_("value is --bool or --int"), TYPE_BOOL_OR_INT),
- OPT_BIT(0, "path", &types, N_("value is a path (file or directory name)"), TYPE_PATH),
- OPT_BIT(0, "expiry-date", &types, N_("value is an expiry date"), TYPE_EXPIRY_DATE),
+ OPT_CALLBACK('t', "type", &type, "", N_("value is given this type"), option_parse_type),
+ OPT_CALLBACK_VALUE(0, "bool", &type, N_("value is \"true\" or \"false\""), TYPE_BOOL),
+ OPT_CALLBACK_VALUE(0, "int", &type, N_("value is decimal number"), TYPE_INT),
+ OPT_CALLBACK_VALUE(0, "bool-or-int", &type, N_("value is --bool or --int"), TYPE_BOOL_OR_INT),
+ OPT_CALLBACK_VALUE(0, "path", &type, N_("value is a path (file or directory name)"), TYPE_PATH),
+ OPT_CALLBACK_VALUE(0, "expiry-date", &type, N_("value is an expiry date"), TYPE_EXPIRY_DATE),
OPT_GROUP(N_("Other")),
OPT_BOOL('z', "null", &end_null, N_("terminate values with NUL byte")),
OPT_BOOL(0, "name-only", &omit_values, N_("show variable names only")),
OPT_BOOL(0, "includes", &respect_includes_opt, N_("respect include directives on lookup")),
OPT_BOOL(0, "show-origin", &show_origin, N_("show origin of config (file, standard input, blob, command line)")),
+ OPT_STRING(0, "default", &default_value, N_("value"), N_("with --get, use default value when missing entry")),
OPT_END(),
};
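
Not part of the patch: a minimal, hypothetical distillation of the rule that option_parse_type() enforces above (set_type() and the literal 1/2 values standing in for TYPE_BOOL/TYPE_INT are illustrative only). Repeating the same type is accepted, a second different type is rejected:

#include <stdio.h>

static int set_type(int *to_type, int new_type)
{
	if (*to_type && *to_type != new_type)
		return -1;	/* "only one type at a time." */
	*to_type = new_type;
	return 0;
}

int main(void)
{
	int type = 0;				/* no type given yet */
	printf("%d\n", set_type(&type, 2));	/* --int        -> 0  */
	printf("%d\n", set_type(&type, 2));	/* --type=int   -> 0  */
	printf("%d\n", set_type(&type, 1));	/* --bool       -> -1 */
	return 0;
}
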
if (show_keys)
strbuf_addch(buf, key_delim);
- if (types == TYPE_INT)
+ if (type == TYPE_INT)
strbuf_addf(buf, "%"PRId64,
git_config_int64(key_, value_ ? value_ : ""));
- else if (types == TYPE_BOOL)
+ else if (type == TYPE_BOOL)
strbuf_addstr(buf, git_config_bool(key_, value_) ?
"true" : "false");
- else if (types == TYPE_BOOL_OR_INT) {
+ else if (type == TYPE_BOOL_OR_INT) {
int is_bool, v;
v = git_config_bool_or_int(key_, value_, &is_bool);
if (is_bool)
strbuf_addstr(buf, v ? "true" : "false");
else
strbuf_addf(buf, "%d", v);
- } else if (types == TYPE_PATH) {
+ } else if (type == TYPE_PATH) {
const char *v;
if (git_config_pathname(&v, key_, value_) < 0)
return -1;
strbuf_addstr(buf, v);
free((char *)v);
- } else if (types == TYPE_EXPIRY_DATE) {
+ } else if (type == TYPE_EXPIRY_DATE) {
timestamp_t t;
if (git_config_expiry_date(&t, key_, value_) < 0)
return -1;
strbuf_addf(buf, "%"PRItime, t);
+ } else if (type == TYPE_COLOR) {
+ char v[COLOR_MAXLEN];
+ if (git_config_color(v, key_, value_) < 0)
+ return -1;
+ strbuf_addstr(buf, v);
} else if (value_) {
strbuf_addstr(buf, value_);
} else {
config_with_options(collect_config, &values,
&given_config_source, &config_options);
+ if (!values.nr && default_value) {
+ struct strbuf *item;
+ ALLOC_GROW(values.items, values.nr + 1, values.alloc);
+ item = &values.items[values.nr++];
+ strbuf_init(item, 0);
+ if (format_config(item, key_, default_value) < 0)
+ die(_("failed to format default config value: %s"),
+ default_value);
+ }
+
ret = !values.nr;
for (i = 0; i < values.nr; i++) {
if (!value)
return NULL;
- if (types == 0 || types == TYPE_PATH || types == TYPE_EXPIRY_DATE)
+ if (type == 0 || type == TYPE_PATH || type == TYPE_EXPIRY_DATE)
/*
* We don't do normalization for TYPE_PATH here: If
* the path is like ~/foobar/, we prefer to store
* Also don't do normalization for expiry dates.
*/
return xstrdup(value);
- if (types == TYPE_INT)
+ if (type == TYPE_INT)
return xstrfmt("%"PRId64, git_config_int64(key, value));
- if (types == TYPE_BOOL)
+ if (type == TYPE_BOOL)
return xstrdup(git_config_bool(key, value) ? "true" : "false");
- if (types == TYPE_BOOL_OR_INT) {
+ if (type == TYPE_BOOL_OR_INT) {
int is_bool, v;
v = git_config_bool_or_int(key, value, &is_bool);
if (!is_bool)
else
return xstrdup(v ? "true" : "false");
}
+ if (type == TYPE_COLOR) {
+ char v[COLOR_MAXLEN];
+ if (git_config_color(v, key, value))
+ die("cannot parse color '%s'", value);
+
+ /*
+ * The contents of `v` now contain an ANSI escape
+ * sequence, not suitable for including within a
+ * configuration file. Treat the above as a
+ * "sanity-check", and return the given value, which we
+ * know is representable as valid color code.
+ */
+ return xstrdup(value);
+ }
-	die("BUG: cannot normalize type %d", types);
+	BUG("cannot normalize type %d", type);
}
static int get_color_found;
key_delim = '\n';
}
- if (HAS_MULTI_BITS(types)) {
- error("only one type at a time.");
- usage_with_options(builtin_config_usage, builtin_config_options);
- }
-
- if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && types) {
+ if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && type) {
error("--get-color and variable type are incoherent");
usage_with_options(builtin_config_usage, builtin_config_options);
}
usage_with_options(builtin_config_usage, builtin_config_options);
}
+ if (default_value && !(actions & ACTION_GET)) {
+ error("--default is only applicable to --get");
+ usage_with_options(builtin_config_usage,
+ builtin_config_options);
+ }
+
if (actions & PAGING_ACTIONS)
setup_auto_pager("config", 1);
}
}
-/* Since intptr_t is C99, we do not use it here */
-static inline uint32_t *mark_to_ptr(uint32_t mark)
+static inline void *mark_to_ptr(uint32_t mark)
{
- return ((uint32_t *)NULL) + mark;
+ return (void *)(uintptr_t)mark;
}
static inline uint32_t ptr_to_mark(void * mark)
{
- return (uint32_t *)mark - (uint32_t *)NULL;
+ return (uint32_t)(uintptr_t)mark;
}
static inline void mark_object(struct object *object, uint32_t mark)
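
Not part of the patch: a standalone sanity check of the new mark_to_ptr()/ptr_to_mark() casts (the main() harness is hypothetical), assuming as the code does that a 32-bit mark survives the round trip through a pointer:

#include <assert.h>
#include <stdint.h>

static inline void *mark_to_ptr(uint32_t mark)
{
	return (void *)(uintptr_t)mark;
}

static inline uint32_t ptr_to_mark(void *mark)
{
	return (uint32_t)(uintptr_t)mark;
}

int main(void)
{
	uint32_t mark = 12345;
	/* the value round-trips without doing arithmetic on a NULL pointer */
	assert(ptr_to_mark(mark_to_ptr(mark)) == mark);
	return 0;
}
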
/* skip "committer", "author", "tagger", etc */
end_of_header = strchr(*beg, ' ');
if (!end_of_header)
- die("BUG: malformed line fed to anonymize_ident_line: %.*s",
+ BUG("malformed line fed to anonymize_ident_line: %.*s",
(int)(*end - *beg), *beg);
end_of_header++;
strbuf_add(out, *beg, end_of_header - *beg);
get_object_mark(&commit->parents->item->object) != 0 &&
!full_tree) {
parse_commit_or_die(commit->parents->item);
- diff_tree_oid(&commit->parents->item->tree->object.oid,
- &commit->tree->object.oid, "", &rev->diffopt);
+ diff_tree_oid(get_commit_tree_oid(commit->parents->item),
+ get_commit_tree_oid(commit), "", &rev->diffopt);
}
else
- diff_root_tree_oid(&commit->tree->object.oid,
+ diff_root_tree_oid(get_commit_tree_oid(commit),
"", &rev->diffopt);
/* Export the referenced blobs, and remember the marks. */
struct commit *commit;
while (commits->nr) {
commit = (struct commit *)object_array_pop(commits);
- if (has_unshown_parent(commit))
+ if (has_unshown_parent(commit)) {
+ /* Queue again, to be handled later */
+ add_object_array(&commit->object, NULL, commits);
return;
+ }
handle_commit(commit, revs, paths_of_changed_objects);
}
}
if (last_idnum < mark)
last_idnum = mark;
- type = oid_object_info(&oid, NULL);
+ type = oid_object_info(the_repository, &oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(&oid));
if (!(obj->flags & FLAG_CHECKED)) {
unsigned long size;
- int type = oid_object_info(&obj->oid, &size);
+ int type = oid_object_info(the_repository, &obj->oid, &size);
if (type <= 0)
die(_("did not receive expected object %s"),
oid_to_hex(&obj->oid));
enum object_type has_type;
unsigned long has_size;
read_lock();
- has_type = oid_object_info(oid, &has_size);
+ has_type = oid_object_info(the_repository, oid, &has_size);
if (has_type < 0)
die(_("cannot read existing object info %s"), oid_to_hex(oid));
if (has_type != type || has_size != size)
blob->object.flags |= FLAG_CHECKED;
else
die(_("invalid blob object %s"), oid_to_hex(oid));
+ if (do_fsck_object &&
+ fsck_object(&blob->object, (void *)data, size, &fsck_options))
+ die(_("fsck error in packed object"));
} else {
struct object *obj;
int eaten;
die(_("invalid %s"), type_name(type));
if (do_fsck_object &&
fsck_object(obj, buf, size, &fsck_options))
- die(_("Error in object"));
+ die(_("fsck error in packed object"));
if (strict && fsck_walk(obj, NULL, &fsck_options))
die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
if (obj->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *) obj;
if (detach_commit_buffer(commit, NULL) != data)
- die("BUG: parse_object_buffer transmogrified our buffer");
+ BUG("parse_object_buffer transmogrified our buffer");
}
obj->flags |= FLAG_CHECKED;
}
if (!compare_and_swap_type(&child->real_type, OBJ_REF_DELTA,
base->obj->real_type))
- die("BUG: child->real_type != OBJ_REF_DELTA");
+ BUG("child->real_type != OBJ_REF_DELTA");
resolve_delta(child, base, result);
if (base->ref_first == base->ref_last && base->ofs_last == -1)
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- hashclose(f, tail_hash, 0);
+ finalize_hashfile(f, tail_hash, 0);
hashcpy(read_hash, pack_hash);
fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
} else
chmod(final_index_name, 0444);
+ if (do_fsck_object)
+ add_packed_git(final_index_name, strlen(final_index_name), 0);
+
if (!from_stdin) {
printf("%s\n", sha1_to_hex(hash));
} else {
pack_hash);
else
close(input_fd);
+
+ if (do_fsck_object && fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
+
free(objects);
strbuf_release(&index_name_buf);
if (pack_name == NULL)
#include "list.h"
#include "packfile.h"
#include "object-store.h"
+#include "dir.h"
+
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
static const char *pack_usage[] = {
N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int local;
static int have_non_local_packs;
static int incremental;
-static int ignore_packed_keep;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int exclude_promisor_objects;
static unsigned long delta_cache_size = 0;
-static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(&entry->idx.oid));
- base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
+ base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+ &base_size);
if (!base_buf)
die("unable to read %s",
- oid_to_hex(&entry->delta->idx.oid));
+ oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
- if (!delta_buf || delta_size != entry->delta_size)
+ if (!delta_buf || delta_size != DELTA_SIZE(entry))
die("delta size changed");
free(buf);
free(base_buf);
struct git_istream *st = NULL;
if (!usable_delta) {
- if (entry->type == OBJ_BLOB &&
- entry->size > big_file_threshold &&
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
(st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
} else if (entry->delta_data) {
- size = entry->delta_size;
+ size = DELTA_SIZE(entry);
buf = entry->delta_data;
entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ size = DELTA_SIZE(entry);
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}
* encoding of the relative offset for the delta
* base from this object's position in the pack.
*/
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
- struct packed_git *p = entry->in_pack;
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
- enum object_type type = entry->type;
+ enum object_type type = oe_type(entry);
off_t datalen;
unsigned char header[MAX_PACK_OBJECT_HEADER],
dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
+ unsigned long entry_size = SIZE(entry);
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
hdrlen = encode_in_pack_object_header(header, sizeof(header),
- type, entry->size);
+ type, entry_size);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
datalen -= entry->in_pack_header_size;
if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
+ check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
error("corrupt packed object for %s",
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
}
if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
else
limit = pack_size_limit - write_offset;
- if (!entry->delta)
+ if (!DELTA(entry))
usable_delta = 0; /* no delta */
else if (!pack_size_limit)
usable_delta = 1; /* unlimited packfile */
- else if (entry->delta->idx.offset == (off_t)-1)
+ else if (DELTA(entry)->idx.offset == (off_t)-1)
usable_delta = 0; /* base was written to another pack */
- else if (entry->delta->idx.offset)
+ else if (DELTA(entry)->idx.offset)
usable_delta = 1; /* base already exists in this pack */
else
usable_delta = 0; /* base could end up in another pack */
if (!reuse_object)
to_reuse = 0; /* explicit */
- else if (!entry->in_pack)
+ else if (!IN_PACK(entry))
to_reuse = 0; /* can't reuse what we don't have */
- else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
+ else if (oe_type(entry) == OBJ_REF_DELTA ||
+ oe_type(entry) == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (entry->type != entry->in_pack_type)
+ else if (oe_type(entry) != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
- else if (entry->delta)
+ else if (DELTA(entry))
to_reuse = 0; /* we want to pack afresh */
else
to_reuse = 1; /* we have it in-pack undeltified,
}
/* if we are deltified, write out base object first. */
- if (e->delta) {
+ if (DELTA(e)) {
e->idx.offset = 1; /* now recurse */
- switch (write_one(f, e->delta, offset)) {
+ switch (write_one(f, DELTA(e), offset)) {
case WRITE_ONE_RECURSIVE:
/* we cannot depend on this one */
- e->delta = NULL;
+ SET_DELTA(e, NULL);
break;
default:
break;
/* add this node... */
add_to_write_order(wo, endp, e);
/* all its siblings... */
- for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
add_to_write_order(wo, endp, s);
}
}
/* drop down a level to add left subtree nodes if possible */
- if (e->delta_child) {
+ if (DELTA_CHILD(e)) {
add_to_order = 1;
- e = e->delta_child;
+ e = DELTA_CHILD(e);
} else {
add_to_order = 0;
/* our sibling might have some children, it is next */
- if (e->delta_sibling) {
- e = e->delta_sibling;
+ if (DELTA_SIBLING(e)) {
+ e = DELTA_SIBLING(e);
continue;
}
/* go back to our parent node */
- e = e->delta;
- while (e && !e->delta_sibling) {
+ e = DELTA(e);
+ while (e && !DELTA_SIBLING(e)) {
/* we're on the right side of a subtree, keep
* going up until we can go right again */
- e = e->delta;
+ e = DELTA(e);
}
if (!e) {
/* done- we hit our original root node */
return;
}
/* pass it off to sibling at this level */
- e = e->delta_sibling;
+ e = DELTA_SIBLING(e);
}
};
}
{
struct object_entry *root;
- for (root = e; root->delta; root = root->delta)
+ for (root = e; DELTA(root); root = DELTA(root))
; /* nothing */
add_descendants_to_write_order(wo, endp, root);
}
for (i = 0; i < to_pack.nr_objects; i++) {
objects[i].tagged = 0;
objects[i].filled = 0;
- objects[i].delta_child = NULL;
- objects[i].delta_sibling = NULL;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
}
/*
*/
for (i = to_pack.nr_objects; i > 0;) {
struct object_entry *e = &objects[--i];
- if (!e->delta)
+ if (!DELTA(e))
continue;
/* Mark me as the first child */
- e->delta_sibling = e->delta->delta_child;
- e->delta->delta_child = e;
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
}
/*
* And then all remaining commits and tags.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_COMMIT &&
- objects[i].type != OBJ_TAG)
+ if (oe_type(&objects[i]) != OBJ_COMMIT &&
+ oe_type(&objects[i]) != OBJ_TAG)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
* And then all the trees.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_TREE)
+ if (oe_type(&objects[i]) != OBJ_TREE)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- hashclose(f, oid.hash, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- hashclose(f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(f, oid.hash, 0);
+ int fd = finalize_hashfile(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
if (write_bitmap_index) {
bitmap_writer_set_checksum(oid.hash);
- bitmap_writer_build_type_index(written_list, nr_written);
+ bitmap_writer_build_type_index(
+ &to_pack, written_list, nr_written);
}
finish_tmp_packfile(&tmpname, pack_tmp_name,
* Otherwise, we signal "-1" at the end to tell the caller that we do
* not know either way, and it needs to check more packs.
*/
- if (!ignore_packed_keep &&
+ if (!ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs))
return 1;
if (local && !p->pack_local)
return 0;
- if (ignore_packed_keep && p->pack_local && p->pack_keep)
+ if (p->pack_local &&
+ ((ignore_packed_keep_on_disk && p->pack_keep) ||
+ (ignore_packed_keep_in_core && p->pack_keep_in_core)))
return 0;
/* we don't know yet; keep looking for more packs */
entry = packlist_alloc(&to_pack, oid->hash, index_pos);
entry->hash = hash;
- if (type)
- entry->type = type;
+ oe_set_type(entry, type);
if (exclude)
entry->preferred_base = 1;
else
nr_result++;
if (found_pack) {
- entry->in_pack = found_pack;
+ oe_set_in_pack(&to_pack, entry, found_pack);
entry->in_pack_offset = found_offset;
}
off_t found_offset = 0;
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, exclude, &index_pos))
return 0;
create_object_entry(oid, type, pack_name_hash(name),
exclude, name && no_try_delta(name),
index_pos, found_pack, found_offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
{
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, 0, &index_pos))
return 0;
return 0;
create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
static void check_object(struct object_entry *entry)
{
- if (entry->in_pack) {
- struct packed_git *p = entry->in_pack;
+ unsigned long canonical_size;
+
+ if (IN_PACK(entry)) {
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
unsigned long avail;
off_t ofs;
unsigned char *buf, c;
+ enum object_type type;
+ unsigned long in_pack_size;
buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
* since non-delta representations could still be reused.
*/
used = unpack_object_header_buffer(buf, avail,
- &entry->in_pack_type,
- &entry->size);
+ &type,
+ &in_pack_size);
if (used == 0)
goto give_up;
+ if (type < 0)
+ BUG("invalid type %d", type);
+ entry->in_pack_type = type;
+
/*
* Determine if this is a delta and if so whether we can
* reuse it or not. Otherwise let's find out as cheaply as
switch (entry->in_pack_type) {
default:
/* Not a delta hence we've already got all we need. */
- entry->type = entry->in_pack_type;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size);
entry->in_pack_header_size = used;
- if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
+ if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
goto give_up;
unuse_pack(&w_curs);
return;
* deltify other objects against, in order to avoid
* circular deltas.
*/
- entry->type = entry->in_pack_type;
- entry->delta = base_entry;
- entry->delta_size = entry->size;
- entry->delta_sibling = base_entry->delta_child;
- base_entry->delta_child = entry;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size); /* delta size */
+ SET_DELTA(entry, base_entry);
+ SET_DELTA_SIZE(entry, in_pack_size);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
unuse_pack(&w_curs);
return;
}
- if (entry->type) {
+ if (oe_type(entry)) {
+ off_t delta_pos;
+
/*
* This must be a delta and we already know what the
* final object type is. Let's extract the actual
* object size from the delta header.
*/
- entry->size = get_size_from_delta(p, &w_curs,
- entry->in_pack_offset + entry->in_pack_header_size);
- if (entry->size == 0)
+ delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+ canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+ if (canonical_size == 0)
goto give_up;
+ SET_SIZE(entry, canonical_size);
unuse_pack(&w_curs);
return;
}
unuse_pack(&w_curs);
}
- entry->type = oid_object_info(&entry->idx.oid, &entry->size);
- /*
- * The error condition is checked in prepare_pack(). This is
- * to permit a missing preferred base object to be ignored
- * as a preferred base. Doing so can result in a larger
- * pack file, but the transfer will still take place.
- */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
+ if (entry->type_valid) {
+ SET_SIZE(entry, canonical_size);
+ } else {
+ /*
+ * Bad object type is checked in prepare_pack(). This is
+ * to permit a missing preferred base object to be ignored
+ * as a preferred base. Doing so can result in a larger
+ * pack file, but the transfer will still take place.
+ */
+ }
}
static int pack_offset_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ const struct packed_git *a_in_pack = IN_PACK(a);
+ const struct packed_git *b_in_pack = IN_PACK(b);
/* avoid filesystem trashing with loose objects */
- if (!a->in_pack && !b->in_pack)
+ if (!a_in_pack && !b_in_pack)
return oidcmp(&a->idx.oid, &b->idx.oid);
- if (a->in_pack < b->in_pack)
+ if (a_in_pack < b_in_pack)
return -1;
- if (a->in_pack > b->in_pack)
+ if (a_in_pack > b_in_pack)
return 1;
return a->in_pack_offset < b->in_pack_offset ? -1 :
(a->in_pack_offset > b->in_pack_offset);
*/
static void drop_reused_delta(struct object_entry *entry)
{
- struct object_entry **p = &entry->delta->delta_child;
+ unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
struct object_info oi = OBJECT_INFO_INIT;
+ enum object_type type;
+ unsigned long size;
+
+ while (*idx) {
+ struct object_entry *oe = &to_pack.objects[*idx - 1];
- while (*p) {
- if (*p == entry)
- *p = (*p)->delta_sibling;
+ if (oe == entry)
+ *idx = oe->delta_sibling_idx;
else
- p = &(*p)->delta_sibling;
+ idx = &oe->delta_sibling_idx;
}
- entry->delta = NULL;
+ SET_DELTA(entry, NULL);
entry->depth = 0;
- oi.sizep = &entry->size;
- oi.typep = &entry->type;
- if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
+ oi.sizep = &size;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
* fall back to sha1_object_info, which may find another copy.
- * And if that fails, the error will be recorded in entry->type
+ * And if that fails, the error will be recorded in oe_type(entry)
* and dealt with in prepare_pack().
*/
- entry->type = oid_object_info(&entry->idx.oid, &entry->size);
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &size));
+ } else {
+ oe_set_type(entry, type);
}
+ SET_SIZE(entry, size);
}
/*
for (cur = entry, total_depth = 0;
cur;
- cur = cur->delta, total_depth++) {
+ cur = DELTA(cur), total_depth++) {
if (cur->dfs_state == DFS_DONE) {
/*
* We've already seen this object and know it isn't
* is a bug.
*/
if (cur->dfs_state != DFS_NONE)
- die("BUG: confusing delta dfs state in first pass: %d",
+ BUG("confusing delta dfs state in first pass: %d",
cur->dfs_state);
/*
* it's not a delta, we're done traversing, but we'll mark it
* done to save time on future traversals.
*/
- if (!cur->delta) {
+ if (!DELTA(cur)) {
cur->dfs_state = DFS_DONE;
break;
}
* We keep all commits in the chain that we examined.
*/
cur->dfs_state = DFS_ACTIVE;
- if (cur->delta->dfs_state == DFS_ACTIVE) {
+ if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
drop_reused_delta(cur);
cur->dfs_state = DFS_DONE;
break;
* an extra "next" pointer to keep going after we reset cur->delta.
*/
for (cur = entry; cur; cur = next) {
- next = cur->delta;
+ next = DELTA(cur);
/*
* We should have a chain of zero or more ACTIVE states down to
if (cur->dfs_state == DFS_DONE)
break;
else if (cur->dfs_state != DFS_ACTIVE)
- die("BUG: confusing delta dfs state in second pass: %d",
+ BUG("confusing delta dfs state in second pass: %d",
cur->dfs_state);
/*
uint32_t i;
struct object_entry **sorted_by_offset;
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold < entry->size)
+ if (entry->type_valid &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold))
entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
}
+ stop_progress(&progress_state);
/*
* This must happen in a second pass, since we rely on the delta
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ enum object_type a_type = oe_type(a);
+ enum object_type b_type = oe_type(b);
+ unsigned long a_size = SIZE(a);
+ unsigned long b_size = SIZE(b);
- if (a->type > b->type)
+ if (a_type > b_type)
return -1;
- if (a->type < b->type)
+ if (a_type < b_type)
return 1;
if (a->hash > b->hash)
return -1;
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
- if (a->size > b->size)
+ if (a_size > b_size)
return -1;
- if (a->size < b->size)
+ if (a_size < b_size)
return 1;
return a < b ? -1 : (a > b); /* newest first */
}
#endif
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ struct packed_git *p;
+ struct pack_window *w_curs;
+ unsigned char *buf;
+ enum object_type type;
+ unsigned long used, avail, size;
+
+ if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+ read_lock();
+ if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ die(_("unable to get size of %s"),
+ oid_to_hex(&e->idx.oid));
+ read_unlock();
+ return size;
+ }
+
+ p = oe_in_pack(pack, e);
+ if (!p)
+ BUG("when e->type is a delta, it must belong to a pack");
+
+ read_lock();
+ w_curs = NULL;
+ buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+ used = unpack_object_header_buffer(buf, avail, &type, &size);
+ if (used == 0)
+ die(_("unable to parse object header of %s"),
+ oid_to_hex(&e->idx.oid));
+
+ unuse_pack(&w_curs);
+ read_unlock();
+ return size;
+}
+
static int try_delta(struct unpacked *trg, struct unpacked *src,
unsigned max_depth, unsigned long *mem_usage)
{
void *delta_buf;
/* Don't bother doing diffs between different types */
- if (trg_entry->type != src_entry->type)
+ if (oe_type(trg_entry) != oe_type(src_entry))
return -1;
/*
* it, we will still save the transfer cost, as we already know
* the other side has it and we won't send src_entry at all.
*/
- if (reuse_delta && trg_entry->in_pack &&
- trg_entry->in_pack == src_entry->in_pack &&
+ if (reuse_delta && IN_PACK(trg_entry) &&
+ IN_PACK(trg_entry) == IN_PACK(src_entry) &&
!src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
return 0;
/* Now some size filtering heuristics. */
- trg_size = trg_entry->size;
- if (!trg_entry->delta) {
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
max_size = trg_size/2 - 20;
ref_depth = 1;
} else {
- max_size = trg_entry->delta_size;
+ max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
(max_depth - ref_depth + 1);
if (max_size == 0)
return 0;
- src_size = src_entry->size;
+ src_size = SIZE(src_entry);
sizediff = src_size < trg_size ? trg_size - src_size : 0;
if (sizediff >= max_size)
return 0;
delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
if (!delta_buf)
return 0;
+ if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
+ free(delta_buf);
+ return 0;
+ }
- if (trg_entry->delta) {
+ if (DELTA(trg_entry)) {
/* Prefer only shallower same-sized deltas. */
- if (delta_size == trg_entry->delta_size &&
+ if (delta_size == DELTA_SIZE(trg_entry) &&
src->depth + 1 >= trg->depth) {
free(delta_buf);
return 0;
free(trg_entry->delta_data);
cache_lock();
if (trg_entry->delta_data) {
- delta_cache_size -= trg_entry->delta_size;
+ delta_cache_size -= DELTA_SIZE(trg_entry);
trg_entry->delta_data = NULL;
}
if (delta_cacheable(src_size, trg_size, delta_size)) {
free(delta_buf);
}
- trg_entry->delta = src_entry;
- trg_entry->delta_size = delta_size;
+ SET_DELTA(trg_entry, src_entry);
+ SET_DELTA_SIZE(trg_entry, delta_size);
trg->depth = src->depth + 1;
return 1;
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
- struct object_entry *child = me->delta_child;
+ struct object_entry *child = DELTA_CHILD(me);
unsigned int m = n;
while (child) {
unsigned int c = check_delta_limit(child, n + 1);
if (m < c)
m = c;
- child = child->delta_sibling;
+ child = DELTA_SIBLING(child);
}
return m;
}
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
- freed_mem += n->entry->size;
+ freed_mem += SIZE(n->entry);
FREE_AND_NULL(n->data);
}
n->entry = NULL;
* otherwise they would become too deep.
*/
max_depth = depth;
- if (entry->delta_child) {
+ if (DELTA_CHILD(entry)) {
max_depth -= check_delta_limit(entry, 0);
if (max_depth <= 0)
goto next;
* between writes at that moment.
*/
if (entry->delta_data && !pack_to_stdout) {
- entry->z_delta_size = do_compress(&entry->delta_data,
- entry->delta_size);
- cache_lock();
- delta_cache_size -= entry->delta_size;
- delta_cache_size += entry->z_delta_size;
- cache_unlock();
+ unsigned long size;
+
+ size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
+ if (size < (1U << OE_Z_DELTA_BITS)) {
+ entry->z_delta_size = size;
+ cache_lock();
+ delta_cache_size -= DELTA_SIZE(entry);
+ delta_cache_size += entry->z_delta_size;
+ cache_unlock();
+ } else {
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ }
}
/* if we made n a delta, and if n is already at max
* depth, leaving it in the window is pointless. we
* should evict it first.
*/
- if (entry->delta && max_depth <= n->depth)
+ if (DELTA(entry) && max_depth <= n->depth)
continue;
/*
* currently deltified object, to keep it longer. It will
* be the first base object to be attempted next.
*/
- if (entry->delta) {
+ if (DELTA(entry)) {
struct unpacked swap = array[best_base];
int dist = (window + idx - best_base) % window;
int dst = best_base;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;
- if (entry->delta)
+ if (DELTA(entry))
/* This happens if we decided to reuse existing
* delta from a pack. "reuse_delta &&" is implied.
*/
continue;
- if (entry->size < 50)
+ if (!entry->type_valid ||
+ oe_size_less_than(&to_pack, entry, 50))
continue;
if (entry->no_try_delta)
if (!entry->preferred_base) {
nr_deltas++;
- if (entry->type < 0)
+ if (oe_type(entry) < 0)
die("unable to get type of object %s",
oid_to_hex(&entry->idx.oid));
} else {
- if (entry->type < 0) {
+ if (oe_type(entry) < 0) {
/*
* This object is not found, but we
* don't have to include it anyway.
die("expected object ID, got garbage:\n %s", line);
add_preferred_base_object(p + 1);
- add_object_entry(&oid, 0, p + 1, 0);
+ add_object_entry(&oid, OBJ_NONE, p + 1, 0);
}
}
struct object_id oid;
struct object *o;
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
die("cannot open pack index");
static int add_loose_object(const struct object_id *oid, const char *path,
void *data)
{
- enum object_type type = oid_object_info(oid, NULL);
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0) {
warning("loose object at %s could not be examined", path);
get_packed_git(the_repository);
while (p) {
- if ((!p->pack_local || p->pack_keep) &&
+ if ((!p->pack_local || p->pack_keep ||
+ p->pack_keep_in_core) &&
find_pack_entry_one(oid->hash, p)) {
last_found = p;
return 1;
struct object_id oid;
for (p = get_packed_git(the_repository); p; p = p->next) {
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
{
return pack_to_stdout &&
allow_ofs_delta &&
- !ignore_packed_keep &&
+ !ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs) &&
!incremental;
}
oid_array_clear(&recent_objects);
}
+static void add_extra_kept_packs(const struct string_list *names)
+{
+ struct packed_git *p;
+
+ if (!names->nr)
+ return;
+
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ const char *name = basename(p->pack_name);
+ int i;
+
+ if (!p->pack_local)
+ continue;
+
+ for (i = 0; i < names->nr; i++)
+ if (!fspathcmp(name, names->items[i].string))
+ break;
+
+ if (i < names->nr) {
+ p->pack_keep_in_core = 1;
+ ignore_packed_keep_in_core = 1;
+ continue;
+ }
+ }
+}
+
static int option_parse_index_version(const struct option *opt,
const char *arg, int unset)
{
struct argv_array rp = ARGV_ARRAY_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
N_("create thin packs")),
OPT_BOOL(0, "shallow", &shallow,
N_("create packs suitable for shallow fetches")),
- OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
+ OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
N_("ignore packs that have companion .keep file")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("ignore this pack")),
OPT_INTEGER(0, "compression", &pack_compression_level,
N_("pack compression level")),
OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
OPT_END(),
};
+ if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+ BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
check_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
if (pack_to_stdout != !base_name || argc)
usage_with_options(pack_usage, pack_objects_options);
+ if (depth >= (1 << OE_DEPTH_BITS)) {
+ warning(_("delta chain depth %d is too deep, forcing %d"),
+ depth, (1 << OE_DEPTH_BITS) - 1);
+ depth = (1 << OE_DEPTH_BITS) - 1;
+ }
+ if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+ warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+ (1U << OE_Z_DELTA_BITS) - 1);
+ cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+ }
+
argv_array_push(&rp, "pack-objects");
if (thin) {
use_internal_rev_list = 1;
fetch_if_missing = 0;
argv_array_push(&rp, "--exclude-promisor-objects");
}
+ if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+ use_internal_rev_list = 1;
if (!reuse_object)
reuse_delta = 0;
if (progress && all_progress_implied)
progress = 2;
- if (ignore_packed_keep) {
+ add_extra_kept_packs(&keep_pack_list);
+ if (ignore_packed_keep_on_disk) {
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
- ignore_packed_keep = 0;
+ ignore_packed_keep_on_disk = 0;
}
if (local) {
/*
- * unlike ignore_packed_keep above, we do not want to
- * unset "local" based on looking at packs, as it
- * also covers non-local objects
+ * unlike ignore_packed_keep_on_disk above, we do not
+ * want to unset "local" based on looking at packs, as
+ * it also covers non-local objects
*/
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next) {
}
}
+ prepare_packing_data(&to_pack);
+
if (progress)
- progress_state = start_progress(_("Counting objects"), 0);
+ progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
REBASE_FALSE = 0,
REBASE_TRUE,
REBASE_PRESERVE,
+ REBASE_MERGES,
REBASE_INTERACTIVE
};
/**
* Parses the value of --rebase. If value is a false value, returns
* REBASE_FALSE. If value is a true value, returns REBASE_TRUE. If value is
- * "preserve", returns REBASE_PRESERVE. If value is a invalid value, dies with
- * a fatal error if fatal is true, otherwise returns REBASE_INVALID.
+ * "merges", returns REBASE_MERGES. If value is "preserve", returns
+ * REBASE_PRESERVE. If value is an invalid value, dies with a fatal error if
+ * fatal is true, otherwise returns REBASE_INVALID.
*/
static enum rebase_type parse_config_rebase(const char *key, const char *value,
int fatal)
return REBASE_TRUE;
else if (!strcmp(value, "preserve"))
return REBASE_PRESERVE;
+ else if (!strcmp(value, "merges"))
+ return REBASE_MERGES;
else if (!strcmp(value, "interactive"))
return REBASE_INTERACTIVE;
/* Options passed to git-merge or git-rebase */
OPT_GROUP(N_("Options related to merging")),
{ OPTION_CALLBACK, 'r', "rebase", &opt_rebase,
- "false|true|preserve|interactive",
+ "false|true|merges|preserve|interactive",
N_("incorporate changes by rebasing rather than merging"),
PARSE_OPT_OPTARG, parse_opt_rebase },
OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
argv_array_push(&args, repo);
argv_array_pushv(&args, refspecs);
} else if (*refspecs)
- die("BUG: refspecs without repo?");
+ BUG("refspecs without repo?");
ret = run_command_v_opt(args.argv, RUN_GIT_CMD);
argv_array_clear(&args);
return ret;
argv_push_verbosity(&args);
/* Options passed to git-rebase */
- if (opt_rebase == REBASE_PRESERVE)
+ if (opt_rebase == REBASE_MERGES)
+ argv_array_push(&args, "--rebase-merges");
+ else if (opt_rebase == REBASE_PRESERVE)
argv_array_push(&args, "--preserve-merges");
else if (opt_rebase == REBASE_INTERACTIVE)
argv_array_push(&args, "--interactive");
static int command_singleton_iterator(void *cb_data, struct object_id *oid);
static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
{
- static struct lock_file shallow_lock;
+ struct lock_file shallow_lock = LOCK_INIT;
struct oid_array extra = OID_ARRAY_INIT;
struct check_connected_options opt = CHECK_CONNECTED_INIT;
uint32_t mask = 1 << (cmd->index % 32);
}
}
if (!checked_connectivity)
- die("BUG: connectivity check skipped???");
+ BUG("connectivity check skipped???");
}
static void execute_commands_non_atomic(struct command *commands,
unpack_limit = receive_unpack_limit;
switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ /*
+ * push support for protocol v2 has not been implemented yet,
+ * so ignore the request to use v2 and fallback to using v0.
+ */
+ break;
case protocol_v1:
/*
* v1 is just the original protocol with a version string,
return error("%s: is a directory - add files inside instead", path);
}
-static int process_path(const char *path)
+static int process_path(const char *path, struct stat *st, int stat_errno)
{
int pos, len;
- struct stat st;
const struct cache_entry *ce;
len = strlen(path);
* First things first: get the stat information, to decide
* what to do about the pathname!
*/
- if (lstat(path, &st) < 0)
- return process_lstat_error(path, errno);
+ if (stat_errno)
+ return process_lstat_error(path, stat_errno);
- if (S_ISDIR(st.st_mode))
- return process_directory(path, len, &st);
+ if (S_ISDIR(st->st_mode))
+ return process_directory(path, len, st);
- return add_one_path(ce, path, len, &st);
+ return add_one_path(ce, path, len, st);
}
static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
int size, len, option;
struct cache_entry *ce;
- if (!verify_path(path))
+ if (!verify_path(path, mode))
return error("Invalid path '%s'", path);
len = strlen(path);
static void update_one(const char *path)
{
- if (!verify_path(path)) {
+ int stat_errno = 0;
+ struct stat st;
+
+ if (mark_valid_only || mark_skip_worktree_only || force_remove ||
+ mark_fsmonitor_only)
+ st.st_mode = 0;
+ else if (lstat(path, &st) < 0) {
+ st.st_mode = 0;
+ stat_errno = errno;
+ } /* else stat is valid */
+
+ if (!verify_path(path, st.st_mode)) {
fprintf(stderr, "Ignoring path %s\n", path);
return;
}
report("remove '%s'", path);
return;
}
- if (process_path(path))
+ if (process_path(path, &st, stat_errno))
die("Unable to process path %s", path);
report("add '%s'", path);
}
path_name = uq.buf;
}
- if (!verify_path(path_name)) {
+ if (!verify_path(path_name, mode)) {
fprintf(stderr, "Ignoring path %s\n", path_name);
continue;
}
report(_("Untracked cache enabled for '%s'"), get_git_work_tree());
break;
default:
- die("BUG: bad untracked_cache value: %d", untracked_cache);
+ BUG("bad untracked_cache value: %d", untracked_cache);
}
if (fsmonitor > 0) {
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- hashclose(state->f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(state->f, oid.hash, 0);
+ int fd = finalize_hashfile(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
* pack, and write into it.
*/
if (!idx)
- die("BUG: should not happen");
+ BUG("should not happen");
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
break;
case COLOR_ANSI:
if (len < 2)
- die("BUG: color parsing ran out of space");
+ BUG("color parsing ran out of space");
*out++ = type;
*out++ = '0' + c->value;
break;
#undef OUT
#define OUT(x) do { \
if (dst == end) \
- die("BUG: color parsing ran out of space"); \
+ BUG("color parsing ran out of space"); \
*dst++ = (x); \
} while(0)
return GIT_COLOR_AUTO;
}
-static int check_auto_color(void)
+static int check_auto_color(int fd)
{
- if (color_stdout_is_tty < 0)
- color_stdout_is_tty = isatty(1);
- if (color_stdout_is_tty || (pager_in_use() && pager_use_color)) {
+ static int color_stderr_is_tty = -1;
+ int *is_tty_p = fd == 1 ? &color_stdout_is_tty : &color_stderr_is_tty;
+ if (*is_tty_p < 0)
+ *is_tty_p = isatty(fd);
+ if (*is_tty_p || (fd == 1 && pager_in_use() && pager_use_color)) {
if (!is_terminal_dumb())
return 1;
}
return 0;
}
-int want_color(int var)
+int want_color_fd(int fd, int var)
{
/*
* NEEDSWORK: This function is sometimes used from multiple threads, and
* is listed in .tsan-suppressions for the time being.
*/
- static int want_auto = -1;
+ static int want_auto[3] = { -1, -1, -1 };
if (var < 0)
var = git_use_color_default;
if (var == GIT_COLOR_AUTO) {
- if (want_auto < 0)
- want_auto = check_auto_color();
- return want_auto;
+ if (want_auto[fd] < 0)
+ want_auto[fd] = check_auto_color(fd);
+ return want_auto[fd];
}
return var;
}
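
Not part of the patch: the per-fd handling in want_color_fd()/check_auto_color() above boils down to remembering one isatty() answer per descriptor; a hypothetical standalone sketch (want_auto_color() is not a git function):

#include <stdio.h>
#include <unistd.h>

static int auto_color[3] = { -1, -1, -1 };	/* index 1 = stdout, 2 = stderr */

static int want_auto_color(int fd)
{
	if (auto_color[fd] < 0)
		auto_color[fd] = isatty(fd);
	return auto_color[fd];
}

int main(void)
{
	printf("stdout: %d, stderr: %d\n",
	       want_auto_color(1), want_auto_color(2));
	return 0;
}
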
#include "string-list.h"
#include "utf8.h"
#include "dir.h"
+#include "color.h"
struct config_source {
struct config_source *prev;
if (conf->u.buf.pos > 0) {
conf->u.buf.pos--;
if (conf->u.buf.buf[conf->u.buf.pos] != c)
- die("BUG: config_buf can only ungetc the same character");
+ BUG("config_buf can only ungetc the same character");
return c;
}
strbuf_realpath(&path, cf->path, 1);
slash = find_last_dir_sep(path.buf);
if (!slash)
- die("BUG: how is this possible?");
+ BUG("how is this possible?");
strbuf_splice(pat, 0, 1, path.buf, slash - path.buf);
prefix = slash - path.buf + 1 /* slash */;
} else if (!is_absolute_path(pat->buf))
}
}
-static int git_parse_source(config_fn_t fn, void *data)
+struct parse_event_data {
+ enum config_event_t previous_type;
+ size_t previous_offset;
+ const struct config_options *opts;
+};
+
+static int do_event(enum config_event_t type, struct parse_event_data *data)
+{
+ size_t offset;
+
+ if (!data->opts || !data->opts->event_fn)
+ return 0;
+
+ if (type == CONFIG_EVENT_WHITESPACE &&
+ data->previous_type == type)
+ return 0;
+
+ offset = cf->do_ftell(cf);
+ /*
+ * At EOF, the parser always "inserts" an extra '\n', therefore
+ * the end offset of the event is the current file position, otherwise
+ * we will already have advanced to the next event.
+ */
+ if (type != CONFIG_EVENT_EOF)
+ offset--;
+
+ if (data->previous_type != CONFIG_EVENT_EOF &&
+ data->opts->event_fn(data->previous_type, data->previous_offset,
+ offset, data->opts->event_fn_data) < 0)
+ return -1;
+
+ data->previous_type = type;
+ data->previous_offset = offset;
+
+ return 0;
+}
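
For illustration, a hypothetical event callback (not in the patch) that merely reports the byte range of each parsed element; it follows the contract enforced by do_event() above, where a negative return value aborts parsing:

	static int print_event(enum config_event_t type,
			       size_t begin, size_t end, void *data)
	{
		fprintf(stderr, "event %d spans bytes %lu..%lu\n",
			(int)type, (unsigned long)begin, (unsigned long)end);
		return 0;	/* non-negative: keep parsing */
	}

A caller would point opts.event_fn at such a function before handing opts to the parser.
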
+
+static int git_parse_source(config_fn_t fn, void *data,
+ const struct config_options *opts)
{
int comment = 0;
int baselen = 0;
/* U+FEFF Byte Order Mark in UTF8 */
const char *bomptr = utf8_bom;
+ /* For the parser event callback */
+ struct parse_event_data event_data = {
+ CONFIG_EVENT_EOF, 0, opts
+ };
+
for (;;) {
- int c = get_next_char();
+ int c;
+
+ c = get_next_char();
if (bomptr && *bomptr) {
/* We are at the file beginning; skip UTF8-encoded BOM
* if present. Sane editors won't put this in on their
}
}
if (c == '\n') {
- if (cf->eof)
+ if (cf->eof) {
+ if (do_event(CONFIG_EVENT_EOF, &event_data) < 0)
+ return -1;
return 0;
+ }
+ if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0)
+ return -1;
comment = 0;
continue;
}
- if (comment || isspace(c))
+ if (comment)
continue;
+ if (isspace(c)) {
+ if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0)
+ return -1;
+ continue;
+ }
if (c == '#' || c == ';') {
+ if (do_event(CONFIG_EVENT_COMMENT, &event_data) < 0)
+ return -1;
comment = 1;
continue;
}
if (c == '[') {
+ if (do_event(CONFIG_EVENT_SECTION, &event_data) < 0)
+ return -1;
+
/* Reset prior to determining a new stem */
strbuf_reset(var);
if (get_base_var(var) < 0 || var->len < 1)
}
if (!isalpha(c))
break;
+
+ if (do_event(CONFIG_EVENT_ENTRY, &event_data) < 0)
+ return -1;
+
/*
* Truncate the var name back to the section header
* stem prior to grabbing the suffix part of the name
break;
}
+ if (do_event(CONFIG_EVENT_ERROR, &event_data) < 0)
+ return -1;
+
switch (cf->origin_type) {
case CONFIG_ORIGIN_BLOB:
error_msg = xstrfmt(_("bad config line %d in blob %s"),
return 0;
}
+int git_config_color(char *dest, const char *var, const char *value)
+{
+ if (!value)
+ return config_error_nonbool(var);
+ if (color_parse(value, dest) < 0)
+ return -1;
+ return 0;
+}
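
A sketch of how a config callback might use the new helper; the "diff.dimmed" key, the callback name, and the buffer are invented for illustration, while COLOR_MAXLEN and GIT_COLOR_NORMAL come from color.h:

	static char dimmed_color[COLOR_MAXLEN] = GIT_COLOR_NORMAL;

	static int my_color_cb(const char *var, const char *value, void *cb)
	{
		if (!strcmp(var, "diff.dimmed"))
			return git_config_color(dimmed_color, var, value);
		return 0;
	}
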
+
static int git_default_core_config(const char *var, const char *value)
{
/* This needs a better name */
return 0;
}
+ if (!strcmp(var, "core.checkroundtripencoding")) {
+ check_roundtrip_encoding = xstrdup(value);
+ return 0;
+ }
+
if (!strcmp(var, "core.notesref")) {
notes_ref_name = xstrdup(value);
return 0;
return 0;
}
+ if (!strcmp(var, "core.commitgraph")) {
+ core_commit_graph = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
if (starts_with(var, "mailmap."))
return git_default_mailmap_config(var, value);
- if (starts_with(var, "advice."))
+ if (starts_with(var, "advice.") || starts_with(var, "color.advice"))
return git_default_advice_config(var, value);
if (!strcmp(var, "pager.color") || !strcmp(var, "color.pager")) {
* fgetc, ungetc, ftell of top need to be initialized before calling
* this function.
*/
-static int do_config_from(struct config_source *top, config_fn_t fn, void *data)
+static int do_config_from(struct config_source *top, config_fn_t fn, void *data,
+ const struct config_options *opts)
{
int ret;
strbuf_init(&top->var, 1024);
cf = top;
- ret = git_parse_source(fn, data);
+ ret = git_parse_source(fn, data, opts);
/* pop config-file parsing state stack */
strbuf_release(&top->value);
static int do_config_from_file(config_fn_t fn,
const enum config_origin_type origin_type,
const char *name, const char *path, FILE *f,
- void *data)
+ void *data, const struct config_options *opts)
{
struct config_source top;
int ret;
top.do_ftell = config_file_ftell;
flockfile(f);
- ret = do_config_from(&top, fn, data);
+ ret = do_config_from(&top, fn, data, opts);
funlockfile(f);
return ret;
}
static int git_config_from_stdin(config_fn_t fn, void *data)
{
- return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin, data);
+ return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin,
+ data, NULL);
}
-int git_config_from_file(config_fn_t fn, const char *filename, void *data)
+int git_config_from_file_with_options(config_fn_t fn, const char *filename,
+ void *data,
+ const struct config_options *opts)
{
int ret = -1;
FILE *f;
f = fopen_or_warn(filename, "r");
if (f) {
- ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename, filename, f, data);
+ ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename,
+ filename, f, data, opts);
fclose(f);
}
return ret;
}
+int git_config_from_file(config_fn_t fn, const char *filename, void *data)
+{
+ return git_config_from_file_with_options(fn, filename, data, NULL);
+}
+
int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_type,
const char *name, const char *buf, size_t len, void *data)
{
top.do_ungetc = config_buf_ungetc;
top.do_ftell = config_buf_ftell;
- return do_config_from(&top, fn, data);
+ return do_config_from(&top, fn, data, NULL);
}
int git_config_from_blob_oid(config_fn_t fn,
l_item->value_index = e->value_list.nr - 1;
if (!cf)
- die("BUG: configset_add_value has no source");
+ BUG("configset_add_value has no source");
if (cf->name) {
kv_info->filename = strintern(cf->name);
kv_info->linenr = cf->linenr;
* Find all the stuff for git_config_set() below.
*/
-static struct {
+struct config_store_data {
int baselen;
char *key;
int do_not_match;
regex_t *value_regex;
int multi_replace;
- size_t *offset;
- unsigned int offset_alloc;
- enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
- unsigned int seen;
-} store;
+ struct {
+ size_t begin, end;
+ enum config_event_t type;
+ int is_keys_section;
+ } *parsed;
+ unsigned int parsed_nr, parsed_alloc, *seen, seen_nr, seen_alloc;
+ unsigned int key_seen:1, section_seen:1, is_keys_section:1;
+};
-static int matches(const char *key, const char *value)
+static int matches(const char *key, const char *value,
+ const struct config_store_data *store)
{
- if (strcmp(key, store.key))
+ if (strcmp(key, store->key))
return 0; /* not ours */
- if (!store.value_regex)
+ if (!store->value_regex)
return 1; /* always matches */
- if (store.value_regex == CONFIG_REGEX_NONE)
+ if (store->value_regex == CONFIG_REGEX_NONE)
return 0; /* never matches */
- return store.do_not_match ^
- (value && !regexec(store.value_regex, value, 0, NULL, 0));
+ return store->do_not_match ^
+ (value && !regexec(store->value_regex, value, 0, NULL, 0));
+}
+
+static int store_aux_event(enum config_event_t type,
+ size_t begin, size_t end, void *data)
+{
+ struct config_store_data *store = data;
+
+ ALLOC_GROW(store->parsed, store->parsed_nr + 1, store->parsed_alloc);
+ store->parsed[store->parsed_nr].begin = begin;
+ store->parsed[store->parsed_nr].end = end;
+ store->parsed[store->parsed_nr].type = type;
+
+ if (type == CONFIG_EVENT_SECTION) {
+ if (cf->var.len < 2 || cf->var.buf[cf->var.len - 1] != '.')
+ BUG("Invalid section name '%s'", cf->var.buf);
+
+ /* Is this the section we were looking for? */
+ store->is_keys_section =
+ store->parsed[store->parsed_nr].is_keys_section =
+ cf->var.len - 1 == store->baselen &&
+ !strncasecmp(cf->var.buf, store->key, store->baselen);
+ if (store->is_keys_section) {
+ store->section_seen = 1;
+ ALLOC_GROW(store->seen, store->seen_nr + 1,
+ store->seen_alloc);
+ store->seen[store->seen_nr] = store->parsed_nr;
+ }
+ }
+
+ store->parsed_nr++;
+
+ return 0;
}
static int store_aux(const char *key, const char *value, void *cb)
{
- const char *ep;
- size_t section_len;
+ struct config_store_data *store = cb;
- switch (store.state) {
- case KEY_SEEN:
- if (matches(key, value)) {
- if (store.seen == 1 && store.multi_replace == 0) {
+ if (store->key_seen) {
+ if (matches(key, value, store)) {
+ if (store->seen_nr == 1 && store->multi_replace == 0) {
warning(_("%s has multiple values"), key);
}
- ALLOC_GROW(store.offset, store.seen + 1,
- store.offset_alloc);
+ ALLOC_GROW(store->seen, store->seen_nr + 1,
+ store->seen_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- store.seen++;
+ store->seen[store->seen_nr] = store->parsed_nr;
+ store->seen_nr++;
}
- break;
- case SECTION_SEEN:
+ } else if (store->is_keys_section) {
/*
- * What we are looking for is in store.key (both
- * section and var), and its section part is baselen
- * long. We found key (again, both section and var).
- * We would want to know if this key is in the same
- * section as what we are looking for. We already
- * know we are in the same section as what should
- * hold store.key.
+ * Do not increment matches yet: this may not be a match, but we
+ * are in the desired section.
*/
- ep = strrchr(key, '.');
- section_len = ep - key;
+ ALLOC_GROW(store->seen, store->seen_nr + 1, store->seen_alloc);
+ store->seen[store->seen_nr] = store->parsed_nr;
+ store->section_seen = 1;
- if ((section_len != store.baselen) ||
- memcmp(key, store.key, section_len+1)) {
- store.state = SECTION_END_SEEN;
- break;
- }
-
- /*
- * Do not increment matches: this is no match, but we
- * just made sure we are in the desired section.
- */
- ALLOC_GROW(store.offset, store.seen + 1,
- store.offset_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- /* fallthru */
- case SECTION_END_SEEN:
- case START:
- if (matches(key, value)) {
- ALLOC_GROW(store.offset, store.seen + 1,
- store.offset_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- store.state = KEY_SEEN;
- store.seen++;
- } else {
- if (strrchr(key, '.') - key == store.baselen &&
- !strncmp(key, store.key, store.baselen)) {
- store.state = SECTION_SEEN;
- ALLOC_GROW(store.offset,
- store.seen + 1,
- store.offset_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- }
+ if (matches(key, value, store)) {
+ store->seen_nr++;
+ store->key_seen = 1;
}
}
+
return 0;
}
return 4;
}
-static struct strbuf store_create_section(const char *key)
+static struct strbuf store_create_section(const char *key,
+ const struct config_store_data *store)
{
const char *dot;
int i;
struct strbuf sb = STRBUF_INIT;
- dot = memchr(key, '.', store.baselen);
+ dot = memchr(key, '.', store->baselen);
if (dot) {
strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
- for (i = dot - key + 1; i < store.baselen; i++) {
+ for (i = dot - key + 1; i < store->baselen; i++) {
if (key[i] == '"' || key[i] == '\\')
strbuf_addch(&sb, '\\');
strbuf_addch(&sb, key[i]);
}
strbuf_addstr(&sb, "\"]\n");
} else {
- strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
+ strbuf_addf(&sb, "[%.*s]\n", store->baselen, key);
}
return sb;
}
-static ssize_t write_section(int fd, const char *key)
+static ssize_t write_section(int fd, const char *key,
+ const struct config_store_data *store)
{
- struct strbuf sb = store_create_section(key);
+ struct strbuf sb = store_create_section(key, store);
ssize_t ret;
ret = write_in_full(fd, sb.buf, sb.len);
return ret;
}
-static ssize_t write_pair(int fd, const char *key, const char *value)
+static ssize_t write_pair(int fd, const char *key, const char *value,
+ const struct config_store_data *store)
{
int i;
ssize_t ret;
- int length = strlen(key + store.baselen + 1);
+ int length = strlen(key + store->baselen + 1);
const char *quote = "";
struct strbuf sb = STRBUF_INIT;
quote = "\"";
strbuf_addf(&sb, "\t%.*s = %s",
- length, key + store.baselen + 1, quote);
+ length, key + store->baselen + 1, quote);
for (i = 0; value[i]; i++)
switch (value[i]) {
return ret;
}
-static ssize_t find_beginning_of_line(const char *contents, size_t size,
- size_t offset_, int *found_bracket)
+/*
+ * If we are about to unset the last key(s) in a section, and if there are
+ * no comments surrounding (or included in) the section, we will want to
+ * extend begin/end to remove the entire section.
+ *
+ * Note: the parameter `seen_ptr` points to the index into the store.seen
+ * array. This index may be incremented if a section has more than one
+ * entry (which all are to be removed).
+ */
+static void maybe_remove_section(struct config_store_data *store,
+ const char *contents,
+ size_t *begin_offset, size_t *end_offset,
+ int *seen_ptr)
{
- size_t equal_offset = size, bracket_offset = size;
- ssize_t offset;
+ size_t begin;
+ int i, seen, section_seen = 0;
+
+ /*
+ * First, ensure that this is the first key, and that there are no
+ * comments before the entry nor before the section header.
+ */
+ seen = *seen_ptr;
+ for (i = store->seen[seen]; i > 0; i--) {
+ enum config_event_t type = store->parsed[i - 1].type;
+
+ if (type == CONFIG_EVENT_COMMENT)
+ /* There is a comment before this entry or section */
+ return;
+ if (type == CONFIG_EVENT_ENTRY) {
+ if (!section_seen)
+ /* This is not the section's first entry. */
+ return;
+ /* We encountered no comment before the section. */
+ break;
+ }
+ if (type == CONFIG_EVENT_SECTION) {
+ if (!store->parsed[i - 1].is_keys_section)
+ break;
+ section_seen = 1;
+ }
+ }
+ begin = store->parsed[i].begin;
+
+ /*
+ * Next, make sure that we are removing the last key(s) in the section,
+ * and that there are no comments that are possibly about the current
+ * section.
+ */
+ for (i = store->seen[seen] + 1; i < store->parsed_nr; i++) {
+ enum config_event_t type = store->parsed[i].type;
-contline:
- for (offset = offset_-2; offset > 0
- && contents[offset] != '\n'; offset--)
- switch (contents[offset]) {
- case '=': equal_offset = offset; break;
- case ']': bracket_offset = offset; break;
+ if (type == CONFIG_EVENT_COMMENT)
+ return;
+ if (type == CONFIG_EVENT_SECTION) {
+ if (store->parsed[i].is_keys_section)
+ continue;
+ break;
+ }
+ if (type == CONFIG_EVENT_ENTRY) {
+ if (++seen < store->seen_nr &&
+ i == store->seen[seen])
+ /* We want to remove this entry, too */
+ continue;
+ /* There is another entry in this section. */
+ return;
}
- if (offset > 0 && contents[offset-1] == '\\') {
- offset_ = offset;
- goto contline;
}
- if (bracket_offset < equal_offset) {
- *found_bracket = 1;
- offset = bracket_offset+1;
- } else
- offset++;
- return offset;
+ /*
+ * We are really removing the last entry/entries from this section, and
+ * there are no enclosed or surrounding comments. Remove the entire,
+ * now-empty section.
+ */
+ *seen_ptr = seen;
+ *begin_offset = begin;
+ if (i < store->parsed_nr)
+ *end_offset = store->parsed[i].begin;
+ else
+ *end_offset = store->parsed[store->parsed_nr - 1].end;
}
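
For example (hypothetical file contents), unsetting the only key of

	[foo]
		bar = baz

now removes the "[foo]" header along with the entry, whereas any comment before, inside, or after the section makes the function return early and leave the header in place.
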
int git_config_set_in_file_gently(const char *config_filename,
char *filename_buf = NULL;
char *contents = NULL;
size_t contents_sz;
+ struct config_store_data store;
+
+ memset(&store, 0, sizeof(store));
/* parse-key returns negative; flip the sign to feed exit(3) */
ret = 0 - git_config_parse_key(key, &store.key, &store.baselen);
}
store.key = (char *)key;
- if (write_section(fd, key) < 0 ||
- write_pair(fd, key, value) < 0)
+ if (write_section(fd, key, &store) < 0 ||
+ write_pair(fd, key, value, &store) < 0)
goto write_err_out;
} else {
struct stat st;
size_t copy_begin, copy_end;
int i, new_line = 0;
+ struct config_options opts;
if (value_regex == NULL)
store.value_regex = NULL;
}
}
- ALLOC_GROW(store.offset, 1, store.offset_alloc);
- store.offset[0] = 0;
- store.state = START;
- store.seen = 0;
+ ALLOC_GROW(store.parsed, 1, store.parsed_alloc);
+ store.parsed[0].end = 0;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.event_fn = store_aux_event;
+ opts.event_fn_data = &store;
/*
- * After this, store.offset will contain the *end* offset
- * of the last match, or remain at 0 if no match was found.
+ * After this, store.parsed will contain offsets of all the
+ * parsed elements, and store.seen will contain a list of
+ * matches, as indices into store.parsed.
+ *
* As a side effect, we make sure to transform only a valid
* existing config file.
*/
- if (git_config_from_file(store_aux, config_filename, NULL)) {
+ if (git_config_from_file_with_options(store_aux,
+ config_filename,
+ &store, &opts)) {
error("invalid config file %s", config_filename);
free(store.key);
if (store.value_regex != NULL &&
}
/* if nothing to unset, or too many matches, error out */
- if ((store.seen == 0 && value == NULL) ||
- (store.seen > 1 && multi_replace == 0)) {
+ if ((store.seen_nr == 0 && value == NULL) ||
+ (store.seen_nr > 1 && multi_replace == 0)) {
ret = CONFIG_NOTHING_SET;
goto out_free;
}
goto out_free;
}
- if (store.seen == 0)
- store.seen = 1;
+ if (store.seen_nr == 0) {
+ if (!store.seen_alloc) {
+ /* Did not see key nor section */
+ ALLOC_GROW(store.seen, 1, store.seen_alloc);
+ store.seen[0] = store.parsed_nr
+ - !!store.parsed_nr;
+ }
+ store.seen_nr = 1;
+ }
- for (i = 0, copy_begin = 0; i < store.seen; i++) {
- if (store.offset[i] == 0) {
- store.offset[i] = copy_end = contents_sz;
- } else if (store.state != KEY_SEEN) {
- copy_end = store.offset[i];
- } else
- copy_end = find_beginning_of_line(
- contents, contents_sz,
- store.offset[i]-2, &new_line);
+ for (i = 0, copy_begin = 0; i < store.seen_nr; i++) {
+ size_t replace_end;
+ int j = store.seen[i];
+
+ new_line = 0;
+ if (!store.key_seen) {
+ copy_end = store.parsed[j].end;
+ /* include '\n' when copying section header */
+ if (copy_end > 0 && copy_end < contents_sz &&
+ contents[copy_end - 1] != '\n' &&
+ contents[copy_end] == '\n')
+ copy_end++;
+ replace_end = copy_end;
+ } else {
+ replace_end = store.parsed[j].end;
+ copy_end = store.parsed[j].begin;
+ if (!value)
+ maybe_remove_section(&store, contents,
+ &copy_end,
+ &replace_end, &i);
+ /*
+ * Swallow preceding white-space on the same
+ * line.
+ */
+ while (copy_end > 0) {
+ char c = contents[copy_end - 1];
+
+ if (isspace(c) && c != '\n')
+ copy_end--;
+ else
+ break;
+ }
+ }
if (copy_end > 0 && contents[copy_end-1] != '\n')
new_line = 1;
write_str_in_full(fd, "\n") < 0)
goto write_err_out;
}
- copy_begin = store.offset[i];
+ copy_begin = replace_end;
}
/* write the pair (value == NULL means unset) */
if (value != NULL) {
- if (store.state == START) {
- if (write_section(fd, key) < 0)
+ if (!store.section_seen) {
+ if (write_section(fd, key, &store) < 0)
goto write_err_out;
}
- if (write_pair(fd, key, value) < 0)
+ if (write_pair(fd, key, value, &store) < 0)
goto write_err_out;
}
/* if new_name == NULL, the section is removed instead */
static int git_config_copy_or_rename_section_in_file(const char *config_filename,
- const char *old_name, const char *new_name, int copy)
+ const char *old_name,
+ const char *new_name, int copy)
{
int ret = 0, remove = 0;
char *filename_buf = NULL;
FILE *config_file = NULL;
struct stat st;
struct strbuf copystr = STRBUF_INIT;
+ struct config_store_data store;
+
+ memset(&store, 0, sizeof(store));
if (new_name && !section_name_is_ok(new_name)) {
ret = error("invalid section name: %s", new_name);
}
store.baselen = strlen(new_name);
if (!copy) {
- if (write_section(out_fd, new_name) < 0) {
+ if (write_section(out_fd, new_name, &store) < 0) {
ret = write_error(get_lock_file_path(&lock));
goto out;
}
output[0] = '\t';
}
} else {
- copystr = store_create_section(new_name);
+ copystr = store_create_section(new_name, &store);
}
}
remove = 0;
else if(cf)
type = cf->origin_type;
else
- die("BUG: current_config_origin_type called outside config callback");
+ BUG("current_config_origin_type called outside config callback");
switch (type) {
case CONFIG_ORIGIN_BLOB:
case CONFIG_ORIGIN_CMDLINE:
return "command line";
default:
- die("BUG: unknown config origin type");
+ BUG("unknown config origin type");
}
}
else if (cf)
name = cf->name;
else
- die("BUG: current_config_name called outside config callback");
+ BUG("current_config_name called outside config callback");
return name ? name : "";
}
return 0;
}
-static int git_config_rename(const char *var, const char *value)
+int git_config_rename(const char *var, const char *value)
{
if (!value)
return DIFF_DETECT_RENAME;
fputs(o->stat_sep, o->file);
break;
default:
- die("BUG: unknown diff symbol");
+ BUG("unknown diff symbol");
}
strbuf_release(&sb);
}
for (i = 0; i < ARRAY_SIZE(diff_temp); i++)
if (!diff_temp[i].name)
return diff_temp + i;
- die("BUG: diff is failing to clean up its tempfiles");
+ BUG("diff is failing to clean up its tempfiles");
}
static void remove_tempfile(void)
else {
enum object_type type;
if (size_only || (flags & CHECK_BINARY)) {
- type = oid_object_info(&s->oid, &s->size);
+ type = oid_object_info(the_repository, &s->oid,
+ &s->size);
if (type < 0)
die("unable to read %s",
oid_to_hex(&s->oid));
if (abbrev < 0)
abbrev = FALLBACK_DEFAULT_ABBREV;
if (abbrev > GIT_SHA1_HEXSZ)
- die("BUG: oid abbreviation out of range: %d", abbrev);
+ BUG("oid abbreviation out of range: %d", abbrev);
if (abbrev)
hex[abbrev] = '\0';
return hex;
int argcount = 1;
if (!skip_prefix(arg, "--stat", &arg))
- die("BUG: stat option does not begin with --stat: %s", arg);
+ BUG("stat option does not begin with --stat: %s", arg);
end = (char *)arg;
switch (*arg) {
struct diff_queue_struct *q = &diff_queued_diff;
if (WSEH_NEW & WS_RULE_MASK)
- die("BUG: WS rules bit mask overlaps with diff symbol flags");
+ BUG("WS rules bit mask overlaps with diff symbol flags");
if (o->color_moved)
o->emitted_symbols = &esm;
}
if (!driver->textconv)
- die("BUG: fill_textconv called with non-textconv driver");
+ BUG("fill_textconv called with non-textconv driver");
if (driver->textconv_cache && df->oid_valid) {
*outbuf = notes_cache_get(driver->textconv_cache,
#include <openssl/err.h>
#endif
+#ifdef HAVE_SYSINFO
+# include <sys/sysinfo.h>
+#endif
+
/* On most systems <netdb.h> would have given us this, but
* not on some systems (e.g. z/OS).
*/
extern void set_die_is_recursing_routine(int (*routine)(void));
extern int starts_with(const char *str, const char *prefix);
+extern int istarts_with(const char *str, const char *prefix);
/*
* If the string "str" begins with the string found in "prefix", return 1.
return (x & 0x20) == 0;
}
+/*
+ * Like skip_prefix, but compare case-insensitively. Note that the comparison
+ * is done via tolower(), so it is strictly ASCII (no multi-byte characters or
+ * locale-specific conversions).
+ */
+static inline int skip_iprefix(const char *str, const char *prefix,
+ const char **out)
+{
+ do {
+ if (!*prefix) {
+ *out = str;
+ return 1;
+ }
+ } while (tolower(*str++) == tolower(*prefix++));
+ return 0;
+}
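
A usage sketch with illustrative values; the prefix match is ASCII case-insensitive and *out ends up pointing just past the matched prefix:

	const char *rest;

	if (skip_iprefix("Content-Type: text/plain", "content-type:", &rest))
		printf("%s\n", rest);	/* prints " text/plain" */
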
+
static inline int strtoul_ui(char const *s, int base, unsigned int *result)
{
unsigned long ul;
#define QSORT_S(base, n, compar, ctx) do { \
if (qsort_s((base), (n), sizeof(*(base)), compar, ctx)) \
- die("BUG: qsort_s() failed"); \
+ BUG("qsort_s() failed"); \
} while (0)
#ifndef REG_STARTEND
#define HAVE_VARIADIC_MACROS 1
#endif
+ /* usage.c: only to be used for testing BUG() implementation (see test-tool) */
+ extern int BUG_exit_code;
+
#ifdef HAVE_VARIADIC_MACROS
__attribute__((format (printf, 3, 4))) NORETURN
void BUG_fl(const char *file, int line, const char *fmt, ...);
*var = val;
}
-static void protocol_http_header(void)
-{
- if (get_protocol_version_config() > 0) {
- struct strbuf protocol_header = STRBUF_INIT;
-
- strbuf_addf(&protocol_header, GIT_PROTOCOL_HEADER ": version=%d",
- get_protocol_version_config());
-
-
- extra_http_headers = curl_slist_append(extra_http_headers,
- protocol_header.buf);
- strbuf_release(&protocol_header);
- }
-}
-
void http_init(struct remote *remote, const char *url, int proactive_auth)
{
char *low_speed_limit;
if (remote)
var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
- protocol_http_header();
-
pragma_header = curl_slist_append(http_copy_default_headers(),
"Pragma: no-cache");
no_pragma_header = curl_slist_append(http_copy_default_headers(),
headers = curl_slist_append(headers, buf.buf);
+ /* Add additional headers here */
+ if (options && options->extra_headers) {
+ const struct string_list_item *item;
+ for_each_string_list_item(item, options->extra_headers) {
+ headers = curl_slist_append(headers, item->string);
+ }
+ }
+
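	/*
	 * Caller-side sketch (not part of the patch): the extra_headers
	 * field matches the hook above; the header value shown is purely
	 * illustrative.
	 *
	 *	struct string_list hdrs = STRING_LIST_INIT_DUP;
	 *
	 *	string_list_append(&hdrs, "Git-Protocol: version=2");
	 *	options->extra_headers = &hdrs;
	 */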
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "gzip");
return 0;
if (!skip_prefix(asked, base->buf, &tail))
- die("BUG: update_url_from_redirect: %s is not a superset of %s",
+ BUG("update_url_from_redirect: %s is not a superset of %s",
asked, base->buf);
new_len = got->len;
strbuf_reset(result);
break;
default:
- die("BUG: HTTP_KEEP_ERROR is only supported with strbufs");
+ BUG("HTTP_KEEP_ERROR is only supported with strbufs");
}
}
*lst = (*lst)->next;
if (!strip_suffix(preq->tmpfile, ".pack.temp", &len))
- die("BUG: pack tmpfile does not end in .pack.temp?");
+ BUG("pack tmpfile does not end in .pack.temp?");
tmp_idx = xstrfmt("%.*s.idx.temp", (int)len, preq->tmpfile);
argv_array_push(&ip.args, "index-pack");
CURLcode c = curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CODE,
&slot->http_code);
if (c != CURLE_OK)
- die("BUG: curl_easy_getinfo for HTTP code failed: %s",
+ BUG("curl_easy_getinfo for HTTP code failed: %s",
curl_easy_strerror(c));
if (slot->http_code >= 300)
return size;
return ignore_case ? strihash(path) : strhash(path);
}
+static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
+ char *dir)
+{
+ struct dir_rename_entry key;
+
+ if (dir == NULL)
+ return NULL;
+ hashmap_entry_init(&key, strhash(dir));
+ key.dir = dir;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int dir_rename_cmp(const void *unused_cmp_data,
+ const void *entry,
+ const void *entry_or_key,
+ const void *unused_keydata)
+{
+ const struct dir_rename_entry *e1 = entry;
+ const struct dir_rename_entry *e2 = entry_or_key;
+
+ return strcmp(e1->dir, e2->dir);
+}
+
+static void dir_rename_init(struct hashmap *map)
+{
+ hashmap_init(map, dir_rename_cmp, NULL, 0);
+}
+
+static void dir_rename_entry_init(struct dir_rename_entry *entry,
+ char *directory)
+{
+ hashmap_entry_init(entry, strhash(directory));
+ entry->dir = directory;
+ entry->non_unique_new_dir = 0;
+ strbuf_init(&entry->new_dir, 0);
+ string_list_init(&entry->possible_new_dirs, 0);
+}
+
+static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
+ char *target_file)
+{
+ struct collision_entry key;
+
+ hashmap_entry_init(&key, strhash(target_file));
+ key.target_file = target_file;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int collision_cmp(void *unused_cmp_data,
+ const struct collision_entry *e1,
+ const struct collision_entry *e2,
+ const void *unused_keydata)
+{
+ return strcmp(e1->target_file, e2->target_file);
+}
+
+static void collision_init(struct hashmap *map)
+{
+ hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+}
+
static void flush_output(struct merge_options *o)
{
if (o->buffer_output < 2 && o->obuf.len) {
struct commit *commit = alloc_commit_node();
set_merge_remote_desc(commit, comment, (struct object *)commit);
- commit->tree = tree;
+ commit->maybe_tree = tree;
commit->object.parsed = 1;
return commit;
}
enum rename_type {
RENAME_NORMAL = 0,
+ RENAME_DIR,
RENAME_DELETE,
RENAME_ONE_FILE_TO_ONE,
RENAME_ONE_FILE_TO_TWO,
ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0);
if (!ce)
- return err(o, _("addinfo_cache failed for path '%s'"), path);
+ return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
ret = add_cache_entry(ce, options);
if (refresh) {
nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
if (!nce)
- return err(o, _("addinfo_cache failed for path '%s'"), path);
+ return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
if (nce != ce)
ret = add_cache_entry(nce, options);
}
init_tree_desc(desc, tree->buffer, tree->size);
}
-static int git_merge_trees(int index_only,
+static int git_merge_trees(struct merge_options *o,
struct tree *common,
struct tree *head,
struct tree *merge)
{
int rc;
struct tree_desc t[3];
- struct unpack_trees_options opts;
+ struct index_state tmp_index = { NULL };
- memset(&opts, 0, sizeof(opts));
- if (index_only)
- opts.index_only = 1;
+ memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
+ if (o->call_depth)
+ o->unpack_opts.index_only = 1;
else
- opts.update = 1;
- opts.merge = 1;
- opts.head_idx = 2;
- opts.fn = threeway_merge;
- opts.src_index = &the_index;
- opts.dst_index = &the_index;
- setup_unpack_trees_porcelain(&opts, "merge");
+ o->unpack_opts.update = 1;
+ o->unpack_opts.merge = 1;
+ o->unpack_opts.head_idx = 2;
+ o->unpack_opts.fn = threeway_merge;
+ o->unpack_opts.src_index = &the_index;
+ o->unpack_opts.dst_index = &tmp_index;
+ o->unpack_opts.aggressive = !merge_detect_rename(o);
+ setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
init_tree_desc_from_tree(t+0, common);
init_tree_desc_from_tree(t+1, head);
init_tree_desc_from_tree(t+2, merge);
- rc = unpack_trees(3, t, &opts);
+ rc = unpack_trees(3, t, &o->unpack_opts);
cache_tree_free(&active_cache_tree);
+
+ /*
+ * Update the_index to match the new results, AFTER saving a copy
+ * in o->orig_index. Update src_index to point to the saved copy.
+ * (verify_uptodate() checks src_index, and the original index is
+ * the one that had the necessary modification timestamps.)
+ */
+ o->orig_index = the_index;
+ the_index = tmp_index;
+ o->unpack_opts.src_index = &o->orig_index;
+
return rc;
}
fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
(int)ce_namelen(ce), ce->name);
}
- die("BUG: unmerged index entries in merge-recursive.c");
+ BUG("unmerged index entries in merge-recursive.c");
}
if (!active_cache_tree)
read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
}
+static int get_tree_entry_if_blob(const struct object_id *tree,
+ const char *path,
+ struct object_id *hashy,
+ unsigned int *mode_o)
+{
+ int ret;
+
+ ret = get_tree_entry(tree, path, hashy, mode_o);
+ if (S_ISDIR(*mode_o)) {
+ oidcpy(hashy, &null_oid);
+ *mode_o = 0;
+ }
+ return ret;
+}
+
/*
* Returns an index_entry instance which doesn't have to correspond to
* a real cache entry in Git's index.
{
struct string_list_item *item;
struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
- get_tree_entry(&o->object.oid, path,
- &e->stages[1].oid, &e->stages[1].mode);
- get_tree_entry(&a->object.oid, path,
- &e->stages[2].oid, &e->stages[2].mode);
- get_tree_entry(&b->object.oid, path,
- &e->stages[3].oid, &e->stages[3].mode);
+ get_tree_entry_if_blob(&o->object.oid, path,
+ &e->stages[1].oid, &e->stages[1].mode);
+ get_tree_entry_if_blob(&a->object.oid, path,
+ &e->stages[2].oid, &e->stages[2].mode);
+ get_tree_entry_if_blob(&b->object.oid, path,
+ &e->stages[3].oid, &e->stages[3].mode);
item = string_list_insert(entries, path);
item->util = e;
return e;
*/
struct stage_data *src_entry;
struct stage_data *dst_entry;
+ unsigned add_turned_into_rename:1;
unsigned processed:1;
};
-/*
- * Get information of all renames which occurred between 'o_tree' and
- * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and
- * 'b_tree') to be able to associate the correct cache entries with
- * the rename information. 'tree' is always equal to either a_tree or b_tree.
- */
-static struct string_list *get_renames(struct merge_options *o,
- struct tree *tree,
- struct tree *o_tree,
- struct tree *a_tree,
- struct tree *b_tree,
- struct string_list *entries)
-{
- int i;
- struct string_list *renames;
- struct diff_options opts;
-
- renames = xcalloc(1, sizeof(struct string_list));
- if (!o->detect_rename)
- return renames;
-
- diff_setup(&opts);
- opts.flags.recursive = 1;
- opts.flags.rename_empty = 0;
- opts.detect_rename = DIFF_DETECT_RENAME;
- opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
- o->diff_rename_limit >= 0 ? o->diff_rename_limit :
- 1000;
- opts.rename_score = o->rename_score;
- opts.show_rename_progress = o->show_rename_progress;
- opts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_setup_done(&opts);
- diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
- diffcore_std(&opts);
- if (opts.needed_rename_limit > o->needed_rename_limit)
- o->needed_rename_limit = opts.needed_rename_limit;
- for (i = 0; i < diff_queued_diff.nr; ++i) {
- struct string_list_item *item;
- struct rename *re;
- struct diff_filepair *pair = diff_queued_diff.queue[i];
- if (pair->status != 'R') {
- diff_free_filepair(pair);
- continue;
- }
- re = xmalloc(sizeof(*re));
- re->processed = 0;
- re->pair = pair;
- item = string_list_lookup(entries, re->pair->one->path);
- if (!item)
- re->src_entry = insert_stage_data(re->pair->one->path,
- o_tree, a_tree, b_tree, entries);
- else
- re->src_entry = item->util;
-
- item = string_list_lookup(entries, re->pair->two->path);
- if (!item)
- re->dst_entry = insert_stage_data(re->pair->two->path,
- o_tree, a_tree, b_tree, entries);
- else
- re->dst_entry = item->util;
- item = string_list_insert(renames, pair->one->path);
- item->util = re;
- }
- opts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_queued_diff.nr = 0;
- diff_flush(&opts);
- return renames;
-}
-
static int update_stages(struct merge_options *opt, const char *path,
const struct diff_filespec *o,
const struct diff_filespec *a,
return 0;
}
+static int update_stages_for_stage_data(struct merge_options *opt,
+ const char *path,
+ const struct stage_data *stage_data)
+{
+ struct diff_filespec o, a, b;
+
+ o.mode = stage_data->stages[1].mode;
+ oidcpy(&o.oid, &stage_data->stages[1].oid);
+
+ a.mode = stage_data->stages[2].mode;
+ oidcpy(&a.oid, &stage_data->stages[2].oid);
+
+ b.mode = stage_data->stages[3].mode;
+ oidcpy(&b.oid, &stage_data->stages[3].oid);
+
+ return update_stages(opt, path,
+ is_null_oid(&o.oid) ? NULL : &o,
+ is_null_oid(&a.oid) ? NULL : &a,
+ is_null_oid(&b.oid) ? NULL : &b);
+}
+
static void update_entry(struct stage_data *entry,
struct diff_filespec *o,
struct diff_filespec *a,
!(empty_ok && is_empty_dir(path));
}
-static int was_tracked(const char *path)
+/*
+ * Returns whether path was tracked in the index before the merge started,
+ * and its oid and mode match the specified values
+ */
+static int was_tracked_and_matches(struct merge_options *o, const char *path,
+ const struct object_id *oid, unsigned mode)
{
- int pos = cache_name_pos(path, strlen(path));
+ int pos = index_name_pos(&o->orig_index, path, strlen(path));
+ struct cache_entry *ce;
+
+ if (0 > pos)
+ /* we were not tracking this path before the merge */
+ return 0;
+
+ /* See if the file we were tracking before matches */
+ ce = o->orig_index.cache[pos];
+ return (oid_eq(&ce->oid, oid) && ce->ce_mode == mode);
+}
+
+/*
+ * Returns whether path was tracked in the index before the merge started
+ */
+static int was_tracked(struct merge_options *o, const char *path)
+{
+ int pos = index_name_pos(&o->orig_index, path, strlen(path));
if (0 <= pos)
- /* we have been tracking this path */
+ /* we were tracking this path before the merge */
return 1;
- /*
- * Look for an unmerged entry for the path,
- * specifically stage #2, which would indicate
- * that "our" side before the merge started
- * had the path tracked (and resulted in a conflict).
- */
- for (pos = -1 - pos;
- pos < active_nr && !strcmp(path, active_cache[pos]->name);
- pos++)
- if (ce_stage(active_cache[pos]) == 2)
- return 1;
return 0;
}
static int would_lose_untracked(const char *path)
{
- return !was_tracked(path) && file_exists(path);
+ /*
+ * This may look like it can be simplified to:
+ * return !was_tracked(o, path) && file_exists(path)
+ * but it can't. This function needs to know whether path was in
+ * the working tree due to EITHER having been tracked in the index
+ * before the merge OR having been put into the working copy and
+ * index by unpack_trees(). Due to that either-or requirement, we
+ * check the current index instead of the original one.
+ *
+ * Note that we do not need to worry about merge-recursive itself
+ * updating the index after unpack_trees() and before calling this
+ * function, because we strictly require all code paths in
+ * merge-recursive to update the working tree first and the index
+ * second. Doing otherwise would break
+ * update_file()/would_lose_untracked(); see every comment in this
+ * file which mentions "update_stages".
+ */
+ int pos = cache_name_pos(path, strlen(path));
+
+ if (pos < 0)
+ pos = -1 - pos;
+ while (pos < active_nr &&
+ !strcmp(path, active_cache[pos]->name)) {
+ /*
+ * If stage #0, it is definitely tracked.
+ * If it has stage #2 then it was tracked
+ * before this merge started. All other
+ * cases the path was not tracked.
+ */
+ switch (ce_stage(active_cache[pos])) {
+ case 0:
+ case 2:
+ return 0;
+ }
+ pos++;
+ }
+ return file_exists(path);
+}
+
+static int was_dirty(struct merge_options *o, const char *path)
+{
+ struct cache_entry *ce;
+ int dirty = 1;
+
+ if (o->call_depth || !was_tracked(o, path))
+ return !dirty;
+
+ ce = index_file_exists(o->unpack_opts.src_index,
+ path, strlen(path), ignore_case);
+ dirty = verify_uptodate(ce, &o->unpack_opts) != 0;
+ return dirty;
}
static int make_room_for_path(struct merge_options *o, const char *path)
}
update_index:
if (!ret && update_cache)
- add_cacheinfo(o, mode, oid, path, 0, update_wd, ADD_CACHE_OK_TO_ADD);
+ if (add_cacheinfo(o, mode, oid, path, 0, update_wd,
+ ADD_CACHE_OK_TO_ADD))
+ return -1;
return ret;
}
}
static int merge_file_1(struct merge_options *o,
- const struct diff_filespec *one,
- const struct diff_filespec *a,
- const struct diff_filespec *b,
- const char *branch1,
- const char *branch2,
- struct merge_file_info *result)
+ const struct diff_filespec *one,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b,
+ const char *filename,
+ const char *branch1,
+ const char *branch2,
+ struct merge_file_info *result)
{
result->merge = 0;
result->clean = 1;
break;
}
} else
- die("BUG: unsupported object type in the tree");
+ BUG("unsupported object type in the tree");
}
+ if (result->merge)
+ output(o, 2, _("Auto-merging %s"), filename);
+
return 0;
}
static int merge_file_special_markers(struct merge_options *o,
- const struct diff_filespec *one,
- const struct diff_filespec *a,
- const struct diff_filespec *b,
- const char *branch1,
- const char *filename1,
- const char *branch2,
- const char *filename2,
- struct merge_file_info *mfi)
+ const struct diff_filespec *one,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b,
+ const char *target_filename,
+ const char *branch1,
+ const char *filename1,
+ const char *branch2,
+ const char *filename2,
+ struct merge_file_info *mfi)
{
char *side1 = NULL;
char *side2 = NULL;
if (filename2)
side2 = xstrfmt("%s:%s", branch2, filename2);
- ret = merge_file_1(o, one, a, b,
+ ret = merge_file_1(o, one, a, b, target_filename,
side1 ? side1 : branch1,
side2 ? side2 : branch2, mfi);
+
free(side1);
free(side2);
return ret;
}
static int merge_file_one(struct merge_options *o,
- const char *path,
- const struct object_id *o_oid, int o_mode,
- const struct object_id *a_oid, int a_mode,
- const struct object_id *b_oid, int b_mode,
- const char *branch1,
- const char *branch2,
- struct merge_file_info *mfi)
+ const char *path,
+ const struct object_id *o_oid, int o_mode,
+ const struct object_id *a_oid, int a_mode,
+ const struct object_id *b_oid, int b_mode,
+ const char *branch1,
+ const char *branch2,
+ struct merge_file_info *mfi)
{
struct diff_filespec one, a, b;
a.mode = a_mode;
oidcpy(&b.oid, b_oid);
b.mode = b_mode;
- return merge_file_1(o, &one, &a, &b, branch1, branch2, mfi);
+ return merge_file_1(o, &one, &a, &b, path, branch1, branch2, mfi);
+}
+
+static int conflict_rename_dir(struct merge_options *o,
+ struct diff_filepair *pair,
+ const char *rename_branch,
+ const char *other_branch)
+{
+ const struct diff_filespec *dest = pair->two;
+
+ if (!o->call_depth && would_lose_untracked(dest->path)) {
+ char *alt_path = unique_path(o, dest->path, rename_branch);
+
+ output(o, 1, _("Error: Refusing to lose untracked file at %s; "
+ "writing to %s instead."),
+ dest->path, alt_path);
+ /*
+ * Write the file in worktree at alt_path, but not in the
+ * index. Instead, write to dest->path for the index but
+ * only at the higher appropriate stage.
+ */
+ if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+ return -1;
+ free(alt_path);
+ return update_stages(o, dest->path, NULL,
+ rename_branch == o->branch1 ? dest : NULL,
+ rename_branch == o->branch1 ? NULL : dest);
+ }
+
+ /* Update dest->path both in index and in worktree */
+ if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
+ return -1;
+ return 0;
}
static int handle_change_delete(struct merge_options *o,
const char *update_path = path;
int ret = 0;
- if (dir_in_way(path, !o->call_depth, 0)) {
+ if (dir_in_way(path, !o->call_depth, 0) ||
+ (!o->call_depth && would_lose_untracked(path))) {
update_path = alt_path = unique_path(o, path, change_branch);
}
add = filespec_from_entry(&other, dst_entry, stage ^ 1);
if (add) {
+ int ren_src_was_dirty = was_dirty(o, rename->path);
char *add_name = unique_path(o, rename->path, other_branch);
if (update_file(o, 0, &add->oid, add->mode, add_name))
return -1;
- remove_file(o, 0, rename->path, 0);
+ if (ren_src_was_dirty) {
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ rename->path);
+ }
+ /*
+ * Because the double negatives somehow keep confusing me...
+ * 1) update_wd iff !ren_src_was_dirty.
+ * 2) no_wd iff !update_wd
+ * 3) so, no_wd == !!ren_src_was_dirty == ren_src_was_dirty
+ */
+ remove_file(o, 0, rename->path, ren_src_was_dirty);
dst_name = unique_path(o, rename->path, cur_branch);
} else {
if (dir_in_way(rename->path, !o->call_depth, 0)) {
dst_name = unique_path(o, rename->path, cur_branch);
output(o, 1, _("%s is a directory in %s adding as %s instead"),
rename->path, other_branch, dst_name);
+ } else if (!o->call_depth &&
+ would_lose_untracked(rename->path)) {
+ dst_name = unique_path(o, rename->path, cur_branch);
+ output(o, 1, _("Refusing to lose untracked file at %s; "
+ "adding as %s instead"),
+ rename->path, dst_name);
}
}
if ((ret = update_file(o, 0, &rename->oid, rename->mode, dst_name)))
struct diff_filespec *c1 = ci->pair1->two;
struct diff_filespec *c2 = ci->pair2->two;
char *path = c1->path; /* == c2->path */
+ char *path_side_1_desc;
+ char *path_side_2_desc;
struct merge_file_info mfi_c1;
struct merge_file_info mfi_c2;
int ret;
remove_file(o, 1, a->path, o->call_depth || would_lose_untracked(a->path));
remove_file(o, 1, b->path, o->call_depth || would_lose_untracked(b->path));
+ path_side_1_desc = xstrfmt("%s (was %s)", path, a->path);
+ path_side_2_desc = xstrfmt("%s (was %s)", path, b->path);
if (merge_file_special_markers(o, a, c1, &ci->ren1_other,
+ path_side_1_desc,
o->branch1, c1->path,
o->branch2, ci->ren1_other.path, &mfi_c1) ||
merge_file_special_markers(o, b, &ci->ren2_other, c2,
+ path_side_2_desc,
o->branch1, ci->ren2_other.path,
o->branch2, c2->path, &mfi_c2))
return -1;
+ free(path_side_1_desc);
+ free(path_side_2_desc);
if (o->call_depth) {
/*
char *new_path2 = unique_path(o, path, ci->branch2);
output(o, 1, _("Renaming %s to %s and %s to %s instead"),
a->path, new_path1, b->path, new_path2);
- remove_file(o, 0, path, 0);
+ if (was_dirty(o, path))
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ path);
+ else if (would_lose_untracked(path))
+ /*
+ * Only way we get here is if both renames were from
+ * a directory rename AND user had an untracked file
+ * at the location where both files end up after the
+ * two directory renames. See testcase 10d of t6043.
+ */
+ output(o, 1, _("Refusing to lose untracked file at "
+ "%s, even though it's in the way."),
+ path);
+ else
+ remove_file(o, 0, path, 0);
ret = update_file(o, 0, &mfi_c1.oid, mfi_c1.mode, new_path1);
if (!ret)
ret = update_file(o, 0, &mfi_c2.oid, mfi_c2.mode,
new_path2);
+ /*
+ * unpack_trees() actually populates the index for us for
+ * "normal" rename/rename(2to1) situtations so that the
+ * correct entries are at the higher stages, which would
+ * make the call below to update_stages_for_stage_data
+ * unnecessary. However, if either of the renames came
+ * from a directory rename, then unpack_trees() will not
+ * have gotten the right data loaded into the index, so we
+ * need to do so now. (While it'd be tempting to move this
+ * call to update_stages_for_stage_data() to
+ * apply_directory_rename_modifications(), that would break
+ * our intermediate calls to would_lose_untracked() since
+ * those rely on the current in-memory index. See also the
+ * big "NOTE" in update_stages()).
+ */
+ if (update_stages_for_stage_data(o, path, ci->dst_entry1))
+ ret = -1;
+
free(new_path2);
free(new_path1);
}
return ret;
}
+/*
+ * Get the diff_filepairs changed between o_tree and tree.
+ */
+static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+ struct tree *o_tree,
+ struct tree *tree)
+{
+ struct diff_queue_struct *ret;
+ struct diff_options opts;
+
+ diff_setup(&opts);
+ opts.flags.recursive = 1;
+ opts.flags.rename_empty = 0;
+ opts.detect_rename = merge_detect_rename(o);
+ /*
+ * We do not have logic to handle the detection of copies. In
+ * fact, it may not even make sense to add such logic: would we
+ * really want a change to a base file to be propagated through
+ * multiple other files by a merge?
+ */
+ if (opts.detect_rename > DIFF_DETECT_RENAME)
+ opts.detect_rename = DIFF_DETECT_RENAME;
+ opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
+ o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+ 1000;
+ opts.rename_score = o->rename_score;
+ opts.show_rename_progress = o->show_rename_progress;
+ opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_setup_done(&opts);
+ diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
+ diffcore_std(&opts);
+ if (opts.needed_rename_limit > o->needed_rename_limit)
+ o->needed_rename_limit = opts.needed_rename_limit;
+
+ ret = xmalloc(sizeof(*ret));
+ *ret = diff_queued_diff;
+
+ opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_queued_diff.nr = 0;
+ diff_queued_diff.queue = NULL;
+ diff_flush(&opts);
+ return ret;
+}
+
+static int tree_has_path(struct tree *tree, const char *path)
+{
+ struct object_id hashy;
+ unsigned int mode_o;
+
+ return !get_tree_entry(&tree->object.oid, path,
+ &hashy, &mode_o);
+}
+
+/*
+ * Return a new string that replaces the beginning portion (which matches
+ * entry->dir), with entry->new_dir. In perl-speak:
+ * new_path_name = (old_path =~ s/entry->dir/entry->new_dir/);
+ * NOTE:
+ * Caller must ensure that old_path starts with entry->dir + '/'.
+ */
+static char *apply_dir_rename(struct dir_rename_entry *entry,
+ const char *old_path)
+{
+ struct strbuf new_path = STRBUF_INIT;
+ int oldlen, newlen;
+
+ if (entry->non_unique_new_dir)
+ return NULL;
+
+ oldlen = strlen(entry->dir);
+ newlen = entry->new_dir.len + (strlen(old_path) - oldlen) + 1;
+ strbuf_grow(&new_path, newlen);
+ strbuf_addbuf(&new_path, &entry->new_dir);
+ strbuf_addstr(&new_path, &old_path[oldlen]);
+
+ return strbuf_detach(&new_path, NULL);
+}
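
To illustrate with made-up values: with entry->dir = "a/b/c/d" and entry->new_dir = "a/b/some/thing/else",

	apply_dir_rename(entry, "a/b/c/d/e/foo.c")

returns the heap-allocated string "a/b/some/thing/else/e/foo.c", which the caller must free(); if non_unique_new_dir is set, NULL comes back instead.
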
+
+static void get_renamed_dir_portion(const char *old_path, const char *new_path,
+ char **old_dir, char **new_dir)
+{
+ char *end_of_old, *end_of_new;
+ int old_len, new_len;
+
+ *old_dir = NULL;
+ *new_dir = NULL;
+
+ /*
+ * For
+ * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+ * the "e/foo.c" part is the same, we just want to know that
+ * "a/b/c/d" was renamed to "a/b/some/thing/else"
+ * so, for this example, this function returns "a/b/c/d" in
+ * *old_dir and "a/b/some/thing/else" in *new_dir.
+ *
+ * Also, if the basename of the file changed, we don't care. We
+ * want to know which portion of the directory, if any, changed.
+ */
+ end_of_old = strrchr(old_path, '/');
+ end_of_new = strrchr(new_path, '/');
+
+ if (end_of_old == NULL || end_of_new == NULL)
+ return;
+ while (*--end_of_new == *--end_of_old &&
+ end_of_old != old_path &&
+ end_of_new != new_path)
+ ; /* Do nothing; all in the while loop */
+ /*
+ * We've found the first non-matching character in the directory
+ * paths. That means the current directory we were comparing
+ * represents the rename. Move end_of_old and end_of_new back
+ * to the full directory name.
+ */
+ if (*end_of_old == '/')
+ end_of_old++;
+ if (*end_of_old != '/')
+ end_of_new++;
+ end_of_old = strchr(end_of_old, '/');
+ end_of_new = strchr(end_of_new, '/');
+
+ /*
+ * It may have been the case that old_path and new_path were the same
+ * directory all along. Don't claim a rename if they're the same.
+ */
+ old_len = end_of_old - old_path;
+ new_len = end_of_new - new_path;
+
+ if (old_len != new_len || strncmp(old_path, new_path, old_len)) {
+ *old_dir = xstrndup(old_path, old_len);
+ *new_dir = xstrndup(new_path, new_len);
+ }
+}
+
+static void remove_hashmap_entries(struct hashmap *dir_renames,
+ struct string_list *items_to_remove)
+{
+ int i;
+ struct dir_rename_entry *entry;
+
+ for (i = 0; i < items_to_remove->nr; i++) {
+ entry = items_to_remove->items[i].util;
+ hashmap_remove(dir_renames, entry, NULL);
+ }
+ string_list_clear(items_to_remove, 0);
+}
+
+/*
+ * See if there is a directory rename for path, and if there are any file
+ * level conflicts for the renamed location. If there is a rename and
+ * there are no conflicts, return the new name. Otherwise, return NULL.
+ */
+static char *handle_path_level_conflicts(struct merge_options *o,
+ const char *path,
+ struct dir_rename_entry *entry,
+ struct hashmap *collisions,
+ struct tree *tree)
+{
+ char *new_path = NULL;
+ struct collision_entry *collision_ent;
+ int clean = 1;
+ struct strbuf collision_paths = STRBUF_INIT;
+
+ /*
+ * entry has the mapping of old directory name to new directory name
+ * that we want to apply to path.
+ */
+ new_path = apply_dir_rename(entry, path);
+
+ if (!new_path) {
+ /* This should only happen when entry->non_unique_new_dir is set */
+ if (!entry->non_unique_new_dir)
+ BUG("entry->non_unique_new_dir not set and !new_path");
+ output(o, 1, _("CONFLICT (directory rename split): "
+ "Unclear where to place %s because directory "
+ "%s was renamed to multiple other directories, "
+ "with no destination getting a majority of the "
+ "files."),
+ path, entry->dir);
+ clean = 0;
+ return NULL;
+ }
+
+ /*
+ * The caller needs to have ensured that it has pre-populated
+ * collisions with all paths that map to new_path. Do a quick check
+ * to ensure that's the case.
+ */
+ collision_ent = collision_find_entry(collisions, new_path);
+ if (collision_ent == NULL)
+ BUG("collision_ent is NULL");
+
+ /*
+ * Check for one-sided add/add/.../add conflicts, i.e.
+ * where implicit renames from the other side doing
+ * directory rename(s) can affect this side of history
+ * to put multiple paths into the same location. Warn
+ * and bail on directory renames for such paths.
+ */
+ if (collision_ent->reported_already) {
+ clean = 0;
+ } else if (tree_has_path(tree, new_path)) {
+ collision_ent->reported_already = 1;
+ strbuf_add_separated_string_list(&collision_paths, ", ",
+ &collision_ent->source_files);
+ output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+ "file/dir at %s in the way of implicit "
+ "directory rename(s) putting the following "
+ "path(s) there: %s."),
+ new_path, collision_paths.buf);
+ clean = 0;
+ } else if (collision_ent->source_files.nr > 1) {
+ collision_ent->reported_already = 1;
+ strbuf_add_separated_string_list(&collision_paths, ", ",
+ &collision_ent->source_files);
+ output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+ "more than one path to %s; implicit directory "
+ "renames tried to put these paths there: %s"),
+ new_path, collision_paths.buf);
+ clean = 0;
+ }
+
+ /* Free memory we no longer need */
+ strbuf_release(&collision_paths);
+ if (!clean && new_path) {
+ free(new_path);
+ return NULL;
+ }
+
+ return new_path;
+}
+
+/*
+ * There are a couple things we want to do at the directory level:
+ * 1. Check for both sides renaming to the same thing, in order to avoid
+ * implicit renaming of files that should be left in place. (See
+ * testcase 6b in t6043 for details.)
+ * 2. Prune directory renames if there are still files left in the
+ * original directory. These represent a partial directory rename,
+ * i.e. a rename where only some of the files within the directory
+ * were renamed elsewhere. (Technically, this could be done earlier
+ * in get_directory_renames(), except that would prevent us from
+ * doing the previous check and thus failing testcase 6b.)
+ * 3. Check for rename/rename(1to2) conflicts (at the directory level).
+ * In the future, we could potentially record this info as well and
+ * omit reporting rename/rename(1to2) conflicts for each path within
+ * the affected directories, thus cleaning up the merge output.
+ * NOTE: We do NOT check for rename/rename(2to1) conflicts at the
+ * directory level, because merging directories is fine. If it
+ * causes conflicts for files within those merged directories, then
+ * that should be detected at the individual path level.
+ */
+static void handle_directory_level_conflicts(struct merge_options *o,
+ struct hashmap *dir_re_head,
+ struct tree *head,
+ struct hashmap *dir_re_merge,
+ struct tree *merge)
+{
+ struct hashmap_iter iter;
+ struct dir_rename_entry *head_ent;
+ struct dir_rename_entry *merge_ent;
+
+ struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
+ struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
+
+ hashmap_iter_init(dir_re_head, &iter);
+ while ((head_ent = hashmap_iter_next(&iter))) {
+ merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
+ if (merge_ent &&
+ !head_ent->non_unique_new_dir &&
+ !merge_ent->non_unique_new_dir &&
+ !strbuf_cmp(&head_ent->new_dir, &merge_ent->new_dir)) {
+ /* 1. Renamed identically; remove it from both sides */
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ strbuf_release(&merge_ent->new_dir);
+ } else if (tree_has_path(head, head_ent->dir)) {
+ /* 2. This wasn't a directory rename after all */
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ }
+ }
+
+ remove_hashmap_entries(dir_re_head, &remove_from_head);
+ remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+
+ hashmap_iter_init(dir_re_merge, &iter);
+ while ((merge_ent = hashmap_iter_next(&iter))) {
+ head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
+ if (tree_has_path(merge, merge_ent->dir)) {
+ /* 2. This wasn't a directory rename after all */
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ } else if (head_ent &&
+ !head_ent->non_unique_new_dir &&
+ !merge_ent->non_unique_new_dir) {
+ /* 3. rename/rename(1to2) */
+ /*
+ * We can assume it's not rename/rename(1to1) because
+ * that was case (1), already checked above. So we
+ * know that head_ent->new_dir and merge_ent->new_dir
+ * are different strings.
+ */
+ output(o, 1, _("CONFLICT (rename/rename): "
+ "Rename directory %s->%s in %s. "
+ "Rename directory %s->%s in %s"),
+ head_ent->dir, head_ent->new_dir.buf, o->branch1,
+ head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ strbuf_release(&merge_ent->new_dir);
+ }
+ }
+
+ remove_hashmap_entries(dir_re_head, &remove_from_head);
+ remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+}
+
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
+ struct tree *tree)
+{
+ struct hashmap *dir_renames;
+ struct hashmap_iter iter;
+ struct dir_rename_entry *entry;
+ int i;
+
+ /*
+ * Typically, we think of a directory rename as all files from a
+ * certain directory being moved to a target directory. However,
+ * what if someone first moved two files from the original
+ * directory in one commit, and then renamed the directory
+ * somewhere else in a later commit? At merge time, we just know
+ * that files from the original directory went to two different
+ * places, and that the bulk of them ended up in the same place.
+ * We want each directory rename to represent where the bulk of the
+ * files from that directory end up; this function exists to find
+ * where the bulk of the files went.
+ *
+ * The first loop below simply iterates through the list of file
+ * renames, finding out how often each directory rename pair
+ * possibility occurs.
+ */
+ dir_renames = xmalloc(sizeof(*dir_renames));
+ dir_rename_init(dir_renames);
+ for (i = 0; i < pairs->nr; ++i) {
+ struct string_list_item *item;
+ int *count;
+ struct diff_filepair *pair = pairs->queue[i];
+ char *old_dir, *new_dir;
+
+ /* File not part of directory rename if it wasn't renamed */
+ if (pair->status != 'R')
+ continue;
+
+ get_renamed_dir_portion(pair->one->path, pair->two->path,
+ &old_dir, &new_dir);
+ if (!old_dir)
+ /* Directory didn't change at all; ignore this one. */
+ continue;
+
+ entry = dir_rename_find_entry(dir_renames, old_dir);
+ if (!entry) {
+ entry = xmalloc(sizeof(*entry));
+ dir_rename_entry_init(entry, old_dir);
+ hashmap_put(dir_renames, entry);
+ } else {
+ free(old_dir);
+ }
+ item = string_list_lookup(&entry->possible_new_dirs, new_dir);
+ if (!item) {
+ item = string_list_insert(&entry->possible_new_dirs,
+ new_dir);
+ item->util = xcalloc(1, sizeof(int));
+ } else {
+ free(new_dir);
+ }
+ count = item->util;
+ *count += 1;
+ }
+
+ /*
+ * For each directory with files moved out of it, we find out which
+ * target directory received the most files so we can declare it to
+ * be the "winning" target location for the directory rename. This
+ * winner gets recorded in new_dir. If there is no winner
+ * (multiple target directories received the same number of files),
+ * we set non_unique_new_dir. Once we've determined the winner (or
+ * that there is no winner), we no longer need possible_new_dirs.
+ */
+ hashmap_iter_init(dir_renames, &iter);
+ while ((entry = hashmap_iter_next(&iter))) {
+ int max = 0;
+ int bad_max = 0;
+ char *best = NULL;
+
+ for (i = 0; i < entry->possible_new_dirs.nr; i++) {
+ int *count = entry->possible_new_dirs.items[i].util;
+
+ if (*count == max)
+ bad_max = max;
+ else if (*count > max) {
+ max = *count;
+ best = entry->possible_new_dirs.items[i].string;
+ }
+ }
+ if (bad_max == max)
+ entry->non_unique_new_dir = 1;
+ else {
+ assert(entry->new_dir.len == 0);
+ strbuf_addstr(&entry->new_dir, best);
+ }
+ /*
+ * The relevant directory sub-portions of the original full
+ * filepaths were xstrndup'ed before inserting into
+ * possible_new_dirs, and instead of manually iterating the
+ * list and free'ing each, just lie and tell
+ * possible_new_dirs that it did the strdup'ing so that it
+ * will free them for us.
+ */
+ entry->possible_new_dirs.strdup_strings = 1;
+ string_list_clear(&entry->possible_new_dirs, 1);
+ }
+
+ return dir_renames;
+}
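
The two comment blocks above describe a simple majority vote: each source directory is mapped to whichever target directory received the most renamed files, and a tie leaves the rename ambiguous (non_unique_new_dir). The following is not part of the patch; it is only a minimal standalone sketch of that counting rule in plain C, with illustrative names and data:

#include <stdio.h>

/* One candidate target directory for a renamed source directory,
 * plus the number of files that moved there. */
struct candidate {
	const char *new_dir;
	int count;
};

/* Return the unique winner, or NULL when the maximum is tied
 * (the analogue of setting non_unique_new_dir). */
static const char *pick_new_dir(const struct candidate *c, int n)
{
	int i, max = 0, bad_max = 0;
	const char *best = NULL;

	for (i = 0; i < n; i++) {
		if (c[i].count == max)
			bad_max = max;
		else if (c[i].count > max) {
			max = c[i].count;
			best = c[i].new_dir;
		}
	}
	return (bad_max == max) ? NULL : best;
}

int main(void)
{
	struct candidate c[] = { { "new/dir", 3 }, { "other/dir", 1 } };
	const char *winner = pick_new_dir(c, 2);

	printf("%s\n", winner ? winner : "(ambiguous)");
	return 0;
}
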
+
+static struct dir_rename_entry *check_dir_renamed(const char *path,
+ struct hashmap *dir_renames)
+{
+ char temp[PATH_MAX];
+ char *end;
+ struct dir_rename_entry *entry;
+
+ strcpy(temp, path);
+ while ((end = strrchr(temp, '/'))) {
+ *end = '\0';
+ entry = dir_rename_find_entry(dir_renames, temp);
+ if (entry)
+ return entry;
+ }
+ return NULL;
+}
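
check_dir_renamed() above probes successively shorter leading directories of a path until one of them is found in the rename map. Again not part of the patch, only a tiny self-contained illustration of that walk (the sample path is arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char temp[] = "a/b/c/file.c";
	char *end;

	/* Strip one trailing path component per iteration;
	 * prints "a/b/c", then "a/b", then "a". */
	while ((end = strrchr(temp, '/'))) {
		*end = '\0';
		printf("candidate directory: %s\n", temp);
	}
	return 0;
}
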
+
+static void compute_collisions(struct hashmap *collisions,
+ struct hashmap *dir_renames,
+ struct diff_queue_struct *pairs)
+{
+ int i;
+
+ /*
+ * Multiple files can be mapped to the same path due to directory
+ * renames done by the other side of history. Since that other
+ * side of history could have merged multiple directories into one,
+ * if our side of history added the same file basename to each of
+ * those directories, then all N of them would get implicitly
+ * renamed by the directory rename detection into the same path,
+ * and we'd get an add/add/.../add conflict, and all those adds
+ * from *this* side of history. This is not representable in the
+ * index, and users aren't going to easily be able to make sense of
+ * it. So we need to provide a good warning about what's
+ * happening, and fall back to no-directory-rename detection
+ * behavior for those paths.
+ *
+ * See testcases 9e and all of section 5 from t6043 for examples.
+ */
+ collision_init(collisions);
+
+ for (i = 0; i < pairs->nr; ++i) {
+ struct dir_rename_entry *dir_rename_ent;
+ struct collision_entry *collision_ent;
+ char *new_path;
+ struct diff_filepair *pair = pairs->queue[i];
+
+ if (pair->status != 'A' && pair->status != 'R')
+ continue;
+ dir_rename_ent = check_dir_renamed(pair->two->path,
+ dir_renames);
+ if (!dir_rename_ent)
+ continue;
+
+ new_path = apply_dir_rename(dir_rename_ent, pair->two->path);
+ if (!new_path)
+ /*
+ * dir_rename_ent->non_unique_new_dir is true, which
+ * means there is no directory rename for us to use,
+ * which means it won't cause us any additional
+ * collisions.
+ */
+ continue;
+ collision_ent = collision_find_entry(collisions, new_path);
+ if (!collision_ent) {
+ collision_ent = xcalloc(1,
+ sizeof(struct collision_entry));
+ hashmap_entry_init(collision_ent, strhash(new_path));
+ hashmap_put(collisions, collision_ent);
+ collision_ent->target_file = new_path;
+ } else {
+ free(new_path);
+ }
+ string_list_insert(&collision_ent->source_files,
+ pair->two->path);
+ }
+}
+
+static char *check_for_directory_rename(struct merge_options *o,
+ const char *path,
+ struct tree *tree,
+ struct hashmap *dir_renames,
+ struct hashmap *dir_rename_exclusions,
+ struct hashmap *collisions,
+ int *clean_merge)
+{
+ char *new_path = NULL;
+ struct dir_rename_entry *entry = check_dir_renamed(path, dir_renames);
+ struct dir_rename_entry *oentry = NULL;
+
+ if (!entry)
+ return new_path;
+
+ /*
+ * This next part is a little weird. We do not want to do an
+ * implicit rename into a directory we renamed on our side, because
+ * that will result in a spurious rename/rename(1to2) conflict. An
+ * example:
+ * Base commit: dumbdir/afile, otherdir/bfile
+ * Side 1: smrtdir/afile, otherdir/bfile
+ * Side 2: dumbdir/afile, dumbdir/bfile
+ * Here, while working on Side 1, we could notice that otherdir was
+ * renamed/merged to dumbdir, and change the diff_filepair for
+ * otherdir/bfile into a rename into dumbdir/bfile. However, Side
+ * 2 will notice the rename from dumbdir to smrtdir, and do the
+ * transitive rename to move it from dumbdir/bfile to
+ * smrtdir/bfile. That gives us bfile in dumbdir vs being in
+ * smrtdir, a rename/rename(1to2) conflict. We really just want
+ * the file to end up in smrtdir. And the way to achieve that is
+ * to not let Side1 do the rename to dumbdir, since we know that is
+ * the source of one of our directory renames.
+ *
+ * That's why oentry and dir_rename_exclusions are here.
+ *
+ * As it turns out, this also prevents N-way transient rename
+ * confusion; See testcases 9c and 9d of t6043.
+ */
+ oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
+ if (oentry) {
+ output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+ "to %s, because %s itself was renamed."),
+ entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
+ } else {
+ new_path = handle_path_level_conflicts(o, path, entry,
+ collisions, tree);
+ *clean_merge &= (new_path != NULL);
+ }
+
+ return new_path;
+}
+
+static void apply_directory_rename_modifications(struct merge_options *o,
+ struct diff_filepair *pair,
+ char *new_path,
+ struct rename *re,
+ struct tree *tree,
+ struct tree *o_tree,
+ struct tree *a_tree,
+ struct tree *b_tree,
+ struct string_list *entries,
+ int *clean)
+{
+ struct string_list_item *item;
+ int stage = (tree == a_tree ? 2 : 3);
+ int update_wd;
+
+ /*
+ * In all cases where we can do directory rename detection,
+ * unpack_trees() will have read pair->two->path into the
+ * index and the working copy. We need to remove it so that
+ * we can instead place it at new_path. It is guaranteed to
+ * not be untracked (unpack_trees() would have errored out
+ * saying the file would have been overwritten), but it might
+ * still be dirty.
+ */
+ update_wd = !was_dirty(o, pair->two->path);
+ if (!update_wd)
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ pair->two->path);
+ remove_file(o, 1, pair->two->path, !update_wd);
+
+ /* Find or create a new re->dst_entry */
+ item = string_list_lookup(entries, new_path);
+ if (item) {
+ /*
+ * Since we're renaming on this side of history, and it's
+ * due to a directory rename on the other side of history
+ * (which we only allow when the directory in question no
+ * longer exists on the other side of history), the
+ * original entry for re->dst_entry is no longer
+ * necessary...
+ */
+ re->dst_entry->processed = 1;
+
+ /*
+ * ...because we'll be using this new one.
+ */
+ re->dst_entry = item->util;
+ } else {
+ /*
+ * re->dst_entry is for the before-dir-rename path, and we
+ * need it to hold information for the after-dir-rename
+ * path. Before creating a new entry, we need to mark the
+ * old one as unnecessary (...unless it is shared by
+ * src_entry, i.e. this didn't use to be a rename, in which
+ * case we can just allow the normal processing to happen
+ * for it).
+ */
+ if (pair->status == 'R')
+ re->dst_entry->processed = 1;
+
+ re->dst_entry = insert_stage_data(new_path,
+ o_tree, a_tree, b_tree,
+ entries);
+ item = string_list_insert(entries, new_path);
+ item->util = re->dst_entry;
+ }
+
+ /*
+ * Update the stage_data with the information about the path we are
+ * moving into place. That slot will be empty and available for us
+ * to write to because of the collision checks in
+ * handle_path_level_conflicts(). In other words,
+ * re->dst_entry->stages[stage].oid will be the null_oid, so it's
+ * open for us to write to.
+ *
+ * It may be tempting to actually update the index at this point as
+ * well, using update_stages_for_stage_data(), but as per the big
+ * "NOTE" in update_stages(), doing so will modify the current
+ * in-memory index which will break calls to would_lose_untracked()
+ * that we need to make. Instead, we need to just make sure that
+ * the various conflict_rename_*() functions update the index
+ * explicitly rather than relying on unpack_trees() to have done it.
+ */
+ get_tree_entry(&tree->object.oid,
+ pair->two->path,
+ &re->dst_entry->stages[stage].oid,
+ &re->dst_entry->stages[stage].mode);
+
+ /* Update pair status */
+ if (pair->status == 'A') {
+ /*
+ * Recording rename information for this add makes it look
+ * like a rename/delete conflict. Make sure we can
+ * correctly handle this as an add that was moved to a new
+ * directory instead of reporting a rename/delete conflict.
+ */
+ re->add_turned_into_rename = 1;
+ }
+ /*
+ * We don't actually look at pair->status again, but it seems
+ * pedagogically correct to adjust it.
+ */
+ pair->status = 'R';
+
+ /*
+ * Finally, record the new location.
+ */
+ pair->two->path = new_path;
+}
+
+/*
+ * Get information of all renames which occurred in 'pairs', making use of
+ * any implicit directory renames inferred from the other side of history.
+ * We need the three trees in the merge ('o_tree', 'a_tree' and 'b_tree')
+ * to be able to associate the correct cache entries with the rename
+ * information; tree is always equal to either a_tree or b_tree.
+ */
+static struct string_list *get_renames(struct merge_options *o,
+ struct diff_queue_struct *pairs,
+ struct hashmap *dir_renames,
+ struct hashmap *dir_rename_exclusions,
+ struct tree *tree,
+ struct tree *o_tree,
+ struct tree *a_tree,
+ struct tree *b_tree,
+ struct string_list *entries,
+ int *clean_merge)
+{
+ int i;
+ struct hashmap collisions;
+ struct hashmap_iter iter;
+ struct collision_entry *e;
+ struct string_list *renames;
+
+ compute_collisions(&collisions, dir_renames, pairs);
+ renames = xcalloc(1, sizeof(struct string_list));
+
+ for (i = 0; i < pairs->nr; ++i) {
+ struct string_list_item *item;
+ struct rename *re;
+ struct diff_filepair *pair = pairs->queue[i];
+ char *new_path; /* non-NULL only with directory renames */
+
+ if (pair->status != 'A' && pair->status != 'R') {
+ diff_free_filepair(pair);
+ continue;
+ }
+ new_path = check_for_directory_rename(o, pair->two->path, tree,
+ dir_renames,
+ dir_rename_exclusions,
+ &collisions,
+ clean_merge);
+ if (pair->status != 'R' && !new_path) {
+ diff_free_filepair(pair);
+ continue;
+ }
+
+ re = xmalloc(sizeof(*re));
+ re->processed = 0;
+ re->add_turned_into_rename = 0;
+ re->pair = pair;
+ item = string_list_lookup(entries, re->pair->one->path);
+ if (!item)
+ re->src_entry = insert_stage_data(re->pair->one->path,
+ o_tree, a_tree, b_tree, entries);
+ else
+ re->src_entry = item->util;
+
+ item = string_list_lookup(entries, re->pair->two->path);
+ if (!item)
+ re->dst_entry = insert_stage_data(re->pair->two->path,
+ o_tree, a_tree, b_tree, entries);
+ else
+ re->dst_entry = item->util;
+ item = string_list_insert(renames, pair->one->path);
+ item->util = re;
+ if (new_path)
+ apply_directory_rename_modifications(o, pair, new_path,
+ re, tree, o_tree,
+ a_tree, b_tree,
+ entries,
+ clean_merge);
+ }
+
+ hashmap_iter_init(&collisions, &iter);
+ while ((e = hashmap_iter_next(&iter))) {
+ free(e->target_file);
+ string_list_clear(&e->source_files, 0);
+ }
+ hashmap_free(&collisions, 1);
+ return renames;
+}
+
static int process_renames(struct merge_options *o,
struct string_list *a_renames,
struct string_list *b_renames)
const char *ren2_dst = ren2->pair->two->path;
enum rename_type rename_type;
if (strcmp(ren1_src, ren2_src) != 0)
- die("BUG: ren1_src != ren2_src");
+ BUG("ren1_src != ren2_src");
ren2->dst_entry->processed = 1;
ren2->processed = 1;
if (strcmp(ren1_dst, ren2_dst) != 0) {
ren2 = lookup->util;
ren2_dst = ren2->pair->two->path;
if (strcmp(ren1_dst, ren2_dst) != 0)
- die("BUG: ren1_dst != ren2_dst");
+ BUG("ren1_dst != ren2_dst");
clean_merge = 0;
ren2->processed = 1;
* add-source case).
*/
remove_file(o, 1, ren1_src,
- renamed_stage == 2 || !was_tracked(ren1_src));
+ renamed_stage == 2 || !was_tracked(o, ren1_src));
oidcpy(&src_other.oid,
&ren1->src_entry->stages[other_stage].oid);
dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
try_merge = 0;
- if (oid_eq(&src_other.oid, &null_oid)) {
+ if (oid_eq(&src_other.oid, &null_oid) &&
+ ren1->add_turned_into_rename) {
+ setup_rename_conflict_info(RENAME_DIR,
+ ren1->pair,
+ NULL,
+ branch1,
+ branch2,
+ ren1->dst_entry,
+ NULL,
+ o,
+ NULL,
+ NULL);
+ } else if (oid_eq(&src_other.oid, &null_oid)) {
setup_rename_conflict_info(RENAME_DELETE,
ren1->pair,
NULL,
return clean_merge;
}
+struct rename_info {
+ struct string_list *head_renames;
+ struct string_list *merge_renames;
+};
+
+static void initial_cleanup_rename(struct diff_queue_struct *pairs,
+ struct hashmap *dir_renames)
+{
+ struct hashmap_iter iter;
+ struct dir_rename_entry *e;
+
+ hashmap_iter_init(dir_renames, &iter);
+ while ((e = hashmap_iter_next(&iter))) {
+ free(e->dir);
+ strbuf_release(&e->new_dir);
+ /* possible_new_dirs already cleared in get_directory_renames */
+ }
+ hashmap_free(dir_renames, 1);
+ free(dir_renames);
+
+ free(pairs->queue);
+ free(pairs);
+}
+
+static int handle_renames(struct merge_options *o,
+ struct tree *common,
+ struct tree *head,
+ struct tree *merge,
+ struct string_list *entries,
+ struct rename_info *ri)
+{
+ struct diff_queue_struct *head_pairs, *merge_pairs;
+ struct hashmap *dir_re_head, *dir_re_merge;
+ int clean = 1;
+
+ ri->head_renames = NULL;
+ ri->merge_renames = NULL;
+
+ if (!merge_detect_rename(o))
+ return 1;
+
+ head_pairs = get_diffpairs(o, common, head);
+ merge_pairs = get_diffpairs(o, common, merge);
+
+ dir_re_head = get_directory_renames(head_pairs, head);
+ dir_re_merge = get_directory_renames(merge_pairs, merge);
+
+ handle_directory_level_conflicts(o,
+ dir_re_head, head,
+ dir_re_merge, merge);
+
+ ri->head_renames = get_renames(o, head_pairs,
+ dir_re_merge, dir_re_head, head,
+ common, head, merge, entries,
+ &clean);
+ if (clean < 0)
+ goto cleanup;
+ ri->merge_renames = get_renames(o, merge_pairs,
+ dir_re_head, dir_re_merge, merge,
+ common, head, merge, entries,
+ &clean);
+ if (clean < 0)
+ goto cleanup;
+ clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+
+cleanup:
+ /*
+ * Some cleanup is deferred until final_cleanup_renames() because the
+ * data structures are still needed and referenced in
+ * process_entry(). But there are a few things we can free now.
+ */
+ initial_cleanup_rename(head_pairs, dir_re_head);
+ initial_cleanup_rename(merge_pairs, dir_re_merge);
+
+ return clean;
+}
+
+static void final_cleanup_rename(struct string_list *rename)
+{
+ const struct rename *re;
+ int i;
+
+ if (rename == NULL)
+ return;
+
+ for (i = 0; i < rename->nr; i++) {
+ re = rename->items[i].util;
+ diff_free_filepair(re->pair);
+ }
+ string_list_clear(rename, 1);
+ free(rename);
+}
+
+static void final_cleanup_renames(struct rename_info *re_info)
+{
+ final_cleanup_rename(re_info->head_renames);
+ final_cleanup_rename(re_info->merge_renames);
+}
+
static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
{
return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
static int merge_content(struct merge_options *o,
const char *path,
+ int is_dirty,
struct object_id *o_oid, int o_mode,
struct object_id *a_oid, int a_mode,
struct object_id *b_oid, int b_mode,
S_ISGITLINK(pair1->two->mode)))
df_conflict_remains = 1;
}
- if (merge_file_special_markers(o, &one, &a, &b,
+ if (merge_file_special_markers(o, &one, &a, &b, path,
o->branch1, path1,
o->branch2, path2, &mfi))
return -1;
- if (mfi.clean && !df_conflict_remains &&
- oid_eq(&mfi.oid, a_oid) && mfi.mode == a_mode) {
- int path_renamed_outside_HEAD;
+ /*
+ * We can skip updating the working tree file iff:
+ * a) The merge is clean
+ * b) The merge matches what was in HEAD (content, mode, pathname)
+ * c) The target path is usable (i.e. not involved in D/F conflict)
+ */
+ if (mfi.clean &&
+ was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
+ !df_conflict_remains) {
output(o, 3, _("Skipped %s (merged same as existing)"), path);
- /*
- * The content merge resulted in the same file contents we
- * already had. We can return early if those file contents
- * are recorded at the correct path (which may not be true
- * if the merge involves a rename).
- */
- path_renamed_outside_HEAD = !path2 || !strcmp(path, path2);
- if (!path_renamed_outside_HEAD) {
- add_cacheinfo(o, mfi.mode, &mfi.oid, path,
- 0, (!o->call_depth), 0);
- return mfi.clean;
- }
- } else
- output(o, 2, _("Auto-merging %s"), path);
+ if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
+ 0, (!o->call_depth && !is_dirty), 0))
+ return -1;
+ return mfi.clean;
+ }
if (!mfi.clean) {
if (S_ISGITLINK(mfi.mode))
return -1;
}
- if (df_conflict_remains) {
+ if (df_conflict_remains || is_dirty) {
char *new_path;
if (o->call_depth) {
remove_file_from_cache(path);
if (update_stages(o, path, &one, &a, &b))
return -1;
} else {
- int file_from_stage2 = was_tracked(path);
+ int file_from_stage2 = was_tracked(o, path);
struct diff_filespec merged;
oidcpy(&merged.oid, &mfi.oid);
merged.mode = mfi.mode;
}
new_path = unique_path(o, path, rename_conflict_info->branch1);
+ if (is_dirty) {
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ path);
+ }
output(o, 1, _("Adding as %s instead"), new_path);
if (update_file(o, 0, &mfi.oid, mfi.mode, new_path)) {
free(new_path);
mfi.clean = 0;
} else if (update_file(o, mfi.clean, &mfi.oid, mfi.mode, path))
return -1;
- return mfi.clean;
+ return !is_dirty && mfi.clean;
+}
+
+static int conflict_rename_normal(struct merge_options *o,
+ const char *path,
+ struct object_id *o_oid, unsigned int o_mode,
+ struct object_id *a_oid, unsigned int a_mode,
+ struct object_id *b_oid, unsigned int b_mode,
+ struct rename_conflict_info *ci)
+{
+ /* Merge the content and write it out */
+ return merge_content(o, path, was_dirty(o, path),
+ o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+ ci);
}
/* Per entry merge function */
switch (conflict_info->rename_type) {
case RENAME_NORMAL:
case RENAME_ONE_FILE_TO_ONE:
- clean_merge = merge_content(o, path,
- o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
- conflict_info);
+ clean_merge = conflict_rename_normal(o,
+ path,
+ o_oid, o_mode,
+ a_oid, a_mode,
+ b_oid, b_mode,
+ conflict_info);
+ break;
+ case RENAME_DIR:
+ clean_merge = 1;
+ if (conflict_rename_dir(o,
+ conflict_info->pair1,
+ conflict_info->branch1,
+ conflict_info->branch2))
+ clean_merge = -1;
break;
case RENAME_DELETE:
clean_merge = 0;
} else if (a_oid && b_oid) {
/* Case C: Added in both (check for same permissions) and */
/* case D: Modified in both, but differently. */
- clean_merge = merge_content(o, path,
+ int is_dirty = 0; /* unpack_trees would have bailed if dirty */
+ clean_merge = merge_content(o, path, is_dirty,
o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
NULL);
} else if (!o_oid && !a_oid && !b_oid) {
*/
remove_file(o, 1, path, !a_mode);
} else
- die("BUG: fatal merge failure, shouldn't happen.");
+ BUG("fatal merge failure, shouldn't happen.");
return clean_merge;
}
return 1;
}
- code = git_merge_trees(o->call_depth, common, head, merge);
+ code = git_merge_trees(o, common, head, merge);
if (code != 0) {
if (show(o, 4) || o->call_depth)
}
if (unmerged_cache()) {
- struct string_list *entries, *re_head, *re_merge;
+ struct string_list *entries;
+ struct rename_info re_info;
int i;
/*
* Only need the hashmap while processing entries, so
get_files_dirs(o, merge);
entries = get_unmerged();
- re_head = get_renames(o, head, common, head, merge, entries);
- re_merge = get_renames(o, merge, common, head, merge, entries);
- clean = process_renames(o, re_head, re_merge);
+ clean = handle_renames(o, common, head, merge, entries,
+ &re_info);
record_df_conflict_files(o, entries);
if (clean < 0)
goto cleanup;
for (i = 0; i < entries->nr; i++) {
struct stage_data *e = entries->items[i].util;
if (!e->processed)
- die("BUG: unprocessed path??? %s",
+ BUG("unprocessed path??? %s",
entries->items[i].string);
}
cleanup:
- string_list_clear(re_merge, 0);
- string_list_clear(re_head, 0);
+ final_cleanup_renames(&re_info);
+
string_list_clear(entries, 1);
+ free(entries);
hashmap_free(&o->current_file_dir_set, 1);
- free(re_merge);
- free(re_head);
- free(entries);
-
if (clean < 0)
return clean;
}
else
clean = 1;
+ /* Free the extra index left from git_merge_trees() */
+ /*
+ * FIXME: Need to also free data allocated by
+ * setup_unpack_trees_porcelain() tucked away in o->unpack_opts.msgs,
+ * but the problem is that only half of it refers to dynamically
+ * allocated data, while the other half points at static strings.
+ */
+ discard_index(&o->orig_index);
+
if (o->call_depth && !(*result = write_tree_from_memory(o)))
return -1;
read_cache();
o->ancestor = "merged common ancestors";
- clean = merge_trees(o, h1->tree, h2->tree, merged_common_ancestors->tree,
+ clean = merge_trees(o, get_commit_tree(h1), get_commit_tree(h2),
+ get_commit_tree(merged_common_ancestors),
&mrtree);
if (clean < 0) {
flush_output(o);
static void merge_recursive_config(struct merge_options *o)
{
+ char *value = NULL;
git_config_get_int("merge.verbosity", &o->verbosity);
git_config_get_int("diff.renamelimit", &o->diff_rename_limit);
git_config_get_int("merge.renamelimit", &o->merge_rename_limit);
+ if (!git_config_get_string("diff.renames", &value)) {
+ o->diff_detect_rename = git_config_rename("diff.renames", value);
+ free(value);
+ }
+ if (!git_config_get_string("merge.renames", &value)) {
+ o->merge_detect_rename = git_config_rename("merge.renames", value);
+ free(value);
+ }
git_config(git_xmerge_config, NULL);
}
o->diff_rename_limit = -1;
o->merge_rename_limit = -1;
o->renormalize = 0;
- o->detect_rename = 1;
+ o->diff_detect_rename = -1;
+ o->merge_detect_rename = -1;
merge_recursive_config(o);
merge_verbosity = getenv("GIT_MERGE_VERBOSITY");
if (merge_verbosity)
else if (!strcmp(s, "no-renormalize"))
o->renormalize = 0;
else if (!strcmp(s, "no-renames"))
- o->detect_rename = 0;
+ o->merge_detect_rename = 0;
else if (!strcmp(s, "find-renames")) {
- o->detect_rename = 1;
+ o->merge_detect_rename = 1;
o->rename_score = 0;
}
else if (skip_prefix(s, "find-renames=", &arg) ||
skip_prefix(s, "rename-threshold=", &arg)) {
if ((o->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
return -1;
- o->detect_rename = 1;
+ o->merge_detect_rename = 1;
}
else
return -1;
printf("Using remote notes for %s\n",
oid_to_hex(&p->obj));
if (add_note(t, &p->obj, &p->remote, combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
return 0;
case NOTES_MERGE_RESOLVE_UNION:
if (o->verbosity >= 2)
trace_printf("\t\t\tno local change, adopted remote\n");
if (add_note(t, &p->obj, &p->remote,
combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
} else {
/* need file-level merge between local and remote */
trace_printf("\t\t\tneed content-level merge\n");
printf("No merge base found; doing history-less merge\n");
} else if (!bases->next) {
base_oid = &bases->item->object.oid;
- base_tree_oid = &bases->item->tree->object.oid;
+ base_tree_oid = get_commit_tree_oid(bases->item);
if (o->verbosity >= 4)
printf("One merge base found (%.7s)\n",
oid_to_hex(base_oid));
} else {
/* TODO: How to handle multiple merge-bases? */
base_oid = &bases->item->object.oid;
- base_tree_oid = &bases->item->tree->object.oid;
+ base_tree_oid = get_commit_tree_oid(bases->item);
if (o->verbosity >= 3)
printf("Multiple merge bases found. Using the first "
"(%.7s)\n", oid_to_hex(base_oid));
goto found_result;
}
- result = merge_from_diffs(o, base_tree_oid, &local->tree->object.oid,
- &remote->tree->object.oid, local_tree);
+ result = merge_from_diffs(o, base_tree_oid,
+ get_commit_tree_oid(local),
+ get_commit_tree_oid(remote), local_tree);
if (result != 0) { /* non-trivial merge (with or without conflicts) */
/* Commit (partial) result */
/**
* Build the initial type index for the packfile
*/
-void bitmap_writer_build_type_index(struct pack_idx_entry **index,
+void bitmap_writer_build_type_index(struct packing_data *to_pack,
+ struct pack_idx_entry **index,
uint32_t index_nr)
{
uint32_t i;
writer.trees = ewah_new();
writer.blobs = ewah_new();
writer.tags = ewah_new();
+ ALLOC_ARRAY(to_pack->in_pack_pos, to_pack->nr_objects);
for (i = 0; i < index_nr; ++i) {
struct object_entry *entry = (struct object_entry *)index[i];
enum object_type real_type;
- entry->in_pack_pos = i;
+ oe_set_in_pack_pos(to_pack, entry, i);
- switch (entry->type) {
+ switch (oe_type(entry)) {
case OBJ_COMMIT:
case OBJ_TREE:
case OBJ_BLOB:
case OBJ_TAG:
- real_type = entry->type;
+ real_type = oe_type(entry);
break;
default:
- real_type = oid_object_info(&entry->idx.oid, NULL);
+ real_type = oid_object_info(the_repository,
+ &entry->idx.oid, NULL);
break;
}
default:
die("Missing type information for %s (%d/%d)",
oid_to_hex(&entry->idx.oid), real_type,
- entry->type);
+ oe_type(entry));
}
}
}
"(object %s is missing)", sha1_to_hex(sha1));
}
- return entry->in_pack_pos;
+ return oe_in_pack_pos(writer.to_pack, entry);
}
static void show_object(struct object *object, const char *name, void *data)
sha1_pos(stored->commit->object.oid.hash, index, index_nr, sha1_access);
if (commit_pos < 0)
- die("BUG: trying to write commit not in index");
+ BUG("trying to write commit not in index");
hashwrite_be32(f, commit_pos);
hashwrite_u8(f, stored->xor_offset);
if (options & BITMAP_OPT_HASH_CACHE)
write_hash_cache(f, index, index_nr);
- hashclose(f, NULL, CSUM_FSYNC);
+ finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
if (adjust_shared_perm(tmp_file.buf))
die_errno("unable to make temporary bitmap file readable");
size_t len;
if (!strip_suffix(p->pack_name, ".pack", &len))
- die("BUG: pack_name does not end in .pack");
+ BUG("pack_name does not end in .pack");
return xstrfmt("%.*s.bitmap", (int)len, p->pack_name);
}
revs->ignore_missing_links = 0;
if (haves_bitmap == NULL)
- die("BUG: failed to perform bitmap walk");
+ BUG("failed to perform bitmap walk");
}
wants_bitmap = find_objects(revs, wants, haves_bitmap);
if (!wants_bitmap)
- die("BUG: failed to perform bitmap walk");
+ BUG("failed to perform bitmap walk");
if (haves_bitmap)
bitmap_and_not(wants_bitmap, haves_bitmap);
oe = packlist_find(mapping, sha1, NULL);
if (oe)
- reposition[i] = oe->in_pack_pos + 1;
+ reposition[i] = oe_in_pack_pos(mapping, oe) + 1;
}
rebuild = bitmap_new();
#include "object.h"
#include "pack.h"
#include "pack-objects.h"
+#include "packfile.h"
+#include "config.h"
static uint32_t locate_object_entry_hash(struct packing_data *pdata,
const unsigned char *sha1,
&found);
if (found)
- die("BUG: Duplicate object in hash");
+ BUG("Duplicate object in hash");
pdata->index[ix] = i + 1;
entry++;
return &pdata->objects[pdata->index[i] - 1];
}
+static void prepare_in_pack_by_idx(struct packing_data *pdata)
+{
+ struct packed_git **mapping, *p;
+ int cnt = 0, nr = 1U << OE_IN_PACK_BITS;
+
+ ALLOC_ARRAY(mapping, nr);
+ /*
+ * oe_in_pack() on an all-zero'd object_entry
+ * (i.e. in_pack_idx also zero) should return NULL.
+ */
+ mapping[cnt++] = NULL;
+ for (p = get_packed_git(the_repository); p; p = p->next, cnt++) {
+ if (cnt == nr) {
+ free(mapping);
+ return;
+ }
+ p->index = cnt;
+ mapping[cnt] = p;
+ }
+ pdata->in_pack_by_idx = mapping;
+}
+
+/*
+ * A new pack appears after prepare_in_pack_by_idx() has been
+ * run. This is likely a race.
+ *
+ * We could map this new pack into the in_pack_by_idx[] array, but
+ * then we would have to deal with a full array anyway. And since it
+ * is hard to test this fallback code, just stay simple and fall back
+ * to using the in_pack[] array.
+ */
+void oe_map_new_pack(struct packing_data *pack,
+ struct packed_git *p)
+{
+ uint32_t i;
+
+ REALLOC_ARRAY(pack->in_pack, pack->nr_alloc);
+
+ for (i = 0; i < pack->nr_objects; i++)
+ pack->in_pack[i] = oe_in_pack(pack, pack->objects + i);
+
+ FREE_AND_NULL(pack->in_pack_by_idx);
+}
+
+/* assume pdata is already zero'd by caller */
+void prepare_packing_data(struct packing_data *pdata)
+{
+ if (git_env_bool("GIT_TEST_FULL_IN_PACK_ARRAY", 0)) {
+ /*
+ * do not initialize in_pack_by_idx[] to force the
+ * slow path in oe_in_pack()
+ */
+ } else {
+ prepare_in_pack_by_idx(pdata);
+ }
+
+ pdata->oe_size_limit = git_env_ulong("GIT_TEST_OE_SIZE",
+ 1U << OE_SIZE_BITS);
+}
+
struct object_entry *packlist_alloc(struct packing_data *pdata,
const unsigned char *sha1,
uint32_t index_pos)
if (pdata->nr_objects >= pdata->nr_alloc) {
pdata->nr_alloc = (pdata->nr_alloc + 1024) * 3 / 2;
REALLOC_ARRAY(pdata->objects, pdata->nr_alloc);
+
+ if (!pdata->in_pack_by_idx)
+ REALLOC_ARRAY(pdata->in_pack, pdata->nr_alloc);
}
new_entry = pdata->objects + pdata->nr_objects++;
else
pdata->index[index_pos] = pdata->nr_objects;
+ if (pdata->in_pack)
+ pdata->in_pack[pdata->nr_objects - 1] = NULL;
+
return new_entry;
}
return 0;
if (!strip_suffix(p->pack_name, ".pack", &len))
- die("BUG: pack_name does not end in .pack");
+ BUG("pack_name does not end in .pack");
idx_name = xstrfmt("%.*s.idx", (int)len, p->pack_name);
ret = check_packed_git_idx(idx_name, p);
free(idx_name);
}
}
-static void close_pack(struct packed_git *p)
+void close_pack(struct packed_git *p)
{
close_pack_windows(p);
close_pack_fd(p);
for (p = o->packed_git; p; p = p->next)
if (p->do_not_close)
- die("BUG: want to close pack marked 'do-not-close'");
+ BUG("want to close pack marked 'do-not-close'");
else
close_pack(p);
}
return NULL;
}
-static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
+static int retry_bad_packed_offset(struct repository *r,
+ struct packed_git *p,
+ off_t obj_offset)
{
int type;
struct revindex_entry *revidx;
return OBJ_BAD;
nth_packed_object_oid(&oid, p, revidx->nr);
mark_bad_packed_object(p, oid.hash);
- type = oid_object_info(&oid, NULL);
+ type = oid_object_info(r, &oid, NULL);
if (type <= OBJ_NONE)
return OBJ_BAD;
return type;
#define POI_STACK_PREALLOC 64
-static enum object_type packed_to_object_type(struct packed_git *p,
+static enum object_type packed_to_object_type(struct repository *r,
+ struct packed_git *p,
off_t obj_offset,
enum object_type type,
struct pack_window **w_curs,
if (type <= OBJ_NONE) {
/* If getting the base itself fails, we first
* retry the base, otherwise unwind */
- type = retry_bad_packed_offset(p, base_offset);
+ type = retry_bad_packed_offset(r, p, base_offset);
if (type > OBJ_NONE)
goto out;
goto unwind;
unwind:
while (poi_stack_nr) {
obj_offset = poi_stack[--poi_stack_nr];
- type = retry_bad_packed_offset(p, obj_offset);
+ type = retry_bad_packed_offset(r, p, obj_offset);
if (type > OBJ_NONE)
goto out;
}
free(ent);
}
-static void *cache_or_unpack_entry(struct packed_git *p, off_t base_offset,
- unsigned long *base_size, enum object_type *type)
+static void *cache_or_unpack_entry(struct repository *r, struct packed_git *p,
+ off_t base_offset, unsigned long *base_size,
+ enum object_type *type)
{
struct delta_base_cache_entry *ent;
ent = get_delta_base_cache_entry(p, base_offset);
if (!ent)
- return unpack_entry(p, base_offset, type, base_size);
+ return unpack_entry(r, p, base_offset, type, base_size);
if (type)
*type = ent->type;
hashmap_add(&delta_base_cache, ent);
}
-int packed_object_info(struct packed_git *p, off_t obj_offset,
- struct object_info *oi)
+int packed_object_info(struct repository *r, struct packed_git *p,
+ off_t obj_offset, struct object_info *oi)
{
struct pack_window *w_curs = NULL;
unsigned long size;
* a "real" type later if the caller is interested.
*/
if (oi->contentp) {
- *oi->contentp = cache_or_unpack_entry(p, obj_offset, oi->sizep,
+ *oi->contentp = cache_or_unpack_entry(r, p, obj_offset, oi->sizep,
&type);
if (!*oi->contentp)
type = OBJ_BAD;
if (oi->typep || oi->type_name) {
enum object_type ptot;
- ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
- curpos);
+ ptot = packed_to_object_type(r, p, obj_offset,
+ type, &w_curs, curpos);
if (oi->typep)
*oi->typep = ptot;
if (oi->type_name) {
unsigned long size;
};
-static void *read_object(const struct object_id *oid, enum object_type *type,
+static void *read_object(struct repository *r,
+ const struct object_id *oid,
+ enum object_type *type,
unsigned long *size)
{
struct object_info oi = OBJECT_INFO_INIT;
oi.sizep = size;
oi.contentp = &content;
- if (oid_object_info_extended(oid, &oi, 0) < 0)
+ if (oid_object_info_extended(r, oid, &oi, 0) < 0)
return NULL;
return content;
}
-void *unpack_entry(struct packed_git *p, off_t obj_offset,
+void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
enum object_type *final_type, unsigned long *final_size)
{
struct pack_window *w_curs = NULL;
case OBJ_OFS_DELTA:
case OBJ_REF_DELTA:
if (data)
- die("BUG: unpack_entry: left loop at a valid delta");
+ BUG("unpack_entry: left loop at a valid delta");
break;
case OBJ_COMMIT:
case OBJ_TREE:
oid_to_hex(&base_oid), (uintmax_t)obj_offset,
p->pack_name);
mark_bad_packed_object(p, base_oid.hash);
- base = read_object(&base_oid, &type, &base_size);
+ base = read_object(r, &base_oid, &type, &base_size);
external_base = base;
}
}
return 1;
}
-static int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
+int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
{
uint32_t i;
int r = 0;
struct commit *commit = (struct commit *) obj;
struct commit_list *parents = commit->parents;
- oidset_insert(set, &commit->tree->object.oid);
+ oidset_insert(set, get_commit_tree_oid(commit));
for (; parents; parents = parents->next)
oidset_insert(set, &parents->item->object.oid);
} else if (obj->type == OBJ_TAG) {
write_or_die(fd, "0000", 4);
}
+void packet_delim(int fd)
+{
+ packet_trace("0001", 4, 1);
+ write_or_die(fd, "0001", 4);
+}
+
int packet_flush_gently(int fd)
{
packet_trace("0000", 4, 1);
strbuf_add(buf, "0000", 4);
}
+void packet_buf_delim(struct strbuf *buf)
+{
+ packet_trace("0001", 4, 1);
+ strbuf_add(buf, "0001", 4);
+}
+
static void set_packet_header(char *buf, const int size)
{
static char hexchar[] = "0123456789abcdef";
va_end(args);
}
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
+{
+ size_t orig_len, n;
+
+ orig_len = buf->len;
+ strbuf_addstr(buf, "0000");
+ strbuf_add(buf, data, len);
+ n = buf->len - orig_len;
+
+ if (n > LARGE_PACKET_MAX)
+ die("protocol error: impossibly long line");
+
+ set_packet_header(&buf->buf[orig_len], n);
+ packet_trace(data, len, 1);
+}
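
Not part of the patch: a small usage sketch for the buffer-based helpers added here, assuming Git's pkt-line.h (the command string and function name are only illustrative; packet_buf_flush() already exists in pkt-line.c):

#include "cache.h"
#include "pkt-line.h"

/* Build a request made of one binary-safe pkt-line, a delim-pkt
 * (0001) and a flush-pkt (0000), all appended to a single strbuf. */
static void build_example_request(struct strbuf *req)
{
	const char *cmd = "command=example";

	packet_buf_write_len(req, cmd, strlen(cmd));
	packet_buf_delim(req);
	packet_buf_flush(req);
}
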
+
int write_packetized_from_fd(int fd_in, int fd_out)
{
static char buf[LARGE_PACKET_DATA_MAX];
ssize_t ret;
if (fd >= 0 && src_buf && *src_buf)
- die("BUG: multiple sources given to packet_read");
+ BUG("multiple sources given to packet_read");
/* Read up to "size" bytes from our source, whatever it is. */
if (src_buf && *src_buf) {
return (val < 0) ? val : (val << 8) | hex2chr(linelen + 2);
}
-int packet_read(int fd, char **src_buf, size_t *src_len,
- char *buffer, unsigned size, int options)
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+ size_t *src_len, char *buffer,
+ unsigned size, int *pktlen,
+ int options)
{
- int len, ret;
+ int len;
char linelen[4];
- ret = get_packet_data(fd, src_buf, src_len, linelen, 4, options);
- if (ret < 0)
- return ret;
+ if (get_packet_data(fd, src_buffer, src_len, linelen, 4, options) < 0) {
+ *pktlen = -1;
+ return PACKET_READ_EOF;
+ }
+
len = packet_length(linelen);
- if (len < 0)
+
+ if (len < 0) {
die("protocol error: bad line length character: %.4s", linelen);
- if (!len) {
+ } else if (!len) {
packet_trace("0000", 4, 0);
- return 0;
+ *pktlen = 0;
+ return PACKET_READ_FLUSH;
+ } else if (len == 1) {
+ packet_trace("0001", 4, 0);
+ *pktlen = 0;
+ return PACKET_READ_DELIM;
+ } else if (len < 4) {
+ die("protocol error: bad line length %d", len);
}
+
len -= 4;
- if (len >= size)
+ if ((unsigned)len >= size)
die("protocol error: bad line length %d", len);
- ret = get_packet_data(fd, src_buf, src_len, buffer, len, options);
- if (ret < 0)
- return ret;
+
+ if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
+ *pktlen = -1;
+ return PACKET_READ_EOF;
+ }
if ((options & PACKET_READ_CHOMP_NEWLINE) &&
len && buffer[len-1] == '\n')
buffer[len] = 0;
packet_trace(buffer, len, 0);
- return len;
+ *pktlen = len;
+ return PACKET_READ_NORMAL;
+}
+
+int packet_read(int fd, char **src_buffer, size_t *src_len,
+ char *buffer, unsigned size, int options)
+{
+ int pktlen = -1;
+
+ packet_read_with_status(fd, src_buffer, src_len, buffer, size,
+ &pktlen, options);
+
+ return pktlen;
}
static char *packet_read_line_generic(int fd,
}
return sb_out->len - orig_len;
}
+
+/* Packet Reader Functions */
+void packet_reader_init(struct packet_reader *reader, int fd,
+ char *src_buffer, size_t src_len,
+ int options)
+{
+ memset(reader, 0, sizeof(*reader));
+
+ reader->fd = fd;
+ reader->src_buffer = src_buffer;
+ reader->src_len = src_len;
+ reader->buffer = packet_buffer;
+ reader->buffer_size = sizeof(packet_buffer);
+ reader->options = options;
+}
+
+enum packet_read_status packet_reader_read(struct packet_reader *reader)
+{
+ if (reader->line_peeked) {
+ reader->line_peeked = 0;
+ return reader->status;
+ }
+
+ reader->status = packet_read_with_status(reader->fd,
+ &reader->src_buffer,
+ &reader->src_len,
+ reader->buffer,
+ reader->buffer_size,
+ &reader->pktlen,
+ reader->options);
+
+ if (reader->status == PACKET_READ_NORMAL)
+ reader->line = reader->buffer;
+ else
+ reader->line = NULL;
+
+ return reader->status;
+}
+
+enum packet_read_status packet_reader_peek(struct packet_reader *reader)
+{
+ /* Only allow peeking a single line */
+ if (reader->line_peeked)
+ return reader->status;
+
+ /* Peek a line by reading it and setting peeked flag */
+ packet_reader_read(reader);
+ reader->line_peeked = 1;
+ return reader->status;
+}
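
Also not part of the patch: a hedged sketch of how a caller might drive the new packet_reader API, reading from file descriptor 0 purely as an example and relying only on the functions, fields and enum values introduced above:

#include "cache.h"
#include "pkt-line.h"

/* Read pkt-lines from stdin until a flush-pkt, delim-pkt or EOF,
 * printing each ordinary line. */
static void read_until_flush(void)
{
	struct packet_reader reader;

	packet_reader_init(&reader, 0, NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE);

	while (packet_reader_read(&reader) == PACKET_READ_NORMAL)
		printf("line: %s\n", reader.line);

	if (reader.status == PACKET_READ_DELIM)
		printf("stopped at a delim packet\n");
}
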
} *used_atom;
static int used_atom_cnt, need_tagged, need_symref;
-static void color_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *color_value)
+/*
+ * Expand the format string, append the result to strbuf *sb, then
+ * return the error code ret. This saves a few lines of code at each
+ * call site.
+ */
+static int strbuf_addf_ret(struct strbuf *sb, int ret, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ strbuf_vaddf(sb, fmt, ap);
+ va_end(ap);
+ return ret;
+}
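
The conversion that follows replaces die() calls in the atom parsers with this pattern: append a message to a caller-provided strbuf and return -1. Below is a minimal sketch of the convention, not taken from ref-filter.c; parse_width_arg() is a hypothetical helper:

/*
 * Hypothetical parser following the same convention: on failure,
 * describe the problem in *err and return -1; on success return 0.
 */
static int parse_width_arg(const char *arg, unsigned int *width,
			   struct strbuf *err)
{
	if (strtoul_ui(arg, 10, width))
		return strbuf_addf_ret(err, -1,
				       _("unrecognized width:%s"), arg);
	return 0;
}

/* A caller can then decide whether to die or recover, e.g.:
 *
 *	struct strbuf err = STRBUF_INIT;
 *	unsigned int w;
 *	if (parse_width_arg("12", &w, &err))
 *		die("%s", err.buf);
 *	strbuf_release(&err);
 */
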
+
+static int color_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *color_value, struct strbuf *err)
{
if (!color_value)
- die(_("expected format: %%(color:<color>)"));
+ return strbuf_addf_ret(err, -1, _("expected format: %%(color:<color>)"));
if (color_parse(color_value, atom->u.color) < 0)
- die(_("unrecognized color: %%(color:%s)"), color_value);
+ return strbuf_addf_ret(err, -1, _("unrecognized color: %%(color:%s)"),
+ color_value);
/*
* We check this after we've parsed the color, which lets us complain
* about syntactically bogus color names even if they won't be used.
*/
if (!want_color(format->use_color))
color_parse("", atom->u.color);
+ return 0;
}
-static void refname_atom_parser_internal(struct refname_atom *atom,
- const char *arg, const char *name)
+static int refname_atom_parser_internal(struct refname_atom *atom, const char *arg,
+ const char *name, struct strbuf *err)
{
if (!arg)
atom->option = R_NORMAL;
skip_prefix(arg, "strip=", &arg)) {
atom->option = R_LSTRIP;
if (strtol_i(arg, 10, &atom->lstrip))
- die(_("Integer value expected refname:lstrip=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("Integer value expected refname:lstrip=%s"), arg);
} else if (skip_prefix(arg, "rstrip=", &arg)) {
atom->option = R_RSTRIP;
if (strtol_i(arg, 10, &atom->rstrip))
- die(_("Integer value expected refname:rstrip=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("Integer value expected refname:rstrip=%s"), arg);
} else
- die(_("unrecognized %%(%s) argument: %s"), name, arg);
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(%s) argument: %s"), name, arg);
+ return 0;
}
-static void remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
struct string_list params = STRING_LIST_INIT_DUP;
int i;
if (!arg) {
atom->u.remote_ref.option = RR_REF;
- refname_atom_parser_internal(&atom->u.remote_ref.refname,
- arg, atom->name);
- return;
+ return refname_atom_parser_internal(&atom->u.remote_ref.refname,
+ arg, atom->name, err);
}
atom->u.remote_ref.nobracket = 0;
atom->u.remote_ref.push_remote = 1;
} else {
atom->u.remote_ref.option = RR_REF;
- refname_atom_parser_internal(&atom->u.remote_ref.refname,
- arg, atom->name);
+ if (refname_atom_parser_internal(&atom->u.remote_ref.refname,
+ arg, atom->name, err)) {
+ string_list_clear(&params, 0);
+ return -1;
+ }
}
}
string_list_clear(&params, 0);
+ return 0;
}
-static void body_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int body_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (arg)
- die(_("%%(body) does not take arguments"));
+ return strbuf_addf_ret(err, -1, _("%%(body) does not take arguments"));
atom->u.contents.option = C_BODY_DEP;
+ return 0;
}
-static void subject_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int subject_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (arg)
- die(_("%%(subject) does not take arguments"));
+ return strbuf_addf_ret(err, -1, _("%%(subject) does not take arguments"));
atom->u.contents.option = C_SUB;
+ return 0;
}
-static void trailers_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int trailers_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
struct string_list params = STRING_LIST_INIT_DUP;
int i;
atom->u.contents.trailer_opts.unfold = 1;
else if (!strcmp(s, "only"))
atom->u.contents.trailer_opts.only_trailers = 1;
- else
- die(_("unknown %%(trailers) argument: %s"), s);
+ else {
+ strbuf_addf(err, _("unknown %%(trailers) argument: %s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
}
}
atom->u.contents.option = C_TRAILERS;
string_list_clear(&params, 0);
+ return 0;
}
-static void contents_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int contents_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (!arg)
atom->u.contents.option = C_BARE;
atom->u.contents.option = C_SUB;
else if (skip_prefix(arg, "trailers", &arg)) {
skip_prefix(arg, ":", &arg);
- trailers_atom_parser(format, atom, *arg ? arg : NULL);
+ if (trailers_atom_parser(format, atom, *arg ? arg : NULL, err))
+ return -1;
} else if (skip_prefix(arg, "lines=", &arg)) {
atom->u.contents.option = C_LINES;
if (strtoul_ui(arg, 10, &atom->u.contents.nlines))
- die(_("positive value expected contents:lines=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("positive value expected contents:lines=%s"), arg);
} else
- die(_("unrecognized %%(contents) argument: %s"), arg);
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(contents) argument: %s"), arg);
+ return 0;
}
-static void objectname_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int objectname_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (!arg)
atom->u.objectname.option = O_FULL;
atom->u.objectname.option = O_LENGTH;
if (strtoul_ui(arg, 10, &atom->u.objectname.length) ||
atom->u.objectname.length == 0)
- die(_("positive value expected objectname:short=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("positive value expected objectname:short=%s"), arg);
if (atom->u.objectname.length < MINIMUM_ABBREV)
atom->u.objectname.length = MINIMUM_ABBREV;
} else
- die(_("unrecognized %%(objectname) argument: %s"), arg);
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(objectname) argument: %s"), arg);
+ return 0;
}
-static void refname_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int refname_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
- refname_atom_parser_internal(&atom->u.refname, arg, atom->name);
+ return refname_atom_parser_internal(&atom->u.refname, arg, atom->name, err);
}
static align_type parse_align_position(const char *s)
return -1;
}
-static void align_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int align_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
struct align *align = &atom->u.align;
struct string_list params = STRING_LIST_INIT_DUP;
unsigned int width = ~0U;
if (!arg)
- die(_("expected format: %%(align:<width>,<position>)"));
+ return strbuf_addf_ret(err, -1, _("expected format: %%(align:<width>,<position>)"));
align->position = ALIGN_LEFT;
if (skip_prefix(s, "position=", &s)) {
position = parse_align_position(s);
- if (position < 0)
- die(_("unrecognized position:%s"), s);
+ if (position < 0) {
+ strbuf_addf(err, _("unrecognized position:%s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
align->position = position;
} else if (skip_prefix(s, "width=", &s)) {
- if (strtoul_ui(s, 10, &width))
- die(_("unrecognized width:%s"), s);
+ if (strtoul_ui(s, 10, &width)) {
+ strbuf_addf(err, _("unrecognized width:%s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
} else if (!strtoul_ui(s, 10, &width))
;
else if ((position = parse_align_position(s)) >= 0)
align->position = position;
- else
- die(_("unrecognized %%(align) argument: %s"), s);
+ else {
+ strbuf_addf(err, _("unrecognized %%(align) argument: %s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
}
- if (width == ~0U)
- die(_("positive width expected with the %%(align) atom"));
+ if (width == ~0U) {
+ string_list_clear(&params, 0);
+ return strbuf_addf_ret(err, -1, _("positive width expected with the %%(align) atom"));
+ }
align->width = width;
string_list_clear(&params, 0);
+ return 0;
}
-static void if_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int if_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (!arg) {
atom->u.if_then_else.cmp_status = COMPARE_NONE;
- return;
+ return 0;
} else if (skip_prefix(arg, "equals=", &atom->u.if_then_else.str)) {
atom->u.if_then_else.cmp_status = COMPARE_EQUAL;
} else if (skip_prefix(arg, "notequals=", &atom->u.if_then_else.str)) {
atom->u.if_then_else.cmp_status = COMPARE_UNEQUAL;
- } else {
- die(_("unrecognized %%(if) argument: %s"), arg);
- }
+ } else
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(if) argument: %s"), arg);
+ return 0;
}
-static void head_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int head_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *unused_err)
{
atom->u.head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL);
+ return 0;
}
static struct {
const char *name;
cmp_type cmp_type;
- void (*parser)(const struct ref_format *format, struct used_atom *atom, const char *arg);
+ int (*parser)(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err);
} valid_atom[] = {
{ "refname" , FIELD_STR, refname_atom_parser },
{ "objecttype" },
struct atom_value {
const char *s;
- void (*handler)(struct atom_value *atomv, struct ref_formatting_state *state);
+ int (*handler)(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err);
uintmax_t value; /* used for sorting when not FIELD_STR */
struct used_atom *atom;
};
* Used to parse format string and sort specifiers
*/
static int parse_ref_filter_atom(const struct ref_format *format,
- const char *atom, const char *ep)
+ const char *atom, const char *ep,
+ struct strbuf *err)
{
const char *sp;
const char *arg;
if (*sp == '*' && sp < ep)
sp++; /* deref */
if (ep <= sp)
- die(_("malformed field name: %.*s"), (int)(ep-atom), atom);
+ return strbuf_addf_ret(err, -1, _("malformed field name: %.*s"),
+ (int)(ep-atom), atom);
/* Do we have the atom already used elsewhere? */
for (i = 0; i < used_atom_cnt; i++) {
}
if (ARRAY_SIZE(valid_atom) <= i)
- die(_("unknown field name: %.*s"), (int)(ep-atom), atom);
+ return strbuf_addf_ret(err, -1, _("unknown field name: %.*s"),
+ (int)(ep-atom), atom);
/* Add it in, including the deref prefix */
at = used_atom_cnt;
}
}
memset(&used_atom[at].u, 0, sizeof(used_atom[at].u));
- if (valid_atom[i].parser)
- valid_atom[i].parser(format, &used_atom[at], arg);
+ if (valid_atom[i].parser && valid_atom[i].parser(format, &used_atom[at], arg, err))
+ return -1;
if (*atom == '*')
need_tagged = 1;
if (!strcmp(valid_atom[i].name, "symref"))
}
}
-static void append_atom(struct atom_value *v, struct ref_formatting_state *state)
+static int append_atom(struct atom_value *v, struct ref_formatting_state *state,
+ struct strbuf *unused_err)
{
/*
* Quote formatting is only done when the stack has a single
quote_formatting(&state->stack->output, v->s, state->quote_style);
else
strbuf_addstr(&state->stack->output, v->s);
+ return 0;
}
static void push_stack_element(struct ref_formatting_stack **stack)
strbuf_release(&s);
}
-static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *unused_err)
{
struct ref_formatting_stack *new_stack;
new_stack = state->stack;
new_stack->at_end = end_align_handler;
new_stack->at_end_data = &atomv->atom->u.align;
+ return 0;
}
static void if_then_else_handler(struct ref_formatting_stack **stack)
free(if_then_else);
}
-static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *unused_err)
{
struct ref_formatting_stack *new_stack;
struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1);
new_stack = state->stack;
new_stack->at_end = if_then_else_handler;
new_stack->at_end_data = if_then_else;
+ return 0;
}
static int is_empty(const char *s)
return 1;
}
-static void then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err)
{
struct ref_formatting_stack *cur = state->stack;
struct if_then_else *if_then_else = NULL;
if (cur->at_end == if_then_else_handler)
if_then_else = (struct if_then_else *)cur->at_end_data;
if (!if_then_else)
- die(_("format: %%(then) atom used without an %%(if) atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(then) atom used without an %%(if) atom"));
if (if_then_else->then_atom_seen)
- die(_("format: %%(then) atom used more than once"));
+ return strbuf_addf_ret(err, -1, _("format: %%(then) atom used more than once"));
if (if_then_else->else_atom_seen)
- die(_("format: %%(then) atom used after %%(else)"));
+ return strbuf_addf_ret(err, -1, _("format: %%(then) atom used after %%(else)"));
if_then_else->then_atom_seen = 1;
/*
* If the 'equals' or 'notequals' attribute is used then
} else if (cur->output.len && !is_empty(cur->output.buf))
if_then_else->condition_satisfied = 1;
strbuf_reset(&cur->output);
+ return 0;
}
-static void else_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int else_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err)
{
struct ref_formatting_stack *prev = state->stack;
struct if_then_else *if_then_else = NULL;
if (prev->at_end == if_then_else_handler)
if_then_else = (struct if_then_else *)prev->at_end_data;
if (!if_then_else)
- die(_("format: %%(else) atom used without an %%(if) atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(else) atom used without an %%(if) atom"));
if (!if_then_else->then_atom_seen)
- die(_("format: %%(else) atom used without a %%(then) atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(else) atom used without a %%(then) atom"));
if (if_then_else->else_atom_seen)
- die(_("format: %%(else) atom used more than once"));
+ return strbuf_addf_ret(err, -1, _("format: %%(else) atom used more than once"));
if_then_else->else_atom_seen = 1;
push_stack_element(&state->stack);
state->stack->at_end_data = prev->at_end_data;
state->stack->at_end = prev->at_end;
+ return 0;
}
-static void end_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int end_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err)
{
struct ref_formatting_stack *current = state->stack;
struct strbuf s = STRBUF_INIT;
if (!current->at_end)
- die(_("format: %%(end) atom used without corresponding atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(end) atom used without corresponding atom"));
current->at_end(&state->stack);
/* Stack may have been popped within at_end(), hence reset the current pointer */
}
strbuf_release(&s);
pop_stack_element(&state->stack);
+ return 0;
}
/*
format->need_color_reset_at_eol = 0;
for (cp = format->format; *cp && (sp = find_next(cp)); ) {
+ struct strbuf err = STRBUF_INIT;
const char *color, *ep = strchr(sp, ')');
int at;
if (!ep)
return error(_("malformed format string %s"), sp);
/* sp points at "%(" and ep points at the closing ")" */
- at = parse_ref_filter_atom(format, sp + 2, ep);
+ at = parse_ref_filter_atom(format, sp + 2, ep, &err);
+ if (at < 0)
+ die("%s", err.buf);
cp = ep + 1;
if (skip_prefix(used_atom[at].name, "color:", &color))
format->need_color_reset_at_eol = !!strcmp(color, "reset");
+ strbuf_release(&err);
}
if (format->need_color_reset_at_eol && !want_color(format->use_color))
format->need_color_reset_at_eol = 0;
v->s = xstrdup(find_unique_abbrev(oid, atom->u.objectname.length));
return 1;
} else
- die("BUG: unknown %%(objectname) option");
+ BUG("unknown %%(objectname) option");
}
return 0;
}
if (deref)
name++;
if (!strcmp(name, "tree")) {
- v->s = xstrdup(oid_to_hex(&commit->tree->object.oid));
+ v->s = xstrdup(oid_to_hex(get_commit_tree_oid(commit)));
}
else if (!strcmp(name, "numparent")) {
v->value = commit_list_count(commit->parents);
else
*s = "";
} else
- die("BUG: unhandled RR_* enum");
+ BUG("unhandled RR_* enum");
}
char *get_head_description(void)
return show_ref(&atom->u.refname, ref->refname);
}
-static void get_object(struct ref_array_item *ref, const struct object_id *oid,
- int deref, struct object **obj)
+static int get_object(struct ref_array_item *ref, const struct object_id *oid,
+ int deref, struct object **obj, struct strbuf *err)
{
int eaten;
+ int ret = 0;
unsigned long size;
void *buf = get_obj(oid, obj, &size, &eaten);
if (!buf)
- die(_("missing object %s for %s"),
- oid_to_hex(oid), ref->refname);
- if (!*obj)
- die(_("parse_object_buffer failed on %s for %s"),
- oid_to_hex(oid), ref->refname);
-
- grab_values(ref->value, deref, *obj, buf, size);
+ ret = strbuf_addf_ret(err, -1, _("missing object %s for %s"),
+ oid_to_hex(oid), ref->refname);
+ else if (!*obj)
+ ret = strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
+ oid_to_hex(oid), ref->refname);
+ else
+ grab_values(ref->value, deref, *obj, buf, size);
if (!eaten)
free(buf);
+ return ret;
}
/*
* Parse the object referred by ref, and grab needed value.
*/
-static void populate_value(struct ref_array_item *ref)
+static int populate_value(struct ref_array_item *ref, struct strbuf *err)
{
struct object *obj;
int i;
break;
}
if (used_atom_cnt <= i)
- return;
+ return 0;
- get_object(ref, &ref->objectname, 0, &obj);
+ if (get_object(ref, &ref->objectname, 0, &obj, err))
+ return -1;
/*
* If there is no atom that wants to know about tagged
* object, we are done.
*/
if (!need_tagged || (obj->type != OBJ_TAG))
- return;
+ return 0;
/*
* If it is a tag object, see if we use a value that derefs
* is not consistent with what deref_tag() does
* which peels the onion to the core.
*/
- get_object(ref, tagged, 1, &obj);
+ return get_object(ref, tagged, 1, &obj, err);
}
/*
* Given a ref, return the value for the atom. This lazily gets value
* out of the object by calling populate value.
*/
-static void get_ref_atom_value(struct ref_array_item *ref, int atom, struct atom_value **v)
+static int get_ref_atom_value(struct ref_array_item *ref, int atom,
+ struct atom_value **v, struct strbuf *err)
{
if (!ref->value) {
- populate_value(ref);
+ if (populate_value(ref, err))
+ return -1;
fill_missing_values(ref->value);
}
*v = &ref->value[atom];
+ return 0;
}
/*
int cmp;
cmp_type cmp_type = used_atom[s->atom].type;
int (*cmp_fn)(const char *, const char *);
+ struct strbuf err = STRBUF_INIT;
- get_ref_atom_value(a, s->atom, &va);
- get_ref_atom_value(b, s->atom, &vb);
+ if (get_ref_atom_value(a, s->atom, &va, &err))
+ die("%s", err.buf);
+ if (get_ref_atom_value(b, s->atom, &vb, &err))
+ die("%s", err.buf);
+ strbuf_release(&err);
cmp_fn = s->ignore_case ? strcasecmp : strcmp;
if (s->version)
cmp = versioncmp(va->s, vb->s);
}
}
-void format_ref_array_item(struct ref_array_item *info,
+int format_ref_array_item(struct ref_array_item *info,
const struct ref_format *format,
- struct strbuf *final_buf)
+ struct strbuf *final_buf,
+ struct strbuf *error_buf)
{
const char *cp, *sp, *ep;
struct ref_formatting_state state = REF_FORMATTING_STATE_INIT;
for (cp = format->format; *cp && (sp = find_next(cp)); cp = ep + 1) {
struct atom_value *atomv;
+ int pos;
ep = strchr(sp, ')');
if (cp < sp)
append_literal(cp, sp, &state);
- get_ref_atom_value(info,
- parse_ref_filter_atom(format, sp + 2, ep),
- &atomv);
- atomv->handler(atomv, &state);
+ pos = parse_ref_filter_atom(format, sp + 2, ep, error_buf);
+ if (pos < 0 || get_ref_atom_value(info, pos, &atomv, error_buf) ||
+ atomv->handler(atomv, &state, error_buf)) {
+ pop_stack_element(&state.stack);
+ return -1;
+ }
}
if (*cp) {
sp = cp + strlen(cp);
if (format->need_color_reset_at_eol) {
struct atom_value resetv;
resetv.s = GIT_COLOR_RESET;
- append_atom(&resetv, &state);
+ if (append_atom(&resetv, &state, error_buf)) {
+ pop_stack_element(&state.stack);
+ return -1;
+ }
+ }
+ if (state.stack->prev) {
+ pop_stack_element(&state.stack);
+ return strbuf_addf_ret(error_buf, -1, _("format: %%(end) atom missing"));
}
- if (state.stack->prev)
- die(_("format: %%(end) atom missing"));
strbuf_addbuf(final_buf, &state.stack->output);
pop_stack_element(&state.stack);
+ return 0;
}
void show_ref_array_item(struct ref_array_item *info,
const struct ref_format *format)
{
struct strbuf final_buf = STRBUF_INIT;
+ struct strbuf error_buf = STRBUF_INIT;
- format_ref_array_item(info, format, &final_buf);
+ if (format_ref_array_item(info, format, &final_buf, &error_buf))
+ die("%s", error_buf.buf);
fwrite(final_buf.buf, 1, final_buf.len, stdout);
+ strbuf_release(&error_buf);
strbuf_release(&final_buf);
putchar('\n');
}
*/
struct ref_format dummy = REF_FORMAT_INIT;
const char *end = atom + strlen(atom);
- return parse_ref_filter_atom(&dummy, atom, end);
+ struct strbuf err = STRBUF_INIT;
+ int res = parse_ref_filter_atom(&dummy, atom, end, &err);
+ if (res < 0)
+ die("%s", err.buf);
+ strbuf_release(&err);
+ return res;
}
/* If no sorting option is given, use refname to sort as default */
#include "tag.h"
#include "submodule.h"
#include "worktree.h"
+#include "argv-array.h"
+#include "repository.h"
/*
* List of all available backends
char *resolve_refdup(const char *refname, int resolve_flags,
struct object_id *oid, int *flags)
{
- return refs_resolve_refdup(get_main_ref_store(),
+ return refs_resolve_refdup(get_main_ref_store(the_repository),
refname, resolve_flags,
oid, flags);
}
int read_ref_full(const char *refname, int resolve_flags, struct object_id *oid, int *flags)
{
- return refs_read_ref_full(get_main_ref_store(), refname,
+ return refs_read_ref_full(get_main_ref_store(the_repository), refname,
resolve_flags, oid, flags);
}
struct object *o = lookup_unknown_object(name->hash);
if (o->type == OBJ_NONE) {
- int type = oid_object_info(name, NULL);
+ int type = oid_object_info(the_repository, name, NULL);
if (type < 0 || !object_as_type(o, type, 0))
return PEEL_INVALID;
}
int for_each_tag_ref(each_ref_fn fn, void *cb_data)
{
- return refs_for_each_tag_ref(get_main_ref_store(), fn, cb_data);
+ return refs_for_each_tag_ref(get_main_ref_store(the_repository), fn, cb_data);
}
int refs_for_each_branch_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
int for_each_branch_ref(each_ref_fn fn, void *cb_data)
{
- return refs_for_each_branch_ref(get_main_ref_store(), fn, cb_data);
+ return refs_for_each_branch_ref(get_main_ref_store(the_repository), fn, cb_data);
}
int refs_for_each_remote_ref(struct ref_store *refs, each_ref_fn fn, void *cb_data)
int for_each_remote_ref(each_ref_fn fn, void *cb_data)
{
- return refs_for_each_remote_ref(get_main_ref_store(), fn, cb_data);
+ return refs_for_each_remote_ref(get_main_ref_store(the_repository), fn, cb_data);
}
int head_ref_namespaced(each_ref_fn fn, void *cb_data)
return 0;
}
+/*
+ * Given a 'prefix', expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'
+ */
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix)
+{
+ const char **p;
+ int len = strlen(prefix);
+
+ for (p = ref_rev_parse_rules; *p; p++)
+ argv_array_pushf(prefixes, *p, len, prefix);
+}
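For illustration, a standalone sketch of the expansion expand_ref_prefix() performs: each rule is a printf-style pattern taking the prefix length and the prefix itself. The rule list here is illustrative, not a verbatim copy of ref_rev_parse_rules.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Illustrative rules only -- not git's actual table. */
	static const char *rules[] = {
		"%.*s",
		"refs/%.*s",
		"refs/tags/%.*s",
		"refs/heads/%.*s",
		"refs/remotes/%.*s",
		NULL
	};
	const char *prefix = "topic";
	int len = (int)strlen(prefix);
	const char **p;

	for (p = rules; *p; p++) {
		printf(*p, len, prefix);	/* e.g. "refs/heads/topic" */
		putchar('\n');
	}
	return 0;
}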
+
/*
* *string and *len will only be substituted, and *string returned (for
* later free()ing) if the string passed in is a magic short-hand form
static int is_per_worktree_ref(const char *refname)
{
return !strcmp(refname, "HEAD") ||
- starts_with(refname, "refs/bisect/");
+ starts_with(refname, "refs/bisect/") ||
+ starts_with(refname, "refs/rewritten/");
}
static int is_pseudoref_syntax(const char *refname)
{
const char *filename;
int fd;
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
struct strbuf buf = STRBUF_INIT;
int ret = -1;
strbuf_addf(&buf, "%s\n", oid_to_hex(oid));
filename = git_path("%s", pseudoref);
- fd = hold_lock_file_for_update_timeout(&lock, filename,
- LOCK_DIE_ON_ERROR,
+ fd = hold_lock_file_for_update_timeout(&lock, filename, 0,
get_files_ref_lock_timeout_ms());
if (fd < 0) {
strbuf_addf(err, "could not open '%s' for writing: %s",
static int delete_pseudoref(const char *pseudoref, const struct object_id *old_oid)
{
- static struct lock_file lock;
const char *filename;
filename = git_path("%s", pseudoref);
if (old_oid && !is_null_oid(old_oid)) {
+ struct lock_file lock = LOCK_INIT;
int fd;
struct object_id actual_old_oid;
fd = hold_lock_file_for_update_timeout(
- &lock, filename, LOCK_DIE_ON_ERROR,
+ &lock, filename, 0,
get_files_ref_lock_timeout_ms());
- if (fd < 0)
- die_errno(_("Could not open '%s' for writing"), filename);
+ if (fd < 0) {
+ error_errno(_("could not open '%s' for writing"),
+ filename);
+ return -1;
+ }
if (read_ref(pseudoref, &actual_old_oid))
die("could not read ref '%s'", pseudoref);
if (oidcmp(&actual_old_oid, old_oid)) {
struct strbuf err = STRBUF_INIT;
if (ref_type(refname) == REF_TYPE_PSEUDOREF) {
- assert(refs == get_main_ref_store());
+ assert(refs == get_main_ref_store(the_repository));
return delete_pseudoref(refname, old_oid);
}
int delete_ref(const char *msg, const char *refname,
const struct object_id *old_oid, unsigned int flags)
{
- return refs_delete_ref(get_main_ref_store(), msg, refname,
+ return refs_delete_ref(get_main_ref_store(the_repository), msg, refname,
old_oid, flags);
}
struct ref_transaction *ref_transaction_begin(struct strbuf *err)
{
- return ref_store_transaction_begin(get_main_ref_store(), err);
+ return ref_store_transaction_begin(get_main_ref_store(the_repository), err);
}
void ref_transaction_free(struct ref_transaction *transaction)
/* OK */
break;
case REF_TRANSACTION_PREPARED:
- die("BUG: free called on a prepared reference transaction");
+ BUG("free called on a prepared reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
struct ref_update *update;
if (transaction->state != REF_TRANSACTION_OPEN)
- die("BUG: update called for transaction that is not open");
+ BUG("update called for transaction that is not open");
FLEX_ALLOC_STR(update, refname, refname);
ALLOC_GROW(transaction->updates, transaction->nr + 1, transaction->alloc);
struct strbuf *err)
{
if (!new_oid || is_null_oid(new_oid))
- die("BUG: create called without valid new_oid");
+ BUG("create called without valid new_oid");
return ref_transaction_update(transaction, refname, new_oid,
&null_oid, flags, msg, err);
}
struct strbuf *err)
{
if (old_oid && is_null_oid(old_oid))
- die("BUG: delete called with old_oid set to zeros");
+ BUG("delete called with old_oid set to zeros");
return ref_transaction_update(transaction, refname,
&null_oid, old_oid,
flags, msg, err);
struct strbuf *err)
{
if (!old_oid)
- die("BUG: verify called with old_oid set to NULL");
+ BUG("verify called with old_oid set to NULL");
return ref_transaction_update(transaction, refname,
NULL, old_oid,
flags, NULL, err);
int ret = 0;
if (ref_type(refname) == REF_TYPE_PSEUDOREF) {
- assert(refs == get_main_ref_store());
+ assert(refs == get_main_ref_store(the_repository));
ret = write_pseudoref(refname, new_oid, old_oid, &err);
} else {
t = ref_store_transaction_begin(refs, &err);
const struct object_id *old_oid,
unsigned int flags, enum action_on_err onerr)
{
- return refs_update_ref(get_main_ref_store(), msg, refname, new_oid,
+ return refs_update_ref(get_main_ref_store(the_repository), msg, refname, new_oid,
old_oid, flags, onerr);
}
int head_ref(each_ref_fn fn, void *cb_data)
{
- return refs_head_ref(get_main_ref_store(), fn, cb_data);
+ return refs_head_ref(get_main_ref_store(the_repository), fn, cb_data);
}
struct ref_iterator *refs_ref_iterator_begin(
int for_each_ref(each_ref_fn fn, void *cb_data)
{
- return refs_for_each_ref(get_main_ref_store(), fn, cb_data);
+ return refs_for_each_ref(get_main_ref_store(the_repository), fn, cb_data);
}
int refs_for_each_ref_in(struct ref_store *refs, const char *prefix,
int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data)
{
- return refs_for_each_ref_in(get_main_ref_store(), prefix, fn, cb_data);
+ return refs_for_each_ref_in(get_main_ref_store(the_repository), prefix, fn, cb_data);
}
int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsigned int broken)
if (broken)
flag = DO_FOR_EACH_INCLUDE_BROKEN;
- return do_for_each_ref(get_main_ref_store(),
+ return do_for_each_ref(get_main_ref_store(the_repository),
prefix, fn, 0, flag, cb_data);
}
return do_for_each_ref(refs, prefix, fn, 0, flag, cb_data);
}
-int for_each_replace_ref(each_ref_fn fn, void *cb_data)
+int for_each_replace_ref(struct repository *r, each_ref_fn fn, void *cb_data)
{
- return do_for_each_ref(get_main_ref_store(),
+ return do_for_each_ref(get_main_ref_store(r),
git_replace_ref_base, fn,
strlen(git_replace_ref_base),
DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
struct strbuf buf = STRBUF_INIT;
int ret;
strbuf_addf(&buf, "%srefs/", get_git_namespace());
- ret = do_for_each_ref(get_main_ref_store(),
+ ret = do_for_each_ref(get_main_ref_store(the_repository),
buf.buf, fn, 0, 0, cb_data);
strbuf_release(&buf);
return ret;
int for_each_rawref(each_ref_fn fn, void *cb_data)
{
- return refs_for_each_rawref(get_main_ref_store(), fn, cb_data);
+ return refs_for_each_rawref(get_main_ref_store(the_repository), fn, cb_data);
}
int refs_read_raw_ref(struct ref_store *ref_store,
/* backend functions */
int refs_init_db(struct strbuf *err)
{
- struct ref_store *refs = get_main_ref_store();
+ struct ref_store *refs = get_main_ref_store(the_repository);
return refs->be->init_db(refs, err);
}
const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
struct object_id *oid, int *flags)
{
- return refs_resolve_ref_unsafe(get_main_ref_store(), refname,
+ return refs_resolve_ref_unsafe(get_main_ref_store(the_repository), refname,
resolve_flags, oid, flags);
}
return entry;
}
-/* A pointer to the ref_store for the main repository: */
-static struct ref_store *main_ref_store;
-
/* A hashmap of ref_stores, stored by submodule name: */
static struct hashmap submodule_ref_stores;
struct ref_store *refs;
if (!be)
- die("BUG: reference backend %s is unknown", be_name);
+ BUG("reference backend %s is unknown", be_name);
refs = be->init(gitdir, flags);
return refs;
}
-struct ref_store *get_main_ref_store(void)
+struct ref_store *get_main_ref_store(struct repository *r)
{
- if (main_ref_store)
- return main_ref_store;
+ if (r->refs)
+ return r->refs;
+
+ if (!r->gitdir)
+ BUG("attempting to get main_ref_store outside of repository");
- main_ref_store = ref_store_init(get_git_dir(), REF_STORE_ALL_CAPS);
- return main_ref_store;
+ r->refs = ref_store_init(r->gitdir, REF_STORE_ALL_CAPS);
+ return r->refs;
}
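A hypothetical miniature of the pattern above: the lazily-created store moves from a file-scope static into the repository struct, so each repository object owns and caches its own instance.

#include <stdio.h>
#include <stdlib.h>

struct store {
	const char *gitdir;
};

struct repo {
	const char *gitdir;
	struct store *refs;	/* NULL until first use */
};

static struct store *get_store(struct repo *r)
{
	if (r->refs)
		return r->refs;
	if (!r->gitdir) {
		fprintf(stderr, "BUG: no gitdir for this repository\n");
		abort();
	}
	r->refs = calloc(1, sizeof(*r->refs));
	r->refs->gitdir = r->gitdir;
	return r->refs;
}

int main(void)
{
	struct repo main_repo = { ".git", NULL };
	struct repo submodule = { "sub/.git", NULL };

	printf("%s\n", get_store(&main_repo)->gitdir);
	printf("%s\n", get_store(&submodule)->gitdir);	/* independent store */
	free(main_repo.refs);
	free(submodule.refs);
	return 0;
}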
/*
hashmap_init(map, ref_store_hash_cmp, NULL, 0);
if (hashmap_put(map, alloc_ref_store_hash_entry(name, refs)))
- die("BUG: %s ref_store '%s' initialized twice", type, name);
+ BUG("%s ref_store '%s' initialized twice", type, name);
}
struct ref_store *get_submodule_ref_store(const char *submodule)
const char *id;
if (wt->is_current)
- return get_main_ref_store();
+ return get_main_ref_store(the_repository);
id = wt->id ? wt->id : "/";
refs = lookup_ref_store_map(&worktree_ref_stores, id);
int peel_ref(const char *refname, struct object_id *oid)
{
- return refs_peel_ref(get_main_ref_store(), refname, oid);
+ return refs_peel_ref(get_main_ref_store(the_repository), refname, oid);
}
int refs_create_symref(struct ref_store *refs,
int create_symref(const char *ref_target, const char *refs_heads_master,
const char *logmsg)
{
- return refs_create_symref(get_main_ref_store(), ref_target,
+ return refs_create_symref(get_main_ref_store(the_repository), ref_target,
refs_heads_master, logmsg);
}
refnames->items[i].string);
return 1;
} else if (cmp > 0) {
- die("BUG: ref_update_reject_duplicates() received unsorted list");
+ BUG("ref_update_reject_duplicates() received unsorted list");
}
}
return 0;
/* Good. */
break;
case REF_TRANSACTION_PREPARED:
- die("BUG: prepare called twice on reference transaction");
+ BUG("prepare called twice on reference transaction");
break;
case REF_TRANSACTION_CLOSED:
- die("BUG: prepare called on a closed reference transaction");
+ BUG("prepare called on a closed reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
ret = refs->be->transaction_abort(refs, transaction, err);
break;
case REF_TRANSACTION_CLOSED:
- die("BUG: abort called on a closed reference transaction");
+ BUG("abort called on a closed reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
/* Fall through to finish. */
break;
case REF_TRANSACTION_CLOSED:
- die("BUG: commit called on a closed reference transaction");
+ BUG("commit called on a closed reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
}
if (ok != ITER_DONE)
- die("BUG: error while iterating over references");
+ BUG("error while iterating over references");
extra_refname = find_descendant_ref(dirname.buf, extras, skip);
if (extra_refname)
int for_each_reflog(each_ref_fn fn, void *cb_data)
{
- return refs_for_each_reflog(get_main_ref_store(), fn, cb_data);
+ return refs_for_each_reflog(get_main_ref_store(the_repository), fn, cb_data);
}
int refs_for_each_reflog_ent_reverse(struct ref_store *refs,
int for_each_reflog_ent_reverse(const char *refname, each_reflog_ent_fn fn,
void *cb_data)
{
- return refs_for_each_reflog_ent_reverse(get_main_ref_store(),
+ return refs_for_each_reflog_ent_reverse(get_main_ref_store(the_repository),
refname, fn, cb_data);
}
int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn,
void *cb_data)
{
- return refs_for_each_reflog_ent(get_main_ref_store(), refname,
+ return refs_for_each_reflog_ent(get_main_ref_store(the_repository), refname,
fn, cb_data);
}
int reflog_exists(const char *refname)
{
- return refs_reflog_exists(get_main_ref_store(), refname);
+ return refs_reflog_exists(get_main_ref_store(the_repository), refname);
}
int refs_create_reflog(struct ref_store *refs, const char *refname,
int safe_create_reflog(const char *refname, int force_create,
struct strbuf *err)
{
- return refs_create_reflog(get_main_ref_store(), refname,
+ return refs_create_reflog(get_main_ref_store(the_repository), refname,
force_create, err);
}
int delete_reflog(const char *refname)
{
- return refs_delete_reflog(get_main_ref_store(), refname);
+ return refs_delete_reflog(get_main_ref_store(the_repository), refname);
}
int refs_reflog_expire(struct ref_store *refs,
reflog_expiry_cleanup_fn cleanup_fn,
void *policy_cb_data)
{
- return refs_reflog_expire(get_main_ref_store(),
+ return refs_reflog_expire(get_main_ref_store(the_repository),
refname, oid, flags,
prepare_fn, should_prune_fn,
cleanup_fn, policy_cb_data);
int delete_refs(const char *msg, struct string_list *refnames,
unsigned int flags)
{
- return refs_delete_refs(get_main_ref_store(), msg, refnames, flags);
+ return refs_delete_refs(get_main_ref_store(the_repository), msg, refnames, flags);
}
int refs_rename_ref(struct ref_store *refs, const char *oldref,
int rename_ref(const char *oldref, const char *newref, const char *logmsg)
{
- return refs_rename_ref(get_main_ref_store(), oldref, newref, logmsg);
+ return refs_rename_ref(get_main_ref_store(the_repository), oldref, newref, logmsg);
}
int refs_copy_existing_ref(struct ref_store *refs, const char *oldref,
int copy_existing_ref(const char *oldref, const char *newref, const char *logmsg)
{
- return refs_copy_existing_ref(get_main_ref_store(), oldref, newref, logmsg);
+ return refs_copy_existing_ref(get_main_ref_store(the_repository), oldref, newref, logmsg);
}
struct object_id old_oid;
};
-/*
- * Future: need to be in "struct repository"
- * when doing a full libification.
- */
struct files_ref_store {
struct ref_store base;
unsigned int store_flags;
if (refs->store_flags & REF_STORE_MAIN)
return;
- die("BUG: operation %s only allowed for main ref store", caller);
+ BUG("operation %s only allowed for main ref store", caller);
}
/*
struct files_ref_store *refs;
if (ref_store->be != &refs_be_files)
- die("BUG: ref_store is type \"%s\" not \"files\" in %s",
+ BUG("ref_store is type \"%s\" not \"files\" in %s",
ref_store->be->name, caller);
refs = (struct files_ref_store *)ref_store;
if ((refs->store_flags & required_flags) != required_flags)
- die("BUG: operation %s requires abilities 0x%x, but only have 0x%x",
+ BUG("operation %s requires abilities 0x%x, but only have 0x%x",
caller, required_flags, refs->store_flags);
return refs;
strbuf_addf(sb, "%s/logs/%s", refs->gitcommondir, refname);
break;
default:
- die("BUG: unknown ref type %d of ref %s",
+ BUG("unknown ref type %d of ref %s",
ref_type(refname), refname);
}
}
strbuf_addf(sb, "%s/%s", refs->gitcommondir, refname);
break;
default:
- die("BUG: unknown ref type %d of ref %s",
+ BUG("unknown ref type %d of ref %s",
ref_type(refname), refname);
}
}
}
if (!ret && sb.len)
- die("BUG: reverse reflog parser had leftover data");
+ BUG("reverse reflog parser had leftover data");
fclose(logfp);
strbuf_release(&sb);
static int files_reflog_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
- die("BUG: ref_iterator_peel() called for reflog_iterator");
+ BUG("ref_iterator_peel() called for reflog_iterator");
}
static int files_reflog_iterator_abort(struct ref_iterator *ref_iterator)
assert(err);
if (transaction->state != REF_TRANSACTION_OPEN)
- die("BUG: commit called for transaction that is not open");
+ BUG("commit called for transaction that is not open");
/* Fail if a refname appears more than once in the transaction: */
for (i = 0; i < transaction->nr; i++)
*/
if (refs_for_each_rawref(&refs->base, ref_present,
&affected_refnames))
- die("BUG: initial ref transaction called with existing refs");
+ BUG("initial ref transaction called with existing refs");
packed_transaction = ref_store_transaction_begin(refs->packed_ref_store, err);
if (!packed_transaction) {
if ((update->flags & REF_HAVE_OLD) &&
!is_null_oid(&update->old_oid))
- die("BUG: initial ref transaction with old_sha1 set");
+ BUG("initial ref transaction with old_sha1 set");
if (refs_verify_refname_available(&refs->base, update->refname,
&affected_refnames, NULL,
err)) {
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_WRITE, "reflog_expire");
- static struct lock_file reflog_lock;
+ struct lock_file reflog_lock = LOCK_INIT;
struct expire_reflog_cb cb;
struct ref_lock *lock;
struct strbuf log_file_sb = STRBUF_INIT;
continue; /* not a tag */
if (string_list_has_string(&dst_tag, ref->name))
continue; /* they already have it */
- if (oid_object_info(&ref->new_oid, NULL) != OBJ_TAG)
+ if (oid_object_info(the_repository, &ref->new_oid, NULL) != OBJ_TAG)
continue; /* be conservative */
item = string_list_append(&src_tag, ref->name);
item->util = ref;
}
}
- die("BUG: unhandled push situation");
+ BUG("unhandled push situation");
}
const char *branch_get_push(struct branch *branch, struct strbuf *err)
#include "diff.h"
#include "refs.h"
#include "revision.h"
+#include "repository.h"
#include "graph.h"
#include "grep.h"
#include "reflog-walk.h"
static int rev_compare_tree(struct rev_info *revs,
struct commit *parent, struct commit *commit)
{
- struct tree *t1 = parent->tree;
- struct tree *t2 = commit->tree;
+ struct tree *t1 = get_commit_tree(parent);
+ struct tree *t2 = get_commit_tree(commit);
if (!t1)
return REV_TREE_NEW;
static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit)
{
int retval;
- struct tree *t1 = commit->tree;
+ struct tree *t1 = get_commit_tree(commit);
if (!t1)
return 0;
if (!revs->prune)
return;
- if (!commit->tree)
+ if (!get_commit_tree(commit))
return;
if (!commit->parents) {
cb.all_revs = revs;
cb.all_flags = flags;
- cb.refs = get_main_ref_store();
+ cb.refs = get_main_ref_store(the_repository);
for_each_reflog(handle_one_reflog, &cb);
if (!revs->single_worktree)
revs->ignore_missing = 1;
} else if (!strcmp(arg, "--exclude-promisor-objects")) {
if (fetch_if_missing)
- die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+ BUG("exclude_promisor_objects can only be used when fetch_if_missing is 0");
revs->exclude_promisor_objects = 1;
} else {
int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
* supported right now, so stick to single worktree.
*/
if (!revs->single_worktree)
- die("BUG: --single-worktree cannot be used together with submodule");
+ BUG("--single-worktree cannot be used together with submodule");
refs = get_submodule_ref_store(submodule);
} else
- refs = get_main_ref_store();
+ refs = get_main_ref_store(the_repository);
/*
* NOTE!
static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
{
- int kind = oid_object_info(oid, NULL);
+ int kind = oid_object_info(the_repository, oid, NULL);
return kind == OBJ_COMMIT;
}
struct object *obj;
int kind;
- kind = oid_object_info(oid, NULL);
+ kind = oid_object_info(the_repository, oid, NULL);
if (kind == OBJ_COMMIT)
return 1;
if (kind != OBJ_TAG)
static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
{
- int kind = oid_object_info(oid, NULL);
+ int kind = oid_object_info(the_repository, oid, NULL);
return kind == OBJ_TREE;
}
struct object *obj;
int kind;
- kind = oid_object_info(oid, NULL);
+ kind = oid_object_info(the_repository, oid, NULL);
if (kind == OBJ_TREE || kind == OBJ_COMMIT)
return 1;
if (kind != OBJ_TAG)
static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
{
- int kind = oid_object_info(oid, NULL);
+ int kind = oid_object_info(the_repository, oid, NULL);
return kind == OBJ_BLOB;
}
if (ds->fn && !ds->fn(oid, ds->cb_data))
return 0;
- type = oid_object_info(oid, NULL);
+ type = oid_object_info(the_repository, oid, NULL);
if (type == OBJ_COMMIT) {
struct commit *commit = lookup_commit(oid);
if (commit) {
return -1;
if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
- die("BUG: multiple get_short_oid disambiguator flags");
+ BUG("multiple get_short_oid disambiguator flags");
if (flags & GET_OID_COMMIT)
ds.fn = disambiguate_commit_only;
if (o->type == OBJ_TAG)
o = ((struct tag*) o)->tagged;
else if (o->type == OBJ_COMMIT)
- o = &(((struct commit *) o)->tree->object);
+ o = &(get_commit_tree(((struct commit *)o))->object);
else {
if (name)
error("%.*s: expected %s type, but the object "
int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc)
{
if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE)
- die("BUG: incompatible flags for get_sha1_with_context");
+ BUG("incompatible flags for get_sha1_with_context");
return get_oid_with_context_1(str, flags, NULL, oid, oc);
}
void set_alternate_shallow_file(const char *path, int override)
{
if (is_shallow != -1)
- die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
+ BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
if (alternate_shallow_file && !override)
return;
free(alternate_shallow_file);
static void check_shallow_file_for_update(void)
{
if (is_shallow == -1)
- die("BUG: shallow must be initialized by now");
+ BUG("shallow must be initialized by now");
if (!stat_validity_check(&shallow_stat, git_path_shallow()))
die("shallow file has changed since we read it");
*/
void prune_shallow(int show_only)
{
- static struct lock_file shallow_lock;
+ struct lock_file shallow_lock = LOCK_INIT;
struct strbuf sb = STRBUF_INIT;
int fd;
void *p;
if (!info->pool_count || size > info->end - info->free) {
if (size > POOL_SIZE)
- die("BUG: pool size too small for %d in paint_alloc()",
+ BUG("pool size too small for %d in paint_alloc()",
size);
info->pool_count++;
REALLOC_ARRAY(info->pools, info->pool_count);
#include "cache.h"
#include "refs.h"
+#include "string-list.h"
#include "utf8.h"
int starts_with(const char *str, const char *prefix)
return 0;
}
+int istarts_with(const char *str, const char *prefix)
+{
+ for (; ; str++, prefix++)
+ if (!*prefix)
+ return 1;
+ else if (tolower(*str) != tolower(*prefix))
+ return 0;
+}
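The loop above is safe even when str is shorter than prefix: the NUL in str fails the tolower() comparison before str is advanced past its terminator. A tiny standalone check follows; the helper body is copied from the hunk, and main() is only an illustration.

#include <ctype.h>
#include <stdio.h>

static int istarts_with(const char *str, const char *prefix)
{
	for (; ; str++, prefix++)
		if (!*prefix)
			return 1;
		else if (tolower(*str) != tolower(*prefix))
			return 0;
}

int main(void)
{
	printf("%d\n", istarts_with("Content-Length: 42", "content-length")); /* 1 */
	printf("%d\n", istarts_with("ref", "refs/heads/"));                   /* 0 */
	return 0;
}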
+
int skip_to_optional_arg_default(const char *str, const char *prefix,
const char **arg, const char *def)
{
return ret;
}
+void strbuf_add_separated_string_list(struct strbuf *str,
+ const char *sep,
+ struct string_list *slist)
+{
+ struct string_list_item *item;
+ int sep_needed = 0;
+
+ for_each_string_list_item(item, slist) {
+ if (sep_needed)
+ strbuf_addstr(str, sep);
+ strbuf_addstr(str, item->string);
+ sep_needed = 1;
+ }
+}
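A standalone sketch of the separator-joining logic added above, using a plain array in place of git's strbuf and string_list types.

#include <stdio.h>

static void print_joined(const char *sep, const char **items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i)
			fputs(sep, stdout);	/* separator only between items */
		fputs(items[i], stdout);
	}
	putchar('\n');
}

int main(void)
{
	const char *branches[] = { "master", "next", "pu" };

	print_joined(", ", branches, 3);	/* master, next, pu */
	return 0;
}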
+
void strbuf_list_free(struct strbuf **sbs)
{
struct strbuf **s = sbs;
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, cp);
va_end(cp);
if (len < 0)
- die("BUG: your vsnprintf is broken (returned %d)", len);
+ BUG("your vsnprintf is broken (returned %d)", len);
if (len > strbuf_avail(sb)) {
strbuf_grow(sb, len);
len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
if (len > strbuf_avail(sb))
- die("BUG: your vsnprintf is broken (insatiable)");
+ BUG("your vsnprintf is broken (insatiable)");
}
strbuf_setlen(sb, sb->len + len);
}
result = xmallocz(len);
for (i = 0; i < len; i++)
result[i] = tolower(string[i]);
- result[i] = '\0';
+ return result;
+}
+
+char *xstrdup_toupper(const char *string)
+{
+ char *result;
+ size_t len, i;
+
+ len = strlen(string);
+ result = xmallocz(len);
+ for (i = 0; i < len; i++)
+ result[i] = toupper(string[i]);
return result;
}
if (is_gitmodules_unmerged(&the_index))
die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
- submodule = submodule_from_path(&null_oid, oldpath);
+ submodule = submodule_from_path(the_repository, &null_oid, oldpath);
if (!submodule || !submodule->name) {
warning(_("Could not find section in .gitmodules where path=%s"), oldpath);
return -1;
if (is_gitmodules_unmerged(&the_index))
die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
- submodule = submodule_from_path(&null_oid, path);
+ submodule = submodule_from_path(the_repository, &null_oid, path);
if (!submodule || !submodule->name) {
warning(_("Could not find section in .gitmodules where path=%s"), path);
return -1;
void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
const char *path)
{
- const struct submodule *submodule = submodule_from_path(&null_oid, path);
+ const struct submodule *submodule = submodule_from_path(the_repository,
+ &null_oid, path);
if (submodule) {
const char *ignore;
char *key;
const struct string_list *sl;
const struct submodule *module;
- module = submodule_from_cache(repo, &null_oid, path);
+ module = submodule_from_path(repo, &null_oid, path);
/* early return if there isn't a path->module mapping */
if (!module)
if (!should_update_submodules())
return NULL;
- return submodule_from_path(&null_oid, ce->name);
+ return submodule_from_path(the_repository, &null_oid, ce->name);
}
static struct oid_array *submodule_commits(struct string_list *submodules,
if (!S_ISGITLINK(p->two->mode))
continue;
- submodule = submodule_from_path(commit_oid, p->two->path);
+ submodule = submodule_from_path(the_repository,
+ commit_oid, p->two->path);
if (submodule)
name = submodule->name;
else {
name = default_name_or_path(p->two->path);
/* make sure name does not collide with existing one */
- submodule = submodule_from_name(commit_oid, name);
+ submodule = submodule_from_name(the_repository, commit_oid, name);
if (submodule) {
warning("Submodule in commit %s at path: "
"'%s' collides with a submodule named "
{
struct has_commit_data *cb = data;
- enum object_type type = oid_object_info(oid, NULL);
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
switch (type) {
case OBJ_COMMIT:
const struct submodule *submodule;
const char *path = NULL;
- submodule = submodule_from_name(&null_oid, name->string);
+ submodule = submodule_from_name(the_repository, &null_oid, name->string);
if (submodule)
path = submodule->path;
else
const struct string_list_item *name;
/* No need to check if there are no submodules configured */
- if (!submodule_from_path(NULL, NULL))
+ if (!submodule_from_path(the_repository, NULL, NULL))
return;
argv_array_push(&argv, "--"); /* argv[0] program name */
const struct submodule *submodule;
const char *path = NULL;
- submodule = submodule_from_name(&null_oid, name->string);
+ submodule = submodule_from_name(the_repository, &null_oid, name->string);
if (submodule)
path = submodule->path;
else
int ret;
/* No need to check if there are no submodules configured */
- if (!submodule_from_path(NULL, NULL))
+ if (!submodule_from_path(the_repository, NULL, NULL))
return 0;
argv_array_push(&args, "--"); /* args[0] program name */
if (!S_ISGITLINK(ce->ce_mode))
continue;
- submodule = submodule_from_cache(spf->r, &null_oid, ce->name);
+ submodule = submodule_from_path(spf->r, &null_oid, ce->name);
if (!submodule) {
const char *name = default_name_or_path(ce->name);
if (name) {
buf.buf[0] == '2') {
/* T = line type, XY = status, SSSS = submodule state */
if (buf.len < strlen("T XY SSSS"))
- die("BUG: invalid status --porcelain=2 line %s",
+ BUG("invalid status --porcelain=2 line %s",
buf.buf);
if (buf.buf[5] == 'S' && buf.buf[8] == 'U')
if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
return 0;
- sub = submodule_from_path(&null_oid, path);
+ sub = submodule_from_path(the_repository, &null_oid, path);
if (!sub)
- die("BUG: could not get submodule information for '%s'", path);
+ BUG("could not get submodule information for '%s'", path);
if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
/* Check if the submodule has a dirty index. */
} else {
char *gitdir = xstrfmt("%s/modules/%s",
get_git_common_dir(), sub->name);
- connect_work_tree_and_git_dir(path, gitdir);
+ connect_work_tree_and_git_dir(path, gitdir, 0);
free(gitdir);
/* make sure the index is clean as well */
if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
char *gitdir = xstrfmt("%s/modules/%s",
get_git_common_dir(), sub->name);
- connect_work_tree_and_git_dir(path, gitdir);
+ connect_work_tree_and_git_dir(path, gitdir, 1);
free(gitdir);
}
}
real_old_git_dir = real_pathdup(old_git_dir, 1);
- sub = submodule_from_path(&null_oid, path);
+ sub = submodule_from_path(the_repository, &null_oid, path);
if (!sub)
die(_("could not lookup name for submodule '%s'"), path);
* superproject did not rewrite the git file links yet,
* fix it now.
*/
- sub = submodule_from_path(&null_oid, path);
+ sub = submodule_from_path(the_repository, &null_oid, path);
if (!sub)
die(_("could not lookup name for submodule '%s'"), path);
connect_work_tree_and_git_dir(path,
- git_path("modules/%s", sub->name));
+ git_path("modules/%s", sub->name), 0);
} else {
/* Is it already absorbed into the superprojects git dir? */
char *real_sub_git_dir = real_pathdup(sub_git_dir, 1);
struct strbuf sb = STRBUF_INIT;
if (flags & ~ABSORB_GITDIR_RECURSE_SUBMODULES)
- die("BUG: we don't know how to pass the flags down?");
+ BUG("we don't know how to pass the flags down?");
strbuf_addstr(&sb, get_super_prefix_or_empty());
strbuf_addstr(&sb, path);
if (super_sub_len > cwd_len ||
strcmp(&cwd[cwd_len - super_sub_len], super_sub))
- die (_("BUG: returned path string doesn't match cwd?"));
+ BUG("returned path string doesn't match cwd?");
super_wt = xstrdup(cwd);
super_wt[cwd_len - super_sub_len] = '\0';
strbuf_addstr(buf, git_dir);
}
if (!is_git_directory(buf->buf)) {
- sub = submodule_from_path(&null_oid, submodule);
+ sub = submodule_from_path(the_repository, &null_oid, submodule);
if (!sub) {
ret = -1;
goto cleanup;
#include "sha1-array.h"
#include "sigchain.h"
#include "transport-internal.h"
+#include "protocol.h"
#include "object-store.h"
+#include "color.h"
+
+static int transport_use_color = -1;
+static char transport_colors[][COLOR_MAXLEN] = {
+ GIT_COLOR_RESET,
+ GIT_COLOR_RED /* REJECTED */
+};
+
+enum color_transport {
+ TRANSPORT_COLOR_RESET = 0,
+ TRANSPORT_COLOR_REJECTED = 1
+};
+
+static int transport_color_config(void)
+{
+ const char *keys[] = {
+ "color.transport.reset",
+ "color.transport.rejected"
+ }, *key = "color.transport";
+ char *value;
+ int i;
+ static int initialized;
+
+ if (initialized)
+ return 0;
+ initialized = 1;
+
+ if (!git_config_get_string(key, &value))
+ transport_use_color = git_config_colorbool(key, value);
+
+ if (!want_color_stderr(transport_use_color))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(keys); i++)
+ if (!git_config_get_string(keys[i], &value)) {
+ if (!value)
+ return config_error_nonbool(keys[i]);
+ if (color_parse(value, transport_colors[i]) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static const char *transport_get_color(enum color_transport ix)
+{
+ if (want_color_stderr(transport_use_color))
+ return transport_colors[ix];
+ return "";
+}
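A rough standalone approximation of the pattern above: colors are resolved once and callers always receive a printable string, either the configured color or "" when color is disabled, so the printing code needs no conditionals. Here isatty() and hard-coded ANSI escapes stand in for want_color_stderr() and the parsed config values.

#include <stdio.h>
#include <unistd.h>

enum sketch_color { SKETCH_RESET = 0, SKETCH_REJECTED = 1 };

static const char *colors[] = { "\033[m", "\033[31m" };

static const char *get_color(enum sketch_color ix)
{
	return isatty(2) ? colors[ix] : "";
}

int main(void)
{
	fprintf(stderr, " %s!%s\trefs/heads/topic (non-fast-forward)\n",
		get_color(SKETCH_REJECTED), get_color(SKETCH_RESET));
	return 0;
}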
static void set_upstreams(struct transport *transport, struct ref *refs,
int pretend)
struct bundle_header header;
};
-static struct ref *get_refs_from_bundle(struct transport *transport, int for_push)
+static struct ref *get_refs_from_bundle(struct transport *transport,
+ int for_push,
+ const struct argv_array *ref_prefixes)
{
struct bundle_transport_data *data = transport->data;
struct ref *result = NULL;
struct child_process *conn;
int fd[2];
unsigned got_remote_heads : 1;
+ enum protocol_version version;
struct oid_array extra_have;
struct oid_array shallow;
};
return 0;
}
-static struct ref *get_refs_via_connect(struct transport *transport, int for_push)
+static struct ref *get_refs_via_connect(struct transport *transport, int for_push,
+ const struct argv_array *ref_prefixes)
{
struct git_transport_data *data = transport->data;
- struct ref *refs;
+ struct ref *refs = NULL;
+ struct packet_reader reader;
connect_setup(transport, for_push);
- get_remote_heads(data->fd[0], NULL, 0, &refs,
- for_push ? REF_NORMAL : 0,
- &data->extra_have,
- &data->shallow);
+
+ packet_reader_init(&reader, data->fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+
+ data->version = discover_version(&reader);
+ switch (data->version) {
+ case protocol_v2:
+ get_remote_refs(data->fd[1], &reader, &refs, for_push,
+ ref_prefixes, transport->server_options);
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &refs,
+ for_push ? REF_NORMAL : 0,
+ &data->extra_have,
+ &data->shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
data->got_remote_heads = 1;
return refs;
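A standalone sketch of the dispatch pattern above, with hypothetical names: the protocol version is discovered once from the initial response, each operation then switches on it, and an unknown version is treated as a programming error.

#include <stdio.h>
#include <stdlib.h>

enum proto { PROTO_V0, PROTO_V1, PROTO_V2, PROTO_UNKNOWN };

static void list_refs(enum proto version)
{
	switch (version) {
	case PROTO_V2:
		puts("send an ls-refs command, filtered by ref prefixes");
		break;
	case PROTO_V1:
	case PROTO_V0:
		puts("read the full ref advertisement");
		break;
	case PROTO_UNKNOWN:
		fprintf(stderr, "BUG: unknown protocol version\n");
		abort();
	}
}

int main(void)
{
	list_refs(PROTO_V2);
	return 0;
}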
{
int ret = 0;
struct git_transport_data *data = transport->data;
- struct ref *refs;
+ struct ref *refs = NULL;
char *dest = xstrdup(transport->url);
struct fetch_pack_args args;
struct ref *refs_tmp = NULL;
args.from_promisor = data->options.from_promisor;
args.no_dependents = data->options.no_dependents;
args.filter_options = data->options.filter_options;
-
- if (!data->got_remote_heads) {
- connect_setup(transport, 0);
- get_remote_heads(data->fd[0], NULL, 0, &refs_tmp, 0,
- NULL, &data->shallow);
- data->got_remote_heads = 1;
+ args.stateless_rpc = transport->stateless_rpc;
+ args.server_options = transport->server_options;
+
+ if (!data->got_remote_heads)
+ refs_tmp = get_refs_via_connect(transport, 0, NULL);
+
+ switch (data->version) {
+ case protocol_v2:
+ refs = fetch_pack(&args, data->fd, data->conn,
+ refs_tmp ? refs_tmp : transport->remote_refs,
+ dest, to_fetch, nr_heads, &data->shallow,
+ &transport->pack_lockfile, data->version);
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ refs = fetch_pack(&args, data->fd, data->conn,
+ refs_tmp ? refs_tmp : transport->remote_refs,
+ dest, to_fetch, nr_heads, &data->shallow,
+ &transport->pack_lockfile, data->version);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
}
- refs = fetch_pack(&args, data->fd, data->conn,
- refs_tmp ? refs_tmp : transport->remote_refs,
- dest, to_fetch, nr_heads, &data->shallow,
- &transport->pack_lockfile);
close(data->fd[0]);
close(data->fd[1]);
if (finish_connect(data->conn))
else
fprintf(stdout, "%s\n", summary);
} else {
- fprintf(stderr, " %c %-*s ", flag, summary_width, summary);
+ const char *red = "", *reset = "";
+ if (push_had_errors(to)) {
+ red = transport_get_color(TRANSPORT_COLOR_REJECTED);
+ reset = transport_get_color(TRANSPORT_COLOR_RESET);
+ }
+ fprintf(stderr, " %s%c %-*s%s ", red, flag, summary_width,
+ summary, reset);
if (from)
fprintf(stderr, "%s -> %s", prettify_refname(from->name), prettify_refname(to->name));
else
char *head;
int summary_width = transport_summary_width(refs);
+ if (transport_color_config() < 0)
+ warning(_("could not parse transport.color.* config"));
+
head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL);
if (verbose) {
{
struct git_transport_data *data = transport->data;
struct send_pack_args args;
- int ret;
+ int ret = 0;
- if (!data->got_remote_heads) {
- struct ref *tmp_refs;
- connect_setup(transport, 1);
+ if (transport_color_config() < 0)
+ return -1;
- get_remote_heads(data->fd[0], NULL, 0, &tmp_refs, REF_NORMAL,
- NULL, &data->shallow);
- data->got_remote_heads = 1;
- }
+ if (!data->got_remote_heads)
+ get_refs_via_connect(transport, 1, NULL);
memset(&args, 0, sizeof(args));
args.send_mirror = !!(flags & TRANSPORT_PUSH_MIRROR);
else
args.push_cert = SEND_PACK_PUSH_CERT_NEVER;
- ret = send_pack(&args, data->fd, data->conn, remote_refs,
- &data->extra_have);
+ switch (data->version) {
+ case protocol_v2:
+ die("support for protocol v2 not implemented yet");
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ ret = send_pack(&args, data->fd, data->conn, remote_refs,
+ &data->extra_have);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
close(data->fd[1]);
close(data->fd[0]);
struct git_transport_data *data;
if (!transport->smart_options)
- die("BUG: taking over transport requires non-NULL "
+ BUG("taking over transport requires non-NULL "
"smart_options field.");
data = xcalloc(1, sizeof(*data));
return from_user;
}
- die("BUG: invalid protocol_allow_config type");
+ BUG("invalid protocol_allow_config type");
}
void transport_check_allowed(const char *type)
*reject_reasons = 0;
transport_verify_remote_names(refspec_nr, refspec);
+ if (transport_color_config() < 0)
+ return -1;
+
if (transport->vtable->push_refs) {
struct ref *remote_refs;
struct ref *local_refs = get_local_heads();
int porcelain = flags & TRANSPORT_PUSH_PORCELAIN;
int pretend = flags & TRANSPORT_PUSH_DRY_RUN;
int push_ret, ret, err;
+ struct refspec *tmp_rs;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
+ int i;
if (check_push_refs(local_refs, refspec_nr, refspec) < 0)
return -1;
- remote_refs = transport->vtable->get_refs_list(transport, 1);
+ tmp_rs = parse_push_refspec(refspec_nr, refspec);
+ for (i = 0; i < refspec_nr; i++) {
+ const char *prefix = NULL;
+
+ if (tmp_rs[i].dst)
+ prefix = tmp_rs[i].dst;
+ else if (tmp_rs[i].src && !tmp_rs[i].exact_sha1)
+ prefix = tmp_rs[i].src;
+
+ if (prefix) {
+ const char *glob = strchr(prefix, '*');
+ if (glob)
+ argv_array_pushf(&ref_prefixes, "%.*s",
+ (int)(glob - prefix),
+ prefix);
+ else
+ expand_ref_prefix(&ref_prefixes, prefix);
+ }
+ }
+
+ remote_refs = transport->vtable->get_refs_list(transport, 1,
+ &ref_prefixes);
+
+ argv_array_clear(&ref_prefixes);
+ free_refspec(refspec_nr, tmp_rs);
if (flags & TRANSPORT_PUSH_ALL)
match_flags |= MATCH_REFS_ALL;
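A rough standalone sketch of how the loop above reduces one side of a push refspec to a ref prefix for the protocol v2 ref advertisement: everything before the first '*' is used verbatim, while a glob-free name would instead go through expand_ref_prefix(). The example refspec is illustrative only.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *dst = "refs/remotes/origin/*";	/* hypothetical refspec side */
	const char *glob = strchr(dst, '*');

	if (glob)
		printf("advertise prefix: %.*s\n", (int)(glob - dst), dst);
	else
		printf("expand '%s' through the rev-parse rules\n", dst);
	return 0;
}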
return 1;
}
-const struct ref *transport_get_remote_refs(struct transport *transport)
+const struct ref *transport_get_remote_refs(struct transport *transport,
+ const struct argv_array *ref_prefixes)
{
if (!transport->got_remote_refs) {
- transport->remote_refs = transport->vtable->get_refs_list(transport, 0);
+ transport->remote_refs =
+ transport->vtable->get_refs_list(transport, 0,
+ ref_prefixes);
transport->got_remote_refs = 1;
}
if (!state && ce->ce_flags & CE_WT_REMOVE) {
repo_read_gitmodules(the_repository);
} else if (state && (ce->ce_flags & CE_UPDATE)) {
- submodule_free();
+ submodule_free(the_repository);
checkout_entry(ce, state, NULL);
repo_read_gitmodules(the_repository);
}
if (ce->ce_flags & CE_UPDATE) {
if (ce->ce_flags & CE_WT_REMOVE)
- die("BUG: both update and delete flags are set on %s",
+ BUG("both update and delete flags are set on %s",
ce->name);
display_progress(progress, ++cnt);
ce->ce_flags &= ~CE_UPDATE;
o->result.timestamp.sec = o->src_index->timestamp.sec;
o->result.timestamp.nsec = o->src_index->timestamp.nsec;
o->result.version = o->src_index->version;
- o->result.split_index = o->src_index->split_index;
- if (o->result.split_index)
+ if (!o->src_index->split_index) {
+ o->result.split_index = NULL;
+ } else if (o->src_index == o->dst_index) {
+ /*
+ * o->dst_index (and thus o->src_index) will be discarded
+ * and overwritten with o->result at the end of this function,
+ * so just use src_index's split_index to avoid having to
+ * create a new one.
+ */
+ o->result.split_index = o->src_index->split_index;
o->result.split_index->refcount++;
+ } else {
+ o->result.split_index = init_split_index(&o->result);
+ }
hashcpy(o->result.sha1, o->src_index->sha1);
o->merge_size = len;
mark_all_ce_unused(o->src_index);
}
}
- o->src_index = NULL;
ret = check_updates(o) ? (-2) : 0;
if (o->dst_index) {
if (!ret) {
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
- move_index_extensions(&o->result, o->dst_index);
+ move_index_extensions(&o->result, o->src_index);
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
discard_index(&o->result);
}
+ o->src_index = NULL;
done:
clear_exclude_list(&el);
add_rejected_path(o, error_type, ce->name);
}
-static int verify_uptodate(const struct cache_entry *ce,
- struct unpack_trees_options *o)
+int verify_uptodate(const struct cache_entry *ce,
+ struct unpack_trees_options *o)
{
if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
return 0;
case 7:
return _("both modified:");
default:
- die("BUG: unhandled unmerged status %x", stagemask);
+ BUG("unhandled unmerged status %x", stagemask);
}
}
status = d->worktree_status;
break;
default:
- die("BUG: unhandled change_type %d in wt_longstatus_print_change_data",
+ BUG("unhandled change_type %d in wt_longstatus_print_change_data",
change_type);
}
status_printf(s, color(WT_STATUS_HEADER, s), "\t");
what = wt_status_diff_status_string(status);
if (!what)
- die("BUG: unhandled diff status %c", status);
+ BUG("unhandled diff status %c", status);
len = label_width - utf8_strwidth(what);
assert(len >= 0);
if (one_name != two_name)
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
if (d->rename_status)
- die("BUG: multiple renames on the same target? how?");
+ BUG("multiple renames on the same target? how?");
d->rename_source = xstrdup(p->one->path);
d->rename_score = p->score * 100 / MAX_SCORE;
d->rename_status = p->status;
break;
default:
- die("BUG: unhandled diff-files status '%c'", p->status);
+ BUG("unhandled diff-files status '%c'", p->status);
break;
}
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
if (d->rename_status)
- die("BUG: multiple renames on the same target? how?");
+ BUG("multiple renames on the same target? how?");
d->rename_source = xstrdup(p->one->path);
d->rename_score = p->score * 100 / MAX_SCORE;
d->rename_status = p->status;
break;
default:
- die("BUG: unhandled diff-index status '%c'", p->status);
+ BUG("unhandled diff-index status '%c'", p->status);
break;
}
}
rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
rev.diffopt.format_callback = wt_status_collect_updated_cb;
rev.diffopt.format_callback_data = s;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
- rev.diffopt.rename_limit = 200;
- rev.diffopt.break_opt = 0;
copy_pathspec(&rev.prune_data, &s->pathspec);
run_diff_index(&rev, 1);
}
setup_revisions(0, NULL, &rev, &opt);
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
rev.diffopt.file = s->fp;
rev.diffopt.close_file = 0;
/*
case 6: key = "AA"; break; /* both added */
case 7: key = "UU"; break; /* both modified */
default:
- die("BUG: unhandled unmerged status %x", d->stagemask);
+ BUG("unhandled unmerged status %x", d->stagemask);
}
/*
sum |= (1 << (stage - 1));
}
if (sum != d->stagemask)
- die("BUG: observed stagemask 0x%x != expected stagemask 0x%x", sum, d->stagemask);
+ BUG("observed stagemask 0x%x != expected stagemask 0x%x", sum, d->stagemask);
if (s->null_termination)
path_index = it->string;
wt_porcelain_v2_print(s);
break;
case STATUS_FORMAT_UNSPECIFIED:
- die("BUG: finalize_deferred_config() should have been called");
+ BUG("finalize_deferred_config() should have been called");
break;
case STATUS_FORMAT_NONE:
case STATUS_FORMAT_LONG: