#include "cache.h"
#include "config.h"
+ #include "object-store.h"
#include "blob.h"
#include "delta.h"
#include "diff.h"
return error(_("--cached outside a repository"));
state->check_index = 1;
}
+ if (state->ita_only && (state->check_index || is_not_gitdir))
+ state->ita_only = 0;
if (state->check_index)
state->unsafe_paths = 0;
if (postlen
? postlen < new_buf - postimage->buf
: postimage->len < new_buf - postimage->buf)
- die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d",
+ BUG("caller miscounted postlen: asked %d, orig = %d, used = %d",
(int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf));
/* Fix the length of the whole thing */
unsigned mode = patch->new_mode;
if (!patch->is_new)
- die("BUG: patch to %s is not a creation", patch->old_name);
+ BUG("patch to %s is not a creation", patch->old_name);
pos = cache_name_pos(name, strlen(name));
if (pos < 0)
if (!patch->is_delete)
new_name = patch->new_name;
- if (old_name && !verify_path(old_name))
+ if (old_name && !verify_path(old_name, patch->old_mode))
return error(_("invalid path '%s'"), old_name);
- if (new_name && !verify_path(new_name))
+ if (new_name && !verify_path(new_name, patch->new_mode))
return error(_("invalid path '%s'"), new_name);
return 0;
}
{
struct patch *patch;
struct index_state result = { NULL };
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
int res;
/* Once we start supporting the reverse patch, it may be
static int remove_file(struct apply_state *state, struct patch *patch, int rmdir_empty)
{
- if (state->update_index) {
+ if (state->update_index && !state->ita_only) {
if (remove_file_from_cache(patch->old_name) < 0)
return error(_("unable to remove %s from index"), patch->old_name);
}
int namelen = strlen(path);
unsigned ce_size = cache_entry_size(namelen);
- if (!state->update_index)
- return 0;
-
ce = xcalloc(1, ce_size);
memcpy(ce->name, path, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(0);
ce->ce_namelen = namelen;
- if (S_ISGITLINK(mode)) {
+ if (state->ita_only) {
+ ce->ce_flags |= CE_INTENT_TO_ADD;
+ set_object_name_for_intent_to_add_entry(ce);
+ } else if (S_ISGITLINK(mode)) {
const char *s;
if (!skip_prefix(buf, "Subproject commit ", &s) ||
if (patch->conflicted_threeway)
return add_conflicted_stages_file(state, patch);
- else
+ else if (state->update_index)
return add_index_file(state, path, mode, buf, size);
+ return 0;
}
/* phase zero is to remove, phase one is to create */
if (state->whitespace_error && (state->ws_error_action == die_on_ws_error))
state->apply = 0;
- state->update_index = state->check_index && state->apply;
+ state->update_index = (state->check_index || state->ita_only) && state->apply;
if (state->update_index && !is_lock_file_locked(&state->lock_file)) {
if (state->index_file)
hold_lock_file_for_update(&state->lock_file,
N_("instead of applying the patch, see if the patch is applicable")),
OPT_BOOL(0, "index", &state->check_index,
N_("make sure the patch is applicable to the current index")),
+ OPT_BOOL('N', "intent-to-add", &state->ita_only,
+ N_("mark new files with `git add --intent-to-add`")),
OPT_BOOL(0, "cached", &state->cached,
N_("apply a patch without touching the working tree")),
OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths,
#include "config.h"
#include "tar.h"
#include "archive.h"
+ #include "object-store.h"
#include "streaming.h"
#include "run-command.h"
int r;
if (!ar->data)
- die("BUG: tar-filter archiver called with no filter defined");
+ BUG("tar-filter archiver called with no filter defined");
strbuf_addstr(&cmd, ar->data);
if (args->compression_level >= 0)
#include "cache.h"
#include "config.h"
#include "refs.h"
+ #include "object-store.h"
#include "commit.h"
#include "tree-walk.h"
#include "attr.h"
}
#define OPT__COMPR(s, v, h, p) \
- { OPTION_SET_INT, (s), NULL, (v), NULL, (h), \
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, (p) }
+ OPT_SET_INT_F(s, NULL, v, h, p, PARSE_OPT_NONEG)
#define OPT__COMPR_HIDDEN(s, v, p) \
- { OPTION_SET_INT, (s), NULL, (v), NULL, "", \
- PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_HIDDEN, NULL, (p) }
+ OPT_SET_INT_F(s, NULL, v, "", p, PARSE_OPT_NONEG | PARSE_OPT_HIDDEN)
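The two OPT__COMPR* macros above now expand to OPT_SET_INT_F(), the flag-taking variant of OPT_SET_INT() that this series introduces and that later hunks (difftool --no-prompt, format-patch -p, merge --ff-only, notes --commit/--abort, fetch --unshallow, checkout --ours/--theirs) also switch to. As a sketch, assuming the parse-options.h definition added by the series, the helper is just the old open-coded initializer with the extra flags ORed in:

#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \
                                          (h), PARSE_OPT_NOARG | (f), NULL, (i) }

so behaviour (NONEG, plus HIDDEN for the undocumented compression levels) is unchanged; only the boilerplate goes away.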
static int parse_archive_args(int argc, const char **argv,
const struct archiver **ar, struct archiver_args *args,
#include "cache.h"
#include "refs.h"
+ #include "object-store.h"
#include "cache-tree.h"
#include "mergesort.h"
#include "diff.h"
#include "tag.h"
#include "blame.h"
#include "alloc.h"
+#include "commit-slab.h"
+
+define_commit_slab(blame_suspects, struct blame_origin *);
+static struct blame_suspects blame_suspects;
+
+struct blame_origin *get_blame_suspects(struct commit *commit)
+{
+ struct blame_origin **result;
+
+ result = blame_suspects_peek(&blame_suspects, commit);
+
+ return result ? *result : NULL;
+}
+
+static void set_blame_suspects(struct commit *commit, struct blame_origin *origin)
+{
+ *blame_suspects_at(&blame_suspects, commit) = origin;
+}
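These two helpers are the only way the rest of blame.c now reaches its per-commit state, replacing direct use of the catch-all commit->util pointer. For readers new to commit slabs, a minimal, self-contained sketch of the pattern follows (the slab name "indegree_slab" and the int payload are invented for illustration; the real API is in commit-slab.h):

#include "commit.h"
#include "commit-slab.h"

/* declares struct indegree_slab plus init_/clear_/_at()/_peek() helpers */
define_commit_slab(indegree_slab, int);
static struct indegree_slab indegree;

static void count_edge(struct commit *parent)
{
	/* _at() grows the slab on demand and returns a pointer to the slot */
	(*indegree_slab_at(&indegree, parent))++;
}

static int read_count(struct commit *parent)
{
	/* _peek() never allocates; it returns NULL for untouched commits */
	int *p = indegree_slab_peek(&indegree, parent);
	return p ? *p : 0;
}

As in the patch, the slab needs a one-time init_*() call (compare init_blame_suspects() further down) and can be released with the generated clear_*() function when the command is done.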
void blame_origin_decref(struct blame_origin *o)
{
blame_origin_decref(o->previous);
free(o->file.ptr);
/* Should be present exactly once in commit chain */
- for (p = o->commit->util; p; l = p, p = p->next) {
+ for (p = get_blame_suspects(o->commit); p; l = p, p = p->next) {
if (p == o) {
if (l)
l->next = p->next;
else
- o->commit->util = p->next;
+ set_blame_suspects(o->commit, p->next);
free(o);
return;
}
FLEX_ALLOC_STR(o, path, path);
o->commit = commit;
o->refcnt = 1;
- o->next = commit->util;
- commit->util = o;
+ o->next = get_blame_suspects(commit);
+ set_blame_suspects(commit, o);
return o;
}
{
struct blame_origin *o, *l;
- for (o = commit->util, l = NULL; o; l = o, o = o->next) {
+ for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
if (!strcmp(o->path, path)) {
/* bump to front */
if (l) {
l->next = o->next;
- o->next = commit->util;
- commit->util = o;
+ o->next = get_blame_suspects(commit);
+ set_blame_suspects(commit, o);
}
return blame_origin_incref(o);
}
int merge_head;
struct strbuf line = STRBUF_INIT;
- merge_head = open(git_path_merge_head(), O_RDONLY);
+ merge_head = open(git_path_merge_head(the_repository), O_RDONLY);
if (merge_head < 0) {
if (errno == ENOENT)
return;
- die("cannot open '%s' for reading", git_path_merge_head());
+ die("cannot open '%s' for reading",
+ git_path_merge_head(the_repository));
}
while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
struct object_id oid;
if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid))
- die("unknown line in '%s': %s", git_path_merge_head(), line.buf);
+ die("unknown line in '%s': %s",
+ git_path_merge_head(the_repository), line.buf);
tail = append_parent(tail, &oid);
}
close(merge_head);
porigin->suspects = blame_merge(porigin->suspects, sorted);
else {
struct blame_origin *o;
- for (o = porigin->commit->util; o; o = o->next) {
+ for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
if (o->suspects) {
porigin->suspects = sorted;
return;
const char *paths[2];
/* First check any existing origins */
- for (porigin = parent->util; porigin; porigin = porigin->next)
+ for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
if (!strcmp(porigin->path, origin->path)) {
/*
* The same path between origin and its parent
diff_setup_done(&diff_opts);
if (is_null_oid(&origin->commit->object.oid))
- do_diff_cache(&parent->tree->object.oid, &diff_opts);
+ do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
else
- diff_tree_oid(&parent->tree->object.oid,
- &origin->commit->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(origin->commit),
"", &diff_opts);
diffcore_std(&diff_opts);
diff_setup_done(&diff_opts);
if (is_null_oid(&origin->commit->object.oid))
- do_diff_cache(&parent->tree->object.oid, &diff_opts);
+ do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
else
- diff_tree_oid(&parent->tree->object.oid,
- &origin->commit->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(origin->commit),
"", &diff_opts);
diffcore_std(&diff_opts);
diff_opts.flags.find_copies_harder = 1;
if (is_null_oid(&target->commit->object.oid))
- do_diff_cache(&parent->tree->object.oid, &diff_opts);
+ do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
else
- diff_tree_oid(&parent->tree->object.oid,
- &target->commit->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
+ get_commit_tree_oid(target->commit),
"", &diff_opts);
if (!diff_opts.flags.find_copies_harder)
while (commit) {
struct blame_entry *ent;
- struct blame_origin *suspect = commit->util;
+ struct blame_origin *suspect = get_blame_suspects(commit);
/* find one suspect to break down */
while (suspect && !suspect->suspects)
struct commit *final_commit = NULL;
enum object_type type;
+ init_blame_suspects(&blame_suspects);
+
if (sb->reverse && sb->contents_from)
die(_("--contents and --reverse do not blend well."));
l->item = c;
if (add_decoration(&sb->revs->children,
&c->parents->item->object, l))
- die("BUG: not unique item in first-parent chain");
+ BUG("not unique item in first-parent chain");
c = c->parents->item;
}
}
if (is_null_oid(&sb->final->object.oid)) {
- o = sb->final->util;
+ o = get_blame_suspects(sb->final);
sb->final_buf = xmemdupz(o->file.ptr, o->file.size);
sb->final_buf_size = o->file.size;
}
#include "config.h"
#include "branch.h"
#include "refs.h"
+#include "refspec.h"
#include "remote.h"
#include "commit.h"
#include "worktree.h"
struct tracking {
- struct refspec spec;
+ struct refspec_item spec;
char *src;
const char *remote;
int matches;
static int check_tracking_branch(struct remote *remote, void *cb_data)
{
char *tracking_branch = cb_data;
- struct refspec query;
- memset(&query, 0, sizeof(struct refspec));
+ struct refspec_item query;
+ memset(&query, 0, sizeof(struct refspec_item));
query.dst = tracking_branch;
return !remote_find_tracking(remote, &query);
}
void remove_branch_state(void)
{
- unlink(git_path_cherry_pick_head());
- unlink(git_path_revert_head());
- unlink(git_path_merge_head());
- unlink(git_path_merge_rr());
- unlink(git_path_merge_msg());
- unlink(git_path_merge_mode());
- unlink(git_path_squash_msg());
+ unlink(git_path_cherry_pick_head(the_repository));
+ unlink(git_path_revert_head(the_repository));
+ unlink(git_path_merge_head(the_repository));
+ unlink(git_path_merge_rr(the_repository));
+ unlink(git_path_merge_msg(the_repository));
+ unlink(git_path_merge_mode(the_repository));
+ unlink(git_path_squash_msg(the_repository));
}
void die_if_checked_out(const char *branch, int ignore_current_worktree)
#include "cache.h"
#include "config.h"
+#include "color.h"
#include "builtin.h"
+ #include "repository.h"
#include "commit.h"
#include "diff.h"
#include "revision.h"
#include "line-log.h"
#include "dir.h"
#include "progress.h"
+ #include "object-store.h"
#include "blame.h"
+#include "string-list.h"
static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
static int abbrev = -1;
static int no_whole_file_rename;
static int show_progress;
+static char repeated_meta_color[COLOR_MAXLEN];
+static int coloring_mode;
static struct date_mode blame_date_mode = { DATE_ISO8601 };
static size_t blame_date_width;
#define OUTPUT_PORCELAIN 010
#define OUTPUT_SHOW_NAME 020
#define OUTPUT_SHOW_NUMBER 040
-#define OUTPUT_SHOW_SCORE 0100
-#define OUTPUT_NO_AUTHOR 0200
+#define OUTPUT_SHOW_SCORE 0100
+#define OUTPUT_NO_AUTHOR 0200
#define OUTPUT_SHOW_EMAIL 0400
-#define OUTPUT_LINE_PORCELAIN 01000
+#define OUTPUT_LINE_PORCELAIN 01000
+#define OUTPUT_COLOR_LINE 02000
+#define OUTPUT_SHOW_AGE_WITH_COLOR 04000
static void emit_porcelain_details(struct blame_origin *suspect, int repeat)
{
putchar('\n');
}
+static struct color_field {
+ timestamp_t hop;
+ char col[COLOR_MAXLEN];
+} *colorfield;
+static int colorfield_nr, colorfield_alloc;
+
+static void parse_color_fields(const char *s)
+{
+ struct string_list l = STRING_LIST_INIT_DUP;
+ struct string_list_item *item;
+ enum { EXPECT_DATE, EXPECT_COLOR } next = EXPECT_COLOR;
+
+ colorfield_nr = 0;
+
+ /* Ideally this would be stripped and split at the same time? */
+ string_list_split(&l, s, ',', -1);
+ ALLOC_GROW(colorfield, colorfield_nr + 1, colorfield_alloc);
+
+ for_each_string_list_item(item, &l) {
+ switch (next) {
+ case EXPECT_DATE:
+ colorfield[colorfield_nr].hop = approxidate(item->string);
+ next = EXPECT_COLOR;
+ colorfield_nr++;
+ ALLOC_GROW(colorfield, colorfield_nr + 1, colorfield_alloc);
+ break;
+ case EXPECT_COLOR:
+ if (color_parse(item->string, colorfield[colorfield_nr].col))
+ die(_("expecting a color: %s"), item->string);
+ next = EXPECT_DATE;
+ break;
+ }
+ }
+
+ if (next == EXPECT_COLOR)
+ die (_("must end with a color"));
+
+ colorfield[colorfield_nr].hop = TIME_MAX;
+ string_list_clear(&l, 0);
+}
+
+static void setup_default_color_by_age(void)
+{
+ parse_color_fields("blue,12 month ago,white,1 month ago,red");
+}
+
+static void determine_line_heat(struct blame_entry *ent, const char **dest_color)
+{
+ int i = 0;
+ struct commit_info ci;
+ get_commit_info(ent->suspect->commit, &ci, 1);
+
+ while (i < colorfield_nr && ci.author_time > colorfield[i].hop)
+ i++;
+
+ *dest_color = colorfield[i].col;
+}
+
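Taken together, parse_color_fields() turns an alternating color,date,...,color list into an array of (cut-off, color) pairs, and determine_line_heat() scans that array until the line's author time is no newer than the current cut-off. Worked through on the built-in default, as an illustration rather than new code:

/*
 * setup_default_color_by_age() leaves colorfield[] as, conceptually:
 *
 *   [0] = { approxidate("12 month ago"), color parsed from "blue"  }
 *   [1] = { approxidate("1 month ago"),  color parsed from "white" }
 *   [2] = { TIME_MAX,                    color parsed from "red"   }
 *
 * A line last touched two years ago stops at index 0 and is painted
 * blue, one touched two months ago passes only the first cut-off and
 * gets white, and a line from yesterday passes both dated cut-offs and
 * ends up red; the TIME_MAX entry means the newest lines always fall
 * into the final color.
 */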
static void emit_other(struct blame_scoreboard *sb, struct blame_entry *ent, int opt)
{
int cnt;
struct commit_info ci;
char hex[GIT_MAX_HEXSZ + 1];
int show_raw_time = !!(opt & OUTPUT_RAW_TIMESTAMP);
+ const char *default_color = NULL, *color = NULL, *reset = NULL;
get_commit_info(suspect->commit, &ci, 1);
oid_to_hex_r(hex, &suspect->commit->object.oid);
cp = blame_nth_line(sb, ent->lno);
+
+ if (opt & OUTPUT_SHOW_AGE_WITH_COLOR) {
+ determine_line_heat(ent, &default_color);
+ color = default_color;
+ reset = GIT_COLOR_RESET;
+ }
+
for (cnt = 0; cnt < ent->num_lines; cnt++) {
char ch;
int length = (opt & OUTPUT_LONG_OBJECT_NAME) ? GIT_SHA1_HEXSZ : abbrev;
+ if (opt & OUTPUT_COLOR_LINE) {
+ if (cnt > 0) {
+ color = repeated_meta_color;
+ reset = GIT_COLOR_RESET;
+ } else {
+ color = default_color ? default_color : NULL;
+ reset = default_color ? GIT_COLOR_RESET : NULL;
+ }
+ }
+ if (color)
+ fputs(color, stdout);
+
if (suspect->commit->object.flags & UNINTERESTING) {
if (blank_boundary)
memset(hex, ' ', length);
printf(" %*d) ",
max_digits, ent->lno + 1 + cnt);
}
+ if (reset)
+ fputs(reset, stdout);
do {
ch = *cp++;
putchar(ch);
struct commit *commit = ent->suspect->commit;
if (commit->object.flags & MORE_THAN_ONE_PATH)
continue;
- for (suspect = commit->util; suspect; suspect = suspect->next) {
+ for (suspect = get_blame_suspects(commit); suspect; suspect = suspect->next) {
if (suspect->guilty && count++) {
commit->object.flags |= MORE_THAN_ONE_PATH;
break;
/* The format is just "Commit Parent1 Parent2 ...\n" */
struct commit_graft *graft = read_graft_line(&buf);
if (graft)
- register_commit_graft(graft, 0);
+ register_commit_graft(the_repository, graft, 0);
}
fclose(fp);
strbuf_release(&buf);
parse_date_format(value, &blame_date_mode);
return 0;
}
+ if (!strcmp(var, "color.blame.repeatedlines")) {
+ if (color_parse_mem(value, strlen(value), repeated_meta_color))
+ warning(_("invalid color '%s' in color.blame.repeatedLines"),
+ value);
+ return 0;
+ }
+ if (!strcmp(var, "color.blame.highlightrecent")) {
+ parse_color_fields(value);
+ return 0;
+ }
+
+ if (!strcmp(var, "blame.coloring")) {
+ if (!strcmp(value, "repeatedLines")) {
+ coloring_mode |= OUTPUT_COLOR_LINE;
+ } else if (!strcmp(value, "highlightRecent")) {
+ coloring_mode |= OUTPUT_SHOW_AGE_WITH_COLOR;
+ } else if (!strcmp(value, "none")) {
+ coloring_mode &= ~(OUTPUT_COLOR_LINE |
+ OUTPUT_SHOW_AGE_WITH_COLOR);
+ } else {
+ warning(_("invalid value for blame.coloring"));
+ return 0;
+ }
+ }
if (git_diff_heuristic_config(var, value, cb) < 0)
return -1;
OPT_BIT('s', NULL, &output_option, N_("Suppress author name and timestamp (Default: off)"), OUTPUT_NO_AUTHOR),
OPT_BIT('e', "show-email", &output_option, N_("Show author email instead of name (Default: off)"), OUTPUT_SHOW_EMAIL),
OPT_BIT('w', NULL, &xdl_opts, N_("Ignore whitespace differences"), XDF_IGNORE_WHITESPACE),
+ OPT_BIT(0, "color-lines", &output_option, N_("color redundant metadata from previous line differently"), OUTPUT_COLOR_LINE),
+ OPT_BIT(0, "color-by-age", &output_option, N_("color lines by age"), OUTPUT_SHOW_AGE_WITH_COLOR),
/*
* The following two options are parsed by parse_revision_opt()
unsigned int range_i;
long anchor;
+ setup_default_color_by_age();
git_config(git_blame_config, &output_option);
init_revisions(&revs, NULL);
revs.date_mode = blame_date_mode;
blame_coalesce(&sb);
- if (!(output_option & OUTPUT_PORCELAIN))
+ if (!(output_option & (OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR)))
+ output_option |= coloring_mode;
+
+ if (!(output_option & OUTPUT_PORCELAIN)) {
find_alignment(&sb, &output_option);
+ if (!*repeated_meta_color &&
+ (output_option & OUTPUT_COLOR_LINE))
+ strcpy(repeated_meta_color, GIT_COLOR_CYAN);
+ }
+ if (output_option & OUTPUT_ANNOTATE_COMPAT)
+ output_option &= ~(OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR);
output(&sb, output_option);
free((void *)sb.final_buf);
#include "tree-walk.h"
#include "sha1-array.h"
#include "packfile.h"
+ #include "object-store.h"
struct batch_options {
int enabled;
die("could not convert '%s' %s",
oid_to_hex(oid), data->rest);
} else
- die("BUG: invalid cmdmode: %c", opt->cmdmode);
+ BUG("invalid cmdmode: %c", opt->cmdmode);
batch_write(opt, contents, size);
free(contents);
} else if (stream_blob_to_fd(1, oid, NULL, 0) < 0)
(uintmax_t)strlen(obj_name), obj_name);
break;
default:
- die("BUG: unknown get_sha1_with_context result %d\n",
+ BUG("unknown get_sha1_with_context result %d\n",
result);
break;
}
#include "lockfile.h"
#include "parse-options.h"
#include "refs.h"
+ #include "object-store.h"
#include "commit.h"
#include "tree.h"
#include "tree-walk.h"
resolve_undo_clear();
if (opts->force) {
- ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error);
+ ret = reset_tree(get_commit_tree(new_branch_info->commit),
+ opts, 1, writeout_error);
if (ret)
return ret;
} else {
init_tree_desc(&trees[1], tree->buffer, tree->size);
ret = unpack_trees(2, trees, &topts);
+ clear_unpack_trees_porcelain(&topts);
if (ret == -1) {
/*
* Unpack couldn't do a trivial merge; either
o.verbosity = 0;
work = write_tree_from_memory(&o);
- ret = reset_tree(new_branch_info->commit->tree, opts, 1,
+ ret = reset_tree(get_commit_tree(new_branch_info->commit),
+ opts, 1,
writeout_error);
if (ret)
return ret;
o.ancestor = old_branch_info->name;
o.branch1 = new_branch_info->name;
o.branch2 = "local";
- ret = merge_trees(&o, new_branch_info->commit->tree, work,
- old_branch_info->commit->tree, &result);
+ ret = merge_trees(&o,
+ get_commit_tree(new_branch_info->commit),
+ work,
+ get_commit_tree(old_branch_info->commit),
+ &result);
if (ret < 0)
exit(128);
- ret = reset_tree(new_branch_info->commit->tree, opts, 0,
+ ret = reset_tree(get_commit_tree(new_branch_info->commit),
+ opts, 0,
writeout_error);
strbuf_release(&o.obuf);
if (ret)
*source_tree = parse_tree_indirect(rev);
} else {
parse_commit_or_die(new_branch_info->commit);
- *source_tree = new_branch_info->commit->tree;
+ *source_tree = get_commit_tree(new_branch_info->commit);
}
if (!*source_tree) /* case (1): want a tree */
OPT_SET_INT('t', "track", &opts.track, N_("set upstream info for new branch"),
BRANCH_TRACK_EXPLICIT),
OPT_STRING(0, "orphan", &opts.new_orphan_branch, N_("new-branch"), N_("new unparented branch")),
- OPT_SET_INT('2', "ours", &opts.writeout_stage, N_("checkout our version for unmerged files"),
- 2),
- OPT_SET_INT('3', "theirs", &opts.writeout_stage, N_("checkout their version for unmerged files"),
- 3),
+ OPT_SET_INT_F('2', "ours", &opts.writeout_stage,
+ N_("checkout our version for unmerged files"),
+ 2, PARSE_OPT_NONEG),
+ OPT_SET_INT_F('3', "theirs", &opts.writeout_stage,
+ N_("checkout their version for unmerged files"),
+ 3, PARSE_OPT_NONEG),
OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)"),
PARSE_OPT_NOCOMPLETE),
OPT_BOOL('m', "merge", &opts.merge, N_("perform a 3-way merge with the new branch")),
#include "parse-options.h"
#include "fetch-pack.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "tree.h"
#include "tree-walk.h"
#include "unpack-trees.h"
}
static struct ref *wanted_peer_refs(const struct ref *refs,
- struct refspec *refspec)
+ struct refspec_item *refspec)
{
struct ref *head = copy_ref(find_ref_by_name(refs, "HEAD"));
struct ref *local_refs = head;
} else if (remote_head_points_at) {
const char *head = remote_head_points_at->name;
if (!skip_prefix(head, "refs/heads/", &head))
- die("BUG: remote HEAD points at non-head?");
+ BUG("remote HEAD points at non-head?");
strbuf_addf(&value, "+%s:%s%s", remote_head_points_at->name,
branch_top->buf, head);
int err = 0, complete_refs_before_fetch = 1;
int submodule_progress;
- struct refspec *refspec;
- const char *fetch_pattern;
+ struct refspec_item refspec;
fetch_if_missing = 0;
if (option_required_reference.nr || option_optional_reference.nr)
setup_reference();
- fetch_pattern = value.buf;
- refspec = parse_fetch_refspec(1, &fetch_pattern);
+ refspec_item_init_or_die(&refspec, value.buf, REFSPEC_FETCH);
strbuf_reset(&value);
if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
- refs = transport_get_remote_refs(transport);
+ refs = transport_get_remote_refs(transport, NULL);
if (refs) {
- mapped_refs = wanted_peer_refs(refs, refspec);
+ mapped_refs = wanted_peer_refs(refs, &refspec);
/*
* transport_get_remote_refs() may return refs with null sha-1
* in mapped_refs (see struct transport->get_refs_list
strbuf_release(&value);
junk_mode = JUNK_LEAVE_ALL;
- free(refspec);
+ refspec_item_clear(&refspec);
return err;
}
#include "column.h"
#include "sequencer.h"
#include "mailmap.h"
+#include "help.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [<options>] [--] <pathspec>..."),
"Then \"git cherry-pick --continue\" will resume cherry-picking\n"
"the remaining commits.\n");
+static const char *color_status_slots[] = {
+ [WT_STATUS_HEADER] = "header",
+ [WT_STATUS_UPDATED] = "updated",
+ [WT_STATUS_CHANGED] = "changed",
+ [WT_STATUS_UNTRACKED] = "untracked",
+ [WT_STATUS_NOBRANCH] = "noBranch",
+ [WT_STATUS_UNMERGED] = "unmerged",
+ [WT_STATUS_LOCAL_BRANCH] = "localBranch",
+ [WT_STATUS_REMOTE_BRANCH] = "remoteBranch",
+ [WT_STATUS_ONBRANCH] = "branch",
+};
+
static const char *use_message_buffer;
static struct lock_file index_lock; /* real index */
static struct lock_file false_lock; /* used only for partial commits */
return 0;
}
+static int opt_parse_rename_score(const struct option *opt, const char *arg, int unset)
+{
+ const char **value = opt->value;
+ if (arg != NULL && *arg == '=')
+ arg = arg + 1;
+
+ *value = arg;
+ return 0;
+}
+
static void determine_whence(struct wt_status *s)
{
- if (file_exists(git_path_merge_head()))
+ if (file_exists(git_path_merge_head(the_repository)))
whence = FROM_MERGE;
- else if (file_exists(git_path_cherry_pick_head())) {
+ else if (file_exists(git_path_cherry_pick_head(the_repository))) {
whence = FROM_CHERRY_PICK;
if (file_exists(git_path_seq_dir()))
sequencer_in_use = 1;
static void status_init_config(struct wt_status *s, config_fn_t fn)
{
wt_status_prepare(s);
+ init_diff_ui_defaults();
git_config(fn, s);
determine_whence(s);
- init_diff_ui_defaults();
s->hints = advice_status_hints; /* must come after git_config() */
}
if (with_tree) {
char *max_prefix = common_prefix(pattern);
- overlay_tree_on_index(&the_index, with_tree,
- max_prefix ? max_prefix : prefix);
+ overlay_tree_on_index(&the_index, with_tree, max_prefix);
free(max_prefix);
}
static void assert_split_ident(struct ident_split *id, const struct strbuf *buf)
{
if (split_ident_line(id, buf->buf, buf->len) || !id->date_begin)
- die("BUG: unable to parse our own ident: %s", buf->buf);
+ BUG("unable to parse our own ident: %s", buf->buf);
}
static void export_one(const char *var, const char *s, const char *e, int hack)
if (have_option_m)
strbuf_addbuf(&sb, &message);
hook_arg1 = "message";
- } else if (!stat(git_path_merge_msg(), &statbuf)) {
+ } else if (!stat(git_path_merge_msg(the_repository), &statbuf)) {
/*
* prepend SQUASH_MSG here if it exists and a
* "merge --squash" was originally performed
*/
- if (!stat(git_path_squash_msg(), &statbuf)) {
- if (strbuf_read_file(&sb, git_path_squash_msg(), 0) < 0)
+ if (!stat(git_path_squash_msg(the_repository), &statbuf)) {
+ if (strbuf_read_file(&sb, git_path_squash_msg(the_repository), 0) < 0)
die_errno(_("could not read SQUASH_MSG"));
hook_arg1 = "squash";
} else
hook_arg1 = "merge";
- if (strbuf_read_file(&sb, git_path_merge_msg(), 0) < 0)
+ if (strbuf_read_file(&sb, git_path_merge_msg(the_repository), 0) < 0)
die_errno(_("could not read MERGE_MSG"));
- } else if (!stat(git_path_squash_msg(), &statbuf)) {
- if (strbuf_read_file(&sb, git_path_squash_msg(), 0) < 0)
+ } else if (!stat(git_path_squash_msg(the_repository), &statbuf)) {
+ if (strbuf_read_file(&sb, git_path_squash_msg(the_repository), 0) < 0)
die_errno(_("could not read SQUASH_MSG"));
hook_arg1 = "squash";
} else if (template_file) {
" %s\n"
"and try again.\n"),
whence == FROM_MERGE ?
- git_path_merge_head() :
- git_path_cherry_pick_head());
+ git_path_merge_head(the_repository) :
+ git_path_cherry_pick_head(the_repository));
}
fprintf(s->fp, "\n");
return commitable ? 0 : 1;
}
+define_list_config_array_extra(color_status_slots, {"added"});
+
static int parse_status_slot(const char *slot)
{
- if (!strcasecmp(slot, "header"))
- return WT_STATUS_HEADER;
- if (!strcasecmp(slot, "branch"))
- return WT_STATUS_ONBRANCH;
- if (!strcasecmp(slot, "updated") || !strcasecmp(slot, "added"))
+ if (!strcasecmp(slot, "added"))
return WT_STATUS_UPDATED;
- if (!strcasecmp(slot, "changed"))
- return WT_STATUS_CHANGED;
- if (!strcasecmp(slot, "untracked"))
- return WT_STATUS_UNTRACKED;
- if (!strcasecmp(slot, "nobranch"))
- return WT_STATUS_NOBRANCH;
- if (!strcasecmp(slot, "unmerged"))
- return WT_STATUS_UNMERGED;
- if (!strcasecmp(slot, "localBranch"))
- return WT_STATUS_LOCAL_BRANCH;
- if (!strcasecmp(slot, "remoteBranch"))
- return WT_STATUS_REMOTE_BRANCH;
- return -1;
+
+ return LOOKUP_CONFIG(color_status_slots, slot);
}
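The long strcasecmp() chain becomes a table lookup: color_status_slots[] (added near the top of this patch) holds the canonical name for each WT_STATUS_* slot in enum order, LOOKUP_CONFIG() returns the matching index case-insensitively, and the define_list_config_array_extra() form appears to also advertise the historical "added" alias for config-name listings, which is why only that alias still needs an explicit check here. A rough sketch of the lookup, stated as an assumption about help.h in this series:

/* roughly: case-insensitive scan over the name table, -1 if absent */
static inline int lookup_config(const char **mapping, int nr_mapping, const char *var)
{
	int i;
	for (i = 0; i < nr_mapping; i++)
		if (mapping[i] && !strcasecmp(var, mapping[i]))
			return i;
	return -1;
}
#define LOOKUP_CONFIG(mapping, var) lookup_config(mapping, ARRAY_SIZE(mapping), var)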
static int git_status_config(const char *k, const char *v, void *cb)
return error(_("Invalid untracked files mode '%s'"), v);
return 0;
}
+ if (!strcmp(k, "diff.renamelimit")) {
+ if (s->rename_limit == -1)
+ s->rename_limit = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.renamelimit")) {
+ s->rename_limit = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "diff.renames")) {
+ if (s->detect_rename == -1)
+ s->detect_rename = git_config_rename(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "status.renames")) {
+ s->detect_rename = git_config_rename(k, v);
+ return 0;
+ }
return git_diff_ui_config(k, v, NULL);
}
int cmd_status(int argc, const char **argv, const char *prefix)
{
+ static int no_renames = -1;
+ static const char *rename_score_arg = (const char *)-1;
static struct wt_status s;
int fd;
struct object_id oid;
N_("ignore changes to submodules, optional when: all, dirty, untracked. (Default: all)"),
PARSE_OPT_OPTARG, NULL, (intptr_t)"all" },
OPT_COLUMN(0, "column", &s.colopts, N_("list untracked files in columns")),
+ OPT_BOOL(0, "no-renames", &no_renames, N_("do not detect renames")),
+ { OPTION_CALLBACK, 'M', "find-renames", &rename_score_arg,
+ N_("n"), N_("detect renames, optionally set similarity index"),
+ PARSE_OPT_OPTARG, opt_parse_rename_score },
OPT_END(),
};
s.ignore_submodule_arg = ignore_submodule_arg;
s.status_format = status_format;
s.verbose = verbose;
+ if (no_renames != -1)
+ s.detect_rename = !no_renames;
+ if ((intptr_t)rename_score_arg != -1) {
+ if (s.detect_rename < DIFF_DETECT_RENAME)
+ s.detect_rename = DIFF_DETECT_RENAME;
+ if (rename_score_arg)
+ s.rename_score = parse_rename_score(&rename_score_arg);
+ }
wt_status_collect(&s);
if (!reflog_msg)
reflog_msg = "commit (merge)";
pptr = commit_list_append(current_head, pptr);
- fp = xfopen(git_path_merge_head(), "r");
+ fp = xfopen(git_path_merge_head(the_repository), "r");
while (strbuf_getline_lf(&m, fp) != EOF) {
struct commit *parent;
}
fclose(fp);
strbuf_release(&m);
- if (!stat(git_path_merge_mode(), &statbuf)) {
- if (strbuf_read_file(&sb, git_path_merge_mode(), 0) < 0)
+ if (!stat(git_path_merge_mode(the_repository), &statbuf)) {
+ if (strbuf_read_file(&sb, git_path_merge_mode(the_repository), 0) < 0)
die_errno(_("could not read MERGE_MODE"));
if (!strcmp(sb.buf, "no-ff"))
allow_fast_forward = 0;
die("%s", err.buf);
}
- unlink(git_path_cherry_pick_head());
- unlink(git_path_revert_head());
- unlink(git_path_merge_head());
- unlink(git_path_merge_msg());
- unlink(git_path_merge_mode());
- unlink(git_path_squash_msg());
+ unlink(git_path_cherry_pick_head(the_repository));
+ unlink(git_path_revert_head(the_repository));
+ unlink(git_path_merge_head(the_repository));
+ unlink(git_path_merge_msg(the_repository));
+ unlink(git_path_merge_mode(the_repository));
+ unlink(git_path_squash_msg(the_repository));
if (commit_index_files())
die (_("Repository has been updated, but unable to write\n"
#include "blob.h"
#include "refs.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "revision.h"
#include "diff.h"
#include "hashmap.h"
#include "argv-array.h"
#include "run-command.h"
+ #include "object-store.h"
#include "revision.h"
#include "list-objects.h"
+#include "commit-slab.h"
#define MAX_TAGS (FLAG_BITS - 1)
+define_commit_slab(commit_names, struct commit_name *);
+
static const char * const describe_usage[] = {
N_("git describe [<options>] [<commit-ish>...]"),
N_("git describe [<options>] --dirty"),
static struct string_list exclude_patterns = STRING_LIST_INIT_NODUP;
static int always;
static const char *suffix, *dirty, *broken;
+static struct commit_names commit_names;
/* diff-index command arguments to check if working tree is dirty. */
static const char *diff_index_args[] = {
if (!have_util) {
struct hashmap_iter iter;
struct commit *c;
- struct commit_name *n = hashmap_iter_first(&names, &iter);
+ struct commit_name *n;
+
+ init_commit_names(&commit_names);
+ n = hashmap_iter_first(&names, &iter);
for (; n; n = hashmap_iter_next(&iter)) {
c = lookup_commit_reference_gently(&n->peeled, 1);
if (c)
- c->util = n;
+ *commit_names_at(&commit_names, c) = n;
}
have_util = 1;
}
while (list) {
struct commit *c = pop_commit(&list);
struct commit_list *parents = c->parents;
+ struct commit_name **slot;
+
seen_commits++;
- n = c->util;
+ slot = commit_names_peek(&commit_names, c);
+ n = slot ? *slot : NULL;
if (n) {
if (!tags && !all && n->prio < 2) {
unannotated_cnt++;
suffix = broken;
}
} else if (dirty) {
- static struct lock_file index_lock;
+ struct lock_file index_lock = LOCK_INIT;
struct rev_info revs;
struct argv_array args = ARGV_ARRAY_INIT;
int fd, result;
#include "config.h"
#include "builtin.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "argv-array.h"
#include "strbuf.h"
#include "lockfile.h"
+ #include "object-store.h"
#include "dir.h"
static char *diff_gui_tool;
continue;
if (!indices_loaded) {
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
strbuf_reset(&buf);
strbuf_addf(&buf, "%s/wtindex", tmpdir);
if (hold_lock_file_for_update(&lock, buf.buf, 0) < 0 ||
N_("use `diff.guitool` instead of `diff.tool`")),
OPT_BOOL('d', "dir-diff", &dir_diff,
N_("perform a full-directory diff")),
- { OPTION_SET_INT, 'y', "no-prompt", &prompt, NULL,
+ OPT_SET_INT_F('y', "no-prompt", &prompt,
N_("do not prompt before launching a diff tool"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 0},
- { OPTION_SET_INT, 0, "prompt", &prompt, NULL, NULL,
- PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_HIDDEN,
- NULL, 1 },
+ 0, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "prompt", &prompt, NULL,
+ 1, PARSE_OPT_NONEG | PARSE_OPT_HIDDEN),
OPT_BOOL(0, "symlinks", &symlinks,
N_("use symlinks in dir-diff mode")),
OPT_STRING('t', "tool", &difftool_cmd, N_("<tool>"),
#include "cache.h"
#include "config.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "commit.h"
#include "object.h"
#include "tag.h"
#include "quote.h"
#include "remote.h"
#include "blob.h"
+#include "commit-slab.h"
static const char *fast_export_usage[] = {
N_("git fast-export [rev-list-opts]"),
static int no_data;
static int full_tree;
static struct string_list extra_refs = STRING_LIST_INIT_NODUP;
-static struct refspec *refspecs;
-static int refspecs_nr;
+static struct refspec refspecs = REFSPEC_INIT_FETCH;
static int anonymize;
+static struct revision_sources revision_sources;
static int parse_opt_signed_tag_mode(const struct option *opt,
const char *arg, int unset)
}
}
-/* Since intptr_t is C99, we do not use it here */
-static inline uint32_t *mark_to_ptr(uint32_t mark)
+static inline void *mark_to_ptr(uint32_t mark)
{
- return ((uint32_t *)NULL) + mark;
+ return (void *)(uintptr_t)mark;
}
static inline uint32_t ptr_to_mark(void * mark)
{
- return (uint32_t *)mark - (uint32_t *)NULL;
+ return (uint32_t)(uintptr_t)mark;
}
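fast-export remembers each object's mark by stuffing the integer into a void pointer kept in a decoration, so the only requirement on the pair above is that they invert each other. The old version did arithmetic on a null uint32_t pointer, which is undefined behaviour; casting through uintptr_t gives the same mapping portably. A tiny sketch (the helper name is invented):

static uint32_t roundtrip_mark(uint32_t mark)
{
	void *stored = mark_to_ptr(mark);	/* what ends up in the decoration */
	return ptr_to_mark(stored);		/* always recovers the original mark */
}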
static inline void mark_object(struct object *object, uint32_t mark)
/* skip "committer", "author", "tagger", etc */
end_of_header = strchr(*beg, ' ');
if (!end_of_header)
- die("BUG: malformed line fed to anonymize_ident_line: %.*s",
+ BUG("malformed line fed to anonymize_ident_line: %.*s",
(int)(*end - *beg), *beg);
end_of_header++;
strbuf_add(out, *beg, end_of_header - *beg);
get_object_mark(&commit->parents->item->object) != 0 &&
!full_tree) {
parse_commit_or_die(commit->parents->item);
- diff_tree_oid(&commit->parents->item->tree->object.oid,
- &commit->tree->object.oid, "", &rev->diffopt);
+ diff_tree_oid(get_commit_tree_oid(commit->parents->item),
+ get_commit_tree_oid(commit), "", &rev->diffopt);
}
else
- diff_root_tree_oid(&commit->tree->object.oid,
+ diff_root_tree_oid(get_commit_tree_oid(commit),
"", &rev->diffopt);
/* Export the referenced blobs, and remember the marks. */
if (!S_ISGITLINK(diff_queued_diff.queue[i]->two->mode))
export_blob(&diff_queued_diff.queue[i]->two->oid);
- refname = commit->util;
+ refname = *revision_sources_at(&revision_sources, commit);
if (anonymize) {
refname = anonymize_refname(refname);
anonymize_ident_line(&committer, &committer_end);
struct commit *commit;
while (commits->nr) {
commit = (struct commit *)object_array_pop(commits);
- if (has_unshown_parent(commit))
+ if (has_unshown_parent(commit)) {
+ /* Queue again, to be handled later */
+ add_object_array(&commit->object, NULL, commits);
return;
+ }
handle_commit(commit, revs, paths_of_changed_objects);
}
}
if (dwim_ref(e->name, strlen(e->name), &oid, &full_name) != 1)
continue;
- if (refspecs) {
+ if (refspecs.nr) {
char *private;
- private = apply_refspecs(refspecs, refspecs_nr, full_name);
+ private = apply_refspecs(&refspecs, full_name);
if (private) {
free(full_name);
full_name = private;
* This ref will not be updated through a commit, let's make
* sure it gets properly updated eventually.
*/
- if (commit->util || commit->object.flags & SHOWN)
+ if (*revision_sources_at(&revision_sources, commit) ||
+ commit->object.flags & SHOWN)
string_list_append(&extra_refs, full_name)->util = commit;
- if (!commit->util)
- commit->util = full_name;
+ if (!*revision_sources_at(&revision_sources, commit))
+ *revision_sources_at(&revision_sources, commit) = full_name;
}
}
static void handle_deletes(void)
{
int i;
- for (i = 0; i < refspecs_nr; i++) {
- struct refspec *refspec = &refspecs[i];
+ for (i = 0; i < refspecs.nr; i++) {
+ struct refspec_item *refspec = &refspecs.items[i];
if (*refspec->src)
continue;
git_config(git_default_config, NULL);
init_revisions(&revs, prefix);
+ init_revision_sources(&revision_sources);
revs.topo_order = 1;
- revs.show_source = 1;
+ revs.sources = &revision_sources;
revs.rewrite_parents = 1;
argc = parse_options(argc, argv, prefix, options, fast_export_usage,
PARSE_OPT_KEEP_ARGV0 | PARSE_OPT_KEEP_UNKNOWN);
usage_with_options (fast_export_usage, options);
if (refspecs_list.nr) {
- const char **refspecs_str;
int i;
- ALLOC_ARRAY(refspecs_str, refspecs_list.nr);
for (i = 0; i < refspecs_list.nr; i++)
- refspecs_str[i] = refspecs_list.items[i].string;
-
- refspecs_nr = refspecs_list.nr;
- refspecs = parse_fetch_refspec(refspecs_nr, refspecs_str);
+ refspec_append(&refspecs, refspecs_list.items[i].string);
string_list_clear(&refspecs_list, 1);
- free(refspecs_str);
}
if (use_done_feature)
if (use_done_feature)
printf("done\n");
- free_refspec(refspecs_nr, refspecs);
+ refspec_clear(&refspecs);
return 0;
}
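With this, fast-export's parallel refspecs/refspecs_nr arrays become a single struct refspec that owns its struct refspec_item entries; builtin/fetch.c below goes through the same conversion. A minimal sketch of the lifecycle, assuming the refspec.h/remote.h API adopted here (the function name and the refspec string are illustrative only):

#include "refspec.h"
#include "remote.h"

static char *remap_to_remote_tracking(const char *full_ref)
{
	struct refspec rs = REFSPEC_INIT_FETCH;
	char *mapped;

	refspec_append(&rs, "refs/heads/*:refs/remotes/origin/*");
	mapped = apply_refspecs(&rs, full_ref);	/* NULL when no item matches */
	refspec_clear(&rs);
	return mapped;				/* caller owns the result */
}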
#include "config.h"
#include "repository.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "commit.h"
#include "builtin.h"
#include "string-list.h"
static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND;
static int shown_url = 0;
-static int refmap_alloc, refmap_nr;
-static const char **refmap_array;
+static struct refspec refmap = REFSPEC_INIT_FETCH;
static struct list_objects_filter_options filter_options;
+static struct string_list server_options = STRING_LIST_INIT_DUP;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
static int parse_refmap_arg(const struct option *opt, const char *arg, int unset)
{
- ALLOC_GROW(refmap_array, refmap_nr + 1, refmap_alloc);
-
/*
* "git fetch --refmap='' origin foo"
* can be used to tell the command not to store anywhere
*/
- if (*arg)
- refmap_array[refmap_nr++] = arg;
+ refspec_append(&refmap, arg);
+
return 0;
}
N_("deepen history of shallow clone, excluding rev")),
OPT_INTEGER(0, "deepen", &deepen_relative,
N_("deepen history of shallow clone")),
- { OPTION_SET_INT, 0, "unshallow", &unshallow, NULL,
- N_("convert to a complete repository"),
- PARSE_OPT_NONEG | PARSE_OPT_NOARG, NULL, 1 },
+ OPT_SET_INT_F(0, "unshallow", &unshallow,
+ N_("convert to a complete repository"),
+ 1, PARSE_OPT_NONEG),
{ OPTION_STRING, 0, "submodule-prefix", &submodule_prefix, N_("dir"),
N_("prepend this to submodule path output"), PARSE_OPT_HIDDEN },
{ OPTION_CALLBACK, 0, "recurse-submodules-default",
N_("accept refs that update .git/shallow")),
{ OPTION_CALLBACK, 0, "refmap", NULL, N_("refmap"),
N_("specify fetch refmap"), PARSE_OPT_NONEG, parse_refmap_arg },
+ OPT_STRING_LIST('o', "server-option", &server_options, N_("server-specific"), N_("option to transmit")),
OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
for (i = 0; i < branch->merge_nr; i++) {
struct ref *rm, **old_tail = *tail;
- struct refspec refspec;
+ struct refspec_item refspec;
for (rm = *head; rm; rm = rm->next) {
if (branch_merge_matches(branch, i, rm->name)) {
struct string_list_item *item = NULL;
for_each_ref(add_existing, &existing_refs);
- for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
+ for (ref = transport_get_remote_refs(transport, NULL); ref; ref = ref->next) {
if (!starts_with(ref->name, "refs/tags/"))
continue;
}
static struct ref *get_ref_map(struct transport *transport,
- struct refspec *refspecs, int refspec_count,
+ struct refspec *rs,
int tags, int *autotags)
{
int i;
struct ref *rm;
struct ref *ref_map = NULL;
struct ref **tail = &ref_map;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
/* opportunistically-updated references: */
struct ref *orefs = NULL, **oref_tail = &orefs;
- const struct ref *remote_refs = transport_get_remote_refs(transport);
+ const struct ref *remote_refs;
+
+ if (rs->nr)
+ refspec_ref_prefixes(rs, &ref_prefixes);
+ else if (transport->remote && transport->remote->fetch.nr)
+ refspec_ref_prefixes(&transport->remote->fetch, &ref_prefixes);
+
+ if (ref_prefixes.argc &&
+ (tags == TAGS_SET || (tags == TAGS_DEFAULT && !rs->nr))) {
+ argv_array_push(&ref_prefixes, "refs/tags/");
+ }
+
+ remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
- if (refspec_count) {
+ argv_array_clear(&ref_prefixes);
+
+ if (rs->nr) {
struct refspec *fetch_refspec;
- int fetch_refspec_nr;
- for (i = 0; i < refspec_count; i++) {
- get_fetch_map(remote_refs, &refspecs[i], &tail, 0);
- if (refspecs[i].dst && refspecs[i].dst[0])
+ for (i = 0; i < rs->nr; i++) {
+ get_fetch_map(remote_refs, &rs->items[i], &tail, 0);
+ if (rs->items[i].dst && rs->items[i].dst[0])
*autotags = 1;
}
/* Merge everything on the command line (but not --tags) */
* by ref_remove_duplicates() in favor of one of these
* opportunistic entries with FETCH_HEAD_IGNORE.
*/
- if (refmap_array) {
- fetch_refspec = parse_fetch_refspec(refmap_nr, refmap_array);
- fetch_refspec_nr = refmap_nr;
- } else {
- fetch_refspec = transport->remote->fetch;
- fetch_refspec_nr = transport->remote->fetch_refspec_nr;
- }
+ if (refmap.nr)
+ fetch_refspec = &refmap;
+ else
+ fetch_refspec = &transport->remote->fetch;
- for (i = 0; i < fetch_refspec_nr; i++)
- get_fetch_map(ref_map, &fetch_refspec[i], &oref_tail, 1);
- } else if (refmap_array) {
+ for (i = 0; i < fetch_refspec->nr; i++)
+ get_fetch_map(ref_map, &fetch_refspec->items[i], &oref_tail, 1);
+ } else if (refmap.nr) {
die("--refmap option is only meaningful with command-line refspec(s).");
} else {
/* Use the defaults */
struct branch *branch = branch_get(NULL);
int has_merge = branch_has_merge_config(branch);
if (remote &&
- (remote->fetch_refspec_nr ||
+ (remote->fetch.nr ||
/* Note: has_merge implies non-NULL branch->remote_name */
(has_merge && !strcmp(branch->remote_name, remote->name)))) {
- for (i = 0; i < remote->fetch_refspec_nr; i++) {
- get_fetch_map(remote_refs, &remote->fetch[i], &tail, 0);
- if (remote->fetch[i].dst &&
- remote->fetch[i].dst[0])
+ for (i = 0; i < remote->fetch.nr; i++) {
+ get_fetch_map(remote_refs, &remote->fetch.items[i], &tail, 0);
+ if (remote->fetch.items[i].dst &&
+ remote->fetch.items[i].dst[0])
*autotags = 1;
if (!i && !has_merge && ref_map &&
- !remote->fetch[0].pattern)
+ !remote->fetch.items[0].pattern)
ref_map->fetch_head_status = FETCH_HEAD_MERGE;
}
/*
const char *what, *kind;
struct ref *rm;
char *url;
- const char *filename = dry_run ? "/dev/null" : git_path_fetch_head();
+ const char *filename = dry_run ? "/dev/null" : git_path_fetch_head(the_repository);
int want_status;
int summary_width = transport_summary_width(ref_map);
return ret;
}
-static int prune_refs(struct refspec *refs, int ref_count, struct ref *ref_map,
- const char *raw_url)
+static int prune_refs(struct refspec *rs, struct ref *ref_map,
+ const char *raw_url)
{
int url_len, i, result = 0;
- struct ref *ref, *stale_refs = get_stale_heads(refs, ref_count, ref_map);
+ struct ref *ref, *stale_refs = get_stale_heads(rs, ref_map);
char *url;
int summary_width = transport_summary_width(stale_refs);
const char *dangling_msg = dry_run
static int truncate_fetch_head(void)
{
- const char *filename = git_path_fetch_head();
+ const char *filename = git_path_fetch_head(the_repository);
FILE *fp = fopen_for_writing(filename);
if (!fp)
}
static int do_fetch(struct transport *transport,
- struct refspec *refs, int ref_count)
+ struct refspec *rs)
{
struct string_list existing_refs = STRING_LIST_INIT_DUP;
struct ref *ref_map;
goto cleanup;
}
- ref_map = get_ref_map(transport, refs, ref_count, tags, &autotags);
+ ref_map = get_ref_map(transport, rs, tags, &autotags);
if (!update_head_ok)
check_not_current_branch(ref_map);
* explicitly (via command line or configuration); we
* don't care whether --tags was specified.
*/
- if (ref_count) {
- prune_refs(refs, ref_count, ref_map, transport->url);
+ if (rs->nr) {
+ prune_refs(rs, ref_map, transport->url);
} else {
- prune_refs(transport->remote->fetch,
- transport->remote->fetch_refspec_nr,
+ prune_refs(&transport->remote->fetch,
ref_map,
transport->url);
}
static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok)
{
- static const char **refs = NULL;
- struct refspec *refspec;
- int ref_nr = 0;
- int j = 0;
+ struct refspec rs = REFSPEC_INIT_FETCH;
+ int i;
int exit_code;
int maybe_prune_tags;
int remote_via_config = remote_is_configured(remote, 0);
maybe_prune_tags = prune_tags_ok && prune_tags;
if (maybe_prune_tags && remote_via_config)
- add_prune_tags_to_fetch_refspec(remote);
-
- if (argc > 0 || (maybe_prune_tags && !remote_via_config)) {
- size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1);
- refs = xcalloc(nr_alloc, sizeof(const char *));
- if (maybe_prune_tags) {
- refs[j++] = xstrdup("refs/tags/*:refs/tags/*");
- ref_nr++;
+ refspec_append(&remote->fetch, TAG_REFSPEC);
+
+ if (maybe_prune_tags && (argc || !remote_via_config))
+ refspec_append(&rs, TAG_REFSPEC);
+
+ for (i = 0; i < argc; i++) {
+ if (!strcmp(argv[i], "tag")) {
+ char *tag;
+ i++;
+ if (i >= argc)
+ die(_("You need to specify a tag name."));
+
+ tag = xstrfmt("refs/tags/%s:refs/tags/%s",
+ argv[i], argv[i]);
+ refspec_append(&rs, tag);
+ free(tag);
+ } else {
+ refspec_append(&rs, argv[i]);
}
}
- if (argc > 0) {
- int i;
- for (i = 0; i < argc; i++) {
- if (!strcmp(argv[i], "tag")) {
- i++;
- if (i >= argc)
- die(_("You need to specify a tag name."));
- refs[j++] = xstrfmt("refs/tags/%s:refs/tags/%s",
- argv[i], argv[i]);
- } else
- refs[j++] = argv[i];
- ref_nr++;
- }
- }
+ if (server_options.nr)
+ gtransport->server_options = &server_options;
sigchain_push_common(unlock_pack_on_signal);
atexit(unlock_pack);
- refspec = parse_fetch_refspec(ref_nr, refs);
- exit_code = do_fetch(gtransport, refspec, ref_nr);
- free_refspec(ref_nr, refspec);
+ exit_code = do_fetch(gtransport, &rs);
+ refspec_clear(&rs);
transport_disconnect(gtransport);
gtransport = NULL;
return exit_code;
if (unshallow) {
if (depth)
die(_("--depth and --unshallow cannot be used together"));
- else if (!is_repository_shallow())
+ else if (!is_repository_shallow(the_repository))
die(_("--unshallow on a complete repository does not make sense"));
else
depth = xstrfmt("%d", INFINITE_DEPTH);
*/
#include "builtin.h"
#include "config.h"
+ #include "object-store.h"
#include "blob.h"
#include "quote.h"
#include "parse-options.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
/*
* This is to create corrupt objects for debugging and as such it
#include "cache.h"
#include "config.h"
#include "refs.h"
+ #include "object-store.h"
#include "color.h"
#include "commit.h"
#include "diff.h"
#include "mailmap.h"
#include "gpg-interface.h"
#include "progress.h"
+#include "commit-slab.h"
#define MAIL_DEFAULT_WRAP 72
static struct string_list decorate_refs_include = STRING_LIST_INIT_NODUP;
struct decoration_filter decoration_filter = {&decorate_refs_include,
&decorate_refs_exclude};
+ static struct revision_sources revision_sources;
const struct option builtin_log_options[] = {
OPT__QUIET(&quiet, N_("suppress diff output")),
rev->diffopt.filter || rev->diffopt.flags.follow_renames)
rev->always_show_header = 0;
- if (source)
- rev->show_source = 1;
+ if (source) {
+ init_revision_sources(&revision_sources);
+ rev->sources = &revision_sources;
+ }
if (mailmap) {
rev->mailmap = xcalloc(1, sizeof(struct string_list));
open_next_file(NULL, rev->numbered_files ? NULL : "cover-letter", rev, quiet))
return;
- log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte);
+ log_write_email_headers(rev, head, &pp.after_subject, &need_8bit_cte, 0);
for (i = 0; !need_8bit_cte && i < nr; i++) {
const char *buf = get_commit_buffer(list[i], NULL);
diff_setup_done(&opts);
- diff_tree_oid(&origin->tree->object.oid,
- &head->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(origin),
+ get_commit_tree_oid(head),
"", &opts);
diffcore_std(&opts);
diff_flush(&opts);
return base;
}
+define_commit_slab(commit_base, int);
+
static void prepare_bases(struct base_tree_info *bases,
struct commit *base,
struct commit **list,
struct commit *commit;
struct rev_info revs;
struct diff_options diffopt;
+ struct commit_base commit_base;
int i;
if (!base)
return;
+ init_commit_base(&commit_base);
diff_setup(&diffopt);
diffopt.flags.recursive = 1;
diff_setup_done(&diffopt);
for (i = 0; i < total; i++) {
list[i]->object.flags &= ~UNINTERESTING;
add_pending_object(&revs, &list[i]->object, "rev_list");
- list[i]->util = (void *)1;
+ *commit_base_at(&commit_base, list[i]) = 1;
}
base->object.flags |= UNINTERESTING;
add_pending_object(&revs, &base->object, "base");
while ((commit = get_revision(&revs)) != NULL) {
struct object_id oid;
struct object_id *patch_id;
- if (commit->util)
+ if (*commit_base_at(&commit_base, commit))
continue;
if (commit_patch_id(commit, &diffopt, &oid, 0))
die(_("cannot get patch id"));
oidcpy(patch_id, &oid);
bases->nr_patch_id++;
}
+ clear_commit_base(&commit_base);
}
static void print_bases(struct base_tree_info *bases, FILE *file)
N_("output all-zero hash in From header")),
OPT_BOOL(0, "ignore-if-in-upstream", &ignore_if_in_upstream,
N_("don't include a patch matching a commit upstream")),
- { OPTION_SET_INT, 'p', "no-stat", &use_patch_format, NULL,
- N_("show patch format instead of default (patch + stat)"),
- PARSE_OPT_NONEG | PARSE_OPT_NOARG, NULL, 1},
+ OPT_SET_INT_F('p', "no-stat", &use_patch_format,
+ N_("show patch format instead of default (patch + stat)"),
+ 1, PARSE_OPT_NONEG),
OPT_GROUP(N_("Messaging")),
{ OPTION_CALLBACK, 0, "add-header", NULL, N_("header"),
N_("add email header"), 0, header_callback },
#include "builtin.h"
#include "tree-walk.h"
#include "xdiff-interface.h"
+ #include "object-store.h"
#include "blob.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "merge-blobs.h"
static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>";
#include "run-command.h"
#include "diff.h"
#include "refs.h"
+#include "refspec.h"
#include "commit.h"
#include "diffcore.h"
#include "revision.h"
#include "string-list.h"
#include "packfile.h"
#include "tag.h"
+#include "alias.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
OPT_BOOL('e', "edit", &option_edit,
N_("edit message before committing")),
OPT_SET_INT(0, "ff", &fast_forward, N_("allow fast-forward (default)"), FF_ALLOW),
- { OPTION_SET_INT, 0, "ff-only", &fast_forward, NULL,
- N_("abort if fast-forward is not possible"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, FF_ONLY },
+ OPT_SET_INT_F(0, "ff-only", &fast_forward,
+ N_("abort if fast-forward is not possible"),
+ FF_ONLY, PARSE_OPT_NONEG),
OPT_RERERE_AUTOUPDATE(&allow_rerere_auto),
OPT_BOOL(0, "verify-signatures", &verify_signatures,
N_("verify that the named commit has a valid GPG signature")),
/* Cleans up metadata that is uninteresting after a successful merge. */
static void drop_save(void)
{
- unlink(git_path_merge_head());
- unlink(git_path_merge_msg());
- unlink(git_path_merge_mode());
+ unlink(git_path_merge_head(the_repository));
+ unlink(git_path_merge_msg(the_repository));
+ unlink(git_path_merge_mode(the_repository));
}
static int save_state(struct object_id *stash)
return rc;
}
-static void read_empty(unsigned const char *sha1, int verbose)
+static void read_empty(const struct object_id *oid, int verbose)
{
int i = 0;
const char *args[7];
args[i++] = "-v";
args[i++] = "-m";
args[i++] = "-u";
- args[i++] = EMPTY_TREE_SHA1_HEX;
- args[i++] = sha1_to_hex(sha1);
+ args[i++] = empty_tree_oid_hex();
+ args[i++] = oid_to_hex(oid);
args[i] = NULL;
if (run_command_v_opt(args, RUN_GIT_CMD))
die(_("read-tree failed"));
}
-static void reset_hard(unsigned const char *sha1, int verbose)
+static void reset_hard(const struct object_id *oid, int verbose)
{
int i = 0;
const char *args[6];
args[i++] = "-v";
args[i++] = "--reset";
args[i++] = "-u";
- args[i++] = sha1_to_hex(sha1);
+ args[i++] = oid_to_hex(oid);
args[i] = NULL;
if (run_command_v_opt(args, RUN_GIT_CMD))
if (is_null_oid(stash))
return;
- reset_hard(head->hash, 1);
+ reset_hard(head, 1);
args[2] = oid_to_hex(stash);
oid_to_hex(&commit->object.oid));
pretty_print_commit(&ctx, commit, &out);
}
- write_file_buf(git_path_squash_msg(), out.buf, out.len);
+ write_file_buf(git_path_squash_msg(the_repository), out.buf, out.len);
strbuf_release(&out);
}
struct object_id branch_head;
struct strbuf buf = STRBUF_INIT;
struct strbuf bname = STRBUF_INIT;
+ struct merge_remote_desc *desc;
const char *ptr;
char *found_ref;
int len, early;
strbuf_release(&truname);
}
- if (remote_head->util) {
- struct merge_remote_desc *desc;
- desc = merge_remote_util(remote_head);
- if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
- strbuf_addf(msg, "%s\t\t%s '%s'\n",
- oid_to_hex(&desc->obj->oid),
- type_name(desc->obj->type),
- remote);
- goto cleanup;
- }
+ desc = merge_remote_util(remote_head);
+ if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
+ strbuf_addf(msg, "%s\t\t%s '%s'\n",
+ oid_to_hex(&desc->obj->oid),
+ type_name(desc->obj->type),
+ remote);
+ goto cleanup;
}
strbuf_addf(msg, "%s\t\tcommit '%s'\n",
struct commit_list *remoteheads,
struct commit *head)
{
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
const char *head_arg = "HEAD";
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
static void read_merge_msg(struct strbuf *msg)
{
- const char *filename = git_path_merge_msg();
+ const char *filename = git_path_merge_msg(the_repository);
strbuf_reset(msg);
if (strbuf_read_file(msg, filename, 0) < 0)
die_errno(_("Could not read from '%s'"), filename);
if (signoff)
append_signoff(&msg, ignore_non_trailer(msg.buf, msg.len), 0);
write_merge_heads(remoteheads);
- write_file_buf(git_path_merge_msg(), msg.buf, msg.len);
+ write_file_buf(git_path_merge_msg(the_repository), msg.buf, msg.len);
if (run_commit_hook(0 < option_edit, get_index_file(), "prepare-commit-msg",
- git_path_merge_msg(), "merge", NULL))
+ git_path_merge_msg(the_repository), "merge", NULL))
abort_commit(remoteheads, NULL);
if (0 < option_edit) {
- if (launch_editor(git_path_merge_msg(), NULL, NULL))
+ if (launch_editor(git_path_merge_msg(the_repository), NULL, NULL))
abort_commit(remoteheads, NULL);
}
if (verify_msg && run_commit_hook(0 < option_edit, get_index_file(),
"commit-msg",
- git_path_merge_msg(), NULL))
+ git_path_merge_msg(the_repository), NULL))
abort_commit(remoteheads, NULL);
read_merge_msg(&msg);
{
struct object_id result_tree, result_commit;
struct commit_list *parents, **pptr = &parents;
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
FILE *fp;
struct strbuf msgbuf = STRBUF_INIT;
- filename = git_path_merge_msg();
+ filename = git_path_merge_msg(the_repository);
fp = xfopen(filename, "a");
append_conflicts_hint(&msgbuf);
for (j = remoteheads; j; j = j->next) {
struct object_id *oid;
struct commit *c = j->item;
- if (c->util && merge_remote_util(c)->obj) {
- oid = &merge_remote_util(c)->obj->oid;
+ struct merge_remote_desc *desc;
+
+ desc = merge_remote_util(c);
+ if (desc && desc->obj) {
+ oid = &desc->obj->oid;
} else {
oid = &c->object.oid;
}
strbuf_addf(&buf, "%s\n", oid_to_hex(oid));
}
- write_file_buf(git_path_merge_head(), buf.buf, buf.len);
+ write_file_buf(git_path_merge_head(the_repository), buf.buf, buf.len);
strbuf_reset(&buf);
if (fast_forward == FF_NO)
strbuf_addstr(&buf, "no-ff");
- write_file_buf(git_path_merge_mode(), buf.buf, buf.len);
+ write_file_buf(git_path_merge_mode(the_repository), buf.buf, buf.len);
strbuf_release(&buf);
}
{
write_merge_heads(remoteheads);
strbuf_addch(&merge_msg, '\n');
- write_file_buf(git_path_merge_msg(), merge_msg.buf, merge_msg.len);
+ write_file_buf(git_path_merge_msg(the_repository), merge_msg.buf,
+ merge_msg.len);
}
static int default_edit_option(void)
if (!merge_names)
merge_names = &fetch_head_file;
- filename = git_path_fetch_head();
+ filename = git_path_fetch_head(the_repository);
fd = open(filename, O_RDONLY);
if (fd < 0)
die_errno(_("could not open '%s' for reading"), filename);
branch = branch_to_free = resolve_refdup("HEAD", 0, &head_oid, NULL);
if (branch)
skip_prefix(branch, "refs/heads/", &branch);
+
+ init_diff_ui_defaults();
+ git_config(git_merge_config, NULL);
+
if (!branch || is_null_oid(&head_oid))
head_commit = NULL;
else
head_commit = lookup_commit_or_die(&head_oid, "HEAD");
- init_diff_ui_defaults();
- git_config(git_merge_config, NULL);
-
if (branch_mergeoptions)
parse_branch_merge_options(branch_mergeoptions);
argc = parse_options(argc, argv, prefix, builtin_merge_options,
usage_msg_opt(_("--abort expects no arguments"),
builtin_merge_usage, builtin_merge_options);
- if (!file_exists(git_path_merge_head()))
+ if (!file_exists(git_path_merge_head(the_repository)))
die(_("There is no merge to abort (MERGE_HEAD missing)."));
/* Invoke 'git reset --merge' */
usage_msg_opt(_("--continue expects no arguments"),
builtin_merge_usage, builtin_merge_options);
- if (!file_exists(git_path_merge_head()))
+ if (!file_exists(git_path_merge_head(the_repository)))
die(_("There is no merge in progress (MERGE_HEAD missing)."));
/* Invoke 'git commit' */
if (read_cache_unmerged())
die_resolve_conflict("merge");
- if (file_exists(git_path_merge_head())) {
+ if (file_exists(git_path_merge_head(the_repository))) {
/*
* There is no unmerged entry, don't advise 'git
* add/rm <file>', just 'git commit'.
else
die(_("You have not concluded your merge (MERGE_HEAD exists)."));
}
- if (file_exists(git_path_cherry_pick_head())) {
+ if (file_exists(git_path_cherry_pick_head(the_repository))) {
if (advice_resolve_conflict)
die(_("You have not concluded your cherry-pick (CHERRY_PICK_HEAD exists).\n"
"Please, commit your changes before you merge."));
if (remoteheads->next)
die(_("Can merge only exactly one commit into empty head"));
remote_head_oid = &remoteheads->item->object.oid;
- read_empty(remote_head_oid->hash, 0);
+ read_empty(remote_head_oid, 0);
update_ref("initial pull", "HEAD", remote_head_oid, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
goto done;
#include "config.h"
#include "builtin.h"
#include "notes.h"
+ #include "object-store.h"
#include "blob.h"
#include "pretty.h"
#include "refs.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "parse-options.h"
#include "string-list.h"
if (d.buf.len || allow_empty) {
write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
commit_notes(t, "Notes added by 'git notes add'");
} else {
fprintf(stderr, _("Removing note for object %s\n"),
}
if (add_note(t, &object, from_note, combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
commit_notes(t, "Notes added by 'git notes copy'");
out:
free_notes(t);
if (d.buf.len || allow_empty) {
write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
} else {
fprintf(stderr, _("Removing note for object %s\n"),
N_("resolve notes conflicts using the given strategy "
"(manual/ours/theirs/union/cat_sort_uniq)")),
OPT_GROUP(N_("Committing unmerged notes")),
- { OPTION_SET_INT, 0, "commit", &do_commit, NULL,
- N_("finalize notes merge by committing unmerged notes"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1},
+ OPT_SET_INT_F(0, "commit", &do_commit,
+ N_("finalize notes merge by committing unmerged notes"),
+ 1, PARSE_OPT_NONEG),
OPT_GROUP(N_("Aborting notes merge resolution")),
- { OPTION_SET_INT, 0, "abort", &do_abort, NULL,
- N_("abort notes merge"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1},
+ OPT_SET_INT_F(0, "abort", &do_abort,
+ N_("abort notes merge"),
+ 1, PARSE_OPT_NONEG),
OPT_END()
};
const char *short_ref = NULL;
if (!skip_prefix(o.local_ref, "refs/notes/", &short_ref))
- die("BUG: local ref %s is outside of refs/notes/",
+ BUG("local ref %s is outside of refs/notes/",
o.local_ref);
strbuf_addf(&merge_key, "notes.%s.mergeStrategy", short_ref);
#include "list.h"
#include "packfile.h"
#include "object-store.h"
+#include "dir.h"
+
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
static const char *pack_usage[] = {
N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int local;
static int have_non_local_packs;
static int incremental;
-static int ignore_packed_keep;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int exclude_promisor_objects;
static unsigned long delta_cache_size = 0;
-static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(&entry->idx.oid));
- base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
+ base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+ &base_size);
if (!base_buf)
die("unable to read %s",
- oid_to_hex(&entry->delta->idx.oid));
+ oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
- if (!delta_buf || delta_size != entry->delta_size)
+ if (!delta_buf || delta_size != DELTA_SIZE(entry))
die("delta size changed");
free(buf);
free(base_buf);
enum object_type type;
void *buf;
struct git_istream *st = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!usable_delta) {
- if (entry->type == OBJ_BLOB &&
- entry->size > big_file_threshold &&
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
(st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
} else if (entry->delta_data) {
- size = entry->delta_size;
+ size = DELTA_SIZE(entry);
buf = entry->delta_data;
entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ size = DELTA_SIZE(entry);
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}
* encoding of the relative offset for the delta
* base from this object's position in the pack.
*/
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
} else if (type == OBJ_REF_DELTA) {
/*
* Deltas with a base reference contain
- * an additional 20 bytes for the base sha1.
+ * additional bytes for the base object ID.
*/
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, entry->delta->idx.oid.hash, 20);
- hdrlen += 20;
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
- struct packed_git *p = entry->in_pack;
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
- enum object_type type = entry->type;
+ enum object_type type = oe_type(entry);
off_t datalen;
unsigned char header[MAX_PACK_OBJECT_HEADER],
dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned long entry_size = SIZE(entry);
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
hdrlen = encode_in_pack_object_header(header, sizeof(header),
- type, entry->size);
+ type, entry_size);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
datalen -= entry->in_pack_header_size;
if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
+ check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
error("corrupt packed object for %s",
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
}
if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, entry->delta->idx.oid.hash, 20);
- hdrlen += 20;
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
reused_delta++;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
else
limit = pack_size_limit - write_offset;
- if (!entry->delta)
+ if (!DELTA(entry))
usable_delta = 0; /* no delta */
else if (!pack_size_limit)
usable_delta = 1; /* unlimited packfile */
- else if (entry->delta->idx.offset == (off_t)-1)
+ else if (DELTA(entry)->idx.offset == (off_t)-1)
usable_delta = 0; /* base was written to another pack */
- else if (entry->delta->idx.offset)
+ else if (DELTA(entry)->idx.offset)
usable_delta = 1; /* base already exists in this pack */
else
usable_delta = 0; /* base could end up in another pack */
if (!reuse_object)
to_reuse = 0; /* explicit */
- else if (!entry->in_pack)
+ else if (!IN_PACK(entry))
to_reuse = 0; /* can't reuse what we don't have */
- else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
+ else if (oe_type(entry) == OBJ_REF_DELTA ||
+ oe_type(entry) == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (entry->type != entry->in_pack_type)
+ else if (oe_type(entry) != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
- else if (entry->delta)
+ else if (DELTA(entry))
to_reuse = 0; /* we want to pack afresh */
else
to_reuse = 1; /* we have it in-pack undeltified,
}
/* if we are deltified, write out base object first. */
- if (e->delta) {
+ if (DELTA(e)) {
e->idx.offset = 1; /* now recurse */
- switch (write_one(f, e->delta, offset)) {
+ switch (write_one(f, DELTA(e), offset)) {
case WRITE_ONE_RECURSIVE:
/* we cannot depend on this one */
- e->delta = NULL;
+ SET_DELTA(e, NULL);
break;
default:
break;
/* add this node... */
add_to_write_order(wo, endp, e);
/* all its siblings... */
- for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
add_to_write_order(wo, endp, s);
}
}
/* drop down a level to add left subtree nodes if possible */
- if (e->delta_child) {
+ if (DELTA_CHILD(e)) {
add_to_order = 1;
- e = e->delta_child;
+ e = DELTA_CHILD(e);
} else {
add_to_order = 0;
/* our sibling might have some children, it is next */
- if (e->delta_sibling) {
- e = e->delta_sibling;
+ if (DELTA_SIBLING(e)) {
+ e = DELTA_SIBLING(e);
continue;
}
/* go back to our parent node */
- e = e->delta;
- while (e && !e->delta_sibling) {
+ e = DELTA(e);
+ while (e && !DELTA_SIBLING(e)) {
/* we're on the right side of a subtree, keep
* going up until we can go right again */
- e = e->delta;
+ e = DELTA(e);
}
if (!e) {
/* done- we hit our original root node */
return;
}
/* pass it off to sibling at this level */
- e = e->delta_sibling;
+ e = DELTA_SIBLING(e);
}
};
}
{
struct object_entry *root;
- for (root = e; root->delta; root = root->delta)
+ for (root = e; DELTA(root); root = DELTA(root))
; /* nothing */
add_descendants_to_write_order(wo, endp, root);
}
for (i = 0; i < to_pack.nr_objects; i++) {
objects[i].tagged = 0;
objects[i].filled = 0;
- objects[i].delta_child = NULL;
- objects[i].delta_sibling = NULL;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
}
/*
*/
for (i = to_pack.nr_objects; i > 0;) {
struct object_entry *e = &objects[--i];
- if (!e->delta)
+ if (!DELTA(e))
continue;
/* Mark me as the first child */
- e->delta_sibling = e->delta->delta_child;
- e->delta->delta_child = e;
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
}
/*
* And then all remaining commits and tags.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_COMMIT &&
- objects[i].type != OBJ_TAG)
+ if (oe_type(&objects[i]) != OBJ_COMMIT &&
+ oe_type(&objects[i]) != OBJ_TAG)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
* And then all the trees.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_TREE)
+ if (oe_type(&objects[i]) != OBJ_TREE)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
die_errno("unable to seek in reused packfile");
if (reuse_packfile_offset < 0)
- reuse_packfile_offset = reuse_packfile->pack_size - 20;
+ reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- hashclose(f, oid.hash, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- hashclose(f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(f, oid.hash, 0);
+ int fd = finalize_hashfile(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
if (write_bitmap_index) {
bitmap_writer_set_checksum(oid.hash);
- bitmap_writer_build_type_index(written_list, nr_written);
+ bitmap_writer_build_type_index(
+ &to_pack, written_list, nr_written);
}
finish_tmp_packfile(&tmpname, pack_tmp_name,
* Otherwise, we signal "-1" at the end to tell the caller that we do
* not know either way, and it needs to check more packs.
*/
- if (!ignore_packed_keep &&
+ if (!ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs))
return 1;
if (local && !p->pack_local)
return 0;
- if (ignore_packed_keep && p->pack_local && p->pack_keep)
+ if (p->pack_local &&
+ ((ignore_packed_keep_on_disk && p->pack_keep) ||
+ (ignore_packed_keep_in_core && p->pack_keep_in_core)))
return 0;
/* we don't know yet; keep looking for more packs */
int want;
struct list_head *pos;
- if (!exclude && local && has_loose_object_nonlocal(oid->hash))
+ if (!exclude && local && has_loose_object_nonlocal(oid))
return 0;
/*
entry = packlist_alloc(&to_pack, oid->hash, index_pos);
entry->hash = hash;
- if (type)
- entry->type = type;
+ oe_set_type(entry, type);
if (exclude)
entry->preferred_base = 1;
else
nr_result++;
if (found_pack) {
- entry->in_pack = found_pack;
+ oe_set_in_pack(&to_pack, entry, found_pack);
entry->in_pack_offset = found_offset;
}
off_t found_offset = 0;
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, exclude, &index_pos))
return 0;
create_object_entry(oid, type, pack_name_hash(name),
exclude, name && no_try_delta(name),
index_pos, found_pack, found_offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
{
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, 0, &index_pos))
return 0;
return 0;
create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
static void check_object(struct object_entry *entry)
{
- if (entry->in_pack) {
- struct packed_git *p = entry->in_pack;
+ unsigned long canonical_size;
+
+ if (IN_PACK(entry)) {
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
unsigned long avail;
off_t ofs;
unsigned char *buf, c;
+ enum object_type type;
+ unsigned long in_pack_size;
buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
* since non-delta representations could still be reused.
*/
used = unpack_object_header_buffer(buf, avail,
- &entry->in_pack_type,
- &entry->size);
+ &type,
+ &in_pack_size);
if (used == 0)
goto give_up;
+ if (type < 0)
+ BUG("invalid type %d", type);
+ entry->in_pack_type = type;
+
/*
* Determine if this is a delta and if so whether we can
* reuse it or not. Otherwise let's find out as cheaply as
switch (entry->in_pack_type) {
default:
/* Not a delta hence we've already got all we need. */
- entry->type = entry->in_pack_type;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size);
entry->in_pack_header_size = used;
- if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
+ if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
goto give_up;
unuse_pack(&w_curs);
return;
if (reuse_delta && !entry->preferred_base)
base_ref = use_pack(p, &w_curs,
entry->in_pack_offset + used, NULL);
- entry->in_pack_header_size = used + 20;
+ entry->in_pack_header_size = used + the_hash_algo->rawsz;
break;
case OBJ_OFS_DELTA:
buf = use_pack(p, &w_curs,
* deltify other objects against, in order to avoid
* circular deltas.
*/
- entry->type = entry->in_pack_type;
- entry->delta = base_entry;
- entry->delta_size = entry->size;
- entry->delta_sibling = base_entry->delta_child;
- base_entry->delta_child = entry;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size); /* delta size */
+ SET_DELTA(entry, base_entry);
+ SET_DELTA_SIZE(entry, in_pack_size);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
unuse_pack(&w_curs);
return;
}
- if (entry->type) {
+ if (oe_type(entry)) {
+ off_t delta_pos;
+
/*
* This must be a delta and we already know what the
* final object type is. Let's extract the actual
* object size from the delta header.
*/
- entry->size = get_size_from_delta(p, &w_curs,
- entry->in_pack_offset + entry->in_pack_header_size);
- if (entry->size == 0)
+ delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+ canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+ if (canonical_size == 0)
goto give_up;
+ SET_SIZE(entry, canonical_size);
unuse_pack(&w_curs);
return;
}
unuse_pack(&w_curs);
}
- entry->type = oid_object_info(the_repository, &entry->idx.oid,
- &entry->size);
- /*
- * The error condition is checked in prepare_pack(). This is
- * to permit a missing preferred base object to be ignored
- * as a preferred base. Doing so can result in a larger
- * pack file, but the transfer will still take place.
- */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
+ if (entry->type_valid) {
+ SET_SIZE(entry, canonical_size);
+ } else {
+ /*
+ * Bad object type is checked in prepare_pack(). This is
+ * to permit a missing preferred base object to be ignored
+ * as a preferred base. Doing so can result in a larger
+ * pack file, but the transfer will still take place.
+ */
+ }
}
static int pack_offset_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ const struct packed_git *a_in_pack = IN_PACK(a);
+ const struct packed_git *b_in_pack = IN_PACK(b);
/* avoid filesystem trashing with loose objects */
- if (!a->in_pack && !b->in_pack)
+ if (!a_in_pack && !b_in_pack)
return oidcmp(&a->idx.oid, &b->idx.oid);
- if (a->in_pack < b->in_pack)
+ if (a_in_pack < b_in_pack)
return -1;
- if (a->in_pack > b->in_pack)
+ if (a_in_pack > b_in_pack)
return 1;
return a->in_pack_offset < b->in_pack_offset ? -1 :
(a->in_pack_offset > b->in_pack_offset);
*/
static void drop_reused_delta(struct object_entry *entry)
{
- struct object_entry **p = &entry->delta->delta_child;
+ unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
struct object_info oi = OBJECT_INFO_INIT;
+ enum object_type type;
+ unsigned long size;
+
+ while (*idx) {
+ struct object_entry *oe = &to_pack.objects[*idx - 1];
- while (*p) {
- if (*p == entry)
- *p = (*p)->delta_sibling;
+ if (oe == entry)
+ *idx = oe->delta_sibling_idx;
else
- p = &(*p)->delta_sibling;
+ idx = &oe->delta_sibling_idx;
}
- entry->delta = NULL;
+ SET_DELTA(entry, NULL);
entry->depth = 0;
- oi.sizep = &entry->size;
- oi.typep = &entry->type;
- if (packed_object_info(the_repository, entry->in_pack,
- entry->in_pack_offset, &oi) < 0) {
+ oi.sizep = &size;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
* fall back to sha1_object_info, which may find another copy.
- * And if that fails, the error will be recorded in entry->type
+ * And if that fails, the error will be recorded in oe_type(entry)
* and dealt with in prepare_pack().
*/
- entry->type = oid_object_info(the_repository, &entry->idx.oid,
- &entry->size);
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &size));
+ } else {
+ oe_set_type(entry, type);
}
+ SET_SIZE(entry, size);
}
/*
for (cur = entry, total_depth = 0;
cur;
- cur = cur->delta, total_depth++) {
+ cur = DELTA(cur), total_depth++) {
if (cur->dfs_state == DFS_DONE) {
/*
* We've already seen this object and know it isn't
* is a bug.
*/
if (cur->dfs_state != DFS_NONE)
- die("BUG: confusing delta dfs state in first pass: %d",
+ BUG("confusing delta dfs state in first pass: %d",
cur->dfs_state);
/*
* it's not a delta, we're done traversing, but we'll mark it
* done to save time on future traversals.
*/
- if (!cur->delta) {
+ if (!DELTA(cur)) {
cur->dfs_state = DFS_DONE;
break;
}
* We keep all commits in the chain that we examined.
*/
cur->dfs_state = DFS_ACTIVE;
- if (cur->delta->dfs_state == DFS_ACTIVE) {
+ if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
drop_reused_delta(cur);
cur->dfs_state = DFS_DONE;
break;
* an extra "next" pointer to keep going after we reset cur->delta.
*/
for (cur = entry; cur; cur = next) {
- next = cur->delta;
+ next = DELTA(cur);
/*
* We should have a chain of zero or more ACTIVE states down to
if (cur->dfs_state == DFS_DONE)
break;
else if (cur->dfs_state != DFS_ACTIVE)
- die("BUG: confusing delta dfs state in second pass: %d",
+ BUG("confusing delta dfs state in second pass: %d",
cur->dfs_state);
/*
uint32_t i;
struct object_entry **sorted_by_offset;
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold < entry->size)
+ if (entry->type_valid &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold))
entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
}
+ stop_progress(&progress_state);
/*
* This must happen in a second pass, since we rely on the delta
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ enum object_type a_type = oe_type(a);
+ enum object_type b_type = oe_type(b);
+ unsigned long a_size = SIZE(a);
+ unsigned long b_size = SIZE(b);
- if (a->type > b->type)
+ if (a_type > b_type)
return -1;
- if (a->type < b->type)
+ if (a_type < b_type)
return 1;
if (a->hash > b->hash)
return -1;
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
- if (a->size > b->size)
+ if (a_size > b_size)
return -1;
- if (a->size < b->size)
+ if (a_size < b_size)
return 1;
return a < b ? -1 : (a > b); /* newest first */
}
#endif
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ struct packed_git *p;
+ struct pack_window *w_curs;
+ unsigned char *buf;
+ enum object_type type;
+ unsigned long used, avail, size;
+
+ if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+ read_lock();
+ if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ die(_("unable to get size of %s"),
+ oid_to_hex(&e->idx.oid));
+ read_unlock();
+ return size;
+ }
+
+ p = oe_in_pack(pack, e);
+ if (!p)
+ BUG("when e->type is a delta, it must belong to a pack");
+
+ read_lock();
+ w_curs = NULL;
+ buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+ used = unpack_object_header_buffer(buf, avail, &type, &size);
+ if (used == 0)
+ die(_("unable to parse object header of %s"),
+ oid_to_hex(&e->idx.oid));
+
+ unuse_pack(&w_curs);
+ read_unlock();
+ return size;
+}
+
static int try_delta(struct unpacked *trg, struct unpacked *src,
unsigned max_depth, unsigned long *mem_usage)
{
void *delta_buf;
/* Don't bother doing diffs between different types */
- if (trg_entry->type != src_entry->type)
+ if (oe_type(trg_entry) != oe_type(src_entry))
return -1;
/*
* it, we will still save the transfer cost, as we already know
* the other side has it and we won't send src_entry at all.
*/
- if (reuse_delta && trg_entry->in_pack &&
- trg_entry->in_pack == src_entry->in_pack &&
+ if (reuse_delta && IN_PACK(trg_entry) &&
+ IN_PACK(trg_entry) == IN_PACK(src_entry) &&
!src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
return 0;
/* Now some size filtering heuristics. */
- trg_size = trg_entry->size;
- if (!trg_entry->delta) {
- max_size = trg_size/2 - 20;
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
+ max_size = trg_size/2 - the_hash_algo->rawsz;
ref_depth = 1;
} else {
- max_size = trg_entry->delta_size;
+ max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
(max_depth - ref_depth + 1);
if (max_size == 0)
return 0;
- src_size = src_entry->size;
+ src_size = SIZE(src_entry);
sizediff = src_size < trg_size ? trg_size - src_size : 0;
if (sizediff >= max_size)
return 0;
delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
if (!delta_buf)
return 0;
+ if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
+ free(delta_buf);
+ return 0;
+ }
- if (trg_entry->delta) {
+ if (DELTA(trg_entry)) {
/* Prefer only shallower same-sized deltas. */
- if (delta_size == trg_entry->delta_size &&
+ if (delta_size == DELTA_SIZE(trg_entry) &&
src->depth + 1 >= trg->depth) {
free(delta_buf);
return 0;
free(trg_entry->delta_data);
cache_lock();
if (trg_entry->delta_data) {
- delta_cache_size -= trg_entry->delta_size;
+ delta_cache_size -= DELTA_SIZE(trg_entry);
trg_entry->delta_data = NULL;
}
if (delta_cacheable(src_size, trg_size, delta_size)) {
free(delta_buf);
}
- trg_entry->delta = src_entry;
- trg_entry->delta_size = delta_size;
+ SET_DELTA(trg_entry, src_entry);
+ SET_DELTA_SIZE(trg_entry, delta_size);
trg->depth = src->depth + 1;
return 1;
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
- struct object_entry *child = me->delta_child;
+ struct object_entry *child = DELTA_CHILD(me);
unsigned int m = n;
while (child) {
unsigned int c = check_delta_limit(child, n + 1);
if (m < c)
m = c;
- child = child->delta_sibling;
+ child = DELTA_SIBLING(child);
}
return m;
}
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
- freed_mem += n->entry->size;
+ freed_mem += SIZE(n->entry);
FREE_AND_NULL(n->data);
}
n->entry = NULL;
* otherwise they would become too deep.
*/
max_depth = depth;
- if (entry->delta_child) {
+ if (DELTA_CHILD(entry)) {
max_depth -= check_delta_limit(entry, 0);
if (max_depth <= 0)
goto next;
* between writes at that moment.
*/
if (entry->delta_data && !pack_to_stdout) {
- entry->z_delta_size = do_compress(&entry->delta_data,
- entry->delta_size);
- cache_lock();
- delta_cache_size -= entry->delta_size;
- delta_cache_size += entry->z_delta_size;
- cache_unlock();
+ unsigned long size;
+
+ size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
+ if (size < (1U << OE_Z_DELTA_BITS)) {
+ entry->z_delta_size = size;
+ cache_lock();
+ delta_cache_size -= DELTA_SIZE(entry);
+ delta_cache_size += entry->z_delta_size;
+ cache_unlock();
+ } else {
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ }
}
/* if we made n a delta, and if n is already at max
* depth, leaving it in the window is pointless. we
* should evict it first.
*/
- if (entry->delta && max_depth <= n->depth)
+ if (DELTA(entry) && max_depth <= n->depth)
continue;
/*
* currently deltified object, to keep it longer. It will
* be the first base object to be attempted next.
*/
- if (entry->delta) {
+ if (DELTA(entry)) {
struct unpacked swap = array[best_base];
int dist = (window + idx - best_base) % window;
int dst = best_base;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;
- if (entry->delta)
+ if (DELTA(entry))
/* This happens if we decided to reuse existing
* delta from a pack. "reuse_delta &&" is implied.
*/
continue;
- if (entry->size < 50)
+ if (!entry->type_valid ||
+ oe_size_less_than(&to_pack, entry, 50))
continue;
if (entry->no_try_delta)
if (!entry->preferred_base) {
nr_deltas++;
- if (entry->type < 0)
+ if (oe_type(entry) < 0)
die("unable to get type of object %s",
oid_to_hex(&entry->idx.oid));
} else {
- if (entry->type < 0) {
+ if (oe_type(entry) < 0) {
/*
* This object is not found, but we
* don't have to include it anyway.
die("expected object ID, got garbage:\n %s", line);
add_preferred_base_object(p + 1);
- add_object_entry(&oid, 0, p + 1, 0);
+ add_object_entry(&oid, OBJ_NONE, p + 1, 0);
}
}
struct object_id oid;
struct object *o;
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
die("cannot open pack index");
get_packed_git(the_repository);
while (p) {
- if ((!p->pack_local || p->pack_keep) &&
+ if ((!p->pack_local || p->pack_keep ||
+ p->pack_keep_in_core) &&
find_pack_entry_one(oid->hash, p)) {
last_found = p;
return 1;
struct object_id oid;
for (p = get_packed_git(the_repository); p; p = p->next) {
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
{
return pack_to_stdout &&
allow_ofs_delta &&
- !ignore_packed_keep &&
+ !ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs) &&
!incremental;
}
setup_revisions(ac, av, &revs, NULL);
/* make sure shallows are read */
- is_repository_shallow();
+ is_repository_shallow(the_repository);
while (fgets(line, sizeof(line), stdin) != NULL) {
int len = strlen(line);
struct object_id oid;
if (get_oid_hex(line + 10, &oid))
die("not an SHA-1 '%s'", line + 10);
- register_shallow(&oid);
+ register_shallow(the_repository, &oid);
use_bitmap_index = 0;
continue;
}
oid_array_clear(&recent_objects);
}
+static void add_extra_kept_packs(const struct string_list *names)
+{
+ struct packed_git *p;
+
+ if (!names->nr)
+ return;
+
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ const char *name = basename(p->pack_name);
+ int i;
+
+ if (!p->pack_local)
+ continue;
+
+ for (i = 0; i < names->nr; i++)
+ if (!fspathcmp(name, names->items[i].string))
+ break;
+
+ if (i < names->nr) {
+ p->pack_keep_in_core = 1;
+ ignore_packed_keep_in_core = 1;
+ continue;
+ }
+ }
+}
+
static int option_parse_index_version(const struct option *opt,
const char *arg, int unset)
{
struct argv_array rp = ARGV_ARRAY_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
N_("do not create an empty pack output")),
OPT_BOOL(0, "revs", &use_internal_rev_list,
N_("read revision arguments from standard input")),
- { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
- N_("limit the objects to those that are not yet packed"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
- N_("include objects reachable from any reference"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
- N_("include objects referred by reflog entries"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
- N_("include objects referred to by the index"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
+ OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
+ N_("limit the objects to those that are not yet packed"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "all", &rev_list_all,
+ N_("include objects reachable from any reference"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
+ N_("include objects referred by reflog entries"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
+ N_("include objects referred to by the index"),
+ 1, PARSE_OPT_NONEG),
OPT_BOOL(0, "stdout", &pack_to_stdout,
N_("output pack to stdout")),
OPT_BOOL(0, "include-tag", &include_tag,
N_("create thin packs")),
OPT_BOOL(0, "shallow", &shallow,
N_("create packs suitable for shallow fetches")),
- OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
+ OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
N_("ignore packs that have companion .keep file")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("ignore this pack")),
OPT_INTEGER(0, "compression", &pack_compression_level,
N_("pack compression level")),
OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
OPT_END(),
};
+ if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+ BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
check_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
if (pack_to_stdout != !base_name || argc)
usage_with_options(pack_usage, pack_objects_options);
+ if (depth >= (1 << OE_DEPTH_BITS)) {
+ warning(_("delta chain depth %d is too deep, forcing %d"),
+ depth, (1 << OE_DEPTH_BITS) - 1);
+ depth = (1 << OE_DEPTH_BITS) - 1;
+ }
+ if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+ warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+ (1U << OE_Z_DELTA_BITS) - 1);
+ cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+ }
+
argv_array_push(&rp, "pack-objects");
if (thin) {
use_internal_rev_list = 1;
fetch_if_missing = 0;
argv_array_push(&rp, "--exclude-promisor-objects");
}
+ if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+ use_internal_rev_list = 1;
if (!reuse_object)
reuse_delta = 0;
use_bitmap_index = use_bitmap_index_default;
/* "hard" reasons not to use bitmaps; these just won't work at all */
- if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
+ if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
use_bitmap_index = 0;
if (pack_to_stdout || !rev_list_all)
if (progress && all_progress_implied)
progress = 2;
- if (ignore_packed_keep) {
+ add_extra_kept_packs(&keep_pack_list);
+ if (ignore_packed_keep_on_disk) {
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
- ignore_packed_keep = 0;
+ ignore_packed_keep_on_disk = 0;
}
if (local) {
/*
- * unlike ignore_packed_keep above, we do not want to
- * unset "local" based on looking at packs, as it
- * also covers non-local objects
+ * unlike ignore_packed_keep_on_disk above, we do not
+ * want to unset "local" based on looking at packs, as
+ * it also covers non-local objects
*/
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next) {
}
}
+ prepare_packing_data(&to_pack);
+
if (progress)
- progress_state = start_progress(_("Counting objects"), 0);
+ progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
#include "config.h"
#include "builtin.h"
#include "parse-options.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "sha1-array.h"
#include "remote.h"
#include "dir.h"
#include "refs.h"
+#include "refspec.h"
#include "revision.h"
#include "submodule.h"
#include "submodule-config.h"
REBASE_FALSE = 0,
REBASE_TRUE,
REBASE_PRESERVE,
+ REBASE_MERGES,
REBASE_INTERACTIVE
};
/**
* Parses the value of --rebase. If value is a false value, returns
* REBASE_FALSE. If value is a true value, returns REBASE_TRUE. If value is
- * "preserve", returns REBASE_PRESERVE. If value is a invalid value, dies with
- * a fatal error if fatal is true, otherwise returns REBASE_INVALID.
+ * "merges", returns REBASE_MERGES. If value is "preserve", returns
+ * REBASE_PRESERVE. If value is a invalid value, dies with a fatal error if
+ * fatal is true, otherwise returns REBASE_INVALID.
*/
static enum rebase_type parse_config_rebase(const char *key, const char *value,
int fatal)
return REBASE_TRUE;
else if (!strcmp(value, "preserve"))
return REBASE_PRESERVE;
+ else if (!strcmp(value, "merges"))
+ return REBASE_MERGES;
else if (!strcmp(value, "interactive"))
return REBASE_INTERACTIVE;
/* Options passed to git-merge or git-rebase */
OPT_GROUP(N_("Options related to merging")),
{ OPTION_CALLBACK, 'r', "rebase", &opt_rebase,
- "false|true|preserve|interactive",
+ "false|true|merges|preserve|interactive",
N_("incorporate changes by rebasing rather than merging"),
PARSE_OPT_OPTARG, parse_opt_rebase },
OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
*/
static void get_merge_heads(struct oid_array *merge_heads)
{
- const char *filename = git_path_fetch_head();
+ const char *filename = git_path_fetch_head(the_repository);
FILE *fp;
struct strbuf sb = STRBUF_INIT;
struct object_id oid;
argv_array_push(&args, repo);
argv_array_pushv(&args, refspecs);
} else if (*refspecs)
- die("BUG: refspecs without repo?");
+ BUG("refspecs without repo?");
ret = run_command_v_opt(args.argv, RUN_GIT_CMD);
argv_array_clear(&args);
return ret;
}
/**
- * Derives the remote tracking branch from the remote and refspec.
+ * Derives the remote-tracking branch from the remote and refspec.
*
* FIXME: The current implementation assumes the default mapping of
* refs/heads/<branch_name> to refs/remotes/<remote_name>/<branch_name>.
*/
static const char *get_tracking_branch(const char *remote, const char *refspec)
{
- struct refspec *spec;
+ struct refspec_item spec;
const char *spec_src;
const char *merge_branch;
- spec = parse_fetch_refspec(1, &refspec);
- spec_src = spec->src;
+ refspec_item_init_or_die(&spec, refspec, REFSPEC_FETCH);
+ spec_src = spec.src;
if (!*spec_src || !strcmp(spec_src, "HEAD"))
spec_src = "HEAD";
else if (skip_prefix(spec_src, "heads/", &spec_src))
} else
merge_branch = NULL;
- free_refspec(1, spec);
+ refspec_item_clear(&spec);
return merge_branch;
}
/**
* Given the repo and refspecs, sets fork_point to the point at which the
- * current branch forked from its remote tracking branch. Returns 0 on success,
+ * current branch forked from its remote-tracking branch. Returns 0 on success,
* -1 on failure.
*/
static int get_rebase_fork_point(struct object_id *fork_point, const char *repo,
argv_push_verbosity(&args);
/* Options passed to git-rebase */
- if (opt_rebase == REBASE_PRESERVE)
+ if (opt_rebase == REBASE_MERGES)
+ argv_array_push(&args, "--rebase-merges");
+ else if (opt_rebase == REBASE_PRESERVE)
argv_array_push(&args, "--preserve-merges");
else if (opt_rebase == REBASE_INTERACTIVE)
argv_array_push(&args, "--interactive");
if (read_cache_unmerged())
die_resolve_conflict("pull");
- if (file_exists(git_path_merge_head()))
+ if (file_exists(git_path_merge_head(the_repository)))
die_conclude_merge();
if (get_oid("HEAD", &orig_head))
#include "pkt-line.h"
#include "sideband.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "commit.h"
#include "object.h"
#include "remote.h"
#include "tmp-objdir.h"
#include "oidset.h"
#include "packfile.h"
+ #include "object-store.h"
#include "protocol.h"
static const char * const receive_pack_usage[] = {
/* RFC 2104 2. (6) & (7) */
git_SHA1_Init(&ctx);
git_SHA1_Update(&ctx, k_opad, sizeof(k_opad));
- git_SHA1_Update(&ctx, out, 20);
+ git_SHA1_Update(&ctx, out, GIT_SHA1_RAWSZ);
git_SHA1_Final(out, &ctx);
}
static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
{
struct strbuf buf = STRBUF_INIT;
- unsigned char sha1[20];
+ unsigned char sha1[GIT_SHA1_RAWSZ];
strbuf_addf(&buf, "%s:%"PRItime, path, stamp);
hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
strbuf_release(&buf);
/* RFC 2104 5. HMAC-SHA1-80 */
- strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, 20, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, GIT_SHA1_HEXSZ, sha1_to_hex(sha1));
return strbuf_detach(&buf, NULL);
}
static int command_singleton_iterator(void *cb_data, struct object_id *oid);
static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
{
- static struct lock_file shallow_lock;
+ struct lock_file shallow_lock = LOCK_INIT;
struct oid_array extra = OID_ARRAY_INIT;
struct check_connected_options opt = CHECK_CONNECTED_INIT;
uint32_t mask = 1 << (cmd->index % 32);
* not lose these new roots..
*/
for (i = 0; i < extra.nr; i++)
- register_shallow(&extra.oid[i]);
+ register_shallow(the_repository, &extra.oid[i]);
si->shallow_ref[cmd->index] = 0;
oid_array_clear(&extra);
return "Working directory has unstaged changes";
/* diff-index with either HEAD or an empty tree */
- diff_index[4] = head_has_history() ? "HEAD" : EMPTY_TREE_SHA1_HEX;
+ diff_index[4] = head_has_history() ? "HEAD" : empty_tree_oid_hex();
child_process_init(&child);
child.argv = diff_index;
}
}
if (!checked_connectivity)
- die("BUG: connectivity check skipped???");
+ BUG("connectivity check skipped???");
}
static void execute_commands_non_atomic(struct command *commands,
unpack_limit = receive_unpack_limit;
switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ /*
+ * push support for protocol v2 has not been implemented yet,
+ * so ignore the request to use v2 and fall back to using v0.
+ */
+ break;
case protocol_v1:
/*
* v1 is just the original protocol with a version string,
#include "builtin.h"
#include "config.h"
#include "lockfile.h"
+ #include "object-store.h"
#include "commit.h"
#include "refs.h"
#include "dir.h"
for (i = 0; i < found.nr; i++) {
struct commit *c =
(struct commit *)found.objects[i].item;
- if (!tree_is_complete(&c->tree->object.oid)) {
+ if (!tree_is_complete(get_commit_tree_oid(c))) {
is_incomplete = 1;
c->object.flags |= INCOMPLETE;
}
#include "strbuf.h"
#include "run-command.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "argv-array.h"
static const char * const builtin_remote_usage[] = {
struct branch_info {
char *remote_name;
struct string_list merge;
- enum { NO_REBASE, NORMAL_REBASE, INTERACTIVE_REBASE } rebase;
+ enum {
+ NO_REBASE, NORMAL_REBASE, INTERACTIVE_REBASE, REBASE_MERGES
+ } rebase;
};
static struct string_list branch_list = STRING_LIST_INIT_NODUP;
info->rebase = v;
else if (!strcmp(value, "preserve"))
info->rebase = NORMAL_REBASE;
+ else if (!strcmp(value, "merges"))
+ info->rebase = REBASE_MERGES;
else if (!strcmp(value, "interactive"))
info->rebase = INTERACTIVE_REBASE;
}
struct ref *ref, *stale_refs;
int i;
- for (i = 0; i < states->remote->fetch_refspec_nr; i++)
- if (get_fetch_map(remote_refs, states->remote->fetch + i, &tail, 1))
+ for (i = 0; i < states->remote->fetch.nr; i++)
+ if (get_fetch_map(remote_refs, &states->remote->fetch.items[i], &tail, 1))
die(_("Could not get fetch map for refspec %s"),
- states->remote->fetch_refspec[i]);
+ states->remote->fetch.raw[i]);
states->new_refs.strdup_strings = 1;
states->tracked.strdup_strings = 1;
else
string_list_append(&states->tracked, abbrev_branch(ref->name));
}
- stale_refs = get_stale_heads(states->remote->fetch,
- states->remote->fetch_refspec_nr, fetch_map);
+ stale_refs = get_stale_heads(&states->remote->fetch, fetch_map);
for (ref = stale_refs; ref; ref = ref->next) {
struct string_list_item *item =
string_list_append(&states->stale, abbrev_branch(ref->name));
local_refs = get_local_heads();
push_map = copy_ref_list(remote_refs);
- match_push_refs(local_refs, &push_map, remote->push_refspec_nr,
- remote->push_refspec, MATCH_REFS_NONE);
+ match_push_refs(local_refs, &push_map, &remote->push, MATCH_REFS_NONE);
states->push.strdup_strings = 1;
for (ref = push_map; ref; ref = ref->next) {
return 0;
states->push.strdup_strings = 1;
- if (!remote->push_refspec_nr) {
+ if (!remote->push.nr) {
item = string_list_append(&states->push, _("(matching)"));
info = item->util = xcalloc(1, sizeof(struct push_info));
info->status = PUSH_STATUS_NOTQUERIED;
info->dest = xstrdup(item->string);
}
- for (i = 0; i < remote->push_refspec_nr; i++) {
- struct refspec *spec = remote->push + i;
+ for (i = 0; i < remote->push.nr; i++) {
+ const struct refspec_item *spec = &remote->push.items[i];
if (spec->matching)
item = string_list_append(&states->push, _("(matching)"));
else if (strlen(spec->src))
{
struct ref *ref, *matches;
struct ref *fetch_map = NULL, **fetch_map_tail = &fetch_map;
- struct refspec refspec;
+ struct refspec_item refspec;
refspec.force = 0;
refspec.pattern = 1;
const struct object_id *oid, int flags, void *cb_data)
{
struct branches_for_remote *branches = cb_data;
- struct refspec refspec;
+ struct refspec_item refspec;
struct known_remote *kr;
memset(&refspec, 0, sizeof(refspec));
git_config_set_multivar(buf.buf, remote->url[i], "^$", 0);
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.push", remote->name);
- for (i = 0; i < remote->push_refspec_nr; i++)
- git_config_set_multivar(buf.buf, remote->push_refspec[i], "^$", 0);
+ for (i = 0; i < remote->push.raw_nr; i++)
+ git_config_set_multivar(buf.buf, remote->push.raw[i], "^$", 0);
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.fetch", remote->name);
- for (i = 0; i < remote->fetch_refspec_nr; i++)
- git_config_set_multivar(buf.buf, remote->fetch_refspec[i], "^$", 0);
+ for (i = 0; i < remote->fetch.raw_nr; i++)
+ git_config_set_multivar(buf.buf, remote->fetch.raw[i], "^$", 0);
if (remote->origin == REMOTE_REMOTES)
unlink_or_warn(git_path("remotes/%s", remote->name));
else if (remote->origin == REMOTE_BRANCHES)
strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
git_config_set_multivar(buf.buf, NULL, NULL, 1);
strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
- for (i = 0; i < oldremote->fetch_refspec_nr; i++) {
+ for (i = 0; i < oldremote->fetch.raw_nr; i++) {
char *ptr;
strbuf_reset(&buf2);
- strbuf_addstr(&buf2, oldremote->fetch_refspec[i]);
+ strbuf_addstr(&buf2, oldremote->fetch.raw[i]);
ptr = strstr(buf2.buf, old_remote_context.buf);
if (ptr) {
refspec_updated = 1;
const struct object_id *oid, int flags, void *cb_data)
{
struct ref_states *states = cb_data;
- struct refspec refspec;
+ struct refspec_item refspec;
if (flags & REF_ISSYMREF)
return 0;
if (query) {
transport = transport_get(states->remote, states->remote->url_nr > 0 ?
states->remote->url[0] : NULL);
- remote_refs = transport_get_remote_refs(transport);
+ remote_refs = transport_get_remote_refs(transport, NULL);
transport_disconnect(transport);
states->queried = 1;
printf(" %-*s ", show_info->width, item->string);
if (branch_info->rebase) {
- printf_ln(branch_info->rebase == INTERACTIVE_REBASE
- ? _("rebases interactively onto remote %s")
- : _("rebases onto remote %s"), merge->items[0].string);
+ const char *msg;
+ if (branch_info->rebase == INTERACTIVE_REBASE)
+ msg = _("rebases interactively onto remote %s");
+ else if (branch_info->rebase == REBASE_MERGES)
+ msg = _("rebases interactively (with merges) onto "
+ "remote %s");
+ else
+ msg = _("rebases onto remote %s");
+ printf_ln(msg, merge->items[0].string);
return 0;
} else if (show_info->any_rebase) {
printf_ln(_(" merges with remote %s"), merge->items[0].string);
N_("git replace [-f] <object> <replacement>"),
N_("git replace [-f] --edit <object>"),
N_("git replace [-f] --graft <commit> [<parent>...]"),
+ N_("git replace [-f] --convert-graft-file"),
N_("git replace -d <object>..."),
N_("git replace [--format=<format>] [-l [<pattern>]]"),
NULL
else if (!strcmp(format, "long"))
data.format = REPLACE_FORMAT_LONG;
else
- die("invalid replace format '%s'\n"
- "valid formats are 'short', 'medium' and 'long'\n",
- format);
+ return error("invalid replace format '%s'\n"
+ "valid formats are 'short', 'medium' and 'long'\n",
+ format);
for_each_replace_ref(the_repository, show_reference, (void *)&data);
return 0;
}
-static void check_ref_valid(struct object_id *object,
+static int check_ref_valid(struct object_id *object,
struct object_id *prev,
struct strbuf *ref,
int force)
strbuf_reset(ref);
strbuf_addf(ref, "%s%s", git_replace_ref_base, oid_to_hex(object));
if (check_refname_format(ref->buf, 0))
- die("'%s' is not a valid ref name.", ref->buf);
+ return error("'%s' is not a valid ref name.", ref->buf);
if (read_ref(ref->buf, prev))
oidclr(prev);
else if (!force)
- die("replace ref '%s' already exists", ref->buf);
+ return error("replace ref '%s' already exists", ref->buf);
+ return 0;
}
static int replace_object_oid(const char *object_ref,
struct strbuf ref = STRBUF_INIT;
struct ref_transaction *transaction;
struct strbuf err = STRBUF_INIT;
+ int res = 0;
obj_type = oid_object_info(the_repository, object, NULL);
repl_type = oid_object_info(the_repository, repl, NULL);
if (!force && obj_type != repl_type)
- die("Objects must be of the same type.\n"
- "'%s' points to a replaced object of type '%s'\n"
- "while '%s' points to a replacement object of type '%s'.",
- object_ref, type_name(obj_type),
- replace_ref, type_name(repl_type));
-
- check_ref_valid(object, &prev, &ref, force);
+ return error("Objects must be of the same type.\n"
+ "'%s' points to a replaced object of type '%s'\n"
+ "while '%s' points to a replacement object of "
+ "type '%s'.",
+ object_ref, type_name(obj_type),
+ replace_ref, type_name(repl_type));
+
+ if (check_ref_valid(object, &prev, &ref, force)) {
+ strbuf_release(&ref);
+ return -1;
+ }
transaction = ref_transaction_begin(&err);
if (!transaction ||
ref_transaction_update(transaction, ref.buf, repl, &prev,
0, NULL, &err) ||
ref_transaction_commit(transaction, &err))
- die("%s", err.buf);
+ res = error("%s", err.buf);
ref_transaction_free(transaction);
strbuf_release(&ref);
- return 0;
+ return res;
}
static int replace_object(const char *object_ref, const char *replace_ref, int force)
struct object_id object, repl;
if (get_oid(object_ref, &object))
- die("Failed to resolve '%s' as a valid ref.", object_ref);
+ return error("Failed to resolve '%s' as a valid ref.",
+ object_ref);
if (get_oid(replace_ref, &repl))
- die("Failed to resolve '%s' as a valid ref.", replace_ref);
+ return error("Failed to resolve '%s' as a valid ref.",
+ replace_ref);
return replace_object_oid(object_ref, &object, replace_ref, &repl, force);
}
* If "raw" is true, then the object's raw contents are printed according to
* "type". Otherwise, we pretty-print the contents for human editing.
*/
-static void export_object(const struct object_id *oid, enum object_type type,
+static int export_object(const struct object_id *oid, enum object_type type,
int raw, const char *filename)
{
struct child_process cmd = CHILD_PROCESS_INIT;
fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
if (fd < 0)
- die_errno("unable to open %s for writing", filename);
+ return error_errno("unable to open %s for writing", filename);
argv_array_push(&cmd.args, "--no-replace-objects");
argv_array_push(&cmd.args, "cat-file");
cmd.out = fd;
if (run_command(&cmd))
- die("cat-file reported failure");
+ return error("cat-file reported failure");
+ return 0;
}
/*
* interpreting it as "type", and writing the result to the object database.
* The sha1 of the written object is returned via sha1.
*/
-static void import_object(struct object_id *oid, enum object_type type,
+static int import_object(struct object_id *oid, enum object_type type,
int raw, const char *filename)
{
int fd;
fd = open(filename, O_RDONLY);
if (fd < 0)
- die_errno("unable to open %s for reading", filename);
+ return error_errno("unable to open %s for reading", filename);
if (!raw && type == OBJ_TREE) {
const char *argv[] = { "mktree", NULL };
cmd.in = fd;
cmd.out = -1;
- if (start_command(&cmd))
- die("unable to spawn mktree");
+ if (start_command(&cmd)) {
+ close(fd);
+ return error("unable to spawn mktree");
+ }
- if (strbuf_read(&result, cmd.out, 41) < 0)
- die_errno("unable to read from mktree");
+ if (strbuf_read(&result, cmd.out, 41) < 0) {
+ error_errno("unable to read from mktree");
+ close(fd);
+ close(cmd.out);
+ return -1;
+ }
close(cmd.out);
- if (finish_command(&cmd))
- die("mktree reported failure");
- if (get_oid_hex(result.buf, oid) < 0)
- die("mktree did not return an object name");
+ if (finish_command(&cmd)) {
+ strbuf_release(&result);
+ return error("mktree reported failure");
+ }
+ if (get_oid_hex(result.buf, oid) < 0) {
+ strbuf_release(&result);
+ return error("mktree did not return an object name");
+ }
strbuf_release(&result);
} else {
struct stat st;
int flags = HASH_FORMAT_CHECK | HASH_WRITE_OBJECT;
- if (fstat(fd, &st) < 0)
- die_errno("unable to fstat %s", filename);
+ if (fstat(fd, &st) < 0) {
+ error_errno("unable to fstat %s", filename);
+ close(fd);
+ return -1;
+ }
if (index_fd(oid, fd, &st, type, NULL, flags) < 0)
- die("unable to write object to database");
+ return error("unable to write object to database");
/* index_fd close()s fd for us */
}
* No need to close(fd) here; both run-command and index-fd
* will have done it for us.
*/
+ return 0;
}
static int edit_and_replace(const char *object_ref, int force, int raw)
{
- char *tmpfile = git_pathdup("REPLACE_EDITOBJ");
+ char *tmpfile;
enum object_type type;
struct object_id old_oid, new_oid, prev;
struct strbuf ref = STRBUF_INIT;
if (get_oid(object_ref, &old_oid) < 0)
- die("Not a valid object name: '%s'", object_ref);
+ return error("Not a valid object name: '%s'", object_ref);
type = oid_object_info(the_repository, &old_oid, NULL);
if (type < 0)
- die("unable to get object type for %s", oid_to_hex(&old_oid));
+ return error("unable to get object type for %s",
+ oid_to_hex(&old_oid));
- check_ref_valid(&old_oid, &prev, &ref, force);
+ if (check_ref_valid(&old_oid, &prev, &ref, force)) {
+ strbuf_release(&ref);
+ return -1;
+ }
strbuf_release(&ref);
- export_object(&old_oid, type, raw, tmpfile);
- if (launch_editor(tmpfile, NULL, NULL) < 0)
- die("editing object file failed");
- import_object(&new_oid, type, raw, tmpfile);
-
+ tmpfile = git_pathdup("REPLACE_EDITOBJ");
+ if (export_object(&old_oid, type, raw, tmpfile)) {
+ free(tmpfile);
+ return -1;
+ }
+ if (launch_editor(tmpfile, NULL, NULL) < 0) {
+ free(tmpfile);
+ return error("editing object file failed");
+ }
+ if (import_object(&new_oid, type, raw, tmpfile)) {
+ free(tmpfile);
+ return -1;
+ }
free(tmpfile);
if (!oidcmp(&old_oid, &new_oid))
return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
}
-static void replace_parents(struct strbuf *buf, int argc, const char **argv)
+static int replace_parents(struct strbuf *buf, int argc, const char **argv)
{
struct strbuf new_parents = STRBUF_INIT;
const char *parent_start, *parent_end;
/* prepare new parents */
for (i = 0; i < argc; i++) {
struct object_id oid;
- if (get_oid(argv[i], &oid) < 0)
- die(_("Not a valid object name: '%s'"), argv[i]);
- lookup_commit_or_die(&oid, argv[i]);
+ if (get_oid(argv[i], &oid) < 0) {
+ strbuf_release(&new_parents);
+ return error(_("Not a valid object name: '%s'"),
+ argv[i]);
+ }
+ if (!lookup_commit_reference(&oid)) {
+ strbuf_release(&new_parents);
+ return error(_("could not parse %s"), argv[i]);
+ }
strbuf_addf(&new_parents, "parent %s\n", oid_to_hex(&oid));
}
new_parents.buf, new_parents.len);
strbuf_release(&new_parents);
+ return 0;
}
struct check_mergetag_data {
const char **argv;
};
-static void check_one_mergetag(struct commit *commit,
+static int check_one_mergetag(struct commit *commit,
struct commit_extra_header *extra,
void *data)
{
hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
tag = lookup_tag(&tag_oid);
if (!tag)
- die(_("bad mergetag in commit '%s'"), ref);
+ return error(_("bad mergetag in commit '%s'"), ref);
if (parse_tag_buffer(tag, extra->value, extra->len))
- die(_("malformed mergetag in commit '%s'"), ref);
+ return error(_("malformed mergetag in commit '%s'"), ref);
/* iterate over new parents */
for (i = 1; i < mergetag_data->argc; i++) {
struct object_id oid;
if (get_oid(mergetag_data->argv[i], &oid) < 0)
- die(_("Not a valid object name: '%s'"), mergetag_data->argv[i]);
+ return error(_("Not a valid object name: '%s'"),
+ mergetag_data->argv[i]);
if (!oidcmp(&tag->tagged->oid, &oid))
- return; /* found */
+ return 0; /* found */
}
- die(_("original commit '%s' contains mergetag '%s' that is discarded; "
- "use --edit instead of --graft"), ref, oid_to_hex(&tag_oid));
+ return error(_("original commit '%s' contains mergetag '%s' that is "
+ "discarded; use --edit instead of --graft"), ref,
+ oid_to_hex(&tag_oid));
}
-static void check_mergetags(struct commit *commit, int argc, const char **argv)
+static int check_mergetags(struct commit *commit, int argc, const char **argv)
{
struct check_mergetag_data mergetag_data;
mergetag_data.argc = argc;
mergetag_data.argv = argv;
- for_each_mergetag(check_one_mergetag, commit, &mergetag_data);
+ return for_each_mergetag(check_one_mergetag, commit, &mergetag_data);
}
-static int create_graft(int argc, const char **argv, int force)
+static int create_graft(int argc, const char **argv, int force, int gentle)
{
struct object_id old_oid, new_oid;
const char *old_ref = argv[0];
unsigned long size;
if (get_oid(old_ref, &old_oid) < 0)
- die(_("Not a valid object name: '%s'"), old_ref);
- commit = lookup_commit_or_die(&old_oid, old_ref);
+ return error(_("Not a valid object name: '%s'"), old_ref);
+ commit = lookup_commit_reference(&old_oid);
+ if (!commit)
+ return error(_("could not parse %s"), old_ref);
buffer = get_commit_buffer(commit, &size);
strbuf_add(&buf, buffer, size);
unuse_commit_buffer(commit, buffer);
- replace_parents(&buf, argc - 1, &argv[1]);
+ if (replace_parents(&buf, argc - 1, &argv[1]) < 0) {
+ strbuf_release(&buf);
+ return -1;
+ }
if (remove_signature(&buf)) {
warning(_("the original commit '%s' has a gpg signature."), old_ref);
warning(_("the signature will be removed in the replacement commit!"));
}
- check_mergetags(commit, argc, argv);
+ if (check_mergetags(commit, argc, argv)) {
+ strbuf_release(&buf);
+ return -1;
+ }
- if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
- die(_("could not write replacement commit for: '%s'"), old_ref);
+ if (write_object_file(buf.buf, buf.len, commit_type, &new_oid)) {
+ strbuf_release(&buf);
+ return error(_("could not write replacement commit for: '%s'"),
+ old_ref);
+ }
strbuf_release(&buf);
- if (!oidcmp(&old_oid, &new_oid))
+ if (!oidcmp(&old_oid, &new_oid)) {
+ if (gentle) {
+ warning("graft for '%s' unnecessary", oid_to_hex(&old_oid));
+ return 0;
+ }
return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
+ }
return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
}
- const char *graft_file = get_graft_file();
+static int convert_graft_file(int force)
+{
+ const char *graft_file = get_graft_file(the_repository);
+ FILE *fp = fopen_or_warn(graft_file, "r");
+ struct strbuf buf = STRBUF_INIT, err = STRBUF_INIT;
+ struct argv_array args = ARGV_ARRAY_INIT;
+
+ if (!fp)
+ return -1;
+
+ while (strbuf_getline(&buf, fp) != EOF) {
+ if (*buf.buf == '#')
+ continue;
+
+ argv_array_split(&args, buf.buf);
+ if (args.argc && create_graft(args.argc, args.argv, force, 1))
+ strbuf_addf(&err, "\n\t%s", buf.buf);
+ argv_array_clear(&args);
+ }
+ fclose(fp);
+
+ strbuf_release(&buf);
+
+ if (!err.len)
+ return unlink_or_warn(graft_file);
+
+ warning(_("could not convert the following graft(s):\n%s"), err.buf);
+ strbuf_release(&err);
+
+ return -1;
+}
+
int cmd_replace(int argc, const char **argv, const char *prefix)
{
int force = 0;
MODE_DELETE,
MODE_EDIT,
MODE_GRAFT,
+ MODE_CONVERT_GRAFT_FILE,
MODE_REPLACE
} cmdmode = MODE_UNSPECIFIED;
struct option options[] = {
OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE),
OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT),
OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT),
+ OPT_CMDMODE(0, "convert-graft-file", &cmdmode, N_("convert existing graft file"), MODE_CONVERT_GRAFT_FILE),
OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"),
PARSE_OPT_NOCOMPLETE),
OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")),
if (force &&
cmdmode != MODE_REPLACE &&
cmdmode != MODE_EDIT &&
- cmdmode != MODE_GRAFT)
+ cmdmode != MODE_GRAFT &&
+ cmdmode != MODE_CONVERT_GRAFT_FILE)
usage_msg_opt("-f only makes sense when writing a replacement",
git_replace_usage, options);
if (argc < 1)
usage_msg_opt("-g needs at least one argument",
git_replace_usage, options);
- return create_graft(argc, argv, force);
+ return create_graft(argc, argv, force, 0);
+
+ case MODE_CONVERT_GRAFT_FILE:
+ if (argc != 0)
+ usage_msg_opt("--convert-graft-file takes no argument",
+ git_replace_usage, options);
+ return !!convert_graft_file(force);
case MODE_LIST:
if (argc > 1)
return list_replace_refs(argv[0], format);
default:
- die("BUG: invalid cmdmode %d", (int)cmdmode);
+ BUG("invalid cmdmode %d", (int)cmdmode);
}
}
static inline int is_merge(void)
{
- return !access(git_path_merge_head(), F_OK);
+ return !access(git_path_merge_head(the_repository), F_OK);
}
static int reset_index(const struct object_id *oid, int reset_type, int quiet)
unborn = !strcmp(rev, "HEAD") && get_oid("HEAD", &oid);
if (unborn) {
/* reset on unborn branch: treat as reset to empty tree */
- hashcpy(oid.hash, EMPTY_TREE_SHA1_BIN);
+ oidcpy(&oid, the_hash_algo->empty_tree);
} else if (!pathspec.nr) {
struct commit *commit;
if (get_oid_committish(rev, &oid))
struct commit *a, *b;
a = lookup_commit_reference(&start_oid);
b = lookup_commit_reference(&end_oid);
+ if (!a || !b) {
+ *dotdot = '.';
+ return 0;
+ }
exclude = get_merge_bases(a, b);
while (exclude) {
struct commit *commit = pop_commit(&exclude);
return 0;
*dotdot = 0;
- if (get_oid_committish(arg, &oid)) {
+ if (get_oid_committish(arg, &oid) ||
+ !(commit = lookup_commit_reference(&oid))) {
*dotdot = '^';
return 0;
}
- commit = lookup_commit_reference(&oid);
if (exclude_parent &&
exclude_parent > commit_list_count(commit->parents)) {
*dotdot = '^';
continue;
}
if (!strcmp(arg, "--is-shallow-repository")) {
- printf("%s\n", is_repository_shallow() ? "true"
+ printf("%s\n",
+ is_repository_shallow(the_repository) ? "true"
: "false");
continue;
}
if (read_cache() < 0)
die(_("Could not read the index"));
if (the_index.split_index) {
- const unsigned char *sha1 = the_index.split_index->base_sha1;
- const char *path = git_path("sharedindex.%s", sha1_to_hex(sha1));
+ const struct object_id *oid = &the_index.split_index->base_oid;
+ const char *path = git_path("sharedindex.%s", oid_to_hex(oid));
strbuf_reset(&buf);
puts(relative_path(path, prefix, &buf));
}
#include "config.h"
#include "builtin.h"
#include "refs.h"
+ #include "object-store.h"
#include "tag.h"
#include "run-command.h"
#include "parse-options.h"
return -1;
if (format->format)
- pretty_print_ref(name, oid->hash, format);
+ pretty_print_ref(name, oid, format);
return 0;
}
#include "builtin.h"
#include "cache.h"
#include "config.h"
+ #include "object-store.h"
#include "object.h"
#include "delta.h"
#include "pack.h"
if (!obj_buf)
die("Whoops! Cannot find object '%s'", oid_to_hex(&obj->oid));
if (fsck_object(obj, obj_buf->buffer, obj_buf->size, &fsck_options))
- die("Error in object");
+ die("fsck error in packed object");
fsck_options.walk = check_object;
if (fsck_walk(obj, NULL, &fsck_options))
die("Error on reachable objects of %s", oid_to_hex(&obj->oid));
unpack_all();
the_hash_algo->update_fn(&ctx, buffer, offset);
the_hash_algo->final_fn(oid.hash, &ctx);
- if (strict)
+ if (strict) {
write_rest();
+ if (fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
+ }
if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
use(the_hash_algo->rawsz);
#include "pack.h"
#include "strbuf.h"
#include "packfile.h"
+ #include "object-store.h"
static struct bulk_checkin_state {
unsigned plugged:1;
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- hashclose(state->f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(state->f, oid.hash, 0);
+ int fd = finalize_hashfile(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
* pack, and write into it.
*/
if (!idx)
- die("BUG: should not happen");
+ BUG("should not happen");
hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
#include "cache.h"
#include "lockfile.h"
#include "bundle.h"
+ #include "object-store.h"
#include "object.h"
#include "commit.h"
#include "diff.h"
int create_bundle(struct bundle_header *header, const char *path,
int argc, const char **argv)
{
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
int bundle_fd = -1;
int bundle_to_stdout;
int ref_count = 0;
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
+ #include "object-store.h"
#ifndef DEBUG
#define DEBUG 0
/*
* "sub" can be an empty tree if all subentries are i-t-a.
*/
- if (contains_ita && !oidcmp(oid, &empty_tree_oid))
+ if (contains_ita && is_empty_tree_oid(oid))
continue;
strbuf_grow(&buffer, entlen + 100);
if (0 <= it->entry_count) {
if (size < rawsz)
goto free_return;
- memcpy(it->oid.hash, (const unsigned char*)buf, rawsz);
+ oidread(&it->oid, (const unsigned char *)buf);
buf += rawsz;
size -= rawsz;
}
drop_cache_tree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
- unsigned char sha1[20];
+ struct object_id oid;
struct untracked_cache *untracked;
uint64_t fsmonitor_last_update;
struct ewah_bitmap *fsmonitor_dirty;
#define read_blob_data_from_cache(path, sz) read_blob_data_from_index(&the_index, (path), (sz))
#endif
+#define TYPE_BITS 3
+
+/*
+ * Values in this enum (except those outside the 3 bit range) are part
+ * of pack file format. See Documentation/technical/pack-format.txt
+ * for more information.
+ */
enum object_type {
OBJ_BAD = -1,
OBJ_NONE = 0,
#define GIT_ICASE_PATHSPECS_ENVIRONMENT "GIT_ICASE_PATHSPECS"
#define GIT_QUARANTINE_ENVIRONMENT "GIT_QUARANTINE_PATH"
#define GIT_OPTIONAL_LOCKS_ENVIRONMENT "GIT_OPTIONAL_LOCKS"
+#define GIT_TEXT_DOMAIN_DIR_ENVIRONMENT "GIT_TEXTDOMAINDIR"
/*
* Environment variable used in handshaking the wire protocol.
extern const char *get_git_common_dir(void);
extern char *get_object_directory(void);
extern char *get_index_file(void);
- extern char *get_graft_file(void);
+ extern char *get_graft_file(struct repository *r);
-extern int set_git_dir(const char *path);
+extern void set_git_dir(const char *path);
extern int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
extern int get_common_dir(struct strbuf *sb, const char *gitdir);
extern const char *get_git_namespace(void);
*/
extern int index_has_changes(struct strbuf *sb);
-extern int verify_path(const char *path);
+extern int verify_path(const char *path, unsigned mode);
extern int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
extern int index_dir_exists(struct index_state *istate, const char *name, int namelen);
extern void adjust_dirname_case(struct index_state *istate, char *name);
extern int fsync_object_files;
extern int core_preload_index;
+extern int core_commit_graph;
extern int core_apply_sparse_checkout;
extern int precomposed_unicode;
extern int protect_hfs;
memset(oid->hash, 0, GIT_MAX_RAWSZ);
}
-
-#define EMPTY_TREE_SHA1_HEX \
- "4b825dc642cb6eb9a060e54bf8d69288fbee4904"
-#define EMPTY_TREE_SHA1_BIN_LITERAL \
- "\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60" \
- "\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04"
-extern const struct object_id empty_tree_oid;
-#define EMPTY_TREE_SHA1_BIN (empty_tree_oid.hash)
-
-#define EMPTY_BLOB_SHA1_HEX \
- "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"
-#define EMPTY_BLOB_SHA1_BIN_LITERAL \
- "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
- "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
-extern const struct object_id empty_blob_oid;
+static inline void oidread(struct object_id *oid, const unsigned char *hash)
+{
+ memcpy(oid->hash, hash, the_hash_algo->rawsz);
+}
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
return !oidcmp(oid, the_hash_algo->empty_tree);
}
+const char *empty_tree_oid_hex(void);
+const char *empty_blob_oid_hex(void);
+
/* set default permissions by passing mode arguments to open(2) */
int git_mkstemps_mode(char *pattern, int suffix_len, int mode);
int git_mkstemp_mode(char *pattern, int mode);
int longest_ancestor_length(const char *path, struct string_list *prefixes);
char *strip_path_suffix(const char *path, const char *suffix);
int daemon_avoid_alias(const char *path);
-extern int is_ntfs_dotgit(const char *name);
+
+/*
+ * These functions match their is_hfs_dotgit() counterparts; see utf8.h for
+ * details.
+ */
+int is_ntfs_dotgit(const char *name);
+int is_ntfs_dotgitmodules(const char *name);
+int is_ntfs_dotgitignore(const char *name);
+int is_ntfs_dotgitattributes(const char *name);
/*
* Returns true iff "str" could be confused as a command-line option when
*/
extern char *xdg_cache_home(const char *filename);
- extern void *read_object_file_extended(const struct object_id *oid,
- enum object_type *type,
- unsigned long *size, int lookup_replace);
- static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
- {
- return read_object_file_extended(oid, type, size, 1);
- }
-
- /* Read and unpack an object file into memory, write memory to an object file */
- int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
-
- extern int hash_object_file(const void *buf, unsigned long len,
- const char *type, struct object_id *oid);
-
- extern int write_object_file(const void *buf, unsigned long len,
- const char *type, struct object_id *oid);
-
- extern int hash_object_file_literally(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
- unsigned flags);
-
- extern int pretend_object_file(void *, unsigned long, enum object_type,
- struct object_id *oid);
-
- extern int force_object_loose(const struct object_id *oid, time_t mtime);
-
extern int git_open_cloexec(const char *name, int flags);
#define git_open(name) git_open_cloexec(name, O_RDONLY)
extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
extern int finalize_object_file(const char *tmpfile, const char *filename);
- /*
- * Open the loose object at path, check its hash, and return the contents,
- * type, and size. If the object is a blob, then "contents" may return NULL,
- * to allow streaming of large blobs.
- *
- * Returns 0 on success, negative on error (details may be written to stderr).
- */
- int read_loose_object(const char *path,
- const struct object_id *expected_oid,
- enum object_type *type,
- unsigned long *size,
- void **contents);
-
- /*
- * Convenience for sha1_object_info_extended() with a NULL struct
- * object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass
- * nonzero flags to also set other flags.
- */
- extern int has_sha1_file_with_flags(const unsigned char *sha1, int flags);
- static inline int has_sha1_file(const unsigned char *sha1)
- {
- return has_sha1_file_with_flags(sha1, 0);
- }
-
- /* Same as the above, except for struct object_id. */
- extern int has_object_file(const struct object_id *oid);
- extern int has_object_file_with_flags(const struct object_id *oid, int flags);
-
- /*
- * Return true iff an alternate object database has a loose object
- * with the specified name. This function does not respect replace
- * references.
- */
- extern int has_loose_object_nonlocal(const struct object_id *oid);
-
- extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
-
/* Helper to check and "touch" a file */
extern int check_and_freshen_file(const char *fn, int freshen);
#define FALLBACK_DEFAULT_ABBREV 7
struct object_context {
- unsigned char tree[20];
unsigned mode;
/*
* symlink_path is only used by get_tree_entry_follow_symlinks,
struct pack_entry {
off_t offset;
- unsigned char sha1[20];
struct packed_git *p;
};
#define FOR_EACH_OBJECT_LOCAL_ONLY 0x1
extern int for_each_loose_object(each_loose_object_fn, void *, unsigned flags);
- struct object_info {
- /* Request */
- enum object_type *typep;
- unsigned long *sizep;
- off_t *disk_sizep;
- unsigned char *delta_base_sha1;
- struct strbuf *type_name;
- void **contentp;
-
- /* Response */
- enum {
- OI_CACHED,
- OI_LOOSE,
- OI_PACKED,
- OI_DBCACHED
- } whence;
- union {
- /*
- * struct {
- * ... Nothing to expose in this case
- * } cached;
- * struct {
- * ... Nothing to expose in this case
- * } loose;
- */
- struct {
- struct packed_git *pack;
- off_t offset;
- unsigned int is_delta;
- } packed;
- } u;
- };
-
- /*
- * Initializer for a "struct object_info" that wants no items. You may
- * also memset() the memory to all-zeroes.
- */
- #define OBJECT_INFO_INIT {NULL}
-
- /* Invoke lookup_replace_object() on the given hash */
- #define OBJECT_INFO_LOOKUP_REPLACE 1
- /* Allow reading from a loose object file of unknown/bogus type */
- #define OBJECT_INFO_ALLOW_UNKNOWN_TYPE 2
- /* Do not check cached storage */
- #define OBJECT_INFO_SKIP_CACHED 4
- /* Do not retry packed storage after checking packed and loose storage */
- #define OBJECT_INFO_QUICK 8
- /* Do not check loose object */
- #define OBJECT_INFO_IGNORE_LOOSE 16
-
- int oid_object_info_extended(struct repository *r,
- const struct object_id *,
- struct object_info *, unsigned flags);
-
/*
* Set this to 0 to prevent sha1_object_info_extended() from fetching missing
* blobs. This has a difference only if extensions.partialClone is set.
void overlay_tree_on_index(struct index_state *istate,
const char *tree_name, const char *prefix);
-char *alias_lookup(const char *alias);
-int split_cmdline(char *cmdline, const char ***argv);
-/* Takes a negative value returned by split_cmdline */
-const char *split_cmdline_strerror(int cmdline_errno);
-
/* setup.c */
struct startup_info {
int have_repository;
#include "cache.h"
#include "tag.h"
#include "commit.h"
+#include "commit-graph.h"
+ #include "repository.h"
+ #include "object-store.h"
#include "pkt-line.h"
#include "utf8.h"
#include "diff.h"
#include "prio-queue.h"
#include "sha1-lookup.h"
#include "wt-status.h"
+#include "advice.h"
static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
return parse_timestamp(dateptr, NULL, 10);
}
- static struct commit_graft **commit_graft;
- static int commit_graft_alloc, commit_graft_nr;
-
static const unsigned char *commit_graft_sha1_access(size_t index, void *table)
{
struct commit_graft **commit_graft_table = table;
return commit_graft_table[index]->oid.hash;
}
- static int commit_graft_pos(const unsigned char *sha1)
+ static int commit_graft_pos(struct repository *r, const unsigned char *sha1)
{
- return sha1_pos(sha1, commit_graft, commit_graft_nr,
+ return sha1_pos(sha1, r->parsed_objects->grafts,
+ r->parsed_objects->grafts_nr,
commit_graft_sha1_access);
}
- int register_commit_graft(struct commit_graft *graft, int ignore_dups)
+ int register_commit_graft(struct repository *r, struct commit_graft *graft,
+ int ignore_dups)
{
- int pos = commit_graft_pos(graft->oid.hash);
+ int pos = commit_graft_pos(r, graft->oid.hash);
if (0 <= pos) {
if (ignore_dups)
free(graft);
else {
- free(commit_graft[pos]);
- commit_graft[pos] = graft;
+ free(r->parsed_objects->grafts[pos]);
+ r->parsed_objects->grafts[pos] = graft;
}
return 1;
}
pos = -pos - 1;
- ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc);
- commit_graft_nr++;
- if (pos < commit_graft_nr)
- MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos,
- commit_graft_nr - pos - 1);
- commit_graft[pos] = graft;
+ ALLOC_GROW(r->parsed_objects->grafts,
+ r->parsed_objects->grafts_nr + 1,
+ r->parsed_objects->grafts_alloc);
+ r->parsed_objects->grafts_nr++;
+ if (pos < r->parsed_objects->grafts_nr)
+ memmove(r->parsed_objects->grafts + pos + 1,
+ r->parsed_objects->grafts + pos,
+ (r->parsed_objects->grafts_nr - pos - 1) *
+ sizeof(*r->parsed_objects->grafts));
+ r->parsed_objects->grafts[pos] = graft;
return 0;
}
return NULL;
}
- static int read_graft_file(const char *graft_file)
+ static int read_graft_file(struct repository *r, const char *graft_file)
{
FILE *fp = fopen_or_warn(graft_file, "r");
struct strbuf buf = STRBUF_INIT;
if (!fp)
return -1;
+ if (advice_graft_file_deprecated)
+ advise(_("Support for <GIT_DIR>/info/grafts is deprecated\n"
+ "and will be removed in a future Git version.\n"
+ "\n"
+ "Please use \"git replace --convert-graft-file\"\n"
+ "to convert the grafts into replace refs.\n"
+ "\n"
+ "Turn this message off by running\n"
+ "\"git config advice.graftFileDeprecated false\""));
while (!strbuf_getwholeline(&buf, fp, '\n')) {
/* The format is just "Commit Parent1 Parent2 ...\n" */
struct commit_graft *graft = read_graft_line(&buf);
if (!graft)
continue;
- if (register_commit_graft(graft, 1))
+ if (register_commit_graft(r, graft, 1))
error("duplicate graft data: %s", buf.buf);
}
fclose(fp);
return 0;
}
- static void prepare_commit_graft(void)
+ static void prepare_commit_graft(struct repository *r)
{
- static int commit_graft_prepared;
char *graft_file;
- if (commit_graft_prepared)
+ if (r->parsed_objects->commit_graft_prepared)
return;
- graft_file = get_graft_file();
- read_graft_file(graft_file);
+ if (!startup_info->have_repository)
+ return;
+
+ graft_file = get_graft_file(r);
+ read_graft_file(r, graft_file);
/* make sure shallows are read */
- is_repository_shallow();
- commit_graft_prepared = 1;
+ is_repository_shallow(r);
+ r->parsed_objects->commit_graft_prepared = 1;
}
- struct commit_graft *lookup_commit_graft(const struct object_id *oid)
+ struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid)
{
int pos;
- prepare_commit_graft();
- pos = commit_graft_pos(oid->hash);
+ prepare_commit_graft(r);
+ pos = commit_graft_pos(r, oid->hash);
if (pos < 0)
return NULL;
- return commit_graft[pos];
+ return r->parsed_objects->grafts[pos];
}
int for_each_commit_graft(each_commit_graft_fn fn, void *cb_data)
{
int i, ret;
- for (i = ret = 0; i < commit_graft_nr && !ret; i++)
- ret = fn(commit_graft[i], cb_data);
+ for (i = ret = 0; i < the_repository->parsed_objects->grafts_nr && !ret; i++)
+ ret = fn(the_repository->parsed_objects->grafts[i], cb_data);
return ret;
}
int unregister_shallow(const struct object_id *oid)
{
- int pos = commit_graft_pos(oid->hash);
+ int pos = commit_graft_pos(the_repository, oid->hash);
if (pos < 0)
return -1;
- if (pos + 1 < commit_graft_nr)
- MOVE_ARRAY(commit_graft + pos, commit_graft + pos + 1,
- commit_graft_nr - pos - 1);
- commit_graft_nr--;
+ if (pos + 1 < the_repository->parsed_objects->grafts_nr)
+ MOVE_ARRAY(the_repository->parsed_objects->grafts + pos,
+ the_repository->parsed_objects->grafts + pos + 1,
+ the_repository->parsed_objects->grafts_nr - pos - 1);
+ the_repository->parsed_objects->grafts_nr--;
return 0;
}
}
}
+struct tree *get_commit_tree(const struct commit *commit)
+{
+ if (commit->maybe_tree || !commit->object.parsed)
+ return commit->maybe_tree;
+
+ if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
+ BUG("commit has NULL tree, but was not loaded from commit-graph");
+
+ return get_commit_tree_in_graph(commit);
+}
+
+struct object_id *get_commit_tree_oid(const struct commit *commit)
+{
+ return &get_commit_tree(commit)->object.oid;
+}
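
With commits now possibly parsed from the commit-graph file, the tree pointer can stay unset until it is actually needed, which is why the field becomes `maybe_tree` and access is funneled through the two accessors above. A minimal sketch of how a call site is expected to change (variable names are illustrative, not taken from the patch, and `oid` is assumed to name some commit):

	struct commit *c = lookup_commit_reference(&oid);

	if (!c || parse_commit(c))
		die("could not parse commit");

	/* before this series: struct tree *t = c->tree; */
	struct tree *t = get_commit_tree(c);
	struct object_id *tree_oid = get_commit_tree_oid(c);
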
+
void release_commit_memory(struct commit *c)
{
- c->tree = NULL;
+ c->maybe_tree = NULL;
c->index = 0;
free_commit_buffer(c);
free_commit_list(c->parents);
return ret;
}
-int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size)
+int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size, int check_graph)
{
const char *tail = buffer;
const char *bufptr = buffer;
if (tail <= bufptr + tree_entry_len + 1 || memcmp(bufptr, "tree ", 5) ||
bufptr[tree_entry_len] != '\n')
return error("bogus commit object %s", oid_to_hex(&item->object.oid));
- if (get_sha1_hex(bufptr + 5, parent.hash) < 0)
+ if (get_oid_hex(bufptr + 5, &parent) < 0)
return error("bad tree pointer in commit %s",
oid_to_hex(&item->object.oid));
- item->tree = lookup_tree(&parent);
+ item->maybe_tree = lookup_tree(&parent);
bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
pptr = &item->parents;
- graft = lookup_commit_graft(&item->object.oid);
+ graft = lookup_commit_graft(the_repository, &item->object.oid);
while (bufptr + parent_entry_len < tail && !memcmp(bufptr, "parent ", 7)) {
struct commit *new_parent;
if (tail <= bufptr + parent_entry_len + 1 ||
- get_sha1_hex(bufptr + 7, parent.hash) ||
+ get_oid_hex(bufptr + 7, &parent) ||
bufptr[parent_entry_len] != '\n')
return error("bad parents in commit %s", oid_to_hex(&item->object.oid));
bufptr += parent_entry_len + 1;
}
item->date = parse_commit_date(bufptr, tail);
+ if (check_graph)
+ load_commit_graph_info(item);
+
return 0;
}
return -1;
if (item->object.parsed)
return 0;
+ if (parse_commit_in_graph(item))
+ return 0;
buffer = read_object_file(&item->object.oid, &type, &size);
if (!buffer)
return quiet_on_missing ? -1 :
return error("Object %s not a commit",
oid_to_hex(&item->object.oid));
}
- ret = parse_commit_buffer(item, buffer, size);
+ ret = parse_commit_buffer(item, buffer, size, 0);
if (save_commit_buffer && !ret) {
set_commit_buffer(item, buffer, size);
return 0;
return 0;
}
+int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused)
+{
+ const struct commit *a = a_, *b = b_;
+
+ /* newer commits first */
+ if (a->generation < b->generation)
+ return 1;
+ else if (a->generation > b->generation)
+ return -1;
+
+ /* use date as a heuristic when generations are equal */
+ if (a->date < b->date)
+ return 1;
+ else if (a->date > b->date)
+ return -1;
+ return 0;
+}
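
The comparator above makes paint_down_to_common() pop commits in order of decreasing generation number, which is what allows the min_generation cutoff added below: a commit's generation, as the commit-graph code defines it, is one more than the largest generation among its parents (with GENERATION_NUMBER_INFINITY for commits outside the graph), so an ancestor always has a strictly smaller generation than its descendants, and once the queue drops below min_generation nothing relevant can still appear. A sketch of that invariant, for illustration only; the real values are read from the commit-graph file, and this assumes the whole history is already parsed:

	static uint32_t illustrative_generation(struct commit *c)
	{
		struct commit_list *p;
		uint32_t gen = 0;

		for (p = c->parents; p; p = p->next) {
			uint32_t pgen = illustrative_generation(p->item);
			if (pgen > gen)
				gen = pgen;
		}
		return gen + 1;	/* root commits end up with generation 1 */
	}
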
+
int compare_commits_by_commit_date(const void *a_, const void *b_, void *unused)
{
const struct commit *a = a_, *b = b_;
}
/* all input commits in one and twos[] must have been parsed! */
-static struct commit_list *paint_down_to_common(struct commit *one, int n, struct commit **twos)
+static struct commit_list *paint_down_to_common(struct commit *one, int n,
+ struct commit **twos,
+ int min_generation)
{
- struct prio_queue queue = { compare_commits_by_commit_date };
+ struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
struct commit_list *result = NULL;
int i;
+ uint32_t last_gen = GENERATION_NUMBER_INFINITY;
one->object.flags |= PARENT1;
if (!n) {
struct commit_list *parents;
int flags;
+ if (commit->generation > last_gen)
+ BUG("bad generation skip %8x > %8x at %s",
+ commit->generation, last_gen,
+ oid_to_hex(&commit->object.oid));
+ last_gen = commit->generation;
+
+ if (commit->generation < min_generation)
+ break;
+
flags = commit->object.flags & (PARENT1 | PARENT2 | STALE);
if (flags == (PARENT1 | PARENT2)) {
if (!(commit->object.flags & RESULT)) {
return NULL;
}
- list = paint_down_to_common(one, n, twos);
+ list = paint_down_to_common(one, n, twos, 0);
while (list) {
struct commit *commit = pop_commit(&list);
parse_commit(array[i]);
for (i = 0; i < cnt; i++) {
struct commit_list *common;
+ uint32_t min_generation = array[i]->generation;
if (redundant[i])
continue;
continue;
filled_index[filled] = j;
work[filled++] = array[j];
+
+ if (array[j]->generation < min_generation)
+ min_generation = array[j]->generation;
}
- common = paint_down_to_common(array[i], filled, work);
+ common = paint_down_to_common(array[i], filled, work,
+ min_generation);
if (array[i]->object.flags & PARENT2)
redundant[i] = 1;
for (j = 0; j < filled; j++)
{
struct commit_list *bases;
int ret = 0, i;
+ uint32_t min_generation = GENERATION_NUMBER_INFINITY;
if (parse_commit(commit))
return ret;
- for (i = 0; i < nr_reference; i++)
+ for (i = 0; i < nr_reference; i++) {
if (parse_commit(reference[i]))
return ret;
+ if (reference[i]->generation < min_generation)
+ min_generation = reference[i]->generation;
+ }
+
+ if (commit->generation > min_generation)
+ return ret;
- bases = paint_down_to_common(commit, nr_reference, reference);
+ bases = paint_down_to_common(commit, nr_reference, reference, commit->generation);
if (commit->object.flags & PARENT2)
ret = 1;
clear_commit_marks(commit, all_flags);
return extra;
}
-void for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data)
+int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data)
{
struct commit_extra_header *extra, *to_free;
+ int res = 0;
to_free = read_commit_extra_headers(commit, NULL);
- for (extra = to_free; extra; extra = extra->next) {
+ for (extra = to_free; !res && extra; extra = extra->next) {
if (strcmp(extra->key, "mergetag"))
continue; /* not a merge tag */
- fn(commit, extra, data);
+ res = fn(commit, extra, data);
}
free_commit_extra_headers(to_free);
+ return res;
}
static inline int standard_header_field(const char *field, size_t len)
return result;
}
+define_commit_slab(merge_desc_slab, struct merge_remote_desc *);
+static struct merge_desc_slab merge_desc_slab = COMMIT_SLAB_INIT(1, merge_desc_slab);
+
+struct merge_remote_desc *merge_remote_util(struct commit *commit)
+{
+ return *merge_desc_slab_at(&merge_desc_slab, commit);
+}
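
This replaces the old `commit->util` pointer: per-commit data such as the merge_remote_desc now lives in a commit slab, in line with the comment added to `struct commit` below about keeping that struct small. The same pattern works for any per-commit annotation; a minimal sketch with invented names, purely for illustration:

	define_commit_slab(indegree_slab, int);
	static struct indegree_slab indegree = COMMIT_SLAB_INIT(1, indegree_slab);

	static void count_edge(struct commit *parent)
	{
		/* the *_slab_at() accessor allocates the per-commit slot on demand */
		(*indegree_slab_at(&indegree, parent))++;
	}
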
+
void set_merge_remote_desc(struct commit *commit,
const char *name, struct object *obj)
{
struct merge_remote_desc *desc;
FLEX_ALLOC_STR(desc, name, name);
desc->obj = obj;
- commit->util = desc;
+ *merge_desc_slab_at(&merge_desc_slab, commit) = desc;
}
struct commit *get_merge_parent(const char *name)
return NULL;
obj = parse_object(&oid);
commit = (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT);
- if (commit && !commit->util)
+ if (commit && !merge_remote_util(commit))
set_merge_remote_desc(commit, name, obj);
return commit;
}
#include "string-list.h"
#include "pretty.h"
+#define COMMIT_NOT_FROM_GRAPH 0xFFFFFFFF
+#define GENERATION_NUMBER_INFINITY 0xFFFFFFFF
+#define GENERATION_NUMBER_MAX 0x3FFFFFFF
+#define GENERATION_NUMBER_ZERO 0
+
struct commit_list {
struct commit *item;
struct commit_list *next;
};
+/*
+ * The size of this struct matters in full repo walk operations like
+ * 'git clone' or 'git gc'. Consider using commit-slab to attach data
+ * to a commit instead of adding new fields here.
+ */
struct commit {
struct object object;
- void *util;
- unsigned int index;
timestamp_t date;
struct commit_list *parents;
- struct tree *tree;
+
+ /*
+ * If the commit is loaded from the commit-graph file, then this
+ * member may be NULL. Only access it through get_commit_tree()
+ * or get_commit_tree_oid().
+ */
+ struct tree *maybe_tree;
+ uint32_t graph_pos;
+ uint32_t generation;
+ unsigned int index;
};
extern int save_commit_buffer;
*/
struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name);
-int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size);
+int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size, int check_graph);
int parse_commit_gently(struct commit *item, int quiet_on_missing);
static inline int parse_commit(struct commit *item)
{
*/
void free_commit_buffer(struct commit *);
+struct tree *get_commit_tree(const struct commit *);
+struct object_id *get_commit_tree_oid(const struct commit *);
+
/*
* Release memory related to a commit, including the parent list and
* any cached object buffer.
typedef int (*each_commit_graft_fn)(const struct commit_graft *, void *);
struct commit_graft *read_graft_line(struct strbuf *line);
- int register_commit_graft(struct commit_graft *, int);
- struct commit_graft *lookup_commit_graft(const struct object_id *oid);
+ int register_commit_graft(struct repository *r, struct commit_graft *, int);
+ struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid);
extern struct commit_list *get_merge_bases(struct commit *rev1, struct commit *rev2);
extern struct commit_list *get_merge_bases_many(struct commit *one, int n, struct commit **twos);
struct oid_array;
struct ref;
- extern int register_shallow(const struct object_id *oid);
+ extern int register_shallow(struct repository *r, const struct object_id *oid);
extern int unregister_shallow(const struct object_id *oid);
extern int for_each_commit_graft(each_commit_graft_fn, void *);
- extern int is_repository_shallow(void);
+ extern int is_repository_shallow(struct repository *r);
extern struct commit_list *get_shallow_commits(struct object_array *heads,
int depth, int shallow_flag, int not_shallow_flag);
extern struct commit_list *get_shallow_commits_by_rev_list(
int ac, const char **av, int shallow_flag, int not_shallow_flag);
- extern void set_alternate_shallow_file(const char *path, int override);
+ extern void set_alternate_shallow_file(struct repository *r, const char *path, int override);
extern int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
const struct oid_array *extra);
extern void setup_alternate_shallow(struct lock_file *shallow_lock,
/* Find the end of the log message, the right place for a new trailer. */
extern int ignore_non_trailer(const char *buf, size_t len);
-typedef void (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
+typedef int (*each_mergetag_fn)(struct commit *commit, struct commit_extra_header *extra,
void *cb_data);
-extern void for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data);
+extern int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data);
struct merge_remote_desc {
struct object *obj; /* the named object, could be a tag */
char name[FLEX_ARRAY];
};
-#define merge_remote_util(commit) ((struct merge_remote_desc *)((commit)->util))
+extern struct merge_remote_desc *merge_remote_util(struct commit *);
extern void set_merge_remote_desc(struct commit *commit,
const char *name, struct object *obj);
extern int check_commit_signature(const struct commit *commit, struct signature_check *sigc);
int compare_commits_by_commit_date(const void *a_, const void *b_, void *unused);
+int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused);
LAST_ARG_MUST_BE_NULL
extern int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...);
#include "config.h"
#include "repository.h"
#include "lockfile.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "strbuf.h"
#include "quote.h"
#include "hashmap.h"
#include "string-list.h"
+ #include "object-store.h"
#include "utf8.h"
#include "dir.h"
+#include "color.h"
struct config_source {
struct config_source *prev;
if (conf->u.buf.pos > 0) {
conf->u.buf.pos--;
if (conf->u.buf.buf[conf->u.buf.pos] != c)
- die("BUG: config_buf can only ungetc the same character");
+ BUG("config_buf can only ungetc the same character");
return c;
}
strbuf_realpath(&path, cf->path, 1);
slash = find_last_dir_sep(path.buf);
if (!slash)
- die("BUG: how is this possible?");
+ BUG("how is this possible?");
strbuf_splice(pat, 0, 1, path.buf, slash - path.buf);
prefix = slash - path.buf + 1 /* slash */;
} else if (!is_absolute_path(pat->buf))
}
}
-static int git_parse_source(config_fn_t fn, void *data)
+struct parse_event_data {
+ enum config_event_t previous_type;
+ size_t previous_offset;
+ const struct config_options *opts;
+};
+
+static int do_event(enum config_event_t type, struct parse_event_data *data)
+{
+ size_t offset;
+
+ if (!data->opts || !data->opts->event_fn)
+ return 0;
+
+ if (type == CONFIG_EVENT_WHITESPACE &&
+ data->previous_type == type)
+ return 0;
+
+ offset = cf->do_ftell(cf);
+ /*
+ * At EOF, the parser always "inserts" an extra '\n', therefore
+ * the end offset of the event is the current file position, otherwise
+ * we will already have advanced to the next event.
+ */
+ if (type != CONFIG_EVENT_EOF)
+ offset--;
+
+ if (data->previous_type != CONFIG_EVENT_EOF &&
+ data->opts->event_fn(data->previous_type, data->previous_offset,
+ offset, data->opts->event_fn_data) < 0)
+ return -1;
+
+ data->previous_type = type;
+ data->previous_offset = offset;
+
+ return 0;
+}
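
do_event() hands each completed element to an optional callback as a byte range [previous_offset, offset) of the config source, which is what later lets git_config_set_in_file_gently() rewrite files by splicing raw bytes instead of guessing line boundaries. A rough sketch of an independent consumer of this event stream (the function and callback names are invented for illustration):

	static int ignore_entry(const char *var, const char *value, void *data)
	{
		return 0;	/* we only care about the events */
	}

	static int count_comments(enum config_event_t type,
				  size_t begin, size_t end, void *data)
	{
		if (type == CONFIG_EVENT_COMMENT)
			(*(int *)data)++;
		return 0;	/* a negative return aborts parsing */
	}

	static int count_comments_in(const char *path)
	{
		int comments = 0;
		struct config_options opts;

		memset(&opts, 0, sizeof(opts));
		opts.event_fn = count_comments;
		opts.event_fn_data = &comments;
		if (git_config_from_file_with_options(ignore_entry, path,
						      NULL, &opts) < 0)
			return -1;
		return comments;
	}
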
+
+static int git_parse_source(config_fn_t fn, void *data,
+ const struct config_options *opts)
{
int comment = 0;
int baselen = 0;
/* U+FEFF Byte Order Mark in UTF8 */
const char *bomptr = utf8_bom;
+ /* For the parser event callback */
+ struct parse_event_data event_data = {
+ CONFIG_EVENT_EOF, 0, opts
+ };
+
for (;;) {
- int c = get_next_char();
+ int c;
+
+ c = get_next_char();
if (bomptr && *bomptr) {
/* We are at the file beginning; skip UTF8-encoded BOM
* if present. Sane editors won't put this in on their
}
}
if (c == '\n') {
- if (cf->eof)
+ if (cf->eof) {
+ if (do_event(CONFIG_EVENT_EOF, &event_data) < 0)
+ return -1;
return 0;
+ }
+ if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0)
+ return -1;
comment = 0;
continue;
}
- if (comment || isspace(c))
+ if (comment)
+ continue;
+ if (isspace(c)) {
+ if (do_event(CONFIG_EVENT_WHITESPACE, &event_data) < 0)
+ return -1;
continue;
+ }
if (c == '#' || c == ';') {
+ if (do_event(CONFIG_EVENT_COMMENT, &event_data) < 0)
+ return -1;
comment = 1;
continue;
}
if (c == '[') {
+ if (do_event(CONFIG_EVENT_SECTION, &event_data) < 0)
+ return -1;
+
/* Reset prior to determining a new stem */
strbuf_reset(var);
if (get_base_var(var) < 0 || var->len < 1)
}
if (!isalpha(c))
break;
+
+ if (do_event(CONFIG_EVENT_ENTRY, &event_data) < 0)
+ return -1;
+
/*
* Truncate the var name back to the section header
* stem prior to grabbing the suffix part of the name
break;
}
+ if (do_event(CONFIG_EVENT_ERROR, &event_data) < 0)
+ return -1;
+
switch (cf->origin_type) {
case CONFIG_ORIGIN_BLOB:
error_msg = xstrfmt(_("bad config line %d in blob %s"),
return 0;
}
+int git_config_color(char *dest, const char *var, const char *value)
+{
+ if (!value)
+ return config_error_nonbool(var);
+ if (color_parse(value, dest) < 0)
+ return -1;
+ return 0;
+}
+
static int git_default_core_config(const char *var, const char *value)
{
/* This needs a better name */
}
eol_rndtrp_die = git_config_bool(var, value);
global_conv_flags_eol = eol_rndtrp_die ?
- CONV_EOL_RNDTRP_DIE : CONV_EOL_RNDTRP_WARN;
+ CONV_EOL_RNDTRP_DIE : 0;
return 0;
}
return 0;
}
+ if (!strcmp(var, "core.checkroundtripencoding")) {
+ check_roundtrip_encoding = xstrdup(value);
+ return 0;
+ }
+
if (!strcmp(var, "core.notesref")) {
notes_ref_name = xstrdup(value);
return 0;
return 0;
}
+ if (!strcmp(var, "core.commitgraph")) {
+ core_commit_graph = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
if (starts_with(var, "mailmap."))
return git_default_mailmap_config(var, value);
- if (starts_with(var, "advice."))
+ if (starts_with(var, "advice.") || starts_with(var, "color.advice"))
return git_default_advice_config(var, value);
if (!strcmp(var, "pager.color") || !strcmp(var, "color.pager")) {
* fgetc, ungetc, ftell of top need to be initialized before calling
* this function.
*/
-static int do_config_from(struct config_source *top, config_fn_t fn, void *data)
+static int do_config_from(struct config_source *top, config_fn_t fn, void *data,
+ const struct config_options *opts)
{
int ret;
strbuf_init(&top->var, 1024);
cf = top;
- ret = git_parse_source(fn, data);
+ ret = git_parse_source(fn, data, opts);
/* pop config-file parsing state stack */
strbuf_release(&top->value);
static int do_config_from_file(config_fn_t fn,
const enum config_origin_type origin_type,
const char *name, const char *path, FILE *f,
- void *data)
+ void *data, const struct config_options *opts)
{
struct config_source top;
+ int ret;
top.u.file = f;
top.origin_type = origin_type;
top.do_ungetc = config_file_ungetc;
top.do_ftell = config_file_ftell;
- return do_config_from(&top, fn, data);
+ flockfile(f);
+ ret = do_config_from(&top, fn, data, opts);
+ funlockfile(f);
+ return ret;
}
static int git_config_from_stdin(config_fn_t fn, void *data)
{
- return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin, data);
+ return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", NULL, stdin,
+ data, NULL);
}
-int git_config_from_file(config_fn_t fn, const char *filename, void *data)
+int git_config_from_file_with_options(config_fn_t fn, const char *filename,
+ void *data,
+ const struct config_options *opts)
{
int ret = -1;
FILE *f;
f = fopen_or_warn(filename, "r");
if (f) {
- flockfile(f);
- ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename, filename, f, data);
- funlockfile(f);
+ ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename,
+ filename, f, data, opts);
fclose(f);
}
return ret;
}
+int git_config_from_file(config_fn_t fn, const char *filename, void *data)
+{
+ return git_config_from_file_with_options(fn, filename, data, NULL);
+}
+
int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_type,
const char *name, const char *buf, size_t len, void *data)
{
top.do_ungetc = config_buf_ungetc;
top.do_ftell = config_buf_ftell;
- return do_config_from(&top, fn, data);
+ return do_config_from(&top, fn, data, NULL);
}
int git_config_from_blob_oid(config_fn_t fn,
l_item->value_index = e->value_list.nr - 1;
if (!cf)
- die("BUG: configset_add_value has no source");
+ BUG("configset_add_value has no source");
if (cf->name) {
kv_info->filename = strintern(cf->name);
kv_info->linenr = cf->linenr;
* Find all the stuff for git_config_set() below.
*/
-static struct {
+struct config_store_data {
int baselen;
char *key;
int do_not_match;
regex_t *value_regex;
int multi_replace;
- size_t *offset;
- unsigned int offset_alloc;
- enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
- unsigned int seen;
-} store;
+ struct {
+ size_t begin, end;
+ enum config_event_t type;
+ int is_keys_section;
+ } *parsed;
+ unsigned int parsed_nr, parsed_alloc, *seen, seen_nr, seen_alloc;
+ unsigned int key_seen:1, section_seen:1, is_keys_section:1;
+};
-static int matches(const char *key, const char *value)
+static void config_store_data_clear(struct config_store_data *store)
{
- if (strcmp(key, store.key))
+ free(store->key);
+ if (store->value_regex != NULL &&
+ store->value_regex != CONFIG_REGEX_NONE) {
+ regfree(store->value_regex);
+ free(store->value_regex);
+ }
+ free(store->parsed);
+ free(store->seen);
+ memset(store, 0, sizeof(*store));
+}
+
+static int matches(const char *key, const char *value,
+ const struct config_store_data *store)
+{
+ if (strcmp(key, store->key))
return 0; /* not ours */
- if (!store.value_regex)
+ if (!store->value_regex)
return 1; /* always matches */
- if (store.value_regex == CONFIG_REGEX_NONE)
+ if (store->value_regex == CONFIG_REGEX_NONE)
return 0; /* never matches */
- return store.do_not_match ^
- (value && !regexec(store.value_regex, value, 0, NULL, 0));
+ return store->do_not_match ^
+ (value && !regexec(store->value_regex, value, 0, NULL, 0));
+}
+
+static int store_aux_event(enum config_event_t type,
+ size_t begin, size_t end, void *data)
+{
+ struct config_store_data *store = data;
+
+ ALLOC_GROW(store->parsed, store->parsed_nr + 1, store->parsed_alloc);
+ store->parsed[store->parsed_nr].begin = begin;
+ store->parsed[store->parsed_nr].end = end;
+ store->parsed[store->parsed_nr].type = type;
+
+ if (type == CONFIG_EVENT_SECTION) {
+ if (cf->var.len < 2 || cf->var.buf[cf->var.len - 1] != '.')
+ return error("invalid section name '%s'", cf->var.buf);
+
+ /* Is this the section we were looking for? */
+ store->is_keys_section =
+ store->parsed[store->parsed_nr].is_keys_section =
+ cf->var.len - 1 == store->baselen &&
+ !strncasecmp(cf->var.buf, store->key, store->baselen);
+ if (store->is_keys_section) {
+ store->section_seen = 1;
+ ALLOC_GROW(store->seen, store->seen_nr + 1,
+ store->seen_alloc);
+ store->seen[store->seen_nr] = store->parsed_nr;
+ }
+ }
+
+ store->parsed_nr++;
+
+ return 0;
}
static int store_aux(const char *key, const char *value, void *cb)
{
- const char *ep;
- size_t section_len;
+ struct config_store_data *store = cb;
- switch (store.state) {
- case KEY_SEEN:
- if (matches(key, value)) {
- if (store.seen == 1 && store.multi_replace == 0) {
+ if (store->key_seen) {
+ if (matches(key, value, store)) {
+ if (store->seen_nr == 1 && store->multi_replace == 0) {
warning(_("%s has multiple values"), key);
}
- ALLOC_GROW(store.offset, store.seen + 1,
- store.offset_alloc);
+ ALLOC_GROW(store->seen, store->seen_nr + 1,
+ store->seen_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- store.seen++;
+ store->seen[store->seen_nr] = store->parsed_nr;
+ store->seen_nr++;
}
- break;
- case SECTION_SEEN:
+ } else if (store->is_keys_section) {
/*
- * What we are looking for is in store.key (both
- * section and var), and its section part is baselen
- * long. We found key (again, both section and var).
- * We would want to know if this key is in the same
- * section as what we are looking for. We already
- * know we are in the same section as what should
- * hold store.key.
+ * Do not increment matches yet: this may not be a match, but we
+ * are in the desired section.
*/
- ep = strrchr(key, '.');
- section_len = ep - key;
-
- if ((section_len != store.baselen) ||
- memcmp(key, store.key, section_len+1)) {
- store.state = SECTION_END_SEEN;
- break;
- }
+ ALLOC_GROW(store->seen, store->seen_nr + 1, store->seen_alloc);
+ store->seen[store->seen_nr] = store->parsed_nr;
+ store->section_seen = 1;
- /*
- * Do not increment matches: this is no match, but we
- * just made sure we are in the desired section.
- */
- ALLOC_GROW(store.offset, store.seen + 1,
- store.offset_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- /* fallthru */
- case SECTION_END_SEEN:
- case START:
- if (matches(key, value)) {
- ALLOC_GROW(store.offset, store.seen + 1,
- store.offset_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- store.state = KEY_SEEN;
- store.seen++;
- } else {
- if (strrchr(key, '.') - key == store.baselen &&
- !strncmp(key, store.key, store.baselen)) {
- store.state = SECTION_SEEN;
- ALLOC_GROW(store.offset,
- store.seen + 1,
- store.offset_alloc);
- store.offset[store.seen] = cf->do_ftell(cf);
- }
+ if (matches(key, value, store)) {
+ store->seen_nr++;
+ store->key_seen = 1;
}
}
+
return 0;
}
return 4;
}
-static struct strbuf store_create_section(const char *key)
+static struct strbuf store_create_section(const char *key,
+ const struct config_store_data *store)
{
const char *dot;
int i;
struct strbuf sb = STRBUF_INIT;
- dot = memchr(key, '.', store.baselen);
+ dot = memchr(key, '.', store->baselen);
if (dot) {
strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
- for (i = dot - key + 1; i < store.baselen; i++) {
+ for (i = dot - key + 1; i < store->baselen; i++) {
if (key[i] == '"' || key[i] == '\\')
strbuf_addch(&sb, '\\');
strbuf_addch(&sb, key[i]);
}
strbuf_addstr(&sb, "\"]\n");
} else {
- strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
+ strbuf_addf(&sb, "[%.*s]\n", store->baselen, key);
}
return sb;
}
-static ssize_t write_section(int fd, const char *key)
+static ssize_t write_section(int fd, const char *key,
+ const struct config_store_data *store)
{
- struct strbuf sb = store_create_section(key);
+ struct strbuf sb = store_create_section(key, store);
ssize_t ret;
ret = write_in_full(fd, sb.buf, sb.len);
return ret;
}
-static ssize_t write_pair(int fd, const char *key, const char *value)
+static ssize_t write_pair(int fd, const char *key, const char *value,
+ const struct config_store_data *store)
{
int i;
ssize_t ret;
- int length = strlen(key + store.baselen + 1);
+ int length = strlen(key + store->baselen + 1);
const char *quote = "";
struct strbuf sb = STRBUF_INIT;
quote = "\"";
strbuf_addf(&sb, "\t%.*s = %s",
- length, key + store.baselen + 1, quote);
+ length, key + store->baselen + 1, quote);
for (i = 0; value[i]; i++)
switch (value[i]) {
return ret;
}
-static ssize_t find_beginning_of_line(const char *contents, size_t size,
- size_t offset_, int *found_bracket)
+/*
+ * If we are about to unset the last key(s) in a section, and if there are
+ * no comments surrounding (or included in) the section, we will want to
+ * extend begin/end to remove the entire section.
+ *
+ * Note: the parameter `seen_ptr` points to the index into the store.seen
+ * array. This index may be incremented if a section has more than one
+ * entry (which all are to be removed).
+ */
+static void maybe_remove_section(struct config_store_data *store,
+ const char *contents,
+ size_t *begin_offset, size_t *end_offset,
+ int *seen_ptr)
{
- size_t equal_offset = size, bracket_offset = size;
- ssize_t offset;
+ size_t begin;
+ int i, seen, section_seen = 0;
-contline:
- for (offset = offset_-2; offset > 0
- && contents[offset] != '\n'; offset--)
- switch (contents[offset]) {
- case '=': equal_offset = offset; break;
- case ']': bracket_offset = offset; break;
+ /*
+ * First, ensure that this is the first key, and that there are no
+ * comments before the entry nor before the section header.
+ */
+ seen = *seen_ptr;
+ for (i = store->seen[seen]; i > 0; i--) {
+ enum config_event_t type = store->parsed[i - 1].type;
+
+ if (type == CONFIG_EVENT_COMMENT)
+ /* There is a comment before this entry or section */
+ return;
+ if (type == CONFIG_EVENT_ENTRY) {
+ if (!section_seen)
+ /* This is not the section's first entry. */
+ return;
+ /* We encountered no comment before the section. */
+ break;
+ }
+ if (type == CONFIG_EVENT_SECTION) {
+ if (!store->parsed[i - 1].is_keys_section)
+ break;
+ section_seen = 1;
}
- if (offset > 0 && contents[offset-1] == '\\') {
- offset_ = offset;
- goto contline;
}
- if (bracket_offset < equal_offset) {
- *found_bracket = 1;
- offset = bracket_offset+1;
- } else
- offset++;
+ begin = store->parsed[i].begin;
- return offset;
+ /*
+ * Next, make sure that we are removing the last key(s) in the section,
+ * and that there are no comments that are possibly about the current
+ * section.
+ */
+ for (i = store->seen[seen] + 1; i < store->parsed_nr; i++) {
+ enum config_event_t type = store->parsed[i].type;
+
+ if (type == CONFIG_EVENT_COMMENT)
+ return;
+ if (type == CONFIG_EVENT_SECTION) {
+ if (store->parsed[i].is_keys_section)
+ continue;
+ break;
+ }
+ if (type == CONFIG_EVENT_ENTRY) {
+ if (++seen < store->seen_nr &&
+ i == store->seen[seen])
+ /* We want to remove this entry, too */
+ continue;
+ /* There is another entry in this section. */
+ return;
+ }
+ }
+
+ /*
+ * We are really removing the last entry/entries from this section, and
+ * there are no enclosed or surrounding comments. Remove the entire,
+ * now-empty section.
+ */
+ *seen_ptr = seen;
+ *begin_offset = begin;
+ if (i < store->parsed_nr)
+ *end_offset = store->parsed[i].begin;
+ else
+ *end_offset = store->parsed[store->parsed_nr - 1].end;
}
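
Following the comments above, the practical effect is: when a caller is unsetting a value (value == NULL further down) and the matched entry is the last one left in its section, the whole now-empty section header is spliced out as well, unless comments in or around the section might still refer to it. A hedged example of such a caller, with an invented path and key:

	/*
	 * Unset the only key of a section; assuming no comments surround
	 * the section, the "[foo]" header is removed along with the entry.
	 */
	if (git_config_set_in_file_gently("/tmp/example.gitconfig",
					  "foo.bar", NULL))
		warning("could not unset foo.bar");
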
int git_config_set_in_file_gently(const char *config_filename,
char *filename_buf = NULL;
char *contents = NULL;
size_t contents_sz;
+ struct config_store_data store;
+
+ memset(&store, 0, sizeof(store));
/* parse-key returns negative; flip the sign to feed exit(3) */
ret = 0 - git_config_parse_key(key, &store.key, &store.baselen);
fd = hold_lock_file_for_update(&lock, config_filename, 0);
if (fd < 0) {
error_errno("could not lock config file %s", config_filename);
- free(store.key);
ret = CONFIG_NO_LOCK;
goto out_free;
}
*/
in_fd = open(config_filename, O_RDONLY);
if ( in_fd < 0 ) {
- free(store.key);
-
if ( ENOENT != errno ) {
error_errno("opening %s", config_filename);
ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */
goto out_free;
}
- store.key = (char *)key;
- if (write_section(fd, key) < 0 ||
- write_pair(fd, key, value) < 0)
+ free(store.key);
+ store.key = xstrdup(key);
+ if (write_section(fd, key, &store) < 0 ||
+ write_pair(fd, key, value, &store) < 0)
goto write_err_out;
} else {
struct stat st;
size_t copy_begin, copy_end;
int i, new_line = 0;
+ struct config_options opts;
if (value_regex == NULL)
store.value_regex = NULL;
if (regcomp(store.value_regex, value_regex,
REG_EXTENDED)) {
error("invalid pattern: %s", value_regex);
- free(store.value_regex);
+ FREE_AND_NULL(store.value_regex);
ret = CONFIG_INVALID_PATTERN;
goto out_free;
}
}
- ALLOC_GROW(store.offset, 1, store.offset_alloc);
- store.offset[0] = 0;
- store.state = START;
- store.seen = 0;
+ ALLOC_GROW(store.parsed, 1, store.parsed_alloc);
+ store.parsed[0].end = 0;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.event_fn = store_aux_event;
+ opts.event_fn_data = &store;
/*
- * After this, store.offset will contain the *end* offset
- * of the last match, or remain at 0 if no match was found.
+ * After this, store.parsed will contain offsets of all the
+ * parsed elements, and store.seen will contain a list of
+ * matches, as indices into store.parsed.
+ *
* As a side effect, we make sure to transform only a valid
* existing config file.
*/
- if (git_config_from_file(store_aux, config_filename, NULL)) {
+ if (git_config_from_file_with_options(store_aux,
+ config_filename,
+ &store, &opts)) {
error("invalid config file %s", config_filename);
- free(store.key);
- if (store.value_regex != NULL &&
- store.value_regex != CONFIG_REGEX_NONE) {
- regfree(store.value_regex);
- free(store.value_regex);
- }
ret = CONFIG_INVALID_FILE;
goto out_free;
}
- free(store.key);
- if (store.value_regex != NULL &&
- store.value_regex != CONFIG_REGEX_NONE) {
- regfree(store.value_regex);
- free(store.value_regex);
- }
-
/* if nothing to unset, or too many matches, error out */
- if ((store.seen == 0 && value == NULL) ||
- (store.seen > 1 && multi_replace == 0)) {
+ if ((store.seen_nr == 0 && value == NULL) ||
+ (store.seen_nr > 1 && multi_replace == 0)) {
ret = CONFIG_NOTHING_SET;
goto out_free;
}
goto out_free;
}
- if (store.seen == 0)
- store.seen = 1;
+ if (store.seen_nr == 0) {
+ if (!store.seen_alloc) {
+ /* Did not see key nor section */
+ ALLOC_GROW(store.seen, 1, store.seen_alloc);
+ store.seen[0] = store.parsed_nr
+ - !!store.parsed_nr;
+ }
+ store.seen_nr = 1;
+ }
- for (i = 0, copy_begin = 0; i < store.seen; i++) {
- if (store.offset[i] == 0) {
- store.offset[i] = copy_end = contents_sz;
- } else if (store.state != KEY_SEEN) {
- copy_end = store.offset[i];
- } else
- copy_end = find_beginning_of_line(
- contents, contents_sz,
- store.offset[i]-2, &new_line);
+ for (i = 0, copy_begin = 0; i < store.seen_nr; i++) {
+ size_t replace_end;
+ int j = store.seen[i];
+
+ new_line = 0;
+ if (!store.key_seen) {
+ copy_end = store.parsed[j].end;
+ /* include '\n' when copying section header */
+ if (copy_end > 0 && copy_end < contents_sz &&
+ contents[copy_end - 1] != '\n' &&
+ contents[copy_end] == '\n')
+ copy_end++;
+ replace_end = copy_end;
+ } else {
+ replace_end = store.parsed[j].end;
+ copy_end = store.parsed[j].begin;
+ if (!value)
+ maybe_remove_section(&store, contents,
+ &copy_end,
+ &replace_end, &i);
+ /*
+ * Swallow preceding white-space on the same
+ * line.
+ */
+ while (copy_end > 0 ) {
+ char c = contents[copy_end - 1];
+
+ if (isspace(c) && c != '\n')
+ copy_end--;
+ else
+ break;
+ }
+ }
if (copy_end > 0 && contents[copy_end-1] != '\n')
new_line = 1;
write_str_in_full(fd, "\n") < 0)
goto write_err_out;
}
- copy_begin = store.offset[i];
+ copy_begin = replace_end;
}
/* write the pair (value == NULL means unset) */
if (value != NULL) {
- if (store.state == START) {
- if (write_section(fd, key) < 0)
+ if (!store.section_seen) {
+ if (write_section(fd, key, &store) < 0)
goto write_err_out;
}
- if (write_pair(fd, key, value) < 0)
+ if (write_pair(fd, key, value, &store) < 0)
goto write_err_out;
}
munmap(contents, contents_sz);
if (in_fd >= 0)
close(in_fd);
+ config_store_data_clear(&store);
return ret;
write_err_out:
/* if new_name == NULL, the section is removed instead */
static int git_config_copy_or_rename_section_in_file(const char *config_filename,
- const char *old_name, const char *new_name, int copy)
+ const char *old_name,
+ const char *new_name, int copy)
{
int ret = 0, remove = 0;
char *filename_buf = NULL;
FILE *config_file = NULL;
struct stat st;
struct strbuf copystr = STRBUF_INIT;
+ struct config_store_data store;
+
+ memset(&store, 0, sizeof(store));
if (new_name && !section_name_is_ok(new_name)) {
ret = error("invalid section name: %s", new_name);
}
store.baselen = strlen(new_name);
if (!copy) {
- if (write_section(out_fd, new_name) < 0) {
+ if (write_section(out_fd, new_name, &store) < 0) {
ret = write_error(get_lock_file_path(&lock));
goto out;
}
output[0] = '\t';
}
} else {
- copystr = store_create_section(new_name);
+ copystr = store_create_section(new_name, &store);
}
}
remove = 0;
rollback_lock_file(&lock);
out_no_rollback:
free(filename_buf);
+ config_store_data_clear(&store);
return ret;
}
else if (cf)
type = cf->origin_type;
else
- die("BUG: current_config_origin_type called outside config callback");
+ BUG("current_config_origin_type called outside config callback");
switch (type) {
case CONFIG_ORIGIN_BLOB:
case CONFIG_ORIGIN_CMDLINE:
return "command line";
default:
- die("BUG: unknown config origin type");
+ BUG("unknown config origin type");
}
}
else if (cf)
name = cf->name;
else
- die("BUG: current_config_name called outside config callback");
+ BUG("current_config_name called outside config callback");
return name ? name : "";
}
else
return current_parsing_scope;
}
+
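+/*
+ * Find "var" in the mapping array, ignoring case; return its index,
+ * or -1 if it is not present.
+ */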
+int lookup_config(const char **mapping, int nr_mapping, const char *var)
+{
+ int i;
+
+ for (i = 0; i < nr_mapping; i++) {
+ const char *name = mapping[i];
+
+ if (name && !strcasecmp(var, name))
+ return i;
+ }
+ return -1;
+}
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
+ #include "object-store.h"
#include "attr.h"
#include "run-command.h"
#include "quote.h"
#include "sigchain.h"
#include "pkt-line.h"
#include "sub-process.h"
+#include "utf8.h"
/*
* convert.c - convert a file when checking it out and checking it in.
}
+static int validate_encoding(const char *path, const char *enc,
+ const char *data, size_t len, int die_on_error)
+{
+ /* We only check for UTF here as UTF?? can be an alias for UTF-?? */
+ if (istarts_with(enc, "UTF")) {
+ /*
+ * Check for detectable errors in UTF encodings
+ */
+ if (has_prohibited_utf_bom(enc, data, len)) {
+ const char *error_msg = _(
+ "BOM is prohibited in '%s' if encoded as %s");
+ /*
+ * This advice is shown for UTF-??BE and UTF-??LE encodings.
+ * We cut off the last two characters of the encoding name
+ * to generate the encoding name suitable for BOMs.
+ */
+ const char *advise_msg = _(
+ "The file '%s' contains a byte order "
+ "mark (BOM). Please use UTF-%s as "
+ "working-tree-encoding.");
+ const char *stripped = NULL;
+ char *upper = xstrdup_toupper(enc);
+ upper[strlen(upper)-2] = '\0';
+ if (!skip_prefix(upper, "UTF-", &stripped))
+ skip_prefix(stripped, "UTF", &stripped);
+ advise(advise_msg, path, stripped);
+ free(upper);
+ if (die_on_error)
+ die(error_msg, path, enc);
+ else {
+ return error(error_msg, path, enc);
+ }
+
+ } else if (is_missing_required_utf_bom(enc, data, len)) {
+ const char *error_msg = _(
+ "BOM is required in '%s' if encoded as %s");
+ const char *advise_msg = _(
+ "The file '%s' is missing a byte order "
+ "mark (BOM). Please use UTF-%sBE or UTF-%sLE "
+ "(depending on the byte order) as "
+ "working-tree-encoding.");
+ const char *stripped = NULL;
+ char *upper = xstrdup_toupper(enc);
+ if (!skip_prefix(upper, "UTF-", &stripped))
+ skip_prefix(stripped, "UTF", &stripped);
+ advise(advise_msg, path, stripped, stripped);
+ free(upper);
+ if (die_on_error)
+ die(error_msg, path, enc);
+ else {
+ return error(error_msg, path, enc);
+ }
+ }
+
+ }
+ return 0;
+}
+
+static void trace_encoding(const char *context, const char *path,
+ const char *encoding, const char *buf, size_t len)
+{
+ static struct trace_key coe = TRACE_KEY_INIT(WORKING_TREE_ENCODING);
+ struct strbuf trace = STRBUF_INIT;
+ int i;
+
+ strbuf_addf(&trace, "%s (%s, considered %s):\n", context, path, encoding);
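+ /* hex/ASCII dump of the buffer, eight bytes per output line */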
+ for (i = 0; i < len && buf; ++i) {
+ strbuf_addf(
+ &trace,"| \e[2m%2i:\e[0m %2x \e[2m%c\e[0m%c",
+ i,
+ (unsigned char) buf[i],
+ (buf[i] > 32 && buf[i] < 127 ? buf[i] : ' '),
+ ((i+1) % 8 && (i+1) < len ? ' ' : '\n')
+ );
+ }
+ strbuf_addchars(&trace, '\n', 1);
+
+ trace_strbuf(&coe, &trace);
+ strbuf_release(&trace);
+}
+
+static int check_roundtrip(const char *enc_name)
+{
+ /*
+ * check_roundtrip_encoding contains a string of comma and/or
+ * space separated encodings (e.g. "UTF-16, ASCII, CP1125").
+ * Search for the given encoding in that string.
+ */
+ const char *found = strcasestr(check_roundtrip_encoding, enc_name);
+ const char *next;
+ int len;
+ if (!found)
+ return 0;
+ next = found + strlen(enc_name);
+ len = strlen(check_roundtrip_encoding);
+ return (found && (
+ /*
+ * check that the found encoding is at the
+ * beginning of check_roundtrip_encoding or
+ * that it is prefixed with a space or comma
+ */
+ found == check_roundtrip_encoding || (
+ (isspace(found[-1]) || found[-1] == ',')
+ )
+ ) && (
+ /*
+ * check that the found encoding is at the
+ * end of check_roundtrip_encoding or
+ * that it is suffixed with a space or comma
+ */
+ next == check_roundtrip_encoding + len || (
+ next < check_roundtrip_encoding + len &&
+ (isspace(next[0]) || next[0] == ',')
+ )
+ ));
+}
+
+static const char *default_encoding = "UTF-8";
+
+static int encode_to_git(const char *path, const char *src, size_t src_len,
+ struct strbuf *buf, const char *enc, int conv_flags)
+{
+ char *dst;
+ int dst_len;
+ int die_on_error = conv_flags & CONV_WRITE_OBJECT;
+
+ /*
+ * No encoding is specified or there is nothing to encode.
+ * Tell the caller that the content was not modified.
+ */
+ if (!enc || (src && !src_len))
+ return 0;
+
+ /*
+ * Looks like we got called from "would_convert_to_git()".
+ * This means Git wants to know if it would encode (= modify!)
+ * the content. Let's answer with "yes", since an encoding was
+ * specified.
+ */
+ if (!buf && !src)
+ return 1;
+
+ if (validate_encoding(path, enc, src, src_len, die_on_error))
+ return 0;
+
+ trace_encoding("source", path, enc, src, src_len);
+ dst = reencode_string_len(src, src_len, default_encoding, enc,
+ &dst_len);
+ if (!dst) {
+ /*
+ * We could add the blob "as-is" to Git. However, on checkout
+ * we would try to reencode to the original encoding. This
+ * would fail and we would leave the user with a messed-up
+ * working tree. Let's try to avoid this by screaming loud.
+ */
+ const char* msg = _("failed to encode '%s' from %s to %s");
+ if (die_on_error)
+ die(msg, path, enc, default_encoding);
+ else {
+ error(msg, path, enc, default_encoding);
+ return 0;
+ }
+ }
+ trace_encoding("destination", path, default_encoding, dst, dst_len);
+
+ /*
+ * UTF supports lossless conversion round tripping [1] and conversions
+ * between UTF and other encodings are mostly round trip safe as
+ * Unicode aims to be a superset of all other character encodings.
+ * However, certain encodings (e.g. SHIFT-JIS) are known to have round
+ * trip issues [2]. Check the round trip conversion for all encodings
+ * listed in core.checkRoundtripEncoding.
+ *
+ * The round trip check is only performed if content is written to Git.
+ * This ensures that no information is lost during conversion to/from
+ * the internal UTF-8 representation.
+ *
+ * Please note, the code below is not tested because I was not able to
+ * generate a faulty round trip without an iconv error. Iconv errors
+ * are already caught above.
+ *
+ * [1] http://unicode.org/faq/utf_bom.html#gen2
+ * [2] https://support.microsoft.com/en-us/help/170559/prb-conversion-problem-between-shift-jis-and-unicode
+ */
+ if (die_on_error && check_roundtrip(enc)) {
+ char *re_src;
+ int re_src_len;
+
+ re_src = reencode_string_len(dst, dst_len,
+ enc, default_encoding,
+ &re_src_len);
+
+ trace_printf("Checking roundtrip encoding for %s...\n", enc);
+ trace_encoding("reencoded source", path, enc,
+ re_src, re_src_len);
+
+ if (!re_src || src_len != re_src_len ||
+ memcmp(src, re_src, src_len)) {
+ const char* msg = _("encoding '%s' from %s to %s and "
+ "back is not the same");
+ die(msg, path, enc, default_encoding);
+ }
+
+ free(re_src);
+ }
+
+ strbuf_attach(buf, dst, dst_len, dst_len + 1);
+ return 1;
+}
+
+static int encode_to_worktree(const char *path, const char *src, size_t src_len,
+ struct strbuf *buf, const char *enc)
+{
+ char *dst;
+ int dst_len;
+
+ /*
+ * No encoding is specified or there is nothing to encode.
+ * Tell the caller that the content was not modified.
+ */
+ if (!enc || (src && !src_len))
+ return 0;
+
+ dst = reencode_string_len(src, src_len, enc, default_encoding,
+ &dst_len);
+ if (!dst) {
+ error("failed to encode '%s' from %s to %s",
+ path, default_encoding, enc);
+ return 0;
+ }
+
+ strbuf_attach(buf, dst, dst_len, dst_len + 1);
+ return 1;
+}
+
static int crlf_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
return 1;
}
+static const char *git_path_check_encoding(struct attr_check_item *check)
+{
+ const char *value = check->value;
+
+ if (ATTR_UNSET(value) || !strlen(value))
+ return NULL;
+
+ if (ATTR_TRUE(value) || ATTR_FALSE(value)) {
+ die(_("true/false are no valid working-tree-encodings"));
+ }
+
+ /* Don't encode to the default encoding */
+ if (same_encoding(value, default_encoding))
+ return NULL;
+
+ return value;
+}
+
static enum crlf_action git_path_check_crlf(struct attr_check_item *check)
{
const char *value = check->value;
enum crlf_action attr_action; /* What attr says */
enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
int ident;
+ const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
};
static void convert_attrs(struct conv_attrs *ca, const char *path)
if (!check) {
check = attr_check_initl("crlf", "ident", "filter",
- "eol", "text", NULL);
+ "eol", "text", "working-tree-encoding",
+ NULL);
user_convert_tail = &user_convert;
git_config(read_convert_config, NULL);
}
else if (eol_attr == EOL_CRLF)
ca->crlf_action = CRLF_TEXT_CRLF;
}
+ ca->working_tree_encoding = git_path_check_encoding(ccheck + 5);
} else {
ca->drv = NULL;
ca->crlf_action = CRLF_UNDEFINED;
src = dst->buf;
len = dst->len;
}
+
+ ret |= encode_to_git(path, src, len, dst, ca.working_tree_encoding, conv_flags);
+ if (ret && dst) {
+ src = dst->buf;
+ len = dst->len;
+ }
+
if (!(conv_flags & CONV_EOL_KEEP_CRLF)) {
ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags);
if (ret && dst) {
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
die("%s: clean filter '%s' failed", path, ca.drv->name);
+ encode_to_git(path, dst->buf, dst->len, dst, ca.working_tree_encoding, conv_flags);
crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
}
}
}
+ ret |= encode_to_worktree(path, src, len, dst, ca.working_tree_encoding);
+ if (ret) {
+ src = dst->buf;
+ len = dst->len;
+ }
+
ret_filter = apply_filter(
path, src, len, -1, dst, ca.drv, CAP_SMUDGE, dco);
if (!ret_filter && ca.drv && ca.drv->required)
if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
return NULL;
+ if (ca.working_tree_encoding)
+ return NULL;
+
if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
return NULL;
#include "attr.h"
#include "run-command.h"
#include "utf8.h"
+ #include "object-store.h"
#include "userdiff.h"
#include "submodule-config.h"
#include "submodule.h"
#include "argv-array.h"
#include "graph.h"
#include "packfile.h"
+#include "help.h"
#ifdef NO_FAST_WORKING_DIRECTORY
#define FAST_WORKING_DIRECTORY 0
GIT_COLOR_FAINT_ITALIC, /* NEW_MOVED_ALTERNATIVE_DIM */
};
+static const char *color_diff_slots[] = {
+ [DIFF_CONTEXT] = "context",
+ [DIFF_METAINFO] = "meta",
+ [DIFF_FRAGINFO] = "frag",
+ [DIFF_FILE_OLD] = "old",
+ [DIFF_FILE_NEW] = "new",
+ [DIFF_COMMIT] = "commit",
+ [DIFF_WHITESPACE] = "whitespace",
+ [DIFF_FUNCINFO] = "func",
+ [DIFF_FILE_OLD_MOVED] = "oldMoved",
+ [DIFF_FILE_OLD_MOVED_ALT] = "oldMovedAlternative",
+ [DIFF_FILE_OLD_MOVED_DIM] = "oldMovedDimmed",
+ [DIFF_FILE_OLD_MOVED_ALT_DIM] = "oldMovedAlternativeDimmed",
+ [DIFF_FILE_NEW_MOVED] = "newMoved",
+ [DIFF_FILE_NEW_MOVED_ALT] = "newMovedAlternative",
+ [DIFF_FILE_NEW_MOVED_DIM] = "newMovedDimmed",
+ [DIFF_FILE_NEW_MOVED_ALT_DIM] = "newMovedAlternativeDimmed",
+};
+
static NORETURN void die_want_option(const char *option_name)
{
die(_("option '%s' requires a value"), option_name);
}
+define_list_config_array_extra(color_diff_slots, {"plain"});
+
static int parse_diff_color_slot(const char *var)
{
- if (!strcasecmp(var, "context") || !strcasecmp(var, "plain"))
+ if (!strcasecmp(var, "plain"))
return DIFF_CONTEXT;
- if (!strcasecmp(var, "meta"))
- return DIFF_METAINFO;
- if (!strcasecmp(var, "frag"))
- return DIFF_FRAGINFO;
- if (!strcasecmp(var, "old"))
- return DIFF_FILE_OLD;
- if (!strcasecmp(var, "new"))
- return DIFF_FILE_NEW;
- if (!strcasecmp(var, "commit"))
- return DIFF_COMMIT;
- if (!strcasecmp(var, "whitespace"))
- return DIFF_WHITESPACE;
- if (!strcasecmp(var, "func"))
- return DIFF_FUNCINFO;
- if (!strcasecmp(var, "oldmoved"))
- return DIFF_FILE_OLD_MOVED;
- if (!strcasecmp(var, "oldmovedalternative"))
- return DIFF_FILE_OLD_MOVED_ALT;
- if (!strcasecmp(var, "oldmoveddimmed"))
- return DIFF_FILE_OLD_MOVED_DIM;
- if (!strcasecmp(var, "oldmovedalternativedimmed"))
- return DIFF_FILE_OLD_MOVED_ALT_DIM;
- if (!strcasecmp(var, "newmoved"))
- return DIFF_FILE_NEW_MOVED;
- if (!strcasecmp(var, "newmovedalternative"))
- return DIFF_FILE_NEW_MOVED_ALT;
- if (!strcasecmp(var, "newmoveddimmed"))
- return DIFF_FILE_NEW_MOVED_DIM;
- if (!strcasecmp(var, "newmovedalternativedimmed"))
- return DIFF_FILE_NEW_MOVED_ALT_DIM;
- return -1;
+ return LOOKUP_CONFIG(color_diff_slots, var);
}
static int parse_dirstat_params(struct diff_options *options, const char *params_string,
return 0;
}
-static int git_config_rename(const char *var, const char *value)
+int git_config_rename(const char *var, const char *value)
{
if (!value)
return DIFF_DETECT_RENAME;
fputs(o->stat_sep, o->file);
break;
default:
- die("BUG: unknown diff symbol");
+ BUG("unknown diff symbol");
}
strbuf_release(&sb);
}
for (i = 0; i < ARRAY_SIZE(diff_temp); i++)
if (!diff_temp[i].name)
return diff_temp + i;
- die("BUG: diff is failing to clean up its tempfiles");
+ BUG("diff is failing to clean up its tempfiles");
}
static void remove_tempfile(void)
* objects however would tend to be slower as they need
* to be individually opened and inflated.
*/
- if (!FAST_WORKING_DIRECTORY && !want_file && has_sha1_pack(oid->hash))
+ if (!FAST_WORKING_DIRECTORY && !want_file && has_object_pack(oid))
return 0;
/*
if (abbrev < 0)
abbrev = FALLBACK_DEFAULT_ABBREV;
if (abbrev > GIT_SHA1_HEXSZ)
- die("BUG: oid abbreviation out of range: %d", abbrev);
+ BUG("oid abbreviation out of range: %d", abbrev);
if (abbrev)
hex[abbrev] = '\0';
return hex;
*must_show_header = 0;
}
if (one && two && oidcmp(&one->oid, &two->oid)) {
- int abbrev = o->flags.full_index ? 40 : DEFAULT_ABBREV;
+ const unsigned hexsz = the_hash_algo->hexsz;
+ int abbrev = o->flags.full_index ? hexsz : DEFAULT_ABBREV;
if (o->flags.binary) {
mmfile_t mf;
if ((!fill_mmfile(&mf, one) && diff_filespec_is_binary(one)) ||
(!fill_mmfile(&mf, two) && diff_filespec_is_binary(two)))
- abbrev = 40;
+ abbrev = hexsz;
}
strbuf_addf(msg, "%s%sindex %s..%s", line_prefix, set,
diff_abbrev_oid(&one->oid, abbrev),
DIFF_FORMAT_NAME_STATUS |
DIFF_FORMAT_CHECKDIFF |
DIFF_FORMAT_NO_OUTPUT;
+ /*
+ * This must be signed because we're comparing against a potentially
+ * negative value.
+ */
+ const int hexsz = the_hash_algo->hexsz;
if (options->set_default)
options->set_default(options);
*/
read_cache();
}
- if (40 < options->abbrev)
- options->abbrev = 40; /* full */
+ if (hexsz < options->abbrev)
+ options->abbrev = hexsz; /* full */
/*
* It does not make sense to show the first hit we happened
int argcount = 1;
if (!skip_prefix(arg, "--stat", &arg))
- die("BUG: stat option does not begin with --stat: %s", arg);
+ BUG("stat option does not begin with --stat: %s", arg);
end = (char *)arg;
switch (*arg) {
options->abbrev = strtoul(arg, NULL, 10);
if (options->abbrev < MINIMUM_ABBREV)
options->abbrev = MINIMUM_ABBREV;
- else if (40 < options->abbrev)
- options->abbrev = 40;
+ else if (the_hash_algo->hexsz < options->abbrev)
+ options->abbrev = the_hash_algo->hexsz;
}
else if ((argcount = parse_long_opt("src-prefix", av, &optarg))) {
options->a_prefix = optarg;
struct diff_queue_struct *q = &diff_queued_diff;
if (WSEH_NEW & WS_RULE_MASK)
- die("BUG: WS rules bit mask overlaps with diff symbol flags");
+ BUG("WS rules bit mask overlaps with diff symbol flags");
if (o->color_moved)
o->emitted_symbols = &esm;
}
if (!driver->textconv)
- die("BUG: fill_textconv called with non-textconv driver");
+ BUG("fill_textconv called with non-textconv driver");
if (driver->textconv_cache && df->oid_valid) {
*outbuf = notes_cache_get(driver->textconv_cache,
#include "cache.h"
#include "config.h"
#include "dir.h"
+ #include "object-store.h"
#include "attr.h"
#include "refs.h"
#include "wildmatch.h"
#include "varint.h"
#include "ewah/ewok.h"
#include "fsmonitor.h"
+#include "submodule-config.h"
/*
* Tells read_directory_recursive how a file or directory should be treated.
if (size == 0) {
if (oid_stat) {
fill_stat_data(&oid_stat->stat, &st);
- oidcpy(&oid_stat->oid, &empty_blob_oid);
+ oidcpy(&oid_stat->oid, the_hash_algo->empty_blob);
oid_stat->valid = 1;
}
close(fd);
(!untracked || !untracked->valid ||
/*
* .. and .gitignore does not exist before
- * (i.e. null exclude_sha1). Then we can skip
+ * (i.e. null exclude_oid). Then we can skip
* loading .gitignore, which would result in
* ENOENT anyway.
*/
- !is_null_sha1(untracked->exclude_sha1))) {
+ !is_null_oid(&untracked->exclude_oid))) {
/*
* dir->basebuf gets reused by the traversal, but we
* need fname to remain unchanged to ensure the src
* order, though, if you do that.
*/
if (untracked &&
- hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
+ oidcmp(&oid_stat.oid, &untracked->exclude_oid)) {
invalidate_gitignore(dir->untracked, untracked);
- hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
+ oidcpy(&untracked->exclude_oid, &oid_stat.oid);
}
dir->exclude_stack = stk;
current = stk->baselen;
stat_data_to_disk(&stat_data, &untracked->stat_data);
strbuf_add(&wd->sb_stat, &stat_data, sizeof(stat_data));
}
- if (!is_null_sha1(untracked->exclude_sha1)) {
+ if (!is_null_oid(&untracked->exclude_oid)) {
ewah_set(wd->sha1_valid, i);
- strbuf_add(&wd->sb_sha1, untracked->exclude_sha1, 20);
+ strbuf_add(&wd->sb_sha1, untracked->exclude_oid.hash,
+ the_hash_algo->rawsz);
}
intlen = encode_varint(untracked->untracked_nr, intbuf);
ud->valid = 1;
}
-static void read_sha1(size_t pos, void *cb)
+static void read_oid(size_t pos, void *cb)
{
struct read_data *rd = cb;
struct untracked_cache_dir *ud = rd->ucd[pos];
- if (rd->data + 20 > rd->end) {
+ if (rd->data + the_hash_algo->rawsz > rd->end) {
rd->data = rd->end + 1;
return;
}
- hashcpy(ud->exclude_sha1, rd->data);
- rd->data += 20;
+ hashcpy(ud->exclude_oid.hash, rd->data);
+ rd->data += the_hash_algo->rawsz;
}
static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
struct read_data rd;
const unsigned char *next = data, *end = (const unsigned char *)data + sz;
const char *ident;
- int ident_len, len;
+ int ident_len;
+ ssize_t len;
const char *exclude_per_dir;
if (sz <= 1 || end[-1] != '\0')
ewah_each_bit(rd.check_only, set_check_only, &rd);
rd.data = next + len;
ewah_each_bit(rd.valid, read_stat, &rd);
- ewah_each_bit(rd.sha1_valid, read_sha1, &rd);
+ ewah_each_bit(rd.sha1_valid, read_oid, &rd);
next = rd.data;
done:
{
if (!istate->untracked || !istate->untracked->root)
return;
- if (!safe_path && !verify_path(path))
+ if (!safe_path && !verify_path(path, 0))
return;
invalidate_one_component(istate->untracked, istate->untracked->root,
path, strlen(path));
untracked_cache_invalidate_path(istate, path, 1);
}
-/* Update gitfile and core.worktree setting to connect work tree and git dir */
-void connect_work_tree_and_git_dir(const char *work_tree_, const char *git_dir_)
+static void connect_wt_gitdir_in_nested(const char *sub_worktree,
+ const char *sub_gitdir)
+{
+ int i;
+ struct repository subrepo;
+ struct strbuf sub_wt = STRBUF_INIT;
+ struct strbuf sub_gd = STRBUF_INIT;
+
+ const struct submodule *sub;
+
+ /* If the submodule has no working tree, we can ignore it. */
+ if (repo_init(&subrepo, sub_gitdir, sub_worktree))
+ return;
+
+ if (repo_read_index(&subrepo) < 0)
+ die("index file corrupt in repo %s", subrepo.gitdir);
+
+ for (i = 0; i < subrepo.index->cache_nr; i++) {
+ const struct cache_entry *ce = subrepo.index->cache[i];
+
+ if (!S_ISGITLINK(ce->ce_mode))
+ continue;
+
+ while (i + 1 < subrepo.index->cache_nr &&
+ !strcmp(ce->name, subrepo.index->cache[i + 1]->name))
+ /*
+ * Skip entries with the same name in different stages
+ * to make sure an entry is returned only once.
+ */
+ i++;
+
+ sub = submodule_from_path(&subrepo, &null_oid, ce->name);
+ if (!sub || !is_submodule_active(&subrepo, ce->name))
+ /* .gitmodules broken or inactive sub */
+ continue;
+
+ strbuf_reset(&sub_wt);
+ strbuf_reset(&sub_gd);
+ strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path);
+ strbuf_addf(&sub_gd, "%s/modules/%s", sub_gitdir, sub->name);
+
+ connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1);
+ }
+ strbuf_release(&sub_wt);
+ strbuf_release(&sub_gd);
+ repo_clear(&subrepo);
+}
+
+void connect_work_tree_and_git_dir(const char *work_tree_,
+ const char *git_dir_,
+ int recurse_into_nested)
{
struct strbuf gitfile_sb = STRBUF_INIT;
struct strbuf cfg_sb = STRBUF_INIT;
strbuf_release(&gitfile_sb);
strbuf_release(&cfg_sb);
strbuf_release(&rel_path);
+
+ if (recurse_into_nested)
+ connect_wt_gitdir_in_nested(work_tree, git_dir);
+
free(work_tree);
free(git_dir);
}
die_errno(_("could not migrate git directory from '%s' to '%s'"),
old_git_dir, new_git_dir);
- connect_work_tree_and_git_dir(path, new_git_dir);
+ connect_work_tree_and_git_dir(path, new_git_dir, 0);
}
#include "commit.h"
#include "argv-array.h"
#include "object-store.h"
+#include "chdir-notify.h"
int trust_executable_bit = 1;
int trust_ctime = 1;
char *git_replace_ref_base;
enum eol core_eol = EOL_UNSET;
int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
+char *check_roundtrip_encoding = "SHIFT-JIS";
unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
enum object_creation_mode object_creation_mode = OBJECT_CREATION_MODE;
char *notes_ref_name;
int grafts_replace_parents = 1;
+int core_commit_graph;
int core_apply_sparse_checkout;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
if (shallow_file)
- set_alternate_shallow_file(shallow_file, 0);
+ set_alternate_shallow_file(the_repository, shallow_file, 0);
}
int is_bare_repository(void)
return the_repository->index_file;
}
- char *get_graft_file(void)
+ char *get_graft_file(struct repository *r)
{
- if (!the_repository->graft_file)
+ if (!r->graft_file)
BUG("git environment hasn't been setup");
- return the_repository->graft_file;
+ return r->graft_file;
}
-int set_git_dir(const char *path)
+static void set_git_dir_1(const char *path)
{
if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
- return error("Could not set GIT_DIR to '%s'", path);
+ die("could not set GIT_DIR to '%s'", path);
setup_git_env(path);
- return 0;
+}
+
+static void update_relative_gitdir(const char *name,
+ const char *old_cwd,
+ const char *new_cwd,
+ void *data)
+{
+ char *path = reparent_relative_path(old_cwd, new_cwd, get_git_dir());
+ trace_printf_key(&trace_setup_key,
+ "setup: move $GIT_DIR to '%s'",
+ path);
+ set_git_dir_1(path);
+ free(path);
+}
+
+void set_git_dir(const char *path)
+{
+ set_git_dir_1(path);
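+ /* a relative $GIT_DIR must be re-resolved whenever the process changes directory */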
+ if (!is_absolute_path(path))
+ chdir_notify_register(NULL, update_relative_gitdir, NULL);
}
const char *get_log_output_encoding(void)
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
+ #include "object-store.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
-static int next_flush(struct fetch_pack_args *args, int count)
+static int next_flush(int stateless_rpc, int count)
{
- if (args->stateless_rpc) {
+ if (stateless_rpc) {
if (count < LARGE_FLUSH)
count <<= 1;
else
return 1;
}
- if (is_repository_shallow())
+ if (is_repository_shallow(the_repository))
write_shallow_commits(&req_buf, 1, NULL);
if (args->depth > 0)
packet_buf_write(&req_buf, "deepen %d", args->depth);
if (skip_prefix(line, "shallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid shallow line: %s"), line);
- register_shallow(&oid);
+ register_shallow(the_repository, &oid);
continue;
}
if (skip_prefix(line, "unshallow ", &arg)) {
send_request(args, fd[1], &req_buf);
strbuf_setlen(&req_buf, state_len);
flushes++;
- flush_at = next_flush(args, count);
+ flush_at = next_flush(args->stateless_rpc, count);
/*
* We keep one window "ahead" of the other side, and
}
i++;
}
- }
- if (!keep && args->fetch_all &&
- (!args->deepen || !starts_with(ref->name, "refs/tags/")))
- keep = 1;
+ if (!keep && args->fetch_all &&
+ (!args->deepen || !starts_with(ref->name, "refs/tags/")))
+ keep = 1;
+ }
if (keep) {
*newtail = ref;
sort_ref_list(&ref, ref_compare_name);
QSORT(sought, nr_sought, cmp_ref_by_name);
- if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
+ if ((args->depth > 0 || is_repository_shallow(the_repository)) && !server_supports("shallow"))
die(_("Server does not support shallow clients"));
if (args->depth > 0 || args->deepen_since || args->deepen_not)
args->deepen = 1;
return ref;
}
+static void add_shallow_requests(struct strbuf *req_buf,
+ const struct fetch_pack_args *args)
+{
+ if (is_repository_shallow(the_repository))
+ write_shallow_commits(req_buf, 1, NULL);
+ if (args->depth > 0)
+ packet_buf_write(req_buf, "deepen %d", args->depth);
+ if (args->deepen_since) {
+ timestamp_t max_age = approxidate(args->deepen_since);
+ packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
+ }
+ if (args->deepen_not) {
+ int i;
+ for (i = 0; i < args->deepen_not->nr; i++) {
+ struct string_list_item *s = args->deepen_not->items + i;
+ packet_buf_write(req_buf, "deepen-not %s", s->string);
+ }
+ }
+}
+
+static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+{
+ for ( ; wants ; wants = wants->next) {
+ const struct object_id *remote = &wants->old_oid;
+ const char *remote_hex;
+ struct object *o;
+
+ /*
+ * If that object is complete (i.e. it is an ancestor of a
+ * local ref), we tell them we have it but do not have to
+ * tell them about its ancestors, which they already know
+ * about.
+ *
+ * We use lookup_object here because we are only
+ * interested in the case we *know* the object is
+ * reachable and we have already scanned it.
+ */
+ if (((o = lookup_object(remote->hash)) != NULL) &&
+ (o->flags & COMPLETE)) {
+ continue;
+ }
+
+ remote_hex = oid_to_hex(remote);
+ packet_buf_write(req_buf, "want %s\n", remote_hex);
+ }
+}
+
+static void add_common(struct strbuf *req_buf, struct oidset *common)
+{
+ struct oidset_iter iter;
+ const struct object_id *oid;
+ oidset_iter_init(common, &iter);
+
+ while ((oid = oidset_iter_next(&iter))) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ }
+}
+
+static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ int haves_added = 0;
+ const struct object_id *oid;
+
+ while ((oid = get_rev())) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ if (++haves_added >= *haves_to_send)
+ break;
+ }
+
+ *in_vain += haves_added;
+ if (!haves_added || *in_vain >= MAX_IN_VAIN) {
+ /* Send Done */
+ packet_buf_write(req_buf, "done\n");
+ ret = 1;
+ }
+
+ /* Increase haves to send on next round */
+ *haves_to_send = next_flush(1, *haves_to_send);
+
+ return ret;
+}
+
+static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+ const struct ref *wants, struct oidset *common,
+ int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ struct strbuf req_buf = STRBUF_INIT;
+
+ if (server_supports_v2("fetch", 1))
+ packet_buf_write(&req_buf, "command=fetch");
+ if (server_supports_v2("agent", 0))
+ packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
+ if (args->server_options && args->server_options->nr &&
+ server_supports_v2("server-option", 1)) {
+ int i;
+ for (i = 0; i < args->server_options->nr; i++)
+ packet_write_fmt(fd_out, "server-option=%s",
+ args->server_options->items[i].string);
+ }
+
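+ /* a delim packet separates the capability list from the command arguments */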
+ packet_buf_delim(&req_buf);
+ if (args->use_thin_pack)
+ packet_buf_write(&req_buf, "thin-pack");
+ if (args->no_progress)
+ packet_buf_write(&req_buf, "no-progress");
+ if (args->include_tag)
+ packet_buf_write(&req_buf, "include-tag");
+ if (prefer_ofs_delta)
+ packet_buf_write(&req_buf, "ofs-delta");
+
+ /* Add shallow-info and deepen request */
+ if (server_supports_feature("fetch", "shallow", 0))
+ add_shallow_requests(&req_buf, args);
+ else if (is_repository_shallow(the_repository) || args->deepen)
+ die(_("Server does not support shallow requests"));
+
+ /* Add filter */
+ if (server_supports_feature("fetch", "filter", 0) &&
+ args->filter_options.choice) {
+ print_verbose(args, _("Server supports filter"));
+ packet_buf_write(&req_buf, "filter %s",
+ args->filter_options.filter_spec);
+ } else if (args->filter_options.choice) {
+ warning("filtering not recognized by server, ignoring");
+ }
+
+ /* add wants */
+ add_wants(wants, &req_buf);
+
+ if (args->no_dependents) {
+ packet_buf_write(&req_buf, "done");
+ ret = 1;
+ } else {
+ /* Add all of the common commits we've found in previous rounds */
+ add_common(&req_buf, common);
+
+ /* Add initial haves */
+ ret = add_haves(&req_buf, haves_to_send, in_vain);
+ }
+
+ /* Send request */
+ packet_buf_flush(&req_buf);
+ write_or_die(fd_out, req_buf.buf, req_buf.len);
+
+ strbuf_release(&req_buf);
+ return ret;
+}
+
+/*
+ * Processes a section header in a server's response and checks if it matches
+ * `section`. If the value of `peek` is 1, the header line will be peeked (and
+ * not consumed); if 0, the line will be consumed and the function will die if
+ * the section header doesn't match what was expected.
+ */
+static int process_section_header(struct packet_reader *reader,
+ const char *section, int peek)
+{
+ int ret;
+
+ if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
+ die("error reading section header '%s'", section);
+
+ ret = !strcmp(reader->line, section);
+
+ if (!peek) {
+ if (!ret)
+ die("expected '%s', received '%s'",
+ section, reader->line);
+ packet_reader_read(reader);
+ }
+
+ return ret;
+}
+
+static int process_acks(struct packet_reader *reader, struct oidset *common)
+{
+ /* received */
+ int received_ready = 0;
+ int received_ack = 0;
+
+ process_section_header(reader, "acknowledgments", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+
+ if (!strcmp(reader->line, "NAK"))
+ continue;
+
+ if (skip_prefix(reader->line, "ACK ", &arg)) {
+ struct object_id oid;
+ received_ack = 1;
+ if (!get_oid_hex(arg, &oid)) {
+ struct commit *commit;
+ oidset_insert(common, &oid);
+ commit = lookup_commit(&oid);
+ mark_common(commit, 0, 1);
+ }
+ continue;
+ }
+
+ if (!strcmp(reader->line, "ready")) {
+ clear_prio_queue(&rev_list);
+ received_ready = 1;
+ continue;
+ }
+
+ die("unexpected acknowledgment line: '%s'", reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing acks: %d", reader->status);
+
+ /* return 0 if no common, 1 if there are common, or 2 if ready */
+ return received_ready ? 2 : (received_ack ? 1 : 0);
+}
+
+static void receive_shallow_info(struct fetch_pack_args *args,
+ struct packet_reader *reader)
+{
+ process_section_header(reader, "shallow-info", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+ struct object_id oid;
+
+ if (skip_prefix(reader->line, "shallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid shallow line: %s"), reader->line);
+ register_shallow(the_repository, &oid);
+ continue;
+ }
+ if (skip_prefix(reader->line, "unshallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid unshallow line: %s"), reader->line);
+ if (!lookup_object(oid.hash))
+ die(_("object not found: %s"), reader->line);
+ /* make sure that it is parsed as shallow */
+ if (!parse_object(&oid))
+ die(_("error in object: %s"), reader->line);
+ if (unregister_shallow(&oid))
+ die(_("no shallow found: %s"), reader->line);
+ continue;
+ }
+ die(_("expected shallow/unshallow, got %s"), reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing shallow info: %d", reader->status);
+
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
+ args->deepen = 1;
+}
+
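+/*
+ * Negotiation states for a protocol v2 fetch: first check what is
+ * already available locally, then alternate between sending requests
+ * and processing acknowledgments until the server is ready to send
+ * the packfile.
+ */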
+enum fetch_state {
+ FETCH_CHECK_LOCAL = 0,
+ FETCH_SEND_REQUEST,
+ FETCH_PROCESS_ACKS,
+ FETCH_GET_PACK,
+ FETCH_DONE,
+};
+
+static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
+ int fd[2],
+ const struct ref *orig_ref,
+ struct ref **sought, int nr_sought,
+ char **pack_lockfile)
+{
+ struct ref *ref = copy_ref_list(orig_ref);
+ enum fetch_state state = FETCH_CHECK_LOCAL;
+ struct oidset common = OIDSET_INIT;
+ struct packet_reader reader;
+ int in_vain = 0;
+ int haves_to_send = INITIAL_FLUSH;
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE);
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_CHECK_LOCAL:
+ sort_ref_list(&ref, ref_compare_name);
+ QSORT(sought, nr_sought, cmp_ref_by_name);
+
+ /* v2 supports these by default */
+ allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+ use_sideband = 2;
+ if (args->depth > 0 || args->deepen_since || args->deepen_not)
+ args->deepen = 1;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+
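+ /* seed the rev walk with our ref tips and alternates; these supply the "have" lines */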
+ for_each_ref(rev_list_insert_ref_oid, NULL);
+ for_each_cached_alternate(insert_one_alternate_object);
+
+ /* Filter 'ref' by 'sought' and those that aren't local */
+ if (everything_local(args, &ref, sought, nr_sought))
+ state = FETCH_DONE;
+ else
+ state = FETCH_SEND_REQUEST;
+ break;
+ case FETCH_SEND_REQUEST:
+ if (send_fetch_request(fd[1], args, ref, &common,
+ &haves_to_send, &in_vain))
+ state = FETCH_GET_PACK;
+ else
+ state = FETCH_PROCESS_ACKS;
+ break;
+ case FETCH_PROCESS_ACKS:
+ /* Process ACKs/NAKs */
+ switch (process_acks(&reader, &common)) {
+ case 2:
+ state = FETCH_GET_PACK;
+ break;
+ case 1:
+ in_vain = 0;
+ /* fallthrough */
+ default:
+ state = FETCH_SEND_REQUEST;
+ break;
+ }
+ break;
+ case FETCH_GET_PACK:
+ /* Check for shallow-info section */
+ if (process_section_header(&reader, "shallow-info", 1))
+ receive_shallow_info(args, &reader);
+
+ /* get the pack */
+ process_section_header(&reader, "packfile", 0);
+ if (get_pack(args, fd, pack_lockfile))
+ die(_("git fetch-pack: fetch failed."));
+
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
+ }
+
+ oidset_clear(&common);
+ return ref;
+}
+
static void fetch_pack_config(void)
{
git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
if (args->deepen && alternate_shallow_file) {
if (*alternate_shallow_file == '\0') { /* --unshallow */
- unlink_or_warn(git_path_shallow());
+ unlink_or_warn(git_path_shallow(the_repository));
rollback_lock_file(&shallow_lock);
} else
commit_lock_file(&shallow_lock);
const char *dest,
struct ref **sought, int nr_sought,
struct oid_array *shallow,
- char **pack_lockfile)
+ char **pack_lockfile,
+ enum protocol_version version)
{
struct ref *ref_cpy;
struct shallow_info si;
die(_("no matching remote head"));
}
prepare_shallow_info(&si, shallow);
- ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
- &si, pack_lockfile);
+ if (version == protocol_v2)
+ ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+ pack_lockfile);
+ else
+ ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
+ &si, pack_lockfile);
reprepare_packed_git(the_repository);
update_shallow(args, sought, nr_sought, &si);
clear_shallow_info(&si);
#include "cache.h"
+ #include "object-store.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "utf8.h"
#include "sha1-array.h"
#include "decorate.h"
+#include "oidset.h"
+#include "packfile.h"
+#include "submodule-config.h"
+#include "config.h"
+#include "help.h"
+
+static struct oidset gitmodules_found = OIDSET_INIT;
+static struct oidset gitmodules_done = OIDSET_INIT;
#define FSCK_FATAL -1
#define FSCK_INFO -2
FUNC(MISSING_TAG_ENTRY, ERROR) \
FUNC(MISSING_TAG_OBJECT, ERROR) \
FUNC(MISSING_TREE, ERROR) \
+ FUNC(MISSING_TREE_OBJECT, ERROR) \
FUNC(MISSING_TYPE, ERROR) \
FUNC(MISSING_TYPE_ENTRY, ERROR) \
FUNC(MULTIPLE_AUTHORS, ERROR) \
FUNC(TREE_NOT_SORTED, ERROR) \
FUNC(UNKNOWN_TYPE, ERROR) \
FUNC(ZERO_PADDED_DATE, ERROR) \
+ FUNC(GITMODULES_MISSING, ERROR) \
+ FUNC(GITMODULES_BLOB, ERROR) \
+ FUNC(GITMODULES_PARSE, ERROR) \
+ FUNC(GITMODULES_NAME, ERROR) \
+ FUNC(GITMODULES_SYMLINK, ERROR) \
/* warnings */ \
FUNC(BAD_FILEMODE, WARN) \
FUNC(EMPTY_NAME, WARN) \
#undef MSG_ID
#define STR(x) #x
-#define MSG_ID(id, msg_type) { STR(id), NULL, FSCK_##msg_type },
+#define MSG_ID(id, msg_type) { STR(id), NULL, NULL, FSCK_##msg_type },
static struct {
const char *id_string;
const char *downcased;
+ const char *camelcased;
int msg_type;
} msg_id_info[FSCK_MSG_MAX + 1] = {
FOREACH_MSG_ID(MSG_ID)
- { NULL, NULL, -1 }
+ { NULL, NULL, NULL, -1 }
};
#undef MSG_ID
-static int parse_msg_id(const char *text)
+static void prepare_msg_ids(void)
{
int i;
- if (!msg_id_info[0].downcased) {
- /* convert id_string to lower case, without underscores. */
- for (i = 0; i < FSCK_MSG_MAX; i++) {
- const char *p = msg_id_info[i].id_string;
- int len = strlen(p);
- char *q = xmalloc(len);
-
- msg_id_info[i].downcased = q;
- while (*p)
- if (*p == '_')
- p++;
- else
- *(q)++ = tolower(*(p)++);
- *q = '\0';
+ if (msg_id_info[0].downcased)
+ return;
+
+ /* convert id_string to lower case, without underscores. */
+ for (i = 0; i < FSCK_MSG_MAX; i++) {
+ const char *p = msg_id_info[i].id_string;
+ int len = strlen(p);
+ char *q = xmalloc(len);
+
+ msg_id_info[i].downcased = q;
+ while (*p)
+ if (*p == '_')
+ p++;
+ else
+ *(q)++ = tolower(*(p)++);
+ *q = '\0';
+
+ p = msg_id_info[i].id_string;
+ q = xmalloc(len);
+ msg_id_info[i].camelcased = q;
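+ /* convert id_string to camelCase: drop each '_' and keep the letter that follows it uppercase */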
+ while (*p) {
+ if (*p == '_') {
+ p++;
+ if (*p)
+ *q++ = *p++;
+ } else {
+ *q++ = tolower(*p++);
+ }
}
+ *q = '\0';
}
+}
+
+static int parse_msg_id(const char *text)
+{
+ int i;
+
+ prepare_msg_ids();
for (i = 0; i < FSCK_MSG_MAX; i++)
if (!strcmp(text, msg_id_info[i].downcased))
return -1;
}
+void list_config_fsck_msg_ids(struct string_list *list, const char *prefix)
+{
+ int i;
+
+ prepare_msg_ids();
+
+ for (i = 0; i < FSCK_MSG_MAX; i++)
+ list_config_item(list, prefix, msg_id_info[i].camelcased);
+}
+
static int fsck_msg_type(enum fsck_msg_id msg_id,
struct fsck_options *options)
{
name = get_object_name(options, &commit->object);
if (name)
- put_object_name(options, &commit->tree->object, "%s:", name);
+ put_object_name(options, &get_commit_tree(commit)->object,
+ "%s:", name);
- result = options->walk((struct object *)commit->tree, OBJ_TREE, data, options);
+ result = options->walk((struct object *)get_commit_tree(commit),
+ OBJ_TREE, data, options);
if (result < 0)
return result;
res = result;
has_empty_name |= !*name;
has_dot |= !strcmp(name, ".");
has_dotdot |= !strcmp(name, "..");
- has_dotgit |= (!strcmp(name, ".git") ||
- is_hfs_dotgit(name) ||
- is_ntfs_dotgit(name));
+ has_dotgit |= is_hfs_dotgit(name) || is_ntfs_dotgit(name);
has_zero_pad |= *(char *)desc.buffer == '0';
+
+ if (is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) {
+ if (!S_ISLNK(mode))
+ oidset_insert(&gitmodules_found, oid);
+ else
+ retval += report(options, &item->object,
+ FSCK_MSG_GITMODULES_SYMLINK,
+ ".gitmodules is a symbolic link");
+ }
+
if (update_tree_entry_gently(&desc)) {
retval += report(options, &item->object, FSCK_MSG_BAD_TREE, "cannot be parsed as a tree");
break;
static int fsck_commit_buffer(struct commit *commit, const char *buffer,
unsigned long size, struct fsck_options *options)
{
- unsigned char tree_sha1[20], sha1[20];
+ struct object_id tree_oid, oid;
struct commit_graft *graft;
unsigned parent_count, parent_line_count = 0, author_count;
int err;
const char *buffer_begin = buffer;
+ const char *p;
if (verify_headers(buffer, size, &commit->object, options))
return -1;
if (!skip_prefix(buffer, "tree ", &buffer))
return report(options, &commit->object, FSCK_MSG_MISSING_TREE, "invalid format - expected 'tree' line");
- if (get_sha1_hex(buffer, tree_sha1) || buffer[40] != '\n') {
+ if (parse_oid_hex(buffer, &tree_oid, &p) || *p != '\n') {
err = report(options, &commit->object, FSCK_MSG_BAD_TREE_SHA1, "invalid 'tree' line format - bad sha1");
if (err)
return err;
}
- buffer += 41;
+ buffer = p + 1;
while (skip_prefix(buffer, "parent ", &buffer)) {
- if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') {
+ if (parse_oid_hex(buffer, &oid, &p) || *p != '\n') {
err = report(options, &commit->object, FSCK_MSG_BAD_PARENT_SHA1, "invalid 'parent' line format - bad sha1");
if (err)
return err;
}
- buffer += 41;
+ buffer = p + 1;
parent_line_count++;
}
- graft = lookup_commit_graft(&commit->object.oid);
+ graft = lookup_commit_graft(the_repository, &commit->object.oid);
parent_count = commit_list_count(commit->parents);
if (graft) {
if (graft->nr_parent == -1 && !parent_count)
err = fsck_ident(&buffer, &commit->object, options);
if (err)
return err;
- if (!commit->tree) {
- err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
+ if (!get_commit_tree(commit)) {
+ err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", oid_to_hex(&tree_oid));
if (err)
return err;
}
static int fsck_tag_buffer(struct tag *tag, const char *data,
unsigned long size, struct fsck_options *options)
{
- unsigned char sha1[20];
+ struct object_id oid;
int ret = 0;
const char *buffer;
char *to_free = NULL, *eol;
struct strbuf sb = STRBUF_INIT;
+ const char *p;
if (data)
buffer = data;
ret = report(options, &tag->object, FSCK_MSG_MISSING_OBJECT, "invalid format - expected 'object' line");
goto done;
}
- if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') {
+ if (parse_oid_hex(buffer, &oid, &p) || *p != '\n') {
ret = report(options, &tag->object, FSCK_MSG_BAD_OBJECT_SHA1, "invalid 'object' line format - bad sha1");
if (ret)
goto done;
}
- buffer += 41;
+ buffer = p + 1;
if (!skip_prefix(buffer, "type ", &buffer)) {
ret = report(options, &tag->object, FSCK_MSG_MISSING_TYPE_ENTRY, "invalid format - expected 'type' line");
return fsck_tag_buffer(tag, data, size, options);
}
+struct fsck_gitmodules_data {
+ struct object *obj;
+ struct fsck_options *options;
+ int ret;
+};
+
+static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata)
+{
+ struct fsck_gitmodules_data *data = vdata;
+ const char *subsection, *key;
+ int subsection_len;
+ char *name;
+
+ if (parse_config_key(var, "submodule", &subsection, &subsection_len, &key) < 0 ||
+ !subsection)
+ return 0;
+
+ name = xmemdupz(subsection, subsection_len);
+ if (check_submodule_name(name) < 0)
+ data->ret |= report(data->options, data->obj,
+ FSCK_MSG_GITMODULES_NAME,
+ "disallowed submodule name: %s",
+ name);
+ free(name);
+
+ return 0;
+}
+
+static int fsck_blob(struct blob *blob, const char *buf,
+ unsigned long size, struct fsck_options *options)
+{
+ struct fsck_gitmodules_data data;
+
+ if (!oidset_contains(&gitmodules_found, &blob->object.oid))
+ return 0;
+ oidset_insert(&gitmodules_done, &blob->object.oid);
+
+ if (!buf) {
+ /*
+ * A missing buffer here is a sign that the caller found the
+ * blob too gigantic to load into memory. Let's just consider
+ * that an error.
+ */
+ return report(options, &blob->object,
+ FSCK_MSG_GITMODULES_PARSE,
+ ".gitmodules too large to parse");
+ }
+
+ data.obj = &blob->object;
+ data.options = options;
+ data.ret = 0;
+ if (git_config_from_mem(fsck_gitmodules_fn, CONFIG_ORIGIN_BLOB,
+ ".gitmodules", buf, size, &data))
+ data.ret |= report(options, &blob->object,
+ FSCK_MSG_GITMODULES_PARSE,
+ "could not parse gitmodules blob");
+
+ return data.ret;
+}
+
int fsck_object(struct object *obj, void *data, unsigned long size,
struct fsck_options *options)
{
return report(options, obj, FSCK_MSG_BAD_OBJECT_SHA1, "no valid object to fsck");
if (obj->type == OBJ_BLOB)
- return 0;
+ return fsck_blob((struct blob *)obj, data, size, options);
if (obj->type == OBJ_TREE)
return fsck_tree((struct tree *) obj, options);
if (obj->type == OBJ_COMMIT)
error("object %s: %s", describe_object(o, obj), message);
return 1;
}
+
+int fsck_finish(struct fsck_options *options)
+{
+ int ret = 0;
+ struct oidset_iter iter;
+ const struct object_id *oid;
+
+ oidset_iter_init(&gitmodules_found, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
+ struct blob *blob;
+ enum object_type type;
+ unsigned long size;
+ char *buf;
+
+ if (oidset_contains(&gitmodules_done, oid))
+ continue;
+
+ blob = lookup_blob(oid);
+ if (!blob) {
+ struct object *obj = lookup_unknown_object(oid->hash);
+ ret |= report(options, obj,
+ FSCK_MSG_GITMODULES_BLOB,
+ "non-blob found at .gitmodules");
+ continue;
+ }
+
+ buf = read_object_file(oid, &type, &size);
+ if (!buf) {
+ if (is_promisor_object(&blob->object.oid))
+ continue;
+ ret |= report(options, &blob->object,
+ FSCK_MSG_GITMODULES_MISSING,
+ "unable to read .gitmodules blob");
+ continue;
+ }
+
+ if (type == OBJ_BLOB)
+ ret |= fsck_blob(blob, buf, size, options);
+ else
+ ret |= report(options, &blob->object,
+ FSCK_MSG_GITMODULES_BLOB,
+ "non-blob found at .gitmodules");
+ free(buf);
+ }
+
+ oidset_clear(&gitmodules_found);
+ oidset_clear(&gitmodules_done);
+ return ret;
+}
#include "builtin.h"
#include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "help.h"
#include "run-command.h"
+#include "alias.h"
#define RUN_SETUP (1<<0)
#define RUN_SETUP_GENTLY (1<<1)
const char git_usage_string[] =
N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n"
" [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
- " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
+ " [-p | --paginate | -P | --no-pager] [--no-replace-objects] [--bare]\n"
" [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
" <command> [<args>]");
static int use_pager = -1;
-static void list_builtins(unsigned int exclude_option, char sep);
+static void list_builtins(struct string_list *list, unsigned int exclude_option);
+
+static void exclude_helpers_from_list(struct string_list *list)
+{
+ int i = 0;
+
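+ /* commands with "--" in the name (e.g. "submodule--helper") are internal helpers */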
+ while (i < list->nr) {
+ if (strstr(list->items[i].string, "--"))
+ unsorted_string_list_delete_item(list, i, 0);
+ else
+ i++;
+ }
+}
+
+static int match_token(const char *spec, int len, const char *token)
+{
+ int token_len = strlen(token);
+
+ return len == token_len && !strncmp(spec, token, token_len);
+}
+
+static int list_cmds(const char *spec)
+{
+ struct string_list list = STRING_LIST_INIT_DUP;
+ int i;
+
+ while (*spec) {
+ const char *sep = strchrnul(spec, ',');
+ int len = sep - spec;
+
+ if (match_token(spec, len, "builtins"))
+ list_builtins(&list, 0);
+ else if (match_token(spec, len, "main"))
+ list_all_main_cmds(&list);
+ else if (match_token(spec, len, "others"))
+ list_all_other_cmds(&list);
+ else if (match_token(spec, len, "nohelpers"))
+ exclude_helpers_from_list(&list);
+ else if (match_token(spec, len, "alias"))
+ list_aliases(&list);
+ else if (match_token(spec, len, "config"))
+ list_cmds_by_config(&list);
+ else if (len > 5 && !strncmp(spec, "list-", 5)) {
+ struct strbuf sb = STRBUF_INIT;
+
+ strbuf_add(&sb, spec + 5, len - 5);
+ list_cmds_by_category(&list, sb.buf);
+ strbuf_release(&sb);
+ }
+ else
+ die(_("unsupported command listing type '%s'"), spec);
+ spec += len;
+ if (*spec == ',')
+ spec++;
+ }
+ for (i = 0; i < list.nr; i++)
+ puts(list.items[i].string);
+ string_list_clear(&list, 0);
+ return 0;
+}
static void commit_pager_choice(void) {
switch (use_pager) {
*/
if (skip_prefix(cmd, "--exec-path", &cmd)) {
if (*cmd == '=')
- git_set_argv_exec_path(cmd + 1);
+ git_set_exec_path(cmd + 1);
else {
puts(git_exec_path());
exit(0);
exit(0);
} else if (!strcmp(cmd, "-p") || !strcmp(cmd, "--paginate")) {
use_pager = 1;
- } else if (!strcmp(cmd, "--no-pager")) {
+ } else if (!strcmp(cmd, "-P") || !strcmp(cmd, "--no-pager")) {
use_pager = 0;
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "--shallow-file")) {
(*argv)++;
(*argc)--;
- set_alternate_shallow_file((*argv)[0], 1);
+ set_alternate_shallow_file(the_repository, (*argv)[0], 1);
if (envchanged)
*envchanged = 1;
} else if (!strcmp(cmd, "-C")) {
}
(*argv)++;
(*argc)--;
- } else if (!strcmp(cmd, "--list-builtins")) {
- list_builtins(0, '\n');
- exit(0);
- } else if (!strcmp(cmd, "--list-parseopt-builtins")) {
- list_builtins(NO_PARSEOPT, ' ');
- exit(0);
+ } else if (skip_prefix(cmd, "--list-cmds=", &cmd)) {
+ if (!strcmp(cmd, "parseopt")) {
+ struct string_list list = STRING_LIST_INIT_DUP;
+ int i;
+
+ list_builtins(&list, NO_PARSEOPT);
+ for (i = 0; i < list.nr; i++)
+ printf("%s ", list.items[i].string);
+ string_list_clear(&list, 0);
+ exit(0);
+ } else {
+ exit(list_cmds(cmd));
+ }
} else {
fprintf(stderr, _("unknown option: %s\n"), cmd);
usage(git_usage_string);
{ "clone", cmd_clone },
{ "column", cmd_column, RUN_SETUP_GENTLY },
{ "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
+ { "commit-graph", cmd_commit_graph, RUN_SETUP },
{ "commit-tree", cmd_commit_tree, RUN_SETUP | NO_PARSEOPT },
{ "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG },
{ "count-objects", cmd_count_objects, RUN_SETUP },
{ "revert", cmd_revert, RUN_SETUP | NEED_WORK_TREE },
{ "rm", cmd_rm, RUN_SETUP },
{ "send-pack", cmd_send_pack, RUN_SETUP },
+ { "serve", cmd_serve, RUN_SETUP },
{ "shortlog", cmd_shortlog, RUN_SETUP_GENTLY | USE_PAGER },
{ "show", cmd_show, RUN_SETUP },
{ "show-branch", cmd_show_branch, RUN_SETUP },
+ { "show-index", cmd_show_index },
{ "show-ref", cmd_show_ref, RUN_SETUP },
{ "stage", cmd_add, RUN_SETUP | NEED_WORK_TREE },
{ "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
{ "update-server-info", cmd_update_server_info, RUN_SETUP },
{ "upload-archive", cmd_upload_archive, NO_PARSEOPT },
{ "upload-archive--writer", cmd_upload_archive_writer, NO_PARSEOPT },
+ { "upload-pack", cmd_upload_pack },
{ "var", cmd_var, RUN_SETUP_GENTLY | NO_PARSEOPT },
{ "verify-commit", cmd_verify_commit, RUN_SETUP },
{ "verify-pack", cmd_verify_pack },
return !!get_builtin(s);
}
-static void list_builtins(unsigned int exclude_option, char sep)
+static void list_builtins(struct string_list *out, unsigned int exclude_option)
{
int i;
for (i = 0; i < ARRAY_SIZE(commands); i++) {
if (exclude_option &&
(commands[i].option & exclude_option))
continue;
- printf("%s%c", commands[i].cmd, sep);
+ string_list_append(out, commands[i].cmd);
}
}
#include "cache.h"
#include "config.h"
#include "grep.h"
+ #include "object-store.h"
#include "userdiff.h"
#include "xdiff-interface.h"
#include "diff.h"
#include "diffcore.h"
#include "commit.h"
#include "quote.h"
+#include "help.h"
static int grep_source_load(struct grep_source *gs);
static int grep_source_is_binary(struct grep_source *gs);
static struct grep_opt grep_defaults;
+static const char *color_grep_slots[] = {
+ [GREP_COLOR_CONTEXT] = "context",
+ [GREP_COLOR_FILENAME] = "filename",
+ [GREP_COLOR_FUNCTION] = "function",
+ [GREP_COLOR_LINENO] = "lineNumber",
+ [GREP_COLOR_MATCH_CONTEXT] = "matchContext",
+ [GREP_COLOR_MATCH_SELECTED] = "matchSelected",
+ [GREP_COLOR_SELECTED] = "selected",
+ [GREP_COLOR_SEP] = "separator",
+};
+
static void std_output(struct grep_opt *opt, const void *buf, size_t size)
{
fwrite(buf, size, 1, stdout);
opt->pathname = 1;
opt->max_depth = -1;
opt->pattern_type_option = GREP_PATTERN_TYPE_UNSPECIFIED;
- color_set(opt->color_context, "");
- color_set(opt->color_filename, "");
- color_set(opt->color_function, "");
- color_set(opt->color_lineno, "");
- color_set(opt->color_match_context, GIT_COLOR_BOLD_RED);
- color_set(opt->color_match_selected, GIT_COLOR_BOLD_RED);
- color_set(opt->color_selected, "");
- color_set(opt->color_sep, GIT_COLOR_CYAN);
+ color_set(opt->colors[GREP_COLOR_CONTEXT], "");
+ color_set(opt->colors[GREP_COLOR_FILENAME], "");
+ color_set(opt->colors[GREP_COLOR_FUNCTION], "");
+ color_set(opt->colors[GREP_COLOR_LINENO], "");
+ color_set(opt->colors[GREP_COLOR_MATCH_CONTEXT], GIT_COLOR_BOLD_RED);
+ color_set(opt->colors[GREP_COLOR_MATCH_SELECTED], GIT_COLOR_BOLD_RED);
+ color_set(opt->colors[GREP_COLOR_SELECTED], "");
+ color_set(opt->colors[GREP_COLOR_SEP], GIT_COLOR_CYAN);
opt->color = -1;
opt->output = std_output;
}
die("bad %s argument: %s", opt, arg);
}
+define_list_config_array_extra(color_grep_slots, {"match"});
+
/*
* Read the configuration file once and store it in
* the grep_defaults template.
int grep_config(const char *var, const char *value, void *cb)
{
struct grep_opt *opt = &grep_defaults;
- char *color = NULL;
+ const char *slot;
if (userdiff_config(var, value) < 0)
return -1;
if (!strcmp(var, "color.grep"))
opt->color = git_config_colorbool(var, value);
- else if (!strcmp(var, "color.grep.context"))
- color = opt->color_context;
- else if (!strcmp(var, "color.grep.filename"))
- color = opt->color_filename;
- else if (!strcmp(var, "color.grep.function"))
- color = opt->color_function;
- else if (!strcmp(var, "color.grep.linenumber"))
- color = opt->color_lineno;
- else if (!strcmp(var, "color.grep.matchcontext"))
- color = opt->color_match_context;
- else if (!strcmp(var, "color.grep.matchselected"))
- color = opt->color_match_selected;
- else if (!strcmp(var, "color.grep.selected"))
- color = opt->color_selected;
- else if (!strcmp(var, "color.grep.separator"))
- color = opt->color_sep;
- else if (!strcmp(var, "color.grep.match")) {
- int rc = 0;
- if (!value)
- return config_error_nonbool(var);
- rc |= color_parse(value, opt->color_match_context);
- rc |= color_parse(value, opt->color_match_selected);
- return rc;
- }
-
- if (color) {
+ if (!strcmp(var, "color.grep.match")) {
+ if (grep_config("color.grep.matchcontext", value, cb) < 0)
+ return -1;
+ if (grep_config("color.grep.matchselected", value, cb) < 0)
+ return -1;
+ } else if (skip_prefix(var, "color.grep.", &slot)) {
+ int i = LOOKUP_CONFIG(color_grep_slots, slot);
+ char *color;
+
+ if (i < 0)
+ return -1;
+ color = opt->colors[i];
if (!value)
return config_error_nonbool(var);
return color_parse(value, color);
void grep_init(struct grep_opt *opt, const char *prefix)
{
struct grep_opt *def = &grep_defaults;
+ int i;
memset(opt, 0, sizeof(*opt));
opt->prefix = prefix;
opt->relative = def->relative;
opt->output = def->output;
- color_set(opt->color_context, def->color_context);
- color_set(opt->color_filename, def->color_filename);
- color_set(opt->color_function, def->color_function);
- color_set(opt->color_lineno, def->color_lineno);
- color_set(opt->color_match_context, def->color_match_context);
- color_set(opt->color_match_selected, def->color_match_selected);
- color_set(opt->color_selected, def->color_selected);
- color_set(opt->color_sep, def->color_sep);
+ for (i = 0; i < NR_GREP_COLORS; i++)
+ color_set(opt->colors[i], def->colors[i]);
}
static void grep_set_pattern_type_option(enum grep_pattern_type pattern_type, struct grep_opt *opt)
die("Couldn't allocate PCRE JIT stack");
pcre_assign_jit_stack(p->pcre1_extra_info, NULL, p->pcre1_jit_stack);
} else if (p->pcre1_jit_on != 0) {
- die("BUG: The pcre1_jit_on variable should be 0 or 1, not %d",
+ BUG("The pcre1_jit_on variable should be 0 or 1, not %d",
p->pcre1_jit_on);
}
#endif
die("Couldn't allocate PCRE2 match context");
pcre2_jit_stack_assign(p->pcre2_match_context, NULL, p->pcre2_jit_stack);
} else if (p->pcre2_jit_on != 0) {
- die("BUG: The pcre2_jit_on variable should be 0 or 1, not %d",
+ BUG("The pcre2_jit_on variable should be 0 or 1, not %d",
p->pcre1_jit_on);
}
}
if (err) {
char errbuf[1024];
regerror(err, &p->regexp, errbuf, sizeof(errbuf));
- regfree(&p->regexp);
compile_regexp_failed(p, errbuf);
}
}
if (err) {
char errbuf[1024];
regerror(err, &p->regexp, errbuf, 1024);
- regfree(&p->regexp);
compile_regexp_failed(p, errbuf);
}
}
for (p = opt->header_list; p; p = p->next) {
if (p->token != GREP_PATTERN_HEAD)
- die("BUG: a non-header pattern in grep header list.");
+ BUG("a non-header pattern in grep header list.");
if (p->field < GREP_HEADER_FIELD_MIN ||
GREP_HEADER_FIELD_MAX <= p->field)
- die("BUG: unknown header field %d", p->field);
+ BUG("unknown header field %d", p->field);
compile_regexp(p, opt);
}
h = compile_pattern_atom(&pp);
if (!h || pp != p->next)
- die("BUG: malformed header expr");
+ BUG("malformed header expr");
if (!header_group[p->field]) {
header_group[p->field] = h;
continue;
if (opt->null_following_name)
opt->output(opt, "\0", 1);
else
- output_color(opt, &sign, 1, opt->color_sep);
+ output_color(opt, &sign, 1, opt->colors[GREP_COLOR_SEP]);
}
static void show_name(struct grep_opt *opt, const char *name)
{
- output_color(opt, name, strlen(name), opt->color_filename);
+ output_color(opt, name, strlen(name), opt->colors[GREP_COLOR_FILENAME]);
opt->output(opt, opt->null_following_name ? "\0" : "\n", 1);
}
} else if (opt->pre_context || opt->post_context || opt->funcbody) {
if (opt->last_shown == 0) {
if (opt->show_hunk_mark) {
- output_color(opt, "--", 2, opt->color_sep);
+ output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
opt->output(opt, "\n", 1);
}
} else if (lno > opt->last_shown + 1) {
- output_color(opt, "--", 2, opt->color_sep);
+ output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
opt->output(opt, "\n", 1);
}
}
if (opt->heading && opt->last_shown == 0) {
- output_color(opt, name, strlen(name), opt->color_filename);
+ output_color(opt, name, strlen(name), opt->colors[GREP_COLOR_FILENAME]);
opt->output(opt, "\n", 1);
}
opt->last_shown = lno;
if (!opt->heading && opt->pathname) {
- output_color(opt, name, strlen(name), opt->color_filename);
+ output_color(opt, name, strlen(name), opt->colors[GREP_COLOR_FILENAME]);
output_sep(opt, sign);
}
if (opt->linenum) {
char buf[32];
xsnprintf(buf, sizeof(buf), "%d", lno);
- output_color(opt, buf, strlen(buf), opt->color_lineno);
+ output_color(opt, buf, strlen(buf), opt->colors[GREP_COLOR_LINENO]);
output_sep(opt, sign);
}
if (opt->color) {
int eflags = 0;
if (sign == ':')
- match_color = opt->color_match_selected;
+ match_color = opt->colors[GREP_COLOR_MATCH_SELECTED];
else
- match_color = opt->color_match_context;
+ match_color = opt->colors[GREP_COLOR_MATCH_CONTEXT];
if (sign == ':')
- line_color = opt->color_selected;
+ line_color = opt->colors[GREP_COLOR_SELECTED];
else if (sign == '-')
- line_color = opt->color_context;
+ line_color = opt->colors[GREP_COLOR_CONTEXT];
else if (sign == '=')
- line_color = opt->color_function;
+ line_color = opt->colors[GREP_COLOR_FUNCTION];
*eol = '\0';
while (next_match(opt, bol, eol, ctx, &match, eflags)) {
if (match.rm_so == match.rm_eo)
fill_filespec(df, &null_oid, 0, 0100644);
break;
default:
- die("BUG: attempt to textconv something without a path?");
+ BUG("attempt to textconv something without a path?");
}
/*
case GREP_BINARY_TEXT:
break;
default:
- die("BUG: unknown binary handling mode");
+ BUG("unknown binary handling mode");
}
}
if (binary_match_only) {
opt->output(opt, "Binary file ", 12);
output_color(opt, gs->name, strlen(gs->name),
- opt->color_filename);
+ opt->colors[GREP_COLOR_FILENAME]);
opt->output(opt, " matches\n", 9);
return 1;
}
char buf[32];
if (opt->pathname) {
output_color(opt, gs->name, strlen(gs->name),
- opt->color_filename);
+ opt->colors[GREP_COLOR_FILENAME]);
output_sep(opt, ':');
}
xsnprintf(buf, sizeof(buf), "%u\n", count);
case GREP_SOURCE_BUF:
return gs->buf ? 0 : -1;
}
- die("BUG: invalid grep_source type to load");
+ BUG("invalid grep_source type to load");
}
void grep_source_load_driver(struct grep_source *gs)
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "oidset.h"
+ #include "object-store.h"
/* Remember to update object flag allocation in object.h */
/*
* in the traversal (until we mark it SEEN). This is a way to
* let us silently de-dup calls to show() in the caller. This
* is subtly different from the "revision.h:SHOWN" and the
- * "sha1_name.c:ONELINE_SEEN" bits. And also different from
+ * "sha1-name.c:ONELINE_SEEN" bits. And also different from
* the non-de-dup usage in pack-bitmap.c
*/
#define FILTER_SHOWN_BUT_REVISIT (1<<21)
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
#include "packfile.h"
+ #include "object-store.h"
static void process_blob(struct rev_info *revs,
struct blob *blob,
struct commit *parent = parents->item;
if (!(parent->object.flags & UNINTERESTING))
continue;
- mark_tree_uninteresting(parent->tree);
+ mark_tree_uninteresting(get_commit_tree(parent));
if (revs->edge_hint && !(parent->object.flags & SHOWN)) {
parent->object.flags |= SHOWN;
show_edge(parent);
struct commit *commit = list->item;
if (commit->object.flags & UNINTERESTING) {
- mark_tree_uninteresting(commit->tree);
+ mark_tree_uninteresting(get_commit_tree(commit));
if (revs->edge_hint_aggressive && !(commit->object.flags & SHOWN)) {
commit->object.flags |= SHOWN;
show_edge(commit);
struct commit *commit = (struct commit *)obj;
if (obj->type != OBJ_COMMIT || !(obj->flags & UNINTERESTING))
continue;
- mark_tree_uninteresting(commit->tree);
+ mark_tree_uninteresting(get_commit_tree(commit));
if (!(obj->flags & SHOWN)) {
obj->flags |= SHOWN;
show_edge(commit);
* an uninteresting boundary commit may not have its tree
* parsed yet, but we are not going to show them anyway
*/
- if (commit->tree)
- add_pending_tree(revs, commit->tree);
+ if (get_commit_tree(commit))
+ add_pending_tree(revs, get_commit_tree(commit));
show_commit(commit, show_data);
if (revs->tree_blobs_in_commit_order)
#include "cache.h"
#include "config.h"
#include "diff.h"
+ #include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "graph.h"
#include "gpg-interface.h"
#include "sequencer.h"
#include "line-log.h"
+#include "help.h"
static struct decoration name_decoration = { "object names" };
static int decoration_loaded;
GIT_COLOR_BOLD_BLUE, /* GRAFTED */
};
+static const char *color_decorate_slots[] = {
+ [DECORATION_REF_LOCAL] = "branch",
+ [DECORATION_REF_REMOTE] = "remoteBranch",
+ [DECORATION_REF_TAG] = "tag",
+ [DECORATION_REF_STASH] = "stash",
+ [DECORATION_REF_HEAD] = "HEAD",
+ [DECORATION_GRAFTED] = "grafted",
+};
+
static const char *decorate_get_color(int decorate_use_color, enum decoration_type ix)
{
if (want_color(decorate_use_color))
return "";
}
-static int parse_decorate_color_slot(const char *slot)
-{
- /*
- * We're comparing with 'ignore-case' on
- * (because config.c sets them all tolower),
- * but let's match the letters in the literal
- * string values here with how they are
- * documented in Documentation/config.txt, for
- * consistency.
- *
- * We love being consistent, don't we?
- */
- if (!strcasecmp(slot, "branch"))
- return DECORATION_REF_LOCAL;
- if (!strcasecmp(slot, "remoteBranch"))
- return DECORATION_REF_REMOTE;
- if (!strcasecmp(slot, "tag"))
- return DECORATION_REF_TAG;
- if (!strcasecmp(slot, "stash"))
- return DECORATION_REF_STASH;
- if (!strcasecmp(slot, "HEAD"))
- return DECORATION_REF_HEAD;
- return -1;
-}
+define_list_config_array(color_decorate_slots);
int parse_decorate_color_config(const char *var, const char *slot_name, const char *value)
{
- int slot = parse_decorate_color_slot(slot_name);
+ int slot = LOOKUP_CONFIG(color_decorate_slots, slot_name);
if (slot < 0)
return 0;
if (!value)
{
struct strbuf sb = STRBUF_INIT;
- if (opt->show_source && commit->util)
- fprintf(opt->diffopt.file, "\t%s", (char *) commit->util);
+ if (opt->sources) {
+ char **slot = revision_sources_peek(opt->sources, commit);
+
+ if (slot && *slot)
+ fprintf(opt->diffopt.file, "\t%s", *slot);
+ }
if (!opt->show_decorations)
return;
format_decorations(&sb, commit, opt->diffopt.use_color);
void log_write_email_headers(struct rev_info *opt, struct commit *commit,
const char **extra_headers_p,
- int *need_8bit_cte_p)
+ int *need_8bit_cte_p,
+ int maybe_multipart)
{
const char *extra_headers = opt->extra_headers;
const char *name = oid_to_hex(opt->zero_commit ?
opt->ref_message_ids->items[i].string);
graph_show_oneline(opt->graph);
}
- if (opt->mime_boundary) {
- static char subject_buffer[1024];
- static char buffer[1024];
+ if (opt->mime_boundary && maybe_multipart) {
+ static struct strbuf subject_buffer = STRBUF_INIT;
+ static struct strbuf buffer = STRBUF_INIT;
struct strbuf filename = STRBUF_INIT;
*need_8bit_cte_p = -1; /* NEVER */
- snprintf(subject_buffer, sizeof(subject_buffer) - 1,
+
+ strbuf_reset(&subject_buffer);
+ strbuf_reset(&buffer);
+
+ strbuf_addf(&subject_buffer,
"%s"
"MIME-Version: 1.0\n"
"Content-Type: multipart/mixed;"
extra_headers ? extra_headers : "",
mime_boundary_leader, opt->mime_boundary,
mime_boundary_leader, opt->mime_boundary);
- extra_headers = subject_buffer;
+ extra_headers = subject_buffer.buf;
if (opt->numbered_files)
strbuf_addf(&filename, "%d", opt->nr);
else
fmt_output_commit(&filename, commit, opt);
- snprintf(buffer, sizeof(buffer) - 1,
+ strbuf_addf(&buffer,
"\n--%s%s\n"
"Content-Type: text/x-patch;"
" name=\"%s\"\n"
filename.buf,
opt->no_inline ? "attachment" : "inline",
filename.buf);
- opt->diffopt.stat_sep = buffer;
+ opt->diffopt.stat_sep = buffer.buf;
strbuf_release(&filename);
}
*extra_headers_p = extra_headers;
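
Replacing the two fixed 1024-byte buffers with static strbufs (reset on each call) means unusually long subjects, boundaries, or filenames can no longer be silently truncated by snprintf. A minimal sketch of the pattern, with hypothetical names (build_disposition, fname):

static struct strbuf hdr = STRBUF_INIT;

static const char *build_disposition(const char *fname, int use_inline)
{
	strbuf_reset(&hdr);	/* reuse the allocation across calls */
	strbuf_addf(&hdr, "Content-Disposition: %s; filename=\"%s\"\n",
		    use_inline ? "inline" : "attachment", fname);
	return hdr.buf;		/* valid until the next reset */
}
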
&& !commit->parents->next->next);
}
-static void show_one_mergetag(struct commit *commit,
- struct commit_extra_header *extra,
- void *data)
+static int show_one_mergetag(struct commit *commit,
+ struct commit_extra_header *extra,
+ void *data)
{
struct rev_info *opt = (struct rev_info *)data;
struct object_id oid;
hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
tag = lookup_tag(&oid);
if (!tag)
- return; /* error message already given */
+ return -1; /* error message already given */
strbuf_init(&verify_message, 256);
if (parse_tag_buffer(tag, extra->value, extra->len))
show_sig_lines(opt, status, verify_message.buf);
strbuf_release(&verify_message);
+ return 0;
}
-static void show_mergetag(struct rev_info *opt, struct commit *commit)
+static int show_mergetag(struct rev_info *opt, struct commit *commit)
{
- for_each_mergetag(show_one_mergetag, commit, opt);
+ return for_each_mergetag(show_one_mergetag, commit, opt);
}
void show_log(struct rev_info *opt)
if (cmit_fmt_is_mail(opt->commit_format)) {
log_write_email_headers(opt, commit, &extra_headers,
- &ctx.need_8bit_cte);
+ &ctx.need_8bit_cte, 1);
ctx.rev = opt;
ctx.print_email_subject = 1;
} else if (opt->commit_format != CMIT_FMT_USERFORMAT) {
return 0;
parse_commit_or_die(commit);
- oid = &commit->tree->object.oid;
+ oid = get_commit_tree_oid(commit);
/* Root commit? */
parents = get_saved_parents(opt, commit);
* we merged _in_.
*/
parse_commit_or_die(parents->item);
- diff_tree_oid(&parents->item->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parents->item),
oid, "", &opt->diffopt);
log_tree_diff_flush(opt);
return !opt->loginfo;
struct commit *parent = parents->item;
parse_commit_or_die(parent);
- diff_tree_oid(&parent->tree->object.oid,
+ diff_tree_oid(get_commit_tree_oid(parent),
oid, "", &opt->diffopt);
log_tree_diff_flush(opt);
#include "advice.h"
#include "lockfile.h"
#include "cache-tree.h"
+ #include "object-store.h"
#include "commit.h"
#include "blob.h"
#include "builtin.h"
#include "merge-recursive.h"
#include "dir.h"
#include "submodule.h"
+#include "revision.h"
struct path_hashmap_entry {
struct hashmap_entry e;
return ignore_case ? strihash(path) : strhash(path);
}
+static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
+ char *dir)
+{
+ struct dir_rename_entry key;
+
+ if (dir == NULL)
+ return NULL;
+ hashmap_entry_init(&key, strhash(dir));
+ key.dir = dir;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int dir_rename_cmp(const void *unused_cmp_data,
+ const void *entry,
+ const void *entry_or_key,
+ const void *unused_keydata)
+{
+ const struct dir_rename_entry *e1 = entry;
+ const struct dir_rename_entry *e2 = entry_or_key;
+
+ return strcmp(e1->dir, e2->dir);
+}
+
+static void dir_rename_init(struct hashmap *map)
+{
+ hashmap_init(map, dir_rename_cmp, NULL, 0);
+}
+
+static void dir_rename_entry_init(struct dir_rename_entry *entry,
+ char *directory)
+{
+ hashmap_entry_init(entry, strhash(directory));
+ entry->dir = directory;
+ entry->non_unique_new_dir = 0;
+ strbuf_init(&entry->new_dir, 0);
+ string_list_init(&entry->possible_new_dirs, 0);
+}
+
+static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
+ char *target_file)
+{
+ struct collision_entry key;
+
+ hashmap_entry_init(&key, strhash(target_file));
+ key.target_file = target_file;
+ return hashmap_get(hashmap, &key, NULL);
+}
+
+static int collision_cmp(void *unused_cmp_data,
+ const struct collision_entry *e1,
+ const struct collision_entry *e2,
+ const void *unused_keydata)
+{
+ return strcmp(e1->target_file, e2->target_file);
+}
+
+static void collision_init(struct hashmap *map)
+{
+ hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+}
+
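
These helpers are the usual hashmap boilerplate: entries are keyed by a string field, hashed with strhash() and compared with strcmp(). A hedged usage sketch using only the functions defined above (error handling omitted; illustrative, not from the patch):

struct hashmap renames;
struct dir_rename_entry *ent;
char olddir[] = "src/old";

dir_rename_init(&renames);
ent = xmalloc(sizeof(*ent));
dir_rename_entry_init(ent, olddir);	/* hashes and stores the key */
hashmap_put(&renames, ent);

/* Later: look it up again; matches via strhash() + dir_rename_cmp() */
ent = dir_rename_find_entry(&renames, olddir);
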
static void flush_output(struct merge_options *o)
{
if (o->buffer_output < 2 && o->obuf.len) {
struct commit *commit = alloc_commit_node(the_repository);
set_merge_remote_desc(commit, comment, (struct object *)commit);
- commit->tree = tree;
+ commit->maybe_tree = tree;
commit->object.parsed = 1;
return commit;
}
enum rename_type {
RENAME_NORMAL = 0,
+ RENAME_DIR,
RENAME_DELETE,
RENAME_ONE_FILE_TO_ONE,
RENAME_ONE_FILE_TO_TWO,
static void output_commit_title(struct merge_options *o, struct commit *commit)
{
+ struct merge_remote_desc *desc;
+
strbuf_addchars(&o->obuf, ' ', o->call_depth * 2);
- if (commit->util)
- strbuf_addf(&o->obuf, "virtual %s\n",
- merge_remote_util(commit)->name);
+ desc = merge_remote_util(commit);
+ if (desc)
+ strbuf_addf(&o->obuf, "virtual %s\n", desc->name);
else {
strbuf_add_unique_abbrev(&o->obuf, &commit->object.oid,
DEFAULT_ABBREV);
ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0);
if (!ce)
- return err(o, _("addinfo_cache failed for path '%s'"), path);
+ return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
ret = add_cache_entry(ce, options);
if (refresh) {
nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
if (!nce)
- return err(o, _("addinfo_cache failed for path '%s'"), path);
+ return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
if (nce != ce)
ret = add_cache_entry(nce, options);
}
init_tree_desc(desc, tree->buffer, tree->size);
}
-static int git_merge_trees(int index_only,
- struct tree *common,
- struct tree *head,
- struct tree *merge)
+static int unpack_trees_start(struct merge_options *o,
+ struct tree *common,
+ struct tree *head,
+ struct tree *merge)
{
int rc;
struct tree_desc t[3];
- struct unpack_trees_options opts;
+ struct index_state tmp_index = { NULL };
- memset(&opts, 0, sizeof(opts));
- if (index_only)
- opts.index_only = 1;
+ memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
+ if (o->call_depth)
+ o->unpack_opts.index_only = 1;
else
- opts.update = 1;
- opts.merge = 1;
- opts.head_idx = 2;
- opts.fn = threeway_merge;
- opts.src_index = &the_index;
- opts.dst_index = &the_index;
- setup_unpack_trees_porcelain(&opts, "merge");
+ o->unpack_opts.update = 1;
+ o->unpack_opts.merge = 1;
+ o->unpack_opts.head_idx = 2;
+ o->unpack_opts.fn = threeway_merge;
+ o->unpack_opts.src_index = &the_index;
+ o->unpack_opts.dst_index = &tmp_index;
+ o->unpack_opts.aggressive = !merge_detect_rename(o);
+ setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
init_tree_desc_from_tree(t+0, common);
init_tree_desc_from_tree(t+1, head);
init_tree_desc_from_tree(t+2, merge);
- rc = unpack_trees(3, t, &opts);
+ rc = unpack_trees(3, t, &o->unpack_opts);
cache_tree_free(&active_cache_tree);
+
+ /*
+ * Update the_index to match the new results, AFTER saving a copy
+ * in o->orig_index. Update src_index to point to the saved copy.
+ * (verify_uptodate() checks src_index, and the original index is
+ * the one that had the necessary modification timestamps.)
+ */
+ o->orig_index = the_index;
+ the_index = tmp_index;
+ o->unpack_opts.src_index = &o->orig_index;
+
return rc;
}
+static void unpack_trees_finish(struct merge_options *o)
+{
+ discard_index(&o->orig_index);
+ clear_unpack_trees_porcelain(&o->unpack_opts);
+}
+
struct tree *write_tree_from_memory(struct merge_options *o)
{
struct tree *result = NULL;
fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
(int)ce_namelen(ce), ce->name);
}
- die("BUG: unmerged index entries in merge-recursive.c");
+ BUG("unmerged index entries in merge-recursive.c");
}
if (!active_cache_tree)
read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
}
+static int get_tree_entry_if_blob(const struct object_id *tree,
+ const char *path,
+ struct object_id *hashy,
+ unsigned int *mode_o)
+{
+ int ret;
+
+ ret = get_tree_entry(tree, path, hashy, mode_o);
+ if (S_ISDIR(*mode_o)) {
+ oidcpy(hashy, &null_oid);
+ *mode_o = 0;
+ }
+ return ret;
+}
+
/*
* Returns an index_entry instance which doesn't have to correspond to
* a real cache entry in Git's index.
{
struct string_list_item *item;
struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
- get_tree_entry(&o->object.oid, path,
- &e->stages[1].oid, &e->stages[1].mode);
- get_tree_entry(&a->object.oid, path,
- &e->stages[2].oid, &e->stages[2].mode);
- get_tree_entry(&b->object.oid, path,
- &e->stages[3].oid, &e->stages[3].mode);
+ get_tree_entry_if_blob(&o->object.oid, path,
+ &e->stages[1].oid, &e->stages[1].mode);
+ get_tree_entry_if_blob(&a->object.oid, path,
+ &e->stages[2].oid, &e->stages[2].mode);
+ get_tree_entry_if_blob(&b->object.oid, path,
+ &e->stages[3].oid, &e->stages[3].mode);
item = string_list_insert(entries, path);
item->util = e;
return e;
*/
struct stage_data *src_entry;
struct stage_data *dst_entry;
+ unsigned add_turned_into_rename:1;
unsigned processed:1;
};
-/*
- * Get information of all renames which occurred between 'o_tree' and
- * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and
- * 'b_tree') to be able to associate the correct cache entries with
- * the rename information. 'tree' is always equal to either a_tree or b_tree.
- */
-static struct string_list *get_renames(struct merge_options *o,
- struct tree *tree,
- struct tree *o_tree,
- struct tree *a_tree,
- struct tree *b_tree,
- struct string_list *entries)
-{
- int i;
- struct string_list *renames;
- struct diff_options opts;
-
- renames = xcalloc(1, sizeof(struct string_list));
- if (!o->detect_rename)
- return renames;
-
- diff_setup(&opts);
- opts.flags.recursive = 1;
- opts.flags.rename_empty = 0;
- opts.detect_rename = DIFF_DETECT_RENAME;
- opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
- o->diff_rename_limit >= 0 ? o->diff_rename_limit :
- 1000;
- opts.rename_score = o->rename_score;
- opts.show_rename_progress = o->show_rename_progress;
- opts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_setup_done(&opts);
- diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
- diffcore_std(&opts);
- if (opts.needed_rename_limit > o->needed_rename_limit)
- o->needed_rename_limit = opts.needed_rename_limit;
- for (i = 0; i < diff_queued_diff.nr; ++i) {
- struct string_list_item *item;
- struct rename *re;
- struct diff_filepair *pair = diff_queued_diff.queue[i];
- if (pair->status != 'R') {
- diff_free_filepair(pair);
- continue;
- }
- re = xmalloc(sizeof(*re));
- re->processed = 0;
- re->pair = pair;
- item = string_list_lookup(entries, re->pair->one->path);
- if (!item)
- re->src_entry = insert_stage_data(re->pair->one->path,
- o_tree, a_tree, b_tree, entries);
- else
- re->src_entry = item->util;
-
- item = string_list_lookup(entries, re->pair->two->path);
- if (!item)
- re->dst_entry = insert_stage_data(re->pair->two->path,
- o_tree, a_tree, b_tree, entries);
- else
- re->dst_entry = item->util;
- item = string_list_insert(renames, pair->one->path);
- item->util = re;
- }
- opts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_queued_diff.nr = 0;
- diff_flush(&opts);
- return renames;
-}
-
static int update_stages(struct merge_options *opt, const char *path,
const struct diff_filespec *o,
const struct diff_filespec *a,
return 0;
}
+static int update_stages_for_stage_data(struct merge_options *opt,
+ const char *path,
+ const struct stage_data *stage_data)
+{
+ struct diff_filespec o, a, b;
+
+ o.mode = stage_data->stages[1].mode;
+ oidcpy(&o.oid, &stage_data->stages[1].oid);
+
+ a.mode = stage_data->stages[2].mode;
+ oidcpy(&a.oid, &stage_data->stages[2].oid);
+
+ b.mode = stage_data->stages[3].mode;
+ oidcpy(&b.oid, &stage_data->stages[3].oid);
+
+ return update_stages(opt, path,
+ is_null_oid(&o.oid) ? NULL : &o,
+ is_null_oid(&a.oid) ? NULL : &a,
+ is_null_oid(&b.oid) ? NULL : &b);
+}
+
static void update_entry(struct stage_data *entry,
struct diff_filespec *o,
struct diff_filespec *a,
!(empty_ok && is_empty_dir(path));
}
-static int was_tracked(const char *path)
+/*
+ * Returns whether path was tracked in the index before the merge started,
+ * and its oid and mode match the specified values
+ */
+static int was_tracked_and_matches(struct merge_options *o, const char *path,
+ const struct object_id *oid, unsigned mode)
{
- int pos = cache_name_pos(path, strlen(path));
+ int pos = index_name_pos(&o->orig_index, path, strlen(path));
+ struct cache_entry *ce;
+
+ if (0 > pos)
+ /* we were not tracking this path before the merge */
+ return 0;
+
+ /* See if the file we were tracking before matches */
+ ce = o->orig_index.cache[pos];
+ return (oid_eq(&ce->oid, oid) && ce->ce_mode == mode);
+}
+
+/*
+ * Returns whether path was tracked in the index before the merge started
+ */
+static int was_tracked(struct merge_options *o, const char *path)
+{
+ int pos = index_name_pos(&o->orig_index, path, strlen(path));
if (0 <= pos)
- /* we have been tracking this path */
+ /* we were tracking this path before the merge */
return 1;
- /*
- * Look for an unmerged entry for the path,
- * specifically stage #2, which would indicate
- * that "our" side before the merge started
- * had the path tracked (and resulted in a conflict).
- */
- for (pos = -1 - pos;
- pos < active_nr && !strcmp(path, active_cache[pos]->name);
- pos++)
- if (ce_stage(active_cache[pos]) == 2)
- return 1;
return 0;
}
static int would_lose_untracked(const char *path)
{
- return !was_tracked(path) && file_exists(path);
+ /*
+ * This may look like it can be simplified to:
+ * return !was_tracked(o, path) && file_exists(path)
+ * but it can't. This function needs to know whether path was in
+ * the working tree due to EITHER having been tracked in the index
+ * before the merge OR having been put into the working copy and
+ * index by unpack_trees(). Due to that either-or requirement, we
+ * check the current index instead of the original one.
+ *
+ * Note that we do not need to worry about merge-recursive itself
+ * updating the index after unpack_trees() and before calling this
+ * function, because we strictly require all code paths in
+ * merge-recursive to update the working tree first and the index
+ * second. Doing otherwise would break
+ * update_file()/would_lose_untracked(); see every comment in this
+ * file which mentions "update_stages".
+ */
+ int pos = cache_name_pos(path, strlen(path));
+
+ if (pos < 0)
+ pos = -1 - pos;
+ while (pos < active_nr &&
+ !strcmp(path, active_cache[pos]->name)) {
+ /*
+ * If stage #0, it is definitely tracked.
+ * If it has stage #2 then it was tracked
+ * before this merge started. All other
+ * cases the path was not tracked.
+ */
+ switch (ce_stage(active_cache[pos])) {
+ case 0:
+ case 2:
+ return 0;
+ }
+ pos++;
+ }
+ return file_exists(path);
+}
+
+static int was_dirty(struct merge_options *o, const char *path)
+{
+ struct cache_entry *ce;
+ int dirty = 1;
+
+ if (o->call_depth || !was_tracked(o, path))
+ return !dirty;
+
+ ce = index_file_exists(o->unpack_opts.src_index,
+ path, strlen(path), ignore_case);
+ dirty = verify_uptodate(ce, &o->unpack_opts) != 0;
+ return dirty;
}
static int make_room_for_path(struct merge_options *o, const char *path)
}
update_index:
if (!ret && update_cache)
- add_cacheinfo(o, mode, oid, path, 0, update_wd, ADD_CACHE_OK_TO_ADD);
+ if (add_cacheinfo(o, mode, oid, path, 0, update_wd,
+ ADD_CACHE_OK_TO_ADD))
+ return -1;
return ret;
}
return merge_status;
}
+static int find_first_merges(struct object_array *result, const char *path,
+ struct commit *a, struct commit *b)
+{
+ int i, j;
+ struct object_array merges = OBJECT_ARRAY_INIT;
+ struct commit *commit;
+ int contains_another;
+
+ char merged_revision[42];
+ const char *rev_args[] = { "rev-list", "--merges", "--ancestry-path",
+ "--all", merged_revision, NULL };
+ struct rev_info revs;
+ struct setup_revision_opt rev_opts;
+
+ memset(result, 0, sizeof(struct object_array));
+ memset(&rev_opts, 0, sizeof(rev_opts));
+
+ /* get all revisions that merge commit a */
+ xsnprintf(merged_revision, sizeof(merged_revision), "^%s",
+ oid_to_hex(&a->object.oid));
+ init_revisions(&revs, NULL);
+ rev_opts.submodule = path;
+ /* FIXME: can't handle linked worktrees in submodules yet */
+ revs.single_worktree = path != NULL;
+ setup_revisions(ARRAY_SIZE(rev_args)-1, rev_args, &revs, &rev_opts);
+
+ /* save all revisions from the above list that contain b */
+ if (prepare_revision_walk(&revs))
+ die("revision walk setup failed");
+ while ((commit = get_revision(&revs)) != NULL) {
+ struct object *o = &(commit->object);
+ if (in_merge_bases(b, commit))
+ add_object_array(o, NULL, &merges);
+ }
+ reset_revision_walk();
+
+ /* Now we've got all merges that contain a and b. Prune all
+ * merges that contain another found merge and save them in
+ * result.
+ */
+ for (i = 0; i < merges.nr; i++) {
+ struct commit *m1 = (struct commit *) merges.objects[i].item;
+
+ contains_another = 0;
+ for (j = 0; j < merges.nr; j++) {
+ struct commit *m2 = (struct commit *) merges.objects[j].item;
+ if (i != j && in_merge_bases(m2, m1)) {
+ contains_another = 1;
+ break;
+ }
+ }
+
+ if (!contains_another)
+ add_object_array(merges.objects[i].item, NULL, result);
+ }
+
+ object_array_clear(&merges);
+ return result->nr;
+}
+
+static void print_commit(struct commit *commit)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct pretty_print_context ctx = {0};
+ ctx.date_mode.type = DATE_NORMAL;
+ format_commit_message(commit, " %h: %m %s", &sb, &ctx);
+ fprintf(stderr, "%s\n", sb.buf);
+ strbuf_release(&sb);
+}
+
+static int merge_submodule(struct merge_options *o,
+ struct object_id *result, const char *path,
+ const struct object_id *base, const struct object_id *a,
+ const struct object_id *b)
+{
+ struct commit *commit_base, *commit_a, *commit_b;
+ int parent_count;
+ struct object_array merges;
+
+ int i;
+ int search = !o->call_depth;
+
+ /* store a in result in case we fail */
+ oidcpy(result, a);
+
+ /* we can not handle deletion conflicts */
+ if (is_null_oid(base))
+ return 0;
+ if (is_null_oid(a))
+ return 0;
+ if (is_null_oid(b))
+ return 0;
+
+ if (add_submodule_odb(path)) {
+ output(o, 1, _("Failed to merge submodule %s (not checked out)"), path);
+ return 0;
+ }
+
+ if (!(commit_base = lookup_commit_reference(base)) ||
+ !(commit_a = lookup_commit_reference(a)) ||
+ !(commit_b = lookup_commit_reference(b))) {
+ output(o, 1, _("Failed to merge submodule %s (commits not present)"), path);
+ return 0;
+ }
+
+ /* check whether both changes are forward */
+ if (!in_merge_bases(commit_base, commit_a) ||
+ !in_merge_bases(commit_base, commit_b)) {
+ output(o, 1, _("Failed to merge submodule %s (commits don't follow merge-base)"), path);
+ return 0;
+ }
+
+ /* Case #1: a is contained in b or vice versa */
+ if (in_merge_bases(commit_a, commit_b)) {
+ oidcpy(result, b);
+ if (show(o, 3)) {
+ output(o, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
+ output_commit_title(o, commit_b);
+ } else if (show(o, 2))
+ output(o, 2, _("Fast-forwarding submodule %s"), path);
+ else
+ ; /* no output */
+
+ return 1;
+ }
+ if (in_merge_bases(commit_b, commit_a)) {
+ oidcpy(result, a);
+ if (show(o, 3)) {
+ output(o, 3, _("Fast-forwarding submodule %s to the following commit:"), path);
+ output_commit_title(o, commit_a);
+ } else if (show(o, 2))
+ output(o, 2, _("Fast-forwarding submodule %s"), path);
+ else
+ ; /* no output */
+
+ return 1;
+ }
+
+ /*
+ * Case #2: There are one or more merges that contain a and b in
+ * the submodule. If there is only one, then present it as a
+ * suggestion to the user, but leave it marked unmerged so the
+ * user needs to confirm the resolution.
+ */
+
+ /* Skip the search if it makes no sense to the calling context. */
+ if (!search)
+ return 0;
+
+ /* find commit which merges them */
+ parent_count = find_first_merges(&merges, path, commit_a, commit_b);
+ switch (parent_count) {
+ case 0:
+ output(o, 1, _("Failed to merge submodule %s (merge following commits not found)"), path);
+ break;
+
+ case 1:
+ output(o, 1, _("Failed to merge submodule %s (not fast-forward)"), path);
+ output(o, 2, _("Found a possible merge resolution for the submodule:\n"));
+ print_commit((struct commit *) merges.objects[0].item);
+ output(o, 2, _(
+ "If this is correct simply add it to the index "
+ "for example\n"
+ "by using:\n\n"
+ " git update-index --cacheinfo 160000 %s \"%s\"\n\n"
+ "which will accept this suggestion.\n"),
+ oid_to_hex(&merges.objects[0].item->oid), path);
+ break;
+
+ default:
+ output(o, 1, _("Failed to merge submodule %s (multiple merges found)"), path);
+ for (i = 0; i < merges.nr; i++)
+ print_commit((struct commit *) merges.objects[i].item);
+ }
+
+ object_array_clear(&merges);
+ return 0;
+}
+
static int merge_file_1(struct merge_options *o,
- const struct diff_filespec *one,
- const struct diff_filespec *a,
- const struct diff_filespec *b,
- const char *branch1,
- const char *branch2,
- struct merge_file_info *result)
+ const struct diff_filespec *one,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b,
+ const char *filename,
+ const char *branch1,
+ const char *branch2,
+ struct merge_file_info *result)
{
result->merge = 0;
result->clean = 1;
return ret;
result->clean = (merge_status == 0);
} else if (S_ISGITLINK(a->mode)) {
- result->clean = merge_submodule(&result->oid,
+ result->clean = merge_submodule(o, &result->oid,
one->path,
&one->oid,
&a->oid,
- &b->oid,
- !o->call_depth);
+ &b->oid);
} else if (S_ISLNK(a->mode)) {
switch (o->recursive_variant) {
case MERGE_RECURSIVE_NORMAL:
break;
}
} else
- die("BUG: unsupported object type in the tree");
+ BUG("unsupported object type in the tree");
}
+ if (result->merge)
+ output(o, 2, _("Auto-merging %s"), filename);
+
return 0;
}
static int merge_file_special_markers(struct merge_options *o,
- const struct diff_filespec *one,
- const struct diff_filespec *a,
- const struct diff_filespec *b,
- const char *branch1,
- const char *filename1,
- const char *branch2,
- const char *filename2,
- struct merge_file_info *mfi)
+ const struct diff_filespec *one,
+ const struct diff_filespec *a,
+ const struct diff_filespec *b,
+ const char *target_filename,
+ const char *branch1,
+ const char *filename1,
+ const char *branch2,
+ const char *filename2,
+ struct merge_file_info *mfi)
{
char *side1 = NULL;
char *side2 = NULL;
if (filename2)
side2 = xstrfmt("%s:%s", branch2, filename2);
- ret = merge_file_1(o, one, a, b,
+ ret = merge_file_1(o, one, a, b, target_filename,
side1 ? side1 : branch1,
side2 ? side2 : branch2, mfi);
+
free(side1);
free(side2);
return ret;
}
static int merge_file_one(struct merge_options *o,
- const char *path,
- const struct object_id *o_oid, int o_mode,
- const struct object_id *a_oid, int a_mode,
- const struct object_id *b_oid, int b_mode,
- const char *branch1,
- const char *branch2,
- struct merge_file_info *mfi)
+ const char *path,
+ const struct object_id *o_oid, int o_mode,
+ const struct object_id *a_oid, int a_mode,
+ const struct object_id *b_oid, int b_mode,
+ const char *branch1,
+ const char *branch2,
+ struct merge_file_info *mfi)
{
struct diff_filespec one, a, b;
a.mode = a_mode;
oidcpy(&b.oid, b_oid);
b.mode = b_mode;
- return merge_file_1(o, &one, &a, &b, branch1, branch2, mfi);
+ return merge_file_1(o, &one, &a, &b, path, branch1, branch2, mfi);
+}
+
+static int conflict_rename_dir(struct merge_options *o,
+ struct diff_filepair *pair,
+ const char *rename_branch,
+ const char *other_branch)
+{
+ const struct diff_filespec *dest = pair->two;
+
+ if (!o->call_depth && would_lose_untracked(dest->path)) {
+ char *alt_path = unique_path(o, dest->path, rename_branch);
+
+ output(o, 1, _("Error: Refusing to lose untracked file at %s; "
+ "writing to %s instead."),
+ dest->path, alt_path);
+ /*
+ * Write the file in worktree at alt_path, but not in the
+ * index. Instead, write to dest->path for the index but
+ * only at the higher appropriate stage.
+ */
+ if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+ return -1;
+ free(alt_path);
+ return update_stages(o, dest->path, NULL,
+ rename_branch == o->branch1 ? dest : NULL,
+ rename_branch == o->branch1 ? NULL : dest);
+ }
+
+ /* Update dest->path both in index and in worktree */
+ if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
+ return -1;
+ return 0;
}
static int handle_change_delete(struct merge_options *o,
const char *update_path = path;
int ret = 0;
- if (dir_in_way(path, !o->call_depth, 0)) {
+ if (dir_in_way(path, !o->call_depth, 0) ||
+ (!o->call_depth && would_lose_untracked(path))) {
update_path = alt_path = unique_path(o, path, change_branch);
}
add = filespec_from_entry(&other, dst_entry, stage ^ 1);
if (add) {
+ int ren_src_was_dirty = was_dirty(o, rename->path);
char *add_name = unique_path(o, rename->path, other_branch);
if (update_file(o, 0, &add->oid, add->mode, add_name))
return -1;
- remove_file(o, 0, rename->path, 0);
+ if (ren_src_was_dirty) {
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ rename->path);
+ }
+ /*
+ * Because the double negatives somehow keep confusing me...
+ * 1) update_wd iff !ren_src_was_dirty.
+ * 2) no_wd iff !update_wd
+ * 3) so, no_wd == !!ren_src_was_dirty == ren_src_was_dirty
+ */
+ remove_file(o, 0, rename->path, ren_src_was_dirty);
dst_name = unique_path(o, rename->path, cur_branch);
} else {
if (dir_in_way(rename->path, !o->call_depth, 0)) {
dst_name = unique_path(o, rename->path, cur_branch);
output(o, 1, _("%s is a directory in %s adding as %s instead"),
rename->path, other_branch, dst_name);
+ } else if (!o->call_depth &&
+ would_lose_untracked(rename->path)) {
+ dst_name = unique_path(o, rename->path, cur_branch);
+ output(o, 1, _("Refusing to lose untracked file at %s; "
+ "adding as %s instead"),
+ rename->path, dst_name);
}
}
if ((ret = update_file(o, 0, &rename->oid, rename->mode, dst_name)))
struct diff_filespec *c1 = ci->pair1->two;
struct diff_filespec *c2 = ci->pair2->two;
char *path = c1->path; /* == c2->path */
+ char *path_side_1_desc;
+ char *path_side_2_desc;
struct merge_file_info mfi_c1;
struct merge_file_info mfi_c2;
int ret;
remove_file(o, 1, a->path, o->call_depth || would_lose_untracked(a->path));
remove_file(o, 1, b->path, o->call_depth || would_lose_untracked(b->path));
+ path_side_1_desc = xstrfmt("%s (was %s)", path, a->path);
+ path_side_2_desc = xstrfmt("%s (was %s)", path, b->path);
if (merge_file_special_markers(o, a, c1, &ci->ren1_other,
+ path_side_1_desc,
o->branch1, c1->path,
o->branch2, ci->ren1_other.path, &mfi_c1) ||
merge_file_special_markers(o, b, &ci->ren2_other, c2,
+ path_side_2_desc,
o->branch1, ci->ren2_other.path,
o->branch2, c2->path, &mfi_c2))
return -1;
+ free(path_side_1_desc);
+ free(path_side_2_desc);
if (o->call_depth) {
/*
char *new_path2 = unique_path(o, path, ci->branch2);
output(o, 1, _("Renaming %s to %s and %s to %s instead"),
a->path, new_path1, b->path, new_path2);
- remove_file(o, 0, path, 0);
+ if (was_dirty(o, path))
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ path);
+ else if (would_lose_untracked(path))
+ /*
+ * Only way we get here is if both renames were from
+ * a directory rename AND user had an untracked file
+ * at the location where both files end up after the
+ * two directory renames. See testcase 10d of t6043.
+ */
+ output(o, 1, _("Refusing to lose untracked file at "
+ "%s, even though it's in the way."),
+ path);
+ else
+ remove_file(o, 0, path, 0);
ret = update_file(o, 0, &mfi_c1.oid, mfi_c1.mode, new_path1);
if (!ret)
ret = update_file(o, 0, &mfi_c2.oid, mfi_c2.mode,
new_path2);
+ /*
+ * unpack_trees() actually populates the index for us for
+ * "normal" rename/rename(2to1) situations so that the
+ * correct entries are at the higher stages, which would
+ * make the call below to update_stages_for_stage_data
+ * unnecessary. However, if either of the renames came
+ * from a directory rename, then unpack_trees() will not
+ * have gotten the right data loaded into the index, so we
+ * need to do so now. (While it'd be tempting to move this
+ * call to update_stages_for_stage_data() to
+ * apply_directory_rename_modifications(), that would break
+ * our intermediate calls to would_lose_untracked() since
+ * those rely on the current in-memory index. See also the
+ * big "NOTE" in update_stages()).
+ */
+ if (update_stages_for_stage_data(o, path, ci->dst_entry1))
+ ret = -1;
+
free(new_path2);
free(new_path1);
}
return ret;
}
-static int process_renames(struct merge_options *o,
- struct string_list *a_renames,
- struct string_list *b_renames)
+/*
+ * Get the diff_filepairs changed between o_tree and tree.
+ */
+static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+ struct tree *o_tree,
+ struct tree *tree)
{
- int clean_merge = 1, i, j;
- struct string_list a_by_dst = STRING_LIST_INIT_NODUP;
- struct string_list b_by_dst = STRING_LIST_INIT_NODUP;
- const struct rename *sre;
-
- for (i = 0; i < a_renames->nr; i++) {
- sre = a_renames->items[i].util;
- string_list_insert(&a_by_dst, sre->pair->two->path)->util
- = (void *)sre;
- }
- for (i = 0; i < b_renames->nr; i++) {
- sre = b_renames->items[i].util;
- string_list_insert(&b_by_dst, sre->pair->two->path)->util
- = (void *)sre;
- }
-
- for (i = 0, j = 0; i < a_renames->nr || j < b_renames->nr;) {
- struct string_list *renames1, *renames2Dst;
- struct rename *ren1 = NULL, *ren2 = NULL;
- const char *branch1, *branch2;
- const char *ren1_src, *ren1_dst;
- struct string_list_item *lookup;
+ struct diff_queue_struct *ret;
+ struct diff_options opts;
- if (i >= a_renames->nr) {
- ren2 = b_renames->items[j++].util;
- } else if (j >= b_renames->nr) {
- ren1 = a_renames->items[i++].util;
- } else {
- int compare = strcmp(a_renames->items[i].string,
- b_renames->items[j].string);
+ diff_setup(&opts);
+ opts.flags.recursive = 1;
+ opts.flags.rename_empty = 0;
+ opts.detect_rename = merge_detect_rename(o);
+ /*
+ * We do not have logic to handle the detection of copies. In
+ * fact, it may not even make sense to add such logic: would we
+ * really want a change to a base file to be propagated through
+ * multiple other files by a merge?
+ */
+ if (opts.detect_rename > DIFF_DETECT_RENAME)
+ opts.detect_rename = DIFF_DETECT_RENAME;
+ opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
+ o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+ 1000;
+ opts.rename_score = o->rename_score;
+ opts.show_rename_progress = o->show_rename_progress;
+ opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_setup_done(&opts);
+ diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
+ diffcore_std(&opts);
+ if (opts.needed_rename_limit > o->needed_rename_limit)
+ o->needed_rename_limit = opts.needed_rename_limit;
+
+ ret = xmalloc(sizeof(*ret));
+ *ret = diff_queued_diff;
+
+ opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+ diff_queued_diff.nr = 0;
+ diff_queued_diff.queue = NULL;
+ diff_flush(&opts);
+ return ret;
+}
+
+static int tree_has_path(struct tree *tree, const char *path)
+{
+ struct object_id hashy;
+ unsigned int mode_o;
+
+ return !get_tree_entry(&tree->object.oid, path,
+ &hashy, &mode_o);
+}
+
+/*
+ * Return a new string that replaces the beginning portion (which matches
+ * entry->dir), with entry->new_dir. In perl-speak:
+ * new_path_name = (old_path =~ s/entry->dir/entry->new_dir/);
+ * NOTE:
+ * Caller must ensure that old_path starts with entry->dir + '/'.
+ */
+static char *apply_dir_rename(struct dir_rename_entry *entry,
+ const char *old_path)
+{
+ struct strbuf new_path = STRBUF_INIT;
+ int oldlen, newlen;
+
+ if (entry->non_unique_new_dir)
+ return NULL;
+
+ oldlen = strlen(entry->dir);
+ newlen = entry->new_dir.len + (strlen(old_path) - oldlen) + 1;
+ strbuf_grow(&new_path, newlen);
+ strbuf_addbuf(&new_path, &entry->new_dir);
+ strbuf_addstr(&new_path, &old_path[oldlen]);
+
+ return strbuf_detach(&new_path, NULL);
+}
+
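
For example (a hedged illustration, not code from the patch), an entry recording that a/b/c/d was renamed to a/b/some/thing/else rewrites only that leading portion of a path:

char olddir[] = "a/b/c/d";
char *moved;
struct dir_rename_entry entry;

dir_rename_entry_init(&entry, olddir);
strbuf_addstr(&entry.new_dir, "a/b/some/thing/else");

/* caller guarantees old_path starts with entry.dir followed by '/' */
moved = apply_dir_rename(&entry, "a/b/c/d/e/foo.c");
/* moved is "a/b/some/thing/else/e/foo.c"; free() it when done */
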
+static void get_renamed_dir_portion(const char *old_path, const char *new_path,
+ char **old_dir, char **new_dir)
+{
+ char *end_of_old, *end_of_new;
+ int old_len, new_len;
+
+ *old_dir = NULL;
+ *new_dir = NULL;
+
+ /*
+ * For
+ * "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+ * the "e/foo.c" part is the same, we just want to know that
+ * "a/b/c/d" was renamed to "a/b/some/thing/else"
+ * so, for this example, this function returns "a/b/c/d" in
+ * *old_dir and "a/b/some/thing/else" in *new_dir.
+ *
+ * Also, if the basename of the file changed, we don't care. We
+ * want to know which portion of the directory, if any, changed.
+ */
+ end_of_old = strrchr(old_path, '/');
+ end_of_new = strrchr(new_path, '/');
+
+ if (end_of_old == NULL || end_of_new == NULL)
+ return;
+ while (*--end_of_new == *--end_of_old &&
+ end_of_old != old_path &&
+ end_of_new != new_path)
+ ; /* Do nothing; all in the while loop */
+ /*
+ * We've found the first non-matching character in the directory
+ * paths. That means the current directory we were comparing
+ * represents the rename. Move end_of_old and end_of_new back
+ * to the full directory name.
+ */
+ if (*end_of_old == '/')
+ end_of_old++;
+ if (*end_of_old != '/')
+ end_of_new++;
+ end_of_old = strchr(end_of_old, '/');
+ end_of_new = strchr(end_of_new, '/');
+
+ /*
+ * It may have been the case that old_path and new_path were the same
+ * directory all along. Don't claim a rename if they're the same.
+ */
+ old_len = end_of_old - old_path;
+ new_len = end_of_new - new_path;
+
+ if (old_len != new_len || strncmp(old_path, new_path, old_len)) {
+ *old_dir = xstrndup(old_path, old_len);
+ *new_dir = xstrndup(new_path, new_len);
+ }
+}
+
+static void remove_hashmap_entries(struct hashmap *dir_renames,
+ struct string_list *items_to_remove)
+{
+ int i;
+ struct dir_rename_entry *entry;
+
+ for (i = 0; i < items_to_remove->nr; i++) {
+ entry = items_to_remove->items[i].util;
+ hashmap_remove(dir_renames, entry, NULL);
+ }
+ string_list_clear(items_to_remove, 0);
+}
+
+/*
+ * See if there is a directory rename for path, and if there are any file
+ * level conflicts for the renamed location. If there is a rename and
+ * there are no conflicts, return the new name. Otherwise, return NULL.
+ */
+static char *handle_path_level_conflicts(struct merge_options *o,
+ const char *path,
+ struct dir_rename_entry *entry,
+ struct hashmap *collisions,
+ struct tree *tree)
+{
+ char *new_path = NULL;
+ struct collision_entry *collision_ent;
+ int clean = 1;
+ struct strbuf collision_paths = STRBUF_INIT;
+
+ /*
+ * entry has the mapping of old directory name to new directory name
+ * that we want to apply to path.
+ */
+ new_path = apply_dir_rename(entry, path);
+
+ if (!new_path) {
+ /* This should only happen when entry->non_unique_new_dir set */
+ if (!entry->non_unique_new_dir)
+ BUG("entry->non_unique_new_dir not set and !new_path");
+ output(o, 1, _("CONFLICT (directory rename split): "
+ "Unclear where to place %s because directory "
+ "%s was renamed to multiple other directories, "
+ "with no destination getting a majority of the "
+ "files."),
+ path, entry->dir);
+ clean = 0;
+ return NULL;
+ }
+
+ /*
+ * The caller needs to have ensured that it has pre-populated
+ * collisions with all paths that map to new_path. Do a quick check
+ * to ensure that's the case.
+ */
+ collision_ent = collision_find_entry(collisions, new_path);
+ if (collision_ent == NULL)
+ BUG("collision_ent is NULL");
+
+ /*
+ * Check for one-sided add/add/.../add conflicts, i.e.
+ * where implicit renames from the other side doing
+ * directory rename(s) can affect this side of history
+ * to put multiple paths into the same location. Warn
+ * and bail on directory renames for such paths.
+ */
+ if (collision_ent->reported_already) {
+ clean = 0;
+ } else if (tree_has_path(tree, new_path)) {
+ collision_ent->reported_already = 1;
+ strbuf_add_separated_string_list(&collision_paths, ", ",
+ &collision_ent->source_files);
+ output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+ "file/dir at %s in the way of implicit "
+ "directory rename(s) putting the following "
+ "path(s) there: %s."),
+ new_path, collision_paths.buf);
+ clean = 0;
+ } else if (collision_ent->source_files.nr > 1) {
+ collision_ent->reported_already = 1;
+ strbuf_add_separated_string_list(&collision_paths, ", ",
+ &collision_ent->source_files);
+ output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+ "more than one path to %s; implicit directory "
+ "renames tried to put these paths there: %s"),
+ new_path, collision_paths.buf);
+ clean = 0;
+ }
+
+ /* Free memory we no longer need */
+ strbuf_release(&collision_paths);
+ if (!clean && new_path) {
+ free(new_path);
+ return NULL;
+ }
+
+ return new_path;
+}
+
+/*
+ * There are a couple things we want to do at the directory level:
+ * 1. Check for both sides renaming to the same thing, in order to avoid
+ * implicit renaming of files that should be left in place. (See
+ * testcase 6b in t6043 for details.)
+ * 2. Prune directory renames if there are still files left in
+ * the original directory. These represent a partial directory rename,
+ * i.e. a rename where only some of the files within the directory
+ * were renamed elsewhere. (Technically, this could be done earlier
+ * in get_directory_renames(), except that would prevent us from
+ * doing the previous check and thus failing testcase 6b.)
+ * 3. Check for rename/rename(1to2) conflicts (at the directory level).
+ * In the future, we could potentially record this info as well and
+ * omit reporting rename/rename(1to2) conflicts for each path within
+ * the affected directories, thus cleaning up the merge output.
+ * NOTE: We do NOT check for rename/rename(2to1) conflicts at the
+ * directory level, because merging directories is fine. If it
+ * causes conflicts for files within those merged directories, then
+ * that should be detected at the individual path level.
+ */
+static void handle_directory_level_conflicts(struct merge_options *o,
+ struct hashmap *dir_re_head,
+ struct tree *head,
+ struct hashmap *dir_re_merge,
+ struct tree *merge)
+{
+ struct hashmap_iter iter;
+ struct dir_rename_entry *head_ent;
+ struct dir_rename_entry *merge_ent;
+
+ struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
+ struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
+
+ hashmap_iter_init(dir_re_head, &iter);
+ while ((head_ent = hashmap_iter_next(&iter))) {
+ merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
+ if (merge_ent &&
+ !head_ent->non_unique_new_dir &&
+ !merge_ent->non_unique_new_dir &&
+ !strbuf_cmp(&head_ent->new_dir, &merge_ent->new_dir)) {
+ /* 1. Renamed identically; remove it from both sides */
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ strbuf_release(&merge_ent->new_dir);
+ } else if (tree_has_path(head, head_ent->dir)) {
+ /* 2. This wasn't a directory rename after all */
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ }
+ }
+
+ remove_hashmap_entries(dir_re_head, &remove_from_head);
+ remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+
+ hashmap_iter_init(dir_re_merge, &iter);
+ while ((merge_ent = hashmap_iter_next(&iter))) {
+ head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
+ if (tree_has_path(merge, merge_ent->dir)) {
+ /* 2. This wasn't a directory rename after all */
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ } else if (head_ent &&
+ !head_ent->non_unique_new_dir &&
+ !merge_ent->non_unique_new_dir) {
+ /* 3. rename/rename(1to2) */
+ /*
+ * We can assume it's not rename/rename(1to1) because
+ * that was case (1), already checked above. So we
+ * know that head_ent->new_dir and merge_ent->new_dir
+ * are different strings.
+ */
+ output(o, 1, _("CONFLICT (rename/rename): "
+ "Rename directory %s->%s in %s. "
+ "Rename directory %s->%s in %s"),
+ head_ent->dir, head_ent->new_dir.buf, o->branch1,
+ head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+ string_list_append(&remove_from_head,
+ head_ent->dir)->util = head_ent;
+ strbuf_release(&head_ent->new_dir);
+ string_list_append(&remove_from_merge,
+ merge_ent->dir)->util = merge_ent;
+ strbuf_release(&merge_ent->new_dir);
+ }
+ }
+
+ remove_hashmap_entries(dir_re_head, &remove_from_head);
+ remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+}
+
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
+ struct tree *tree)
+{
+ struct hashmap *dir_renames;
+ struct hashmap_iter iter;
+ struct dir_rename_entry *entry;
+ int i;
+
+ /*
+ * Typically, we think of a directory rename as all files from a
+ * certain directory being moved to a target directory. However,
+ * what if someone first moved two files from the original
+ * directory in one commit, and then renamed the directory
+ * somewhere else in a later commit? At merge time, we just know
+ * that files from the original directory went to two different
+ * places, and that the bulk of them ended up in the same place.
+ * We want each directory rename to represent where the bulk of the
+ * files from that directory end up; this function exists to find
+ * where the bulk of the files went.
+ *
+ * The first loop below simply iterates through the list of file
+ * renames, finding out how often each directory rename pair
+ * possibility occurs.
+ */
+ dir_renames = xmalloc(sizeof(*dir_renames));
+ dir_rename_init(dir_renames);
+ for (i = 0; i < pairs->nr; ++i) {
+ struct string_list_item *item;
+ int *count;
+ struct diff_filepair *pair = pairs->queue[i];
+ char *old_dir, *new_dir;
+
+ /* File not part of directory rename if it wasn't renamed */
+ if (pair->status != 'R')
+ continue;
+
+ get_renamed_dir_portion(pair->one->path, pair->two->path,
+ &old_dir, &new_dir);
+ if (!old_dir)
+ /* Directory didn't change at all; ignore this one. */
+ continue;
+
+ entry = dir_rename_find_entry(dir_renames, old_dir);
+ if (!entry) {
+ entry = xmalloc(sizeof(*entry));
+ dir_rename_entry_init(entry, old_dir);
+ hashmap_put(dir_renames, entry);
+ } else {
+ free(old_dir);
+ }
+ item = string_list_lookup(&entry->possible_new_dirs, new_dir);
+ if (!item) {
+ item = string_list_insert(&entry->possible_new_dirs,
+ new_dir);
+ item->util = xcalloc(1, sizeof(int));
+ } else {
+ free(new_dir);
+ }
+ count = item->util;
+ *count += 1;
+ }
+
+ /*
+ * For each directory with files moved out of it, we find out which
+ * target directory received the most files so we can declare it to
+ * be the "winning" target location for the directory rename. This
+ * winner gets recorded in new_dir. If there is no winner
+ * (multiple target directories received the same number of files),
+ * we set non_unique_new_dir. Once we've determined the winner (or
+ * that there is no winner), we no longer need possible_new_dirs.
+ */
+ hashmap_iter_init(dir_renames, &iter);
+ while ((entry = hashmap_iter_next(&iter))) {
+ int max = 0;
+ int bad_max = 0;
+ char *best = NULL;
+
+ for (i = 0; i < entry->possible_new_dirs.nr; i++) {
+ int *count = entry->possible_new_dirs.items[i].util;
+
+ if (*count == max)
+ bad_max = max;
+ else if (*count > max) {
+ max = *count;
+ best = entry->possible_new_dirs.items[i].string;
+ }
+ }
+ if (bad_max == max)
+ entry->non_unique_new_dir = 1;
+ else {
+ assert(entry->new_dir.len == 0);
+ strbuf_addstr(&entry->new_dir, best);
+ }
+ /*
+ * The relevant directory sub-portions of the original full
+ * filepaths were xstrndup'ed before inserting into
+ * possible_new_dirs, and instead of manually iterating the
+ * list and free'ing each, just lie and tell
+ * possible_new_dirs that it did the strdup'ing so that it
+ * will free them for us.
+ */
+ entry->possible_new_dirs.strdup_strings = 1;
+ string_list_clear(&entry->possible_new_dirs, 1);
+ }
+
+ return dir_renames;
+}
+
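
A concrete (hedged) example of the majority logic above: if three renames moved files out of old/, two landing in new/ and one in other/, then max ends up as 2 with no tie, so new is recorded in entry->new_dir and old/ gets a directory rename. Had new/ and other/ received one file each, bad_max would equal max, non_unique_new_dir would be set, and directory-rename detection would be disabled for old/ (reported later by handle_path_level_conflicts()).
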
+static struct dir_rename_entry *check_dir_renamed(const char *path,
+ struct hashmap *dir_renames)
+{
+ char *temp = xstrdup(path);
+ char *end;
+ struct dir_rename_entry *entry = NULL;;
+
+ while ((end = strrchr(temp, '/'))) {
+ *end = '\0';
+ entry = dir_rename_find_entry(dir_renames, temp);
+ if (entry)
+ break;
+ }
+ free(temp);
+ return entry;
+}
+
+static void compute_collisions(struct hashmap *collisions,
+ struct hashmap *dir_renames,
+ struct diff_queue_struct *pairs)
+{
+ int i;
+
+ /*
+ * Multiple files can be mapped to the same path due to directory
+ * renames done by the other side of history. Since that other
+ * side of history could have merged multiple directories into one,
+ * if our side of history added the same file basename to each of
+ * those directories, then all N of them would get implicitly
+ * renamed by the directory rename detection into the same path,
+ * and we'd get an add/add/.../add conflict, and all those adds
+ * from *this* side of history. This is not representable in the
+ * index, and users aren't going to easily be able to make sense of
+ * it. So we need to provide a good warning about what's
+ * happening, and fall back to no-directory-rename detection
+ * behavior for those paths.
+ *
+ * See testcases 9e and all of section 5 from t6043 for examples.
+ */
+ collision_init(collisions);
+
+ for (i = 0; i < pairs->nr; ++i) {
+ struct dir_rename_entry *dir_rename_ent;
+ struct collision_entry *collision_ent;
+ char *new_path;
+ struct diff_filepair *pair = pairs->queue[i];
+
+ if (pair->status != 'A' && pair->status != 'R')
+ continue;
+ dir_rename_ent = check_dir_renamed(pair->two->path,
+ dir_renames);
+ if (!dir_rename_ent)
+ continue;
+
+ new_path = apply_dir_rename(dir_rename_ent, pair->two->path);
+ if (!new_path)
+ /*
+ * dir_rename_ent->non_unique_new_dir is true, which
+ * means there is no directory rename for us to use,
+ * which means it won't cause us any additional
+ * collisions.
+ */
+ continue;
+ collision_ent = collision_find_entry(collisions, new_path);
+ if (!collision_ent) {
+ collision_ent = xcalloc(1,
+ sizeof(struct collision_entry));
+ hashmap_entry_init(collision_ent, strhash(new_path));
+ hashmap_put(collisions, collision_ent);
+ collision_ent->target_file = new_path;
+ } else {
+ free(new_path);
+ }
+ string_list_insert(&collision_ent->source_files,
+ pair->two->path);
+ }
+}
+
+static char *check_for_directory_rename(struct merge_options *o,
+ const char *path,
+ struct tree *tree,
+ struct hashmap *dir_renames,
+ struct hashmap *dir_rename_exclusions,
+ struct hashmap *collisions,
+ int *clean_merge)
+{
+ char *new_path = NULL;
+ struct dir_rename_entry *entry = check_dir_renamed(path, dir_renames);
+ struct dir_rename_entry *oentry = NULL;
+
+ if (!entry)
+ return new_path;
+
+ /*
+ * This next part is a little weird. We do not want to do an
+ * implicit rename into a directory we renamed on our side, because
+ * that will result in a spurious rename/rename(1to2) conflict. An
+ * example:
+ * Base commit: dumbdir/afile, otherdir/bfile
+ * Side 1: smrtdir/afile, otherdir/bfile
+ * Side 2: dumbdir/afile, dumbdir/bfile
+ * Here, while working on Side 1, we could notice that otherdir was
+ * renamed/merged to dumbdir, and change the diff_filepair for
+ * otherdir/bfile into a rename into dumbdir/bfile. However, Side
+ * 2 will notice the rename from dumbdir to smrtdir, and do the
+ * transitive rename to move it from dumbdir/bfile to
+ * smrtdir/bfile. That gives us bfile in dumbdir vs being in
+ * smrtdir, a rename/rename(1to2) conflict. We really just want
+ * the file to end up in smrtdir. And the way to achieve that is
+ * to not let Side1 do the rename to dumbdir, since we know that is
+ * the source of one of our directory renames.
+ *
+ * That's why oentry and dir_rename_exclusions is here.
+ *
+ * As it turns out, this also prevents N-way transient rename
+ * confusion; See testcases 9c and 9d of t6043.
+ */
+ oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
+ if (oentry) {
+ output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+ "to %s, because %s itself was renamed."),
+ entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
+ } else {
+ new_path = handle_path_level_conflicts(o, path, entry,
+ collisions, tree);
+ *clean_merge &= (new_path != NULL);
+ }
+
+ return new_path;
+}
+
+static void apply_directory_rename_modifications(struct merge_options *o,
+ struct diff_filepair *pair,
+ char *new_path,
+ struct rename *re,
+ struct tree *tree,
+ struct tree *o_tree,
+ struct tree *a_tree,
+ struct tree *b_tree,
+ struct string_list *entries,
+ int *clean)
+{
+ struct string_list_item *item;
+ int stage = (tree == a_tree ? 2 : 3);
+ int update_wd;
+
+ /*
+ * In all cases where we can do directory rename detection,
+ * unpack_trees() will have read pair->two->path into the
+ * index and the working copy. We need to remove it so that
+ * we can instead place it at new_path. It is guaranteed to
+ * not be untracked (unpack_trees() would have errored out
+ * saying the file would have been overwritten), though it might
+ * still be dirty.
+ */
+ update_wd = !was_dirty(o, pair->two->path);
+ if (!update_wd)
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ pair->two->path);
+ remove_file(o, 1, pair->two->path, !update_wd);
+
+ /* Find or create a new re->dst_entry */
+ item = string_list_lookup(entries, new_path);
+ if (item) {
+ /*
+ * Since we're renaming on this side of history, and it's
+ * due to a directory rename on the other side of history
+ * (which we only allow when the directory in question no
+ * longer exists on the other side of history), the
+ * original entry for re->dst_entry is no longer
+ * necessary...
+ */
+ re->dst_entry->processed = 1;
+
+ /*
+ * ...because we'll be using this new one.
+ */
+ re->dst_entry = item->util;
+ } else {
+ /*
+ * re->dst_entry is for the before-dir-rename path, and we
+ * need it to hold information for the after-dir-rename
+ * path. Before creating a new entry, we need to mark the
+ * old one as unnecessary (...unless it is shared by
+ * src_entry, i.e. this didn't use to be a rename, in which
+ * case we can just allow the normal processing to happen
+ * for it).
+ */
+ if (pair->status == 'R')
+ re->dst_entry->processed = 1;
+
+ re->dst_entry = insert_stage_data(new_path,
+ o_tree, a_tree, b_tree,
+ entries);
+ item = string_list_insert(entries, new_path);
+ item->util = re->dst_entry;
+ }
+
+ /*
+ * Update the stage_data with the information about the path we are
+ * moving into place. That slot will be empty and available for us
+ * to write to because of the collision checks in
+ * handle_path_level_conflicts(). In other words,
+ * re->dst_entry->stages[stage].oid will be the null_oid, so it's
+ * open for us to write to.
+ *
+ * It may be tempting to actually update the index at this point as
+ * well, using update_stages_for_stage_data(), but as per the big
+ * "NOTE" in update_stages(), doing so will modify the current
+ * in-memory index which will break calls to would_lose_untracked()
+ * that we need to make. Instead, we need to just make sure that
+ * the various conflict_rename_*() functions update the index
+ * explicitly rather than relying on unpack_trees() to have done it.
+ */
+ get_tree_entry(&tree->object.oid,
+ pair->two->path,
+ &re->dst_entry->stages[stage].oid,
+ &re->dst_entry->stages[stage].mode);
+
+ /* Update pair status */
+ if (pair->status == 'A') {
+ /*
+ * Recording rename information for this add makes it look
+ * like a rename/delete conflict. Make sure we can
+ * correctly handle this as an add that was moved to a new
+ * directory instead of reporting a rename/delete conflict.
+ */
+ re->add_turned_into_rename = 1;
+ }
+ /*
+ * We don't actually look at pair->status again, but it seems
+ * pedagogically correct to adjust it.
+ */
+ pair->status = 'R';
+
+ /*
+ * Finally, record the new location.
+ */
+ pair->two->path = new_path;
+}
+
+/*
+ * Get information of all renames which occurred in 'pairs', making use of
+ * any implicit directory renames inferred from the other side of history.
+ * We need the three trees in the merge ('o_tree', 'a_tree' and 'b_tree')
+ * to be able to associate the correct cache entries with the rename
+ * information; tree is always equal to either a_tree or b_tree.
+ */
+static struct string_list *get_renames(struct merge_options *o,
+ struct diff_queue_struct *pairs,
+ struct hashmap *dir_renames,
+ struct hashmap *dir_rename_exclusions,
+ struct tree *tree,
+ struct tree *o_tree,
+ struct tree *a_tree,
+ struct tree *b_tree,
+ struct string_list *entries,
+ int *clean_merge)
+{
+ int i;
+ struct hashmap collisions;
+ struct hashmap_iter iter;
+ struct collision_entry *e;
+ struct string_list *renames;
+
+ compute_collisions(&collisions, dir_renames, pairs);
+ renames = xcalloc(1, sizeof(struct string_list));
+
+ for (i = 0; i < pairs->nr; ++i) {
+ struct string_list_item *item;
+ struct rename *re;
+ struct diff_filepair *pair = pairs->queue[i];
+ char *new_path; /* non-NULL only with directory renames */
+
+ if (pair->status != 'A' && pair->status != 'R') {
+ diff_free_filepair(pair);
+ continue;
+ }
+ new_path = check_for_directory_rename(o, pair->two->path, tree,
+ dir_renames,
+ dir_rename_exclusions,
+ &collisions,
+ clean_merge);
+ if (pair->status != 'R' && !new_path) {
+ diff_free_filepair(pair);
+ continue;
+ }
+
+ re = xmalloc(sizeof(*re));
+ re->processed = 0;
+ re->add_turned_into_rename = 0;
+ re->pair = pair;
+ item = string_list_lookup(entries, re->pair->one->path);
+ if (!item)
+ re->src_entry = insert_stage_data(re->pair->one->path,
+ o_tree, a_tree, b_tree, entries);
+ else
+ re->src_entry = item->util;
+
+ item = string_list_lookup(entries, re->pair->two->path);
+ if (!item)
+ re->dst_entry = insert_stage_data(re->pair->two->path,
+ o_tree, a_tree, b_tree, entries);
+ else
+ re->dst_entry = item->util;
+ item = string_list_insert(renames, pair->one->path);
+ item->util = re;
+ if (new_path)
+ apply_directory_rename_modifications(o, pair, new_path,
+ re, tree, o_tree,
+ a_tree, b_tree,
+ entries,
+ clean_merge);
+ }
+
+ hashmap_iter_init(&collisions, &iter);
+ while ((e = hashmap_iter_next(&iter))) {
+ free(e->target_file);
+ string_list_clear(&e->source_files, 0);
+ }
+ hashmap_free(&collisions, 1);
+ return renames;
+}
+
+static int process_renames(struct merge_options *o,
+ struct string_list *a_renames,
+ struct string_list *b_renames)
+{
+ int clean_merge = 1, i, j;
+ struct string_list a_by_dst = STRING_LIST_INIT_NODUP;
+ struct string_list b_by_dst = STRING_LIST_INIT_NODUP;
+ const struct rename *sre;
+
+ for (i = 0; i < a_renames->nr; i++) {
+ sre = a_renames->items[i].util;
+ string_list_insert(&a_by_dst, sre->pair->two->path)->util
+ = (void *)sre;
+ }
+ for (i = 0; i < b_renames->nr; i++) {
+ sre = b_renames->items[i].util;
+ string_list_insert(&b_by_dst, sre->pair->two->path)->util
+ = (void *)sre;
+ }
+
+ for (i = 0, j = 0; i < a_renames->nr || j < b_renames->nr;) {
+ struct string_list *renames1, *renames2Dst;
+ struct rename *ren1 = NULL, *ren2 = NULL;
+ const char *branch1, *branch2;
+ const char *ren1_src, *ren1_dst;
+ struct string_list_item *lookup;
+
+ if (i >= a_renames->nr) {
+ ren2 = b_renames->items[j++].util;
+ } else if (j >= b_renames->nr) {
+ ren1 = a_renames->items[i++].util;
+ } else {
+ int compare = strcmp(a_renames->items[i].string,
+ b_renames->items[j].string);
if (compare <= 0)
ren1 = a_renames->items[i++].util;
if (compare >= 0)
const char *ren2_dst = ren2->pair->two->path;
enum rename_type rename_type;
if (strcmp(ren1_src, ren2_src) != 0)
- die("BUG: ren1_src != ren2_src");
+ BUG("ren1_src != ren2_src");
ren2->dst_entry->processed = 1;
ren2->processed = 1;
if (strcmp(ren1_dst, ren2_dst) != 0) {
ren2 = lookup->util;
ren2_dst = ren2->pair->two->path;
if (strcmp(ren1_dst, ren2_dst) != 0)
- die("BUG: ren1_dst != ren2_dst");
+ BUG("ren1_dst != ren2_dst");
clean_merge = 0;
ren2->processed = 1;
* add-source case).
*/
remove_file(o, 1, ren1_src,
- renamed_stage == 2 || !was_tracked(ren1_src));
+ renamed_stage == 2 || !was_tracked(o, ren1_src));
oidcpy(&src_other.oid,
&ren1->src_entry->stages[other_stage].oid);
dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
try_merge = 0;
- if (oid_eq(&src_other.oid, &null_oid)) {
+ if (oid_eq(&src_other.oid, &null_oid) &&
+ ren1->add_turned_into_rename) {
+ setup_rename_conflict_info(RENAME_DIR,
+ ren1->pair,
+ NULL,
+ branch1,
+ branch2,
+ ren1->dst_entry,
+ NULL,
+ o,
+ NULL,
+ NULL);
+ } else if (oid_eq(&src_other.oid, &null_oid)) {
setup_rename_conflict_info(RENAME_DELETE,
ren1->pair,
NULL,
return clean_merge;
}
+struct rename_info {
+ struct string_list *head_renames;
+ struct string_list *merge_renames;
+};
+
+static void initial_cleanup_rename(struct diff_queue_struct *pairs,
+ struct hashmap *dir_renames)
+{
+ struct hashmap_iter iter;
+ struct dir_rename_entry *e;
+
+ hashmap_iter_init(dir_renames, &iter);
+ while ((e = hashmap_iter_next(&iter))) {
+ free(e->dir);
+ strbuf_release(&e->new_dir);
+ /* possible_new_dirs already cleared in get_directory_renames */
+ }
+ hashmap_free(dir_renames, 1);
+ free(dir_renames);
+
+ free(pairs->queue);
+ free(pairs);
+}
+
+static int handle_renames(struct merge_options *o,
+ struct tree *common,
+ struct tree *head,
+ struct tree *merge,
+ struct string_list *entries,
+ struct rename_info *ri)
+{
+ struct diff_queue_struct *head_pairs, *merge_pairs;
+ struct hashmap *dir_re_head, *dir_re_merge;
+ int clean = 1;
+
+ ri->head_renames = NULL;
+ ri->merge_renames = NULL;
+
+ if (!merge_detect_rename(o))
+ return 1;
+
+ head_pairs = get_diffpairs(o, common, head);
+ merge_pairs = get_diffpairs(o, common, merge);
+
+ dir_re_head = get_directory_renames(head_pairs, head);
+ dir_re_merge = get_directory_renames(merge_pairs, merge);
+
+ handle_directory_level_conflicts(o,
+ dir_re_head, head,
+ dir_re_merge, merge);
+
+ ri->head_renames = get_renames(o, head_pairs,
+ dir_re_merge, dir_re_head, head,
+ common, head, merge, entries,
+ &clean);
+ if (clean < 0)
+ goto cleanup;
+ ri->merge_renames = get_renames(o, merge_pairs,
+ dir_re_head, dir_re_merge, merge,
+ common, head, merge, entries,
+ &clean);
+ if (clean < 0)
+ goto cleanup;
+ clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+
+cleanup:
+ /*
+ * Some cleanup is deferred until final_cleanup_renames() because the
+ * data structures are still needed and referenced in
+ * process_entry(). But there are a few things we can free now.
+ */
+ initial_cleanup_rename(head_pairs, dir_re_head);
+ initial_cleanup_rename(merge_pairs, dir_re_merge);
+
+ return clean;
+}
+
+static void final_cleanup_rename(struct string_list *rename)
+{
+ const struct rename *re;
+ int i;
+
+ if (rename == NULL)
+ return;
+
+ for (i = 0; i < rename->nr; i++) {
+ re = rename->items[i].util;
+ diff_free_filepair(re->pair);
+ }
+ string_list_clear(rename, 1);
+ free(rename);
+}
+
+static void final_cleanup_renames(struct rename_info *re_info)
+{
+ final_cleanup_rename(re_info->head_renames);
+ final_cleanup_rename(re_info->merge_renames);
+}
+
static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
{
return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
static int merge_content(struct merge_options *o,
const char *path,
+ int is_dirty,
struct object_id *o_oid, int o_mode,
struct object_id *a_oid, int a_mode,
struct object_id *b_oid, int b_mode,
S_ISGITLINK(pair1->two->mode)))
df_conflict_remains = 1;
}
- if (merge_file_special_markers(o, &one, &a, &b,
+ if (merge_file_special_markers(o, &one, &a, &b, path,
o->branch1, path1,
o->branch2, path2, &mfi))
return -1;
- if (mfi.clean && !df_conflict_remains &&
- oid_eq(&mfi.oid, a_oid) && mfi.mode == a_mode) {
- int path_renamed_outside_HEAD;
+ /*
+ * We can skip updating the working tree file iff:
+ * a) The merge is clean
+ * b) The merge matches what was in HEAD (content, mode, pathname)
+ * c) The target path is usable (i.e. not involved in D/F conflict)
+ */
+ if (mfi.clean &&
+ was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
+ !df_conflict_remains) {
output(o, 3, _("Skipped %s (merged same as existing)"), path);
- /*
- * The content merge resulted in the same file contents we
- * already had. We can return early if those file contents
- * are recorded at the correct path (which may not be true
- * if the merge involves a rename).
- */
- path_renamed_outside_HEAD = !path2 || !strcmp(path, path2);
- if (!path_renamed_outside_HEAD) {
- add_cacheinfo(o, mfi.mode, &mfi.oid, path,
- 0, (!o->call_depth), 0);
- return mfi.clean;
- }
- } else
- output(o, 2, _("Auto-merging %s"), path);
+ if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
+ 0, (!o->call_depth && !is_dirty), 0))
+ return -1;
+ return mfi.clean;
+ }
if (!mfi.clean) {
if (S_ISGITLINK(mfi.mode))
return -1;
}
- if (df_conflict_remains) {
+ if (df_conflict_remains || is_dirty) {
char *new_path;
if (o->call_depth) {
remove_file_from_cache(path);
if (update_stages(o, path, &one, &a, &b))
return -1;
} else {
- int file_from_stage2 = was_tracked(path);
+ int file_from_stage2 = was_tracked(o, path);
struct diff_filespec merged;
oidcpy(&merged.oid, &mfi.oid);
merged.mode = mfi.mode;
}
new_path = unique_path(o, path, rename_conflict_info->branch1);
+ if (is_dirty) {
+ output(o, 1, _("Refusing to lose dirty file at %s"),
+ path);
+ }
output(o, 1, _("Adding as %s instead"), new_path);
if (update_file(o, 0, &mfi.oid, mfi.mode, new_path)) {
free(new_path);
mfi.clean = 0;
} else if (update_file(o, mfi.clean, &mfi.oid, mfi.mode, path))
return -1;
- return mfi.clean;
+ return !is_dirty && mfi.clean;
+}
+
+static int conflict_rename_normal(struct merge_options *o,
+ const char *path,
+ struct object_id *o_oid, unsigned int o_mode,
+ struct object_id *a_oid, unsigned int a_mode,
+ struct object_id *b_oid, unsigned int b_mode,
+ struct rename_conflict_info *ci)
+{
+ /* Merge the content and write it out */
+ return merge_content(o, path, was_dirty(o, path),
+ o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+ ci);
}
/* Per entry merge function */
switch (conflict_info->rename_type) {
case RENAME_NORMAL:
case RENAME_ONE_FILE_TO_ONE:
- clean_merge = merge_content(o, path,
- o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
- conflict_info);
+ clean_merge = conflict_rename_normal(o,
+ path,
+ o_oid, o_mode,
+ a_oid, a_mode,
+ b_oid, b_mode,
+ conflict_info);
+ break;
+ case RENAME_DIR:
+ clean_merge = 1;
+ if (conflict_rename_dir(o,
+ conflict_info->pair1,
+ conflict_info->branch1,
+ conflict_info->branch2))
+ clean_merge = -1;
break;
case RENAME_DELETE:
clean_merge = 0;
} else if (a_oid && b_oid) {
/* Case C: Added in both (check for same permissions) and */
/* case D: Modified in both, but differently. */
- clean_merge = merge_content(o, path,
+ int is_dirty = 0; /* unpack_trees would have bailed if dirty */
+ clean_merge = merge_content(o, path, is_dirty,
o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
NULL);
} else if (!o_oid && !a_oid && !b_oid) {
*/
remove_file(o, 1, path, !a_mode);
} else
- die("BUG: fatal merge failure, shouldn't happen.");
+ BUG("fatal merge failure, shouldn't happen.");
return clean_merge;
}
return 1;
}
- code = git_merge_trees(o->call_depth, common, head, merge);
+ code = unpack_trees_start(o, common, head, merge);
if (code != 0) {
if (show(o, 4) || o->call_depth)
err(o, _("merging of trees %s and %s failed"),
oid_to_hex(&head->object.oid),
oid_to_hex(&merge->object.oid));
+ unpack_trees_finish(o);
return -1;
}
if (unmerged_cache()) {
- struct string_list *entries, *re_head, *re_merge;
+ struct string_list *entries;
+ struct rename_info re_info;
int i;
/*
* Only need the hashmap while processing entries, so
get_files_dirs(o, merge);
entries = get_unmerged();
- re_head = get_renames(o, head, common, head, merge, entries);
- re_merge = get_renames(o, merge, common, head, merge, entries);
- clean = process_renames(o, re_head, re_merge);
+ clean = handle_renames(o, common, head, merge, entries,
+ &re_info);
record_df_conflict_files(o, entries);
if (clean < 0)
goto cleanup;
for (i = 0; i < entries->nr; i++) {
struct stage_data *e = entries->items[i].util;
if (!e->processed)
- die("BUG: unprocessed path??? %s",
+ BUG("unprocessed path??? %s",
entries->items[i].string);
}
cleanup:
- string_list_clear(re_merge, 0);
- string_list_clear(re_head, 0);
+ final_cleanup_renames(&re_info);
+
string_list_clear(entries, 1);
+ free(entries);
hashmap_free(&o->current_file_dir_set, 1);
- free(re_merge);
- free(re_head);
- free(entries);
-
- if (clean < 0)
+ if (clean < 0) {
+ unpack_trees_finish(o);
return clean;
+ }
}
else
clean = 1;
+ unpack_trees_finish(o);
+
if (o->call_depth && !(*result = write_tree_from_memory(o)))
return -1;
read_cache();
o->ancestor = "merged common ancestors";
- clean = merge_trees(o, h1->tree, h2->tree, merged_common_ancestors->tree,
+ clean = merge_trees(o, get_commit_tree(h1), get_commit_tree(h2),
+ get_commit_tree(merged_common_ancestors),
&mrtree);
if (clean < 0) {
flush_output(o);
static void merge_recursive_config(struct merge_options *o)
{
+ char *value = NULL;
git_config_get_int("merge.verbosity", &o->verbosity);
git_config_get_int("diff.renamelimit", &o->diff_rename_limit);
git_config_get_int("merge.renamelimit", &o->merge_rename_limit);
+ if (!git_config_get_string("diff.renames", &value)) {
+ o->diff_detect_rename = git_config_rename("diff.renames", value);
+ free(value);
+ }
+ if (!git_config_get_string("merge.renames", &value)) {
+ o->merge_detect_rename = git_config_rename("merge.renames", value);
+ free(value);
+ }
git_config(git_xmerge_config, NULL);
}
o->diff_rename_limit = -1;
o->merge_rename_limit = -1;
o->renormalize = 0;
- o->detect_rename = 1;
+ o->diff_detect_rename = -1;
+ o->merge_detect_rename = -1;
merge_recursive_config(o);
merge_verbosity = getenv("GIT_MERGE_VERBOSITY");
if (merge_verbosity)
else if (!strcmp(s, "no-renormalize"))
o->renormalize = 0;
else if (!strcmp(s, "no-renames"))
- o->detect_rename = 0;
+ o->merge_detect_rename = 0;
else if (!strcmp(s, "find-renames")) {
- o->detect_rename = 1;
+ o->merge_detect_rename = 1;
o->rename_score = 0;
}
else if (skip_prefix(s, "find-renames=", &arg) ||
skip_prefix(s, "rename-threshold=", &arg)) {
if ((o->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
return -1;
- o->detect_rename = 1;
+ o->merge_detect_rename = 1;
}
else
return -1;
#include "cache.h"
#include "commit.h"
#include "refs.h"
+ #include "object-store.h"
#include "diff.h"
#include "diffcore.h"
#include "xdiff-interface.h"
printf("Using remote notes for %s\n",
oid_to_hex(&p->obj));
if (add_note(t, &p->obj, &p->remote, combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
return 0;
case NOTES_MERGE_RESOLVE_UNION:
if (o->verbosity >= 2)
trace_printf("\t\t\tno local change, adopted remote\n");
if (add_note(t, &p->obj, &p->remote,
combine_notes_overwrite))
- die("BUG: combine_notes_overwrite failed");
+ BUG("combine_notes_overwrite failed");
} else {
/* need file-level merge between local and remote */
trace_printf("\t\t\tneed content-level merge\n");
printf("No merge base found; doing history-less merge\n");
} else if (!bases->next) {
base_oid = &bases->item->object.oid;
- base_tree_oid = &bases->item->tree->object.oid;
+ base_tree_oid = get_commit_tree_oid(bases->item);
if (o->verbosity >= 4)
printf("One merge base found (%.7s)\n",
oid_to_hex(base_oid));
} else {
/* TODO: How to handle multiple merge-bases? */
base_oid = &bases->item->object.oid;
- base_tree_oid = &bases->item->tree->object.oid;
+ base_tree_oid = get_commit_tree_oid(bases->item);
if (o->verbosity >= 3)
printf("Multiple merge bases found. Using the first "
"(%.7s)\n", oid_to_hex(base_oid));
goto found_result;
}
- result = merge_from_diffs(o, base_tree_oid, &local->tree->object.oid,
- &remote->tree->object.oid, local_tree);
+ result = merge_from_diffs(o, base_tree_oid,
+ get_commit_tree_oid(local),
+ get_commit_tree_oid(remote), local_tree);
if (result != 0) { /* non-trivial merge (with or without conflicts) */
/* Commit (partial) result */
int index_version;
time_t mtime;
int pack_fd;
+ int index; /* for builtin/pack-objects.c */
unsigned pack_local:1,
pack_keep:1,
+ pack_keep_in_core:1,
freshened:1,
do_not_close:1,
pack_promisor:1;
void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size);
-extern int has_loose_object_nonlocal(const unsigned char *sha1);
+ extern void *read_object_file_extended(const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size, int lookup_replace);
+ static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
+ {
+ return read_object_file_extended(oid, type, size, 1);
+ }
+
+ /* Read and unpack an object file into memory, write memory to an object file */
+ int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
+
+ extern int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+ extern int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+ extern int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
+
+ extern int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
+
+ extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
+ /*
+ * Open the loose object at path, check its hash, and return the contents,
+ * type, and size. If the object is a blob, then "contents" may return NULL,
+ * to allow streaming of large blobs.
+ *
+ * Returns 0 on success, negative on error (details may be written to stderr).
+ */
+ int read_loose_object(const char *path,
+ const struct object_id *expected_oid,
+ enum object_type *type,
+ unsigned long *size,
+ void **contents);
+
+ /*
+ * Convenience for sha1_object_info_extended() with a NULL struct
+ * object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass
+ * nonzero flags to also set other flags.
+ */
+ extern int has_sha1_file_with_flags(const unsigned char *sha1, int flags);
+ static inline int has_sha1_file(const unsigned char *sha1)
+ {
+ return has_sha1_file_with_flags(sha1, 0);
+ }
+
+ /* Same as the above, except for struct object_id. */
+ extern int has_object_file(const struct object_id *oid);
+ extern int has_object_file_with_flags(const struct object_id *oid, int flags);
+
+ /*
+ * Return true iff an alternate object database has a loose object
+ * with the specified name. This function does not respect replace
+ * references.
+ */
+ extern int has_loose_object_nonlocal(const struct object_id *);
+
+ extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
+
+ struct object_info {
+ /* Request */
+ enum object_type *typep;
+ unsigned long *sizep;
+ off_t *disk_sizep;
+ unsigned char *delta_base_sha1;
+ struct strbuf *type_name;
+ void **contentp;
+
+ /* Response */
+ enum {
+ OI_CACHED,
+ OI_LOOSE,
+ OI_PACKED,
+ OI_DBCACHED
+ } whence;
+ union {
+ /*
+ * struct {
+ * ... Nothing to expose in this case
+ * } cached;
+ * struct {
+ * ... Nothing to expose in this case
+ * } loose;
+ */
+ struct {
+ struct packed_git *pack;
+ off_t offset;
+ unsigned int is_delta;
+ } packed;
+ } u;
+ };
+
+ /*
+ * Initializer for a "struct object_info" that wants no items. You may
+ * also memset() the memory to all-zeroes.
+ */
+ #define OBJECT_INFO_INIT {NULL}
+
+ /* Invoke lookup_replace_object() on the given hash */
+ #define OBJECT_INFO_LOOKUP_REPLACE 1
+ /* Allow reading from a loose object file of unknown/bogus type */
+ #define OBJECT_INFO_ALLOW_UNKNOWN_TYPE 2
+ /* Do not check cached storage */
+ #define OBJECT_INFO_SKIP_CACHED 4
+ /* Do not retry packed storage after checking packed and loose storage */
+ #define OBJECT_INFO_QUICK 8
+ /* Do not check loose object */
+ #define OBJECT_INFO_IGNORE_LOOSE 16
+
+ int oid_object_info_extended(struct repository *r,
+ const struct object_id *,
+ struct object_info *, unsigned flags);
+
#endif /* OBJECT_STORE_H */
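/*
 * Illustration only, not part of the patch: a minimal caller of the
 * oid_object_info_extended() API declared above, asking for just an
 * object's type and size.  It assumes the usual git-internal includes
 * (cache.h and this header) and that "the_repository" is in scope, as it
 * is in most builtins; report_type_and_size() itself is hypothetical.
 */
static int report_type_and_size(const struct object_id *oid)
{
	enum object_type type;
	unsigned long size;
	struct object_info oi = OBJECT_INFO_INIT;

	oi.typep = &type;
	oi.sizep = &size;
	if (oid_object_info_extended(the_repository, oid, &oi,
				     OBJECT_INFO_LOOKUP_REPLACE) < 0)
		return error("could not read %s", oid_to_hex(oid));
	printf("%s %lu\n", type_name(type), size);
	return 0;
}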
#include "cache.h"
#include "object.h"
#include "replace-object.h"
+ #include "object-store.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
} else if (type == OBJ_COMMIT) {
struct commit *commit = lookup_commit(oid);
if (commit) {
- if (parse_commit_buffer(commit, buffer, size))
+ if (parse_commit_buffer(commit, buffer, size, 1))
return NULL;
if (!get_cached_commit_buffer(commit, NULL)) {
set_commit_buffer(commit, buffer, size);
o->tag_state = allocate_alloc_state();
o->object_state = allocate_alloc_state();
+ o->is_shallow = -1;
+ o->shallow_stat = xcalloc(1, sizeof(*o->shallow_stat));
+
return o;
}
FREE_AND_NULL(o->objectdir);
FREE_AND_NULL(o->alternate_db);
+ oidmap_free(o->replace_map, 1);
+ FREE_AND_NULL(o->replace_map);
+
free_alt_odbs(o);
o->alt_odb_tail = NULL;
struct alloc_state *tag_state;
struct alloc_state *object_state;
unsigned commit_count;
+
+ /* parent substitutions from .git/info/grafts and .git/shallow */
+ struct commit_graft **grafts;
+ int grafts_alloc, grafts_nr;
+
+ int is_shallow;
+ struct stat_validity *shallow_stat;
+ char *alternate_shallow_file;
+
+ int commit_graft_prepared;
};
struct parsed_object_pool *parsed_object_pool_new(void);
#define OBJECT_ARRAY_INIT { 0, 0, NULL }
-#define TYPE_BITS 3
/*
* object flag allocation:
* revision.h: 0---------10 26
* bundle.c: 16
* http-push.c: 16-----19
* commit.c: 16-----19
- * sha1_name.c: 20
+ * sha1-name.c: 20
* list-objects-filter.c: 21
* builtin/fsck.c: 0--3
* builtin/index-pack.c: 2021
* builtin/pack-objects.c: 20
* builtin/reflog.c: 10--12
+ * builtin/show-branch.c: 0-------------------------------------------26
* builtin/unpack-objects.c: 2021
*/
#define FLAG_BITS 27
#include "cache.h"
+ #include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "diff.h"
/**
* Build the initial type index for the packfile
*/
-void bitmap_writer_build_type_index(struct pack_idx_entry **index,
+void bitmap_writer_build_type_index(struct packing_data *to_pack,
+ struct pack_idx_entry **index,
uint32_t index_nr)
{
uint32_t i;
writer.trees = ewah_new();
writer.blobs = ewah_new();
writer.tags = ewah_new();
+ ALLOC_ARRAY(to_pack->in_pack_pos, to_pack->nr_objects);
for (i = 0; i < index_nr; ++i) {
struct object_entry *entry = (struct object_entry *)index[i];
enum object_type real_type;
- entry->in_pack_pos = i;
+ oe_set_in_pack_pos(to_pack, entry, i);
- switch (entry->type) {
+ switch (oe_type(entry)) {
case OBJ_COMMIT:
case OBJ_TREE:
case OBJ_BLOB:
case OBJ_TAG:
- real_type = entry->type;
+ real_type = oe_type(entry);
break;
default:
default:
die("Missing type information for %s (%d/%d)",
oid_to_hex(&entry->idx.oid), real_type,
- entry->type);
+ oe_type(entry));
}
}
}
"(object %s is missing)", sha1_to_hex(sha1));
}
- return entry->in_pack_pos;
+ return oe_in_pack_pos(writer.to_pack, entry);
}
static void show_object(struct object *object, const char *name, void *data)
sha1_pos(stored->commit->object.oid.hash, index, index_nr, sha1_access);
if (commit_pos < 0)
- die("BUG: trying to write commit not in index");
+ BUG("trying to write commit not in index");
hashwrite_be32(f, commit_pos);
hashwrite_u8(f, stored->xor_offset);
if (options & BITMAP_OPT_HASH_CACHE)
write_hash_cache(f, index, index_nr);
- hashclose(f, NULL, CSUM_FSYNC);
+ finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
if (adjust_shared_perm(tmp_file.buf))
die_errno("unable to make temporary bitmap file readable");
#include "oidset.h"
+ /* in object-store.h */
+ struct packed_git;
+ struct object_info;
+ enum object_type;
+
/*
* Generate the filename to be used for a pack file with checksum "sha1" and
* extension "ext". The result is written into the strbuf "buf", overwriting
extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
extern void close_pack_windows(struct packed_git *);
+extern void close_pack(struct packed_git *);
extern void close_all_packs(struct raw_object_store *o);
extern void unuse_pack(struct pack_window **);
extern void clear_delta_base_cache(void);
* Iff a pack file in the given repository contains the object named by sha1,
* return true and store its location to e.
*/
-extern int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e);
+extern int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
-extern int has_sha1_pack(const unsigned char *sha1);
+extern int has_object_pack(const struct object_id *oid);
extern int has_pack_index(const unsigned char *sha1);
struct packed_git *pack,
uint32_t pos,
void *data);
+extern int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn, void *data);
extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
/*
int is_ntfs_dotgit(const char *name)
{
- int len;
+ size_t len;
for (len = 0; ; len++)
if (!name[len] || name[len] == '\\' || is_dir_sep(name[len])) {
}
}
+static int is_ntfs_dot_generic(const char *name,
+ const char *dotgit_name,
+ size_t len,
+ const char *dotgit_ntfs_shortname_prefix)
+{
+ int saw_tilde;
+ size_t i;
+
+ if ((name[0] == '.' && !strncasecmp(name + 1, dotgit_name, len))) {
+ i = len + 1;
+only_spaces_and_periods:
+ for (;;) {
+ char c = name[i++];
+ if (!c)
+ return 1;
+ if (c != ' ' && c != '.')
+ return 0;
+ }
+ }
+
+ /*
+ * Is it a regular NTFS short name, i.e. shortened to 6 characters,
+ * followed by ~1, ... ~4?
+ */
+ if (!strncasecmp(name, dotgit_name, 6) && name[6] == '~' &&
+ name[7] >= '1' && name[7] <= '4') {
+ i = 8;
+ goto only_spaces_and_periods;
+ }
+
+ /*
+ * Is it a fall-back NTFS short name (for details, see
+ * https://en.wikipedia.org/wiki/8.3_filename)?
+ */
+ for (i = 0, saw_tilde = 0; i < 8; i++)
+ if (name[i] == '\0')
+ return 0;
+ else if (saw_tilde) {
+ if (name[i] < '0' || name[i] > '9')
+ return 0;
+ } else if (name[i] == '~') {
+ if (name[++i] < '1' || name[i] > '9')
+ return 0;
+ saw_tilde = 1;
+ } else if (i >= 6)
+ return 0;
+ else if (name[i] < 0) {
+ /*
+ * We know our needles contain only ASCII, so we clamp
+ * here to make the results of tolower() sane.
+ */
+ return 0;
+ } else if (tolower(name[i]) != dotgit_ntfs_shortname_prefix[i])
+ return 0;
+
+ goto only_spaces_and_periods;
+}
+
+/*
+ * Inline helper to make sure the compiler resolves strlen() on literals at
+ * compile time.
+ */
+static inline int is_ntfs_dot_str(const char *name, const char *dotgit_name,
+ const char *dotgit_ntfs_shortname_prefix)
+{
+ return is_ntfs_dot_generic(name, dotgit_name, strlen(dotgit_name),
+ dotgit_ntfs_shortname_prefix);
+}
+
+int is_ntfs_dotgitmodules(const char *name)
+{
+ return is_ntfs_dot_str(name, "gitmodules", "gi7eba");
+}
+
+int is_ntfs_dotgitignore(const char *name)
+{
+ return is_ntfs_dot_str(name, "gitignore", "gi250a");
+}
+
+int is_ntfs_dotgitattributes(const char *name)
+{
+ return is_ntfs_dot_str(name, "gitattributes", "gi7d29");
+}
+
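/*
 * Illustration only, not part of the patch: a simplified, standalone
 * check in the spirit of the helpers above.  It reports whether "name"
 * could be a regular NTFS-generated short name for a file whose long
 * name starts with "prefix" (e.g. "GITMOD~1" for ".gitmodules"),
 * ignoring the checksum-style fallback names ("GI7EBA~1" and friends)
 * that the real code also catches.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int looks_like_shortname(const char *name, const char *prefix)
{
	size_t i;

	for (i = 0; i < 6 && prefix[i]; i++)
		if (tolower((unsigned char)name[i]) != prefix[i])
			return 0;
	if (name[i] != '~' || name[i + 1] < '1' || name[i + 1] > '9')
		return 0;
	/* NTFS ignores trailing spaces and periods when resolving names */
	for (i += 2; name[i]; i++)
		if (name[i] != ' ' && name[i] != '.')
			return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", looks_like_shortname("GITMOD~1", "gitmod"));    /* 1 */
	printf("%d\n", looks_like_shortname("GITMOD~1. ", "gitmod"));  /* 1 */
	printf("%d\n", looks_like_shortname("README", "gitmod"));      /* 0 */
	return 0;
}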
int looks_like_command_line_option(const char *str)
{
return str && str[0] == '-';
return NULL;
}
- GIT_PATH_FUNC(git_path_cherry_pick_head, "CHERRY_PICK_HEAD")
- GIT_PATH_FUNC(git_path_revert_head, "REVERT_HEAD")
- GIT_PATH_FUNC(git_path_squash_msg, "SQUASH_MSG")
- GIT_PATH_FUNC(git_path_merge_msg, "MERGE_MSG")
- GIT_PATH_FUNC(git_path_merge_rr, "MERGE_RR")
- GIT_PATH_FUNC(git_path_merge_mode, "MERGE_MODE")
- GIT_PATH_FUNC(git_path_merge_head, "MERGE_HEAD")
- GIT_PATH_FUNC(git_path_fetch_head, "FETCH_HEAD")
- GIT_PATH_FUNC(git_path_shallow, "shallow")
+ REPO_GIT_PATH_FUNC(cherry_pick_head, "CHERRY_PICK_HEAD")
+ REPO_GIT_PATH_FUNC(revert_head, "REVERT_HEAD")
+ REPO_GIT_PATH_FUNC(squash_msg, "SQUASH_MSG")
+ REPO_GIT_PATH_FUNC(merge_msg, "MERGE_MSG")
+ REPO_GIT_PATH_FUNC(merge_rr, "MERGE_RR")
+ REPO_GIT_PATH_FUNC(merge_mode, "MERGE_MODE")
+ REPO_GIT_PATH_FUNC(merge_head, "MERGE_HEAD")
+ REPO_GIT_PATH_FUNC(fetch_head, "FETCH_HEAD")
+ REPO_GIT_PATH_FUNC(shallow, "shallow")
#include "cache-tree.h"
#include "refs.h"
#include "dir.h"
+ #include "object-store.h"
#include "tree.h"
#include "commit.h"
#include "blob.h"
int size, len;
struct cache_entry *ce, *ret;
- if (!verify_path(path)) {
+ if (!verify_path(path, mode)) {
error("Invalid path '%s'", path);
return NULL;
}
* Also, we don't want double slashes or slashes at the
* end that can make pathnames ambiguous.
*/
-static int verify_dotfile(const char *rest)
+static int verify_dotfile(const char *rest, unsigned mode)
{
/*
* The first character was '.', but that
switch (*rest) {
/*
- * ".git" followed by NUL or slash is bad. This
- * shares the path end test with the ".." case.
+ * ".git" followed by NUL or slash is bad. Note that we match
+ * case-insensitively here, even if ignore_case is not set.
+ * This outlaws ".GIT" everywhere out of an abundance of caution,
+ * since there's really no good reason to allow it.
+ *
+ * Once we've seen ".git", we can also find ".gitmodules", etc (also
+ * case-insensitively).
*/
case 'g':
case 'G':
break;
if (rest[2] != 't' && rest[2] != 'T')
break;
- rest += 2;
- /* fallthrough */
+ if (rest[3] == '\0' || is_dir_sep(rest[3]))
+ return 0;
+ if (S_ISLNK(mode)) {
+ rest += 3;
+ if (skip_iprefix(rest, "modules", &rest) &&
+ (*rest == '\0' || is_dir_sep(*rest)))
+ return 0;
+ }
+ break;
case '.':
if (rest[1] == '\0' || is_dir_sep(rest[1]))
return 0;
return 1;
}
-int verify_path(const char *path)
+int verify_path(const char *path, unsigned mode)
{
char c;
return 1;
if (is_dir_sep(c)) {
inside:
- if (protect_hfs && is_hfs_dotgit(path))
- return 0;
- if (protect_ntfs && is_ntfs_dotgit(path))
- return 0;
+ if (protect_hfs) {
+ if (is_hfs_dotgit(path))
+ return 0;
+ if (S_ISLNK(mode)) {
+ if (is_hfs_dotgitmodules(path))
+ return 0;
+ }
+ }
+ if (protect_ntfs) {
+ if (is_ntfs_dotgit(path))
+ return 0;
+ if (S_ISLNK(mode)) {
+ if (is_ntfs_dotgitmodules(path))
+ return 0;
+ }
+ }
+
c = *path++;
- if ((c == '.' && !verify_dotfile(path)) ||
+ if ((c == '.' && !verify_dotfile(path, mode)) ||
is_dir_sep(c) || c == '\0')
return 0;
}
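/*
 * Illustration only, not part of the patch: a stripped-down, standalone
 * version of the per-component check that verify_dotfile()/verify_path()
 * perform above.  It rejects a ".git" component in any case, and (when
 * the entry being added is a symlink) ".gitmodules" as well; the real
 * code additionally refuses the HFS+ and NTFS aliases of those names.
 */
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* strncasecmp */

static int component_is(const char *comp, size_t len, const char *word)
{
	return len == strlen(word) && !strncasecmp(comp, word, len);
}

static int path_is_suspicious(const char *path, int is_symlink)
{
	const char *comp = path;

	while (*comp) {
		size_t len = strcspn(comp, "/");

		if (component_is(comp, len, ".git"))
			return 1;
		if (is_symlink && component_is(comp, len, ".gitmodules"))
			return 1;
		comp += len;
		while (*comp == '/')
			comp++;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", path_is_suspicious("a/.GIT/config", 0));	/* 1 */
	printf("%d\n", path_is_suspicious(".gitmodules", 1));	/* 1 */
	printf("%d\n", path_is_suspicious(".gitmodules", 0));	/* 0 */
	return 0;
}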
if (!ok_to_add)
return -1;
- if (!verify_path(ce->name))
+ if (!verify_path(ce->name, ce->ce_mode))
return error("Invalid path '%s'", ce->name);
if (!skip_df_check &&
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
+ hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
- char *base_sha1_hex;
+ char *base_oid_hex;
char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
- if (!split_index || is_null_sha1(split_index->base_sha1)) {
+ if (!split_index || is_null_oid(&split_index->base_oid)) {
post_read_index_from(istate);
return ret;
}
else
split_index->base = xcalloc(1, sizeof(*split_index->base));
- base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
+ base_oid_hex = oid_to_hex(&split_index->base_oid);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
ret = do_read_index(split_index->base, base_path, 1);
- if (hashcmp(split_index->base_sha1, split_index->base->sha1))
+ if (oidcmp(&split_index->base_oid, &split_index->base->oid))
die("broken index, expect %s in %s, got %s",
- base_sha1_hex, base_path,
- sha1_to_hex(split_index->base->sha1));
+ base_oid_hex, base_path,
+ oid_to_hex(&split_index->base->oid));
freshen_shared_index(base_path, 0);
merge_base_index(istate);
if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, hash))
+ if (hashcmp(istate->oid.hash, hash))
goto out;
close(fd);
if (!istate->version) {
istate->version = get_index_format_default();
- if (getenv("GIT_TEST_SPLIT_INDEX"))
+ if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
init_split_index(istate);
}
return -1;
}
- if (ce_flush(&c, newfd, istate->sha1))
+ if (ce_flush(&c, newfd, istate->oid.hash))
return -1;
if (close_tempfile_gently(tempfile)) {
error(_("could not close '%s'"), tempfile->filename.buf);
return ret;
}
ret = rename_tempfile(temp,
- git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
+ git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
if (!ret) {
- hashcpy(si->base_sha1, si->base->sha1);
- clean_shared_index_files(sha1_to_hex(si->base->sha1));
+ oidcpy(&si->base_oid, &si->base->oid);
+ clean_shared_index_files(oid_to_hex(&si->base->oid));
}
return ret;
if (!si || alternate_index_output ||
(istate->cache_changed & ~EXTMASK)) {
if (si)
- hashclr(si->base_sha1);
+ oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
goto out;
}
- if (getenv("GIT_TEST_SPLIT_INDEX")) {
- int v = si->base_sha1[0];
+ if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
+ int v = si->base_oid.hash[0];
if ((v & 15) < 6)
istate->cache_changed |= SPLIT_INDEX_ORDERED;
}
temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
if (!temp) {
- hashclr(si->base_sha1);
+ oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
goto out;
}
/* Freshen the shared index only if the split-index was written */
if (!ret && !new_shared_index) {
const char *shared_index = git_path("sharedindex.%s",
- sha1_to_hex(si->base_sha1));
+ oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
}
#include "parse-options.h"
#include "refs.h"
#include "wildmatch.h"
+ #include "object-store.h"
#include "commit.h"
#include "remote.h"
#include "color.h"
#include "trailer.h"
#include "wt-status.h"
#include "commit-slab.h"
+#include "commit-graph.h"
static struct ref_msg {
const char *gone;
} *used_atom;
static int used_atom_cnt, need_tagged, need_symref;
-static void color_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *color_value)
+/*
+ * Expand string, append it to strbuf *sb, then return error code ret.
+ * This helps save a few lines of code.
+ */
+static int strbuf_addf_ret(struct strbuf *sb, int ret, const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ strbuf_vaddf(sb, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+
+static int color_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *color_value, struct strbuf *err)
{
if (!color_value)
- die(_("expected format: %%(color:<color>)"));
+ return strbuf_addf_ret(err, -1, _("expected format: %%(color:<color>)"));
if (color_parse(color_value, atom->u.color) < 0)
- die(_("unrecognized color: %%(color:%s)"), color_value);
+ return strbuf_addf_ret(err, -1, _("unrecognized color: %%(color:%s)"),
+ color_value);
/*
* We check this after we've parsed the color, which lets us complain
* about syntactically bogus color names even if they won't be used.
*/
if (!want_color(format->use_color))
color_parse("", atom->u.color);
+ return 0;
}
-static void refname_atom_parser_internal(struct refname_atom *atom,
- const char *arg, const char *name)
+static int refname_atom_parser_internal(struct refname_atom *atom, const char *arg,
+ const char *name, struct strbuf *err)
{
if (!arg)
atom->option = R_NORMAL;
skip_prefix(arg, "strip=", &arg)) {
atom->option = R_LSTRIP;
if (strtol_i(arg, 10, &atom->lstrip))
- die(_("Integer value expected refname:lstrip=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("Integer value expected refname:lstrip=%s"), arg);
} else if (skip_prefix(arg, "rstrip=", &arg)) {
atom->option = R_RSTRIP;
if (strtol_i(arg, 10, &atom->rstrip))
- die(_("Integer value expected refname:rstrip=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("Integer value expected refname:rstrip=%s"), arg);
} else
- die(_("unrecognized %%(%s) argument: %s"), name, arg);
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(%s) argument: %s"), name, arg);
+ return 0;
}
-static void remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int remote_ref_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
struct string_list params = STRING_LIST_INIT_DUP;
int i;
if (!arg) {
atom->u.remote_ref.option = RR_REF;
- refname_atom_parser_internal(&atom->u.remote_ref.refname,
- arg, atom->name);
- return;
+ return refname_atom_parser_internal(&atom->u.remote_ref.refname,
+ arg, atom->name, err);
}
atom->u.remote_ref.nobracket = 0;
atom->u.remote_ref.push_remote = 1;
} else {
atom->u.remote_ref.option = RR_REF;
- refname_atom_parser_internal(&atom->u.remote_ref.refname,
- arg, atom->name);
+ if (refname_atom_parser_internal(&atom->u.remote_ref.refname,
+ arg, atom->name, err)) {
+ string_list_clear(&params, 0);
+ return -1;
+ }
}
}
string_list_clear(&params, 0);
+ return 0;
}
-static void body_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int body_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (arg)
- die(_("%%(body) does not take arguments"));
+ return strbuf_addf_ret(err, -1, _("%%(body) does not take arguments"));
atom->u.contents.option = C_BODY_DEP;
+ return 0;
}
-static void subject_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int subject_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (arg)
- die(_("%%(subject) does not take arguments"));
+ return strbuf_addf_ret(err, -1, _("%%(subject) does not take arguments"));
atom->u.contents.option = C_SUB;
+ return 0;
}
-static void trailers_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int trailers_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
struct string_list params = STRING_LIST_INIT_DUP;
int i;
atom->u.contents.trailer_opts.unfold = 1;
else if (!strcmp(s, "only"))
atom->u.contents.trailer_opts.only_trailers = 1;
- else
- die(_("unknown %%(trailers) argument: %s"), s);
+ else {
+ strbuf_addf(err, _("unknown %%(trailers) argument: %s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
}
}
atom->u.contents.option = C_TRAILERS;
string_list_clear(&params, 0);
+ return 0;
}
-static void contents_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int contents_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (!arg)
atom->u.contents.option = C_BARE;
atom->u.contents.option = C_SUB;
else if (skip_prefix(arg, "trailers", &arg)) {
skip_prefix(arg, ":", &arg);
- trailers_atom_parser(format, atom, *arg ? arg : NULL);
+ if (trailers_atom_parser(format, atom, *arg ? arg : NULL, err))
+ return -1;
} else if (skip_prefix(arg, "lines=", &arg)) {
atom->u.contents.option = C_LINES;
if (strtoul_ui(arg, 10, &atom->u.contents.nlines))
- die(_("positive value expected contents:lines=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("positive value expected contents:lines=%s"), arg);
} else
- die(_("unrecognized %%(contents) argument: %s"), arg);
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(contents) argument: %s"), arg);
+ return 0;
}
-static void objectname_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int objectname_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (!arg)
atom->u.objectname.option = O_FULL;
atom->u.objectname.option = O_LENGTH;
if (strtoul_ui(arg, 10, &atom->u.objectname.length) ||
atom->u.objectname.length == 0)
- die(_("positive value expected objectname:short=%s"), arg);
+ return strbuf_addf_ret(err, -1, _("positive value expected objectname:short=%s"), arg);
if (atom->u.objectname.length < MINIMUM_ABBREV)
atom->u.objectname.length = MINIMUM_ABBREV;
} else
- die(_("unrecognized %%(objectname) argument: %s"), arg);
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(objectname) argument: %s"), arg);
+ return 0;
}
-static void refname_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int refname_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
- refname_atom_parser_internal(&atom->u.refname, arg, atom->name);
+ return refname_atom_parser_internal(&atom->u.refname, arg, atom->name, err);
}
static align_type parse_align_position(const char *s)
return -1;
}
-static void align_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int align_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
struct align *align = &atom->u.align;
struct string_list params = STRING_LIST_INIT_DUP;
unsigned int width = ~0U;
if (!arg)
- die(_("expected format: %%(align:<width>,<position>)"));
+ return strbuf_addf_ret(err, -1, _("expected format: %%(align:<width>,<position>)"));
align->position = ALIGN_LEFT;
if (skip_prefix(s, "position=", &s)) {
position = parse_align_position(s);
- if (position < 0)
- die(_("unrecognized position:%s"), s);
+ if (position < 0) {
+ strbuf_addf(err, _("unrecognized position:%s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
align->position = position;
} else if (skip_prefix(s, "width=", &s)) {
- if (strtoul_ui(s, 10, &width))
- die(_("unrecognized width:%s"), s);
+ if (strtoul_ui(s, 10, &width)) {
+ strbuf_addf(err, _("unrecognized width:%s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
} else if (!strtoul_ui(s, 10, &width))
;
else if ((position = parse_align_position(s)) >= 0)
align->position = position;
- else
- die(_("unrecognized %%(align) argument: %s"), s);
+ else {
+ strbuf_addf(err, _("unrecognized %%(align) argument: %s"), s);
+ string_list_clear(&params, 0);
+ return -1;
+ }
}
- if (width == ~0U)
- die(_("positive width expected with the %%(align) atom"));
+ if (width == ~0U) {
+ string_list_clear(&params, 0);
+ return strbuf_addf_ret(err, -1, _("positive width expected with the %%(align) atom"));
+ }
align->width = width;
string_list_clear(&params, 0);
+ return 0;
}
-static void if_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int if_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err)
{
if (!arg) {
atom->u.if_then_else.cmp_status = COMPARE_NONE;
- return;
+ return 0;
} else if (skip_prefix(arg, "equals=", &atom->u.if_then_else.str)) {
atom->u.if_then_else.cmp_status = COMPARE_EQUAL;
} else if (skip_prefix(arg, "notequals=", &atom->u.if_then_else.str)) {
atom->u.if_then_else.cmp_status = COMPARE_UNEQUAL;
- } else {
- die(_("unrecognized %%(if) argument: %s"), arg);
- }
+ } else
+ return strbuf_addf_ret(err, -1, _("unrecognized %%(if) argument: %s"), arg);
+ return 0;
}
-static void head_atom_parser(const struct ref_format *format, struct used_atom *atom, const char *arg)
+static int head_atom_parser(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *unused_err)
{
atom->u.head = resolve_refdup("HEAD", RESOLVE_REF_READING, NULL, NULL);
+ return 0;
}
static struct {
const char *name;
cmp_type cmp_type;
- void (*parser)(const struct ref_format *format, struct used_atom *atom, const char *arg);
+ int (*parser)(const struct ref_format *format, struct used_atom *atom,
+ const char *arg, struct strbuf *err);
} valid_atom[] = {
{ "refname" , FIELD_STR, refname_atom_parser },
{ "objecttype" },
struct atom_value {
const char *s;
- void (*handler)(struct atom_value *atomv, struct ref_formatting_state *state);
+ int (*handler)(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err);
uintmax_t value; /* used for sorting when not FIELD_STR */
struct used_atom *atom;
};
* Used to parse format string and sort specifiers
*/
static int parse_ref_filter_atom(const struct ref_format *format,
- const char *atom, const char *ep)
+ const char *atom, const char *ep,
+ struct strbuf *err)
{
const char *sp;
const char *arg;
if (*sp == '*' && sp < ep)
sp++; /* deref */
if (ep <= sp)
- die(_("malformed field name: %.*s"), (int)(ep-atom), atom);
+ return strbuf_addf_ret(err, -1, _("malformed field name: %.*s"),
+ (int)(ep-atom), atom);
/* Do we have the atom already used elsewhere? */
for (i = 0; i < used_atom_cnt; i++) {
}
if (ARRAY_SIZE(valid_atom) <= i)
- die(_("unknown field name: %.*s"), (int)(ep-atom), atom);
+ return strbuf_addf_ret(err, -1, _("unknown field name: %.*s"),
+ (int)(ep-atom), atom);
/* Add it in, including the deref prefix */
at = used_atom_cnt;
}
}
memset(&used_atom[at].u, 0, sizeof(used_atom[at].u));
- if (valid_atom[i].parser)
- valid_atom[i].parser(format, &used_atom[at], arg);
+ if (valid_atom[i].parser && valid_atom[i].parser(format, &used_atom[at], arg, err))
+ return -1;
if (*atom == '*')
need_tagged = 1;
if (!strcmp(valid_atom[i].name, "symref"))
}
}
-static void append_atom(struct atom_value *v, struct ref_formatting_state *state)
+static int append_atom(struct atom_value *v, struct ref_formatting_state *state,
+ struct strbuf *unused_err)
{
/*
* Quote formatting is only done when the stack has a single
quote_formatting(&state->stack->output, v->s, state->quote_style);
else
strbuf_addstr(&state->stack->output, v->s);
+ return 0;
}
static void push_stack_element(struct ref_formatting_stack **stack)
strbuf_release(&s);
}
-static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *unused_err)
{
struct ref_formatting_stack *new_stack;
new_stack = state->stack;
new_stack->at_end = end_align_handler;
new_stack->at_end_data = &atomv->atom->u.align;
+ return 0;
}
static void if_then_else_handler(struct ref_formatting_stack **stack)
free(if_then_else);
}
-static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *unused_err)
{
struct ref_formatting_stack *new_stack;
struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1);
new_stack = state->stack;
new_stack->at_end = if_then_else_handler;
new_stack->at_end_data = if_then_else;
+ return 0;
}
static int is_empty(const char *s)
return 1;
}
-static void then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int then_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err)
{
struct ref_formatting_stack *cur = state->stack;
struct if_then_else *if_then_else = NULL;
if (cur->at_end == if_then_else_handler)
if_then_else = (struct if_then_else *)cur->at_end_data;
if (!if_then_else)
- die(_("format: %%(then) atom used without an %%(if) atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(then) atom used without an %%(if) atom"));
if (if_then_else->then_atom_seen)
- die(_("format: %%(then) atom used more than once"));
+ return strbuf_addf_ret(err, -1, _("format: %%(then) atom used more than once"));
if (if_then_else->else_atom_seen)
- die(_("format: %%(then) atom used after %%(else)"));
+ return strbuf_addf_ret(err, -1, _("format: %%(then) atom used after %%(else)"));
if_then_else->then_atom_seen = 1;
/*
* If the 'equals' or 'notequals' attribute is used then
} else if (cur->output.len && !is_empty(cur->output.buf))
if_then_else->condition_satisfied = 1;
strbuf_reset(&cur->output);
+ return 0;
}
-static void else_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int else_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err)
{
struct ref_formatting_stack *prev = state->stack;
struct if_then_else *if_then_else = NULL;
if (prev->at_end == if_then_else_handler)
if_then_else = (struct if_then_else *)prev->at_end_data;
if (!if_then_else)
- die(_("format: %%(else) atom used without an %%(if) atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(else) atom used without an %%(if) atom"));
if (!if_then_else->then_atom_seen)
- die(_("format: %%(else) atom used without a %%(then) atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(else) atom used without a %%(then) atom"));
if (if_then_else->else_atom_seen)
- die(_("format: %%(else) atom used more than once"));
+ return strbuf_addf_ret(err, -1, _("format: %%(else) atom used more than once"));
if_then_else->else_atom_seen = 1;
push_stack_element(&state->stack);
state->stack->at_end_data = prev->at_end_data;
state->stack->at_end = prev->at_end;
+ return 0;
}
-static void end_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
+static int end_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state,
+ struct strbuf *err)
{
struct ref_formatting_stack *current = state->stack;
struct strbuf s = STRBUF_INIT;
if (!current->at_end)
- die(_("format: %%(end) atom used without corresponding atom"));
+ return strbuf_addf_ret(err, -1, _("format: %%(end) atom used without corresponding atom"));
current->at_end(&state->stack);
/* Stack may have been popped within at_end(), hence reset the current pointer */
}
strbuf_release(&s);
pop_stack_element(&state->stack);
+ return 0;
}
/*
format->need_color_reset_at_eol = 0;
for (cp = format->format; *cp && (sp = find_next(cp)); ) {
+ struct strbuf err = STRBUF_INIT;
const char *color, *ep = strchr(sp, ')');
int at;
if (!ep)
return error(_("malformed format string %s"), sp);
/* sp points at "%(" and ep points at the closing ")" */
- at = parse_ref_filter_atom(format, sp + 2, ep);
+ at = parse_ref_filter_atom(format, sp + 2, ep, &err);
+ if (at < 0)
+ die("%s", err.buf);
cp = ep + 1;
if (skip_prefix(used_atom[at].name, "color:", &color))
format->need_color_reset_at_eol = !!strcmp(color, "reset");
+ strbuf_release(&err);
}
if (format->need_color_reset_at_eol && !want_color(format->use_color))
format->need_color_reset_at_eol = 0;
v->s = xstrdup(find_unique_abbrev(oid, atom->u.objectname.length));
return 1;
} else
- die("BUG: unknown %%(objectname) option");
+ BUG("unknown %%(objectname) option");
}
return 0;
}
if (deref)
name++;
if (!strcmp(name, "tree")) {
- v->s = xstrdup(oid_to_hex(&commit->tree->object.oid));
+ v->s = xstrdup(oid_to_hex(get_commit_tree_oid(commit)));
}
else if (!strcmp(name, "numparent")) {
v->value = commit_list_count(commit->parents);
else
*s = "";
} else
- die("BUG: unhandled RR_* enum");
+ BUG("unhandled RR_* enum");
}
char *get_head_description(void)
memset(&state, 0, sizeof(state));
wt_status_get_state(&state, 1);
if (state.rebase_in_progress ||
- state.rebase_interactive_in_progress)
- strbuf_addf(&desc, _("(no branch, rebasing %s)"),
- state.branch);
- else if (state.bisect_in_progress)
+ state.rebase_interactive_in_progress) {
+ if (state.branch)
+ strbuf_addf(&desc, _("(no branch, rebasing %s)"),
+ state.branch);
+ else
+ strbuf_addf(&desc, _("(no branch, rebasing detached HEAD %s)"),
+ state.detached_from);
+ } else if (state.bisect_in_progress)
strbuf_addf(&desc, _("(no branch, bisect started on %s)"),
state.branch);
else if (state.detached_from) {
return show_ref(&atom->u.refname, ref->refname);
}
-static void get_object(struct ref_array_item *ref, const struct object_id *oid,
- int deref, struct object **obj)
+static int get_object(struct ref_array_item *ref, const struct object_id *oid,
+ int deref, struct object **obj, struct strbuf *err)
{
int eaten;
+ int ret = 0;
unsigned long size;
void *buf = get_obj(oid, obj, &size, &eaten);
if (!buf)
- die(_("missing object %s for %s"),
- oid_to_hex(oid), ref->refname);
- if (!*obj)
- die(_("parse_object_buffer failed on %s for %s"),
- oid_to_hex(oid), ref->refname);
-
- grab_values(ref->value, deref, *obj, buf, size);
+ ret = strbuf_addf_ret(err, -1, _("missing object %s for %s"),
+ oid_to_hex(oid), ref->refname);
+ else if (!*obj)
+ ret = strbuf_addf_ret(err, -1, _("parse_object_buffer failed on %s for %s"),
+ oid_to_hex(oid), ref->refname);
+ else
+ grab_values(ref->value, deref, *obj, buf, size);
if (!eaten)
free(buf);
+ return ret;
}
/*
* Parse the object referred by ref, and grab needed value.
*/
-static void populate_value(struct ref_array_item *ref)
+static int populate_value(struct ref_array_item *ref, struct strbuf *err)
{
struct object *obj;
int i;
break;
}
if (used_atom_cnt <= i)
- return;
+ return 0;
- get_object(ref, &ref->objectname, 0, &obj);
+ if (get_object(ref, &ref->objectname, 0, &obj, err))
+ return -1;
/*
* If there is no atom that wants to know about tagged
* object, we are done.
*/
if (!need_tagged || (obj->type != OBJ_TAG))
- return;
+ return 0;
/*
* If it is a tag object, see if we use a value that derefs
* is not consistent with what deref_tag() does
* which peels the onion to the core.
*/
- get_object(ref, tagged, 1, &obj);
+ return get_object(ref, tagged, 1, &obj, err);
}
/*
* Given a ref, return the value for the atom. This lazily gets value
* out of the object by calling populate value.
*/
-static void get_ref_atom_value(struct ref_array_item *ref, int atom, struct atom_value **v)
+static int get_ref_atom_value(struct ref_array_item *ref, int atom,
+ struct atom_value **v, struct strbuf *err)
{
if (!ref->value) {
- populate_value(ref);
+ if (populate_value(ref, err))
+ return -1;
fill_missing_values(ref->value);
}
*v = &ref->value[atom];
+ return 0;
}
/*
}
/*
- * Test whether the candidate or one of its parents is contained in the list.
+ * Test whether the candidate is contained in the list.
* Do not recurse to find out, though, but return -1 if inconclusive.
*/
static enum contains_result contains_test(struct commit *candidate,
const struct commit_list *want,
- struct contains_cache *cache)
+ struct contains_cache *cache,
+ uint32_t cutoff)
{
enum contains_result *cached = contains_cache_at(cache, candidate);
/* Otherwise, we don't know; prepare to recurse */
parse_commit_or_die(candidate);
+
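+	/*
+	 * A commit can only reach ancestors whose generation number is no
+	 * greater than its own; if the candidate's generation is below the
+	 * lowest generation among the "want" commits, none of them can be
+	 * reachable from it.
+	 */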
+ if (candidate->generation < cutoff)
+ return CONTAINS_NO;
+
return CONTAINS_UNKNOWN;
}
struct contains_cache *cache)
{
struct contains_stack contains_stack = { 0, 0, NULL };
- enum contains_result result = contains_test(candidate, want, cache);
+ enum contains_result result;
+ uint32_t cutoff = GENERATION_NUMBER_INFINITY;
+ const struct commit_list *p;
+
+ for (p = want; p; p = p->next) {
+ struct commit *c = p->item;
+ load_commit_graph_info(c);
+ if (c->generation < cutoff)
+ cutoff = c->generation;
+ }
+ result = contains_test(candidate, want, cache, cutoff);
if (result != CONTAINS_UNKNOWN)
return result;
* If we just popped the stack, parents->item has been marked,
* therefore contains_test will return a meaningful yes/no.
*/
- else switch (contains_test(parents->item, want, cache)) {
+ else switch (contains_test(parents->item, want, cache, cutoff)) {
case CONTAINS_YES:
*contains_cache_at(cache, commit) = CONTAINS_YES;
contains_stack.nr--;
}
}
free(contains_stack.contains_stack);
- return contains_test(candidate, want, cache);
+ return contains_test(candidate, want, cache, cutoff);
}
static int commit_contains(struct ref_filter *filter, struct commit *commit,
return NULL;
}
-/* Allocate space for a new ref_array_item and copy the objectname and flag to it */
+/*
+ * Allocate space for a new ref_array_item and copy the name and oid to it.
+ *
+ * Callers can then fill in other struct members at their leisure.
+ */
static struct ref_array_item *new_ref_array_item(const char *refname,
- const unsigned char *objectname,
- int flag)
+ const struct object_id *oid)
{
struct ref_array_item *ref;
+
FLEX_ALLOC_STR(ref, refname, refname);
- hashcpy(ref->objectname.hash, objectname);
- ref->flag = flag;
+ oidcpy(&ref->objectname, oid);
+
+ return ref;
+}
+
+struct ref_array_item *ref_array_push(struct ref_array *array,
+ const char *refname,
+ const struct object_id *oid)
+{
+ struct ref_array_item *ref = new_ref_array_item(refname, oid);
+
+ ALLOC_GROW(array->items, array->nr + 1, array->alloc);
+ array->items[array->nr++] = ref;
return ref;
}
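/*
 * Editor's illustration (not part of the patch): with ref_array_push() a
 * caller grows the array and fills in the remaining fields afterwards;
 * the values below are hypothetical.
 *
 *	struct ref_array_item *item =
 *		ref_array_push(&array, "refs/heads/master", &oid);
 *	item->kind = FILTER_REFS_BRANCHES;
 *	item->flag = 0;
 */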
* to do its job and the resulting list may yet to be pruned
* by maxcount logic.
*/
- ref = new_ref_array_item(refname, oid->hash, flag);
+ ref = ref_array_push(ref_cbdata->array, refname, oid);
ref->commit = commit;
-
- REALLOC_ARRAY(ref_cbdata->array->items, ref_cbdata->array->nr + 1);
- ref_cbdata->array->items[ref_cbdata->array->nr++] = ref;
+ ref->flag = flag;
ref->kind = kind;
+
return 0;
}
int cmp;
cmp_type cmp_type = used_atom[s->atom].type;
int (*cmp_fn)(const char *, const char *);
+ struct strbuf err = STRBUF_INIT;
- get_ref_atom_value(a, s->atom, &va);
- get_ref_atom_value(b, s->atom, &vb);
+ if (get_ref_atom_value(a, s->atom, &va, &err))
+ die("%s", err.buf);
+ if (get_ref_atom_value(b, s->atom, &vb, &err))
+ die("%s", err.buf);
+ strbuf_release(&err);
cmp_fn = s->ignore_case ? strcasecmp : strcmp;
if (s->version)
cmp = versioncmp(va->s, vb->s);
}
}
-void format_ref_array_item(struct ref_array_item *info,
+int format_ref_array_item(struct ref_array_item *info,
const struct ref_format *format,
- struct strbuf *final_buf)
+ struct strbuf *final_buf,
+ struct strbuf *error_buf)
{
const char *cp, *sp, *ep;
struct ref_formatting_state state = REF_FORMATTING_STATE_INIT;
for (cp = format->format; *cp && (sp = find_next(cp)); cp = ep + 1) {
struct atom_value *atomv;
+ int pos;
ep = strchr(sp, ')');
if (cp < sp)
append_literal(cp, sp, &state);
- get_ref_atom_value(info,
- parse_ref_filter_atom(format, sp + 2, ep),
- &atomv);
- atomv->handler(atomv, &state);
+ pos = parse_ref_filter_atom(format, sp + 2, ep, error_buf);
+ if (pos < 0 || get_ref_atom_value(info, pos, &atomv, error_buf) ||
+ atomv->handler(atomv, &state, error_buf)) {
+ pop_stack_element(&state.stack);
+ return -1;
+ }
}
if (*cp) {
sp = cp + strlen(cp);
if (format->need_color_reset_at_eol) {
struct atom_value resetv;
resetv.s = GIT_COLOR_RESET;
- append_atom(&resetv, &state);
+ if (append_atom(&resetv, &state, error_buf)) {
+ pop_stack_element(&state.stack);
+ return -1;
+ }
+ }
+ if (state.stack->prev) {
+ pop_stack_element(&state.stack);
+ return strbuf_addf_ret(error_buf, -1, _("format: %%(end) atom missing"));
}
- if (state.stack->prev)
- die(_("format: %%(end) atom missing"));
strbuf_addbuf(final_buf, &state.stack->output);
pop_stack_element(&state.stack);
+ return 0;
}
void show_ref_array_item(struct ref_array_item *info,
const struct ref_format *format)
{
struct strbuf final_buf = STRBUF_INIT;
+ struct strbuf error_buf = STRBUF_INIT;
- format_ref_array_item(info, format, &final_buf);
+ if (format_ref_array_item(info, format, &final_buf, &error_buf))
+ die("%s", error_buf.buf);
fwrite(final_buf.buf, 1, final_buf.len, stdout);
+ strbuf_release(&error_buf);
strbuf_release(&final_buf);
putchar('\n');
}
-void pretty_print_ref(const char *name, const unsigned char *sha1,
+void pretty_print_ref(const char *name, const struct object_id *oid,
const struct ref_format *format)
{
struct ref_array_item *ref_item;
- ref_item = new_ref_array_item(name, sha1, 0);
+ ref_item = new_ref_array_item(name, oid);
ref_item->kind = ref_kind_from_refname(name);
show_ref_array_item(ref_item, format);
free_array_item(ref_item);
*/
struct ref_format dummy = REF_FORMAT_INIT;
const char *end = atom + strlen(atom);
- return parse_ref_filter_atom(&dummy, atom, end);
+ struct strbuf err = STRBUF_INIT;
+ int res = parse_ref_filter_atom(&dummy, atom, end, &err);
+ if (res < 0)
+ die("%s", err.buf);
+ strbuf_release(&err);
+ return res;
}
/* If no sorting option is given, use refname to sort as default */
#include "iterator.h"
#include "refs.h"
#include "refs/refs-internal.h"
+ #include "object-store.h"
#include "object.h"
#include "tag.h"
#include "submodule.h"
#include "worktree.h"
+#include "argv-array.h"
#include "repository.h"
/*
return 0;
}
+/*
+ * Given a 'prefix', expand it by the rules in 'ref_rev_parse_rules' and
+ * add the results to 'prefixes'.
+ */
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix)
+{
+ const char **p;
+ int len = strlen(prefix);
+
+ for (p = ref_rev_parse_rules; *p; p++)
+ argv_array_pushf(prefixes, *p, len, prefix);
+}
+
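/*
 * Editor's illustration (not part of the patch): a minimal usage sketch,
 * assuming the argv-array API from argv-array.h.
 *
 *	struct argv_array prefixes = ARGV_ARRAY_INIT;
 *
 *	expand_ref_prefix(&prefixes, "topic");
 *	// now holds "topic", "refs/topic", "refs/tags/topic",
 *	// "refs/heads/topic", "refs/remotes/topic", ...
 *	argv_array_clear(&prefixes);
 */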
/*
* *string and *len will only be substituted, and *string returned (for
* later free()ing) if the string passed in is a magic short-hand form
static int is_per_worktree_ref(const char *refname)
{
return !strcmp(refname, "HEAD") ||
- starts_with(refname, "refs/bisect/");
+ starts_with(refname, "refs/bisect/") ||
+ starts_with(refname, "refs/rewritten/");
}
static int is_pseudoref_syntax(const char *refname)
{
const char *filename;
int fd;
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
struct strbuf buf = STRBUF_INIT;
int ret = -1;
strbuf_addf(&buf, "%s\n", oid_to_hex(oid));
filename = git_path("%s", pseudoref);
- fd = hold_lock_file_for_update_timeout(&lock, filename,
- LOCK_DIE_ON_ERROR,
+ fd = hold_lock_file_for_update_timeout(&lock, filename, 0,
get_files_ref_lock_timeout_ms());
if (fd < 0) {
strbuf_addf(err, "could not open '%s' for writing: %s",
if (old_oid) {
struct object_id actual_old_oid;
- if (read_ref(pseudoref, &actual_old_oid))
- die("could not read ref '%s'", pseudoref);
- if (oidcmp(&actual_old_oid, old_oid)) {
- strbuf_addf(err, "unexpected sha1 when writing '%s'", pseudoref);
+ if (read_ref(pseudoref, &actual_old_oid)) {
+ if (!is_null_oid(old_oid)) {
+ strbuf_addf(err, "could not read ref '%s'",
+ pseudoref);
+ rollback_lock_file(&lock);
+ goto done;
+ }
+ } else if (is_null_oid(old_oid)) {
+ strbuf_addf(err, "ref '%s' already exists",
+ pseudoref);
+ rollback_lock_file(&lock);
+ goto done;
+ } else if (oidcmp(&actual_old_oid, old_oid)) {
+ strbuf_addf(err, "unexpected object ID when writing '%s'",
+ pseudoref);
rollback_lock_file(&lock);
goto done;
}
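/*
 * Editor's note: with the change above, a null old_oid means "the
 * pseudoref must not exist yet", while a non-null old_oid means "the
 * pseudoref must exist and currently point at old_oid"; either violation
 * releases the lock and reports an error instead of dying.
 */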
static int delete_pseudoref(const char *pseudoref, const struct object_id *old_oid)
{
- static struct lock_file lock;
const char *filename;
filename = git_path("%s", pseudoref);
if (old_oid && !is_null_oid(old_oid)) {
+ struct lock_file lock = LOCK_INIT;
int fd;
struct object_id actual_old_oid;
fd = hold_lock_file_for_update_timeout(
- &lock, filename, LOCK_DIE_ON_ERROR,
+ &lock, filename, 0,
get_files_ref_lock_timeout_ms());
- if (fd < 0)
- die_errno(_("Could not open '%s' for writing"), filename);
+ if (fd < 0) {
+ error_errno(_("could not open '%s' for writing"),
+ filename);
+ return -1;
+ }
if (read_ref(pseudoref, &actual_old_oid))
die("could not read ref '%s'", pseudoref);
if (oidcmp(&actual_old_oid, old_oid)) {
- warning("Unexpected sha1 when deleting %s", pseudoref);
+ error("unexpected object ID when deleting '%s'",
+ pseudoref);
rollback_lock_file(&lock);
return -1;
}
/* OK */
break;
case REF_TRANSACTION_PREPARED:
- die("BUG: free called on a prepared reference transaction");
+ BUG("free called on a prepared reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
struct ref_update *update;
if (transaction->state != REF_TRANSACTION_OPEN)
- die("BUG: update called for transaction that is not open");
+ BUG("update called for transaction that is not open");
FLEX_ALLOC_STR(update, refname, refname);
ALLOC_GROW(transaction->updates, transaction->nr + 1, transaction->alloc);
struct strbuf *err)
{
if (!new_oid || is_null_oid(new_oid))
- die("BUG: create called without valid new_oid");
+ BUG("create called without valid new_oid");
return ref_transaction_update(transaction, refname, new_oid,
&null_oid, flags, msg, err);
}
struct strbuf *err)
{
if (old_oid && is_null_oid(old_oid))
- die("BUG: delete called with old_oid set to zeros");
+ BUG("delete called with old_oid set to zeros");
return ref_transaction_update(transaction, refname,
&null_oid, old_oid,
flags, msg, err);
struct strbuf *err)
{
if (!old_oid)
- die("BUG: verify called with old_oid set to NULL");
+ BUG("verify called with old_oid set to NULL");
return ref_transaction_update(transaction, refname,
NULL, old_oid,
flags, NULL, err);
for (i = 0; i < nr_rules; i++) {
assert(offset < total_len);
scanf_fmts[i] = (char *)&scanf_fmts[nr_rules] + offset;
- offset += snprintf(scanf_fmts[i], total_len - offset,
- ref_rev_parse_rules[i], 2, "%s") + 1;
+ offset += xsnprintf(scanf_fmts[i], total_len - offset,
+ ref_rev_parse_rules[i], 2, "%s") + 1;
}
}
struct ref_store *refs;
if (!be)
- die("BUG: reference backend %s is unknown", be_name);
+ BUG("reference backend %s is unknown", be_name);
refs = be->init(gitdir, flags);
return refs;
if (r->refs)
return r->refs;
+ if (!r->gitdir)
+ BUG("attempting to get main_ref_store outside of repository");
+
r->refs = ref_store_init(r->gitdir, REF_STORE_ALL_CAPS);
return r->refs;
}
hashmap_init(map, ref_store_hash_cmp, NULL, 0);
if (hashmap_put(map, alloc_ref_store_hash_entry(name, refs)))
- die("BUG: %s ref_store '%s' initialized twice", type, name);
+ BUG("%s ref_store '%s' initialized twice", type, name);
}
struct ref_store *get_submodule_ref_store(const char *submodule)
refnames->items[i].string);
return 1;
} else if (cmp > 0) {
- die("BUG: ref_update_reject_duplicates() received unsorted list");
+ BUG("ref_update_reject_duplicates() received unsorted list");
}
}
return 0;
/* Good. */
break;
case REF_TRANSACTION_PREPARED:
- die("BUG: prepare called twice on reference transaction");
+ BUG("prepare called twice on reference transaction");
break;
case REF_TRANSACTION_CLOSED:
- die("BUG: prepare called on a closed reference transaction");
+ BUG("prepare called on a closed reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
ret = refs->be->transaction_abort(refs, transaction, err);
break;
case REF_TRANSACTION_CLOSED:
- die("BUG: abort called on a closed reference transaction");
+ BUG("abort called on a closed reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
/* Fall through to finish. */
break;
case REF_TRANSACTION_CLOSED:
- die("BUG: commit called on a closed reference transaction");
+ BUG("commit called on a closed reference transaction");
break;
default:
- die("BUG: unexpected reference transaction state");
+ BUG("unexpected reference transaction state");
break;
}
}
if (ok != ITER_DONE)
- die("BUG: error while iterating over references");
+ BUG("error while iterating over references");
extra_refname = find_descendant_ref(dirname.buf, extras, skip);
if (extra_refname)
#include "cache.h"
#include "refs.h"
#include "remote.h"
+ #include "object-store.h"
#include "strbuf.h"
#include "url.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "vcs-svn/svndump.h"
#include "notes.h"
#include "config.h"
#include "remote.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "commit.h"
#include "diff.h"
#include "revision.h"
enum map_direction { FROM_SRC, FROM_DST };
-static struct refspec s_tag_refspec = {
- 0,
- 1,
- 0,
- 0,
- "refs/tags/*",
- "refs/tags/*"
-};
-
-/* See TAG_REFSPEC for the string version */
-const struct refspec *tag_refspec = &s_tag_refspec;
-
struct counted_string {
size_t len;
const char *s;
return xstrfmt("%s%s", r->rewrite[longest_i]->base, url + longest->len);
}
-static void add_push_refspec(struct remote *remote, const char *ref)
-{
- ALLOC_GROW(remote->push_refspec,
- remote->push_refspec_nr + 1,
- remote->push_refspec_alloc);
- remote->push_refspec[remote->push_refspec_nr++] = ref;
-}
-
-static void add_fetch_refspec(struct remote *remote, const char *ref)
-{
- ALLOC_GROW(remote->fetch_refspec,
- remote->fetch_refspec_nr + 1,
- remote->fetch_refspec_alloc);
- remote->fetch_refspec[remote->fetch_refspec_nr++] = ref;
-}
-
-void add_prune_tags_to_fetch_refspec(struct remote *remote)
-{
- int nr = remote->fetch_refspec_nr;
- int bufsize = nr + 1;
- int size = sizeof(struct refspec);
-
- remote->fetch = xrealloc(remote->fetch, size * bufsize);
- memcpy(&remote->fetch[nr], tag_refspec, size);
- add_fetch_refspec(remote, xstrdup(TAG_REFSPEC));
-}
-
static void add_url(struct remote *remote, const char *url)
{
ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc);
ret = xcalloc(1, sizeof(struct remote));
ret->prune = -1; /* unspecified */
ret->prune_tags = -1; /* unspecified */
+ ret->name = xstrndup(name, len);
+ refspec_init(&ret->push, REFSPEC_PUSH);
+ refspec_init(&ret->fetch, REFSPEC_FETCH);
+
ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
remotes[remotes_nr++] = ret;
- ret->name = xstrndup(name, len);
hashmap_entry_init(ret, lookup_entry.hash);
replaced = hashmap_put(&remotes_hash, ret);
if (skip_prefix(buf.buf, "URL:", &v))
add_url_alias(remote, xstrdup(skip_spaces(v)));
else if (skip_prefix(buf.buf, "Push:", &v))
- add_push_refspec(remote, xstrdup(skip_spaces(v)));
+ refspec_append(&remote->push, skip_spaces(v));
else if (skip_prefix(buf.buf, "Pull:", &v))
- add_fetch_refspec(remote, xstrdup(skip_spaces(v)));
+ refspec_append(&remote->fetch, skip_spaces(v));
}
strbuf_release(&buf);
fclose(f);
frag = "master";
add_url_alias(remote, strbuf_detach(&buf, NULL));
- add_fetch_refspec(remote, xstrfmt("refs/heads/%s:refs/heads/%s",
- frag, remote->name));
+ strbuf_addf(&buf, "refs/heads/%s:refs/heads/%s",
+ frag, remote->name);
+ refspec_append(&remote->fetch, buf.buf);
/*
* Cogito compatible push: push current HEAD to remote #branch
* (master if missing)
*/
- add_push_refspec(remote, xstrfmt("HEAD:refs/heads/%s", frag));
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "HEAD:refs/heads/%s", frag);
+ refspec_append(&remote->push, buf.buf);
remote->fetch_tags = 1; /* always auto-follow */
+ strbuf_release(&buf);
}
static int handle_config(const char *key, const char *value, void *cb)
const char *v;
if (git_config_string(&v, key, value))
return -1;
- add_push_refspec(remote, v);
+ refspec_append(&remote->push, v);
+ free((char *)v);
} else if (!strcmp(subkey, "fetch")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
- add_fetch_refspec(remote, v);
+ refspec_append(&remote->fetch, v);
+ free((char *)v);
} else if (!strcmp(subkey, "receivepack")) {
const char *v;
if (git_config_string(&v, key, value))
alias_all_urls();
}
-static struct refspec *parse_refspec_internal(int nr_refspec, const char **refspec, int fetch, int verify)
-{
- int i;
- struct refspec *rs = xcalloc(nr_refspec, sizeof(*rs));
-
- for (i = 0; i < nr_refspec; i++) {
- size_t llen;
- int is_glob;
- const char *lhs, *rhs;
- int flags;
-
- is_glob = 0;
-
- lhs = refspec[i];
- if (*lhs == '+') {
- rs[i].force = 1;
- lhs++;
- }
-
- rhs = strrchr(lhs, ':');
-
- /*
- * Before going on, special case ":" (or "+:") as a refspec
- * for pushing matching refs.
- */
- if (!fetch && rhs == lhs && rhs[1] == '\0') {
- rs[i].matching = 1;
- continue;
- }
-
- if (rhs) {
- size_t rlen = strlen(++rhs);
- is_glob = (1 <= rlen && strchr(rhs, '*'));
- rs[i].dst = xstrndup(rhs, rlen);
- }
-
- llen = (rhs ? (rhs - lhs - 1) : strlen(lhs));
- if (1 <= llen && memchr(lhs, '*', llen)) {
- if ((rhs && !is_glob) || (!rhs && fetch))
- goto invalid;
- is_glob = 1;
- } else if (rhs && is_glob) {
- goto invalid;
- }
-
- rs[i].pattern = is_glob;
- rs[i].src = xstrndup(lhs, llen);
- flags = REFNAME_ALLOW_ONELEVEL | (is_glob ? REFNAME_REFSPEC_PATTERN : 0);
-
- if (fetch) {
- struct object_id unused;
-
- /* LHS */
- if (!*rs[i].src)
- ; /* empty is ok; it means "HEAD" */
- else if (llen == GIT_SHA1_HEXSZ && !get_oid_hex(rs[i].src, &unused))
- rs[i].exact_sha1 = 1; /* ok */
- else if (!check_refname_format(rs[i].src, flags))
- ; /* valid looking ref is ok */
- else
- goto invalid;
- /* RHS */
- if (!rs[i].dst)
- ; /* missing is ok; it is the same as empty */
- else if (!*rs[i].dst)
- ; /* empty is ok; it means "do not store" */
- else if (!check_refname_format(rs[i].dst, flags))
- ; /* valid looking ref is ok */
- else
- goto invalid;
- } else {
- /*
- * LHS
- * - empty is allowed; it means delete.
- * - when wildcarded, it must be a valid looking ref.
- * - otherwise, it must be an extended SHA-1, but
- * there is no existing way to validate this.
- */
- if (!*rs[i].src)
- ; /* empty is ok */
- else if (is_glob) {
- if (check_refname_format(rs[i].src, flags))
- goto invalid;
- }
- else
- ; /* anything goes, for now */
- /*
- * RHS
- * - missing is allowed, but LHS then must be a
- * valid looking ref.
- * - empty is not allowed.
- * - otherwise it must be a valid looking ref.
- */
- if (!rs[i].dst) {
- if (check_refname_format(rs[i].src, flags))
- goto invalid;
- } else if (!*rs[i].dst) {
- goto invalid;
- } else {
- if (check_refname_format(rs[i].dst, flags))
- goto invalid;
- }
- }
- }
- return rs;
-
- invalid:
- if (verify) {
- /*
- * nr_refspec must be greater than zero and i must be valid
- * since it is only possible to reach this point from within
- * the for loop above.
- */
- free_refspec(i+1, rs);
- return NULL;
- }
- die("Invalid refspec '%s'", refspec[i]);
-}
-
-int valid_fetch_refspec(const char *fetch_refspec_str)
-{
- struct refspec *refspec;
-
- refspec = parse_refspec_internal(1, &fetch_refspec_str, 1, 1);
- free_refspec(1, refspec);
- return !!refspec;
-}
-
-struct refspec *parse_fetch_refspec(int nr_refspec, const char **refspec)
-{
- return parse_refspec_internal(nr_refspec, refspec, 1, 0);
-}
-
-struct refspec *parse_push_refspec(int nr_refspec, const char **refspec)
-{
- return parse_refspec_internal(nr_refspec, refspec, 0, 0);
-}
-
-void free_refspec(int nr_refspec, struct refspec *refspec)
-{
- int i;
-
- if (!refspec)
- return;
-
- for (i = 0; i < nr_refspec; i++) {
- free(refspec[i].src);
- free(refspec[i].dst);
- }
- free(refspec);
-}
-
static int valid_remote_nick(const char *name)
{
if (!name[0] || is_dot_or_dotdot(name))
pushremote_for_branch(branch, NULL);
struct remote *remote = remote_get(remote_name);
- if (remote && remote->push_refspec_nr &&
- (dst = apply_refspecs(remote->push,
- remote->push_refspec_nr,
+ if (remote && remote->push.nr &&
+ (dst = apply_refspecs(&remote->push,
branch->refname))) {
if (explicit)
*explicit = 1;
add_url_alias(ret, name);
if (!valid_remote(ret))
return NULL;
- ret->fetch = parse_fetch_refspec(ret->fetch_refspec_nr, ret->fetch_refspec);
- ret->push = parse_push_refspec(ret->push_refspec_nr, ret->push_refspec);
return ret;
}
struct remote *r = remotes[i];
if (!r)
continue;
- if (!r->fetch)
- r->fetch = parse_fetch_refspec(r->fetch_refspec_nr,
- r->fetch_refspec);
- if (!r->push)
- r->push = parse_push_refspec(r->push_refspec_nr,
- r->push_refspec);
result = fn(r, priv);
}
return result;
return ret;
}
-static void query_refspecs_multiple(struct refspec *refs, int ref_count, struct refspec *query, struct string_list *results)
+static void query_refspecs_multiple(struct refspec *rs,
+ struct refspec_item *query,
+ struct string_list *results)
{
int i;
int find_src = !query->src;
if (find_src && !query->dst)
error("query_refspecs_multiple: need either src or dst");
- for (i = 0; i < ref_count; i++) {
- struct refspec *refspec = &refs[i];
+ for (i = 0; i < rs->nr; i++) {
+ struct refspec_item *refspec = &rs->items[i];
const char *key = find_src ? refspec->dst : refspec->src;
const char *value = find_src ? refspec->src : refspec->dst;
const char *needle = find_src ? query->dst : query->src;
}
}
-int query_refspecs(struct refspec *refs, int ref_count, struct refspec *query)
+int query_refspecs(struct refspec *rs, struct refspec_item *query)
{
int i;
int find_src = !query->src;
if (find_src && !query->dst)
return error("query_refspecs: need either src or dst");
- for (i = 0; i < ref_count; i++) {
- struct refspec *refspec = &refs[i];
+ for (i = 0; i < rs->nr; i++) {
+ struct refspec_item *refspec = &rs->items[i];
const char *key = find_src ? refspec->dst : refspec->src;
const char *value = find_src ? refspec->src : refspec->dst;
return -1;
}
-char *apply_refspecs(struct refspec *refspecs, int nr_refspec,
- const char *name)
+char *apply_refspecs(struct refspec *rs, const char *name)
{
- struct refspec query;
+ struct refspec_item query;
- memset(&query, 0, sizeof(struct refspec));
+ memset(&query, 0, sizeof(struct refspec_item));
query.src = (char *)name;
- if (query_refspecs(refspecs, nr_refspec, &query))
+ if (query_refspecs(rs, &query))
return NULL;
return query.dst;
}
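/*
 * Editor's illustration (not part of the patch): callers now pass the
 * refspec collection itself, e.g.
 *
 *	char *dst = apply_refspecs(&remote->fetch, "refs/heads/master");
 *	if (dst) {
 *		// dst is newly allocated; use it, then free(dst)
 *	}
 */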
-int remote_find_tracking(struct remote *remote, struct refspec *refspec)
+int remote_find_tracking(struct remote *remote, struct refspec_item *refspec)
{
- return query_refspecs(remote->fetch, remote->fetch_refspec_nr, refspec);
+ return query_refspecs(&remote->fetch, refspec);
}
static struct ref *alloc_ref_with_prefix(const char *prefix, size_t prefixlen,
}
static int match_explicit_lhs(struct ref *src,
- struct refspec *rs,
+ struct refspec_item *rs,
struct ref **match,
int *allocated_match)
{
static int match_explicit(struct ref *src, struct ref *dst,
struct ref ***dst_tail,
- struct refspec *rs)
+ struct refspec_item *rs)
{
struct ref *matched_src, *matched_dst;
int allocated_src;
}
static int match_explicit_refs(struct ref *src, struct ref *dst,
- struct ref ***dst_tail, struct refspec *rs,
- int rs_nr)
+ struct ref ***dst_tail, struct refspec *rs)
{
int i, errs;
- for (i = errs = 0; i < rs_nr; i++)
- errs += match_explicit(src, dst, dst_tail, &rs[i]);
+ for (i = errs = 0; i < rs->nr; i++)
+ errs += match_explicit(src, dst, dst_tail, &rs->items[i]);
return errs;
}
-static char *get_ref_match(const struct refspec *rs, int rs_nr, const struct ref *ref,
- int send_mirror, int direction, const struct refspec **ret_pat)
+static char *get_ref_match(const struct refspec *rs, const struct ref *ref,
+ int send_mirror, int direction,
+ const struct refspec_item **ret_pat)
{
- const struct refspec *pat;
+ const struct refspec_item *pat;
char *name;
int i;
int matching_refs = -1;
- for (i = 0; i < rs_nr; i++) {
- if (rs[i].matching &&
- (matching_refs == -1 || rs[i].force)) {
+ for (i = 0; i < rs->nr; i++) {
+ const struct refspec_item *item = &rs->items[i];
+ if (item->matching &&
+ (matching_refs == -1 || item->force)) {
matching_refs = i;
continue;
}
- if (rs[i].pattern) {
- const char *dst_side = rs[i].dst ? rs[i].dst : rs[i].src;
+ if (item->pattern) {
+ const char *dst_side = item->dst ? item->dst : item->src;
int match;
if (direction == FROM_SRC)
- match = match_name_with_pattern(rs[i].src, ref->name, dst_side, &name);
+ match = match_name_with_pattern(item->src, ref->name, dst_side, &name);
else
- match = match_name_with_pattern(dst_side, ref->name, rs[i].src, &name);
+ match = match_name_with_pattern(dst_side, ref->name, item->src, &name);
if (match) {
matching_refs = i;
break;
if (matching_refs == -1)
return NULL;
- pat = rs + matching_refs;
+ pat = &rs->items[matching_refs];
if (pat->matching) {
/*
* "matching refs"; traditionally we pushed everything
* but we can catch some errors early before even talking to the
* remote side.
*/
-int check_push_refs(struct ref *src, int nr_refspec, const char **refspec_names)
+int check_push_refs(struct ref *src, struct refspec *rs)
{
- struct refspec *refspec = parse_push_refspec(nr_refspec, refspec_names);
int ret = 0;
int i;
- for (i = 0; i < nr_refspec; i++) {
- struct refspec *rs = refspec + i;
+ for (i = 0; i < rs->nr; i++) {
+ struct refspec_item *item = &rs->items[i];
- if (rs->pattern || rs->matching)
+ if (item->pattern || item->matching)
continue;
- ret |= match_explicit_lhs(src, rs, NULL, NULL);
+ ret |= match_explicit_lhs(src, item, NULL, NULL);
}
- free_refspec(nr_refspec, refspec);
return ret;
}
* dst (e.g. pushing to a new branch, done in match_explicit_refs).
*/
int match_push_refs(struct ref *src, struct ref **dst,
- int nr_refspec, const char **refspec, int flags)
+ struct refspec *rs, int flags)
{
- struct refspec *rs;
int send_all = flags & MATCH_REFS_ALL;
int send_mirror = flags & MATCH_REFS_MIRROR;
int send_prune = flags & MATCH_REFS_PRUNE;
int errs;
- static const char *default_refspec[] = { ":", NULL };
struct ref *ref, **dst_tail = tail_ref(dst);
struct string_list dst_ref_index = STRING_LIST_INIT_NODUP;
- if (!nr_refspec) {
- nr_refspec = 1;
- refspec = default_refspec;
- }
- rs = parse_push_refspec(nr_refspec, (const char **) refspec);
- errs = match_explicit_refs(src, *dst, &dst_tail, rs, nr_refspec);
+ /* If no refspec is provided, use the default ":" */
+ if (!rs->nr)
+ refspec_append(rs, ":");
+
+ errs = match_explicit_refs(src, *dst, &dst_tail, rs);
/* pick the remainder */
for (ref = src; ref; ref = ref->next) {
struct string_list_item *dst_item;
struct ref *dst_peer;
- const struct refspec *pat = NULL;
+ const struct refspec_item *pat = NULL;
char *dst_name;
- dst_name = get_ref_match(rs, nr_refspec, ref, send_mirror, FROM_SRC, &pat);
+ dst_name = get_ref_match(rs, ref, send_mirror, FROM_SRC, &pat);
if (!dst_name)
continue;
/* We're already sending something to this ref. */
continue;
- src_name = get_ref_match(rs, nr_refspec, ref, send_mirror, FROM_DST, NULL);
+ src_name = get_ref_match(rs, ref, send_mirror, FROM_DST, NULL);
if (src_name) {
if (!src_ref_index.nr)
prepare_ref_index(&src_ref_index, src);
}
string_list_clear(&src_ref_index, 0);
}
+
if (errs)
return -1;
return 0;
{
char *ret;
- ret = apply_refspecs(remote->fetch, remote->fetch_refspec_nr, refname);
+ ret = apply_refspecs(&remote->fetch, refname);
if (!ret)
return error_buf(err,
_("push destination '%s' on remote '%s' has no local tracking branch"),
_("branch '%s' has no remote for pushing"),
branch->name);
- if (remote->push_refspec_nr) {
+ if (remote->push.nr) {
char *dst;
const char *ret;
- dst = apply_refspecs(remote->push, remote->push_refspec_nr,
- branch->refname);
+ dst = apply_refspecs(&remote->push, branch->refname);
if (!dst)
return error_buf(err,
_("push refspecs for '%s' do not include '%s'"),
}
}
- die("BUG: unhandled push situation");
+ BUG("unhandled push situation");
}
const char *branch_get_push(struct branch *branch, struct strbuf *err)
* local symbolic ref.
*/
static struct ref *get_expanded_map(const struct ref *remote_refs,
- const struct refspec *refspec)
+ const struct refspec_item *refspec)
{
const struct ref *ref;
struct ref *ret = NULL;
}
int get_fetch_map(const struct ref *remote_refs,
- const struct refspec *refspec,
+ const struct refspec_item *refspec,
struct ref ***tail,
int missing_ok)
{
struct stale_heads_info {
struct string_list *ref_names;
struct ref **stale_refs_tail;
- struct refspec *refs;
- int ref_count;
+ struct refspec *rs;
};
static int get_stale_heads_cb(const char *refname, const struct object_id *oid,
{
struct stale_heads_info *info = cb_data;
struct string_list matches = STRING_LIST_INIT_DUP;
- struct refspec query;
+ struct refspec_item query;
int i, stale = 1;
- memset(&query, 0, sizeof(struct refspec));
+ memset(&query, 0, sizeof(struct refspec_item));
query.dst = (char *)refname;
- query_refspecs_multiple(info->refs, info->ref_count, &query, &matches);
+ query_refspecs_multiple(info->rs, &query, &matches);
if (matches.nr == 0)
goto clean_exit; /* No matches */
return 0;
}
-struct ref *get_stale_heads(struct refspec *refs, int ref_count, struct ref *fetch_map)
+struct ref *get_stale_heads(struct refspec *rs, struct ref *fetch_map)
{
struct ref *ref, *stale_refs = NULL;
struct string_list ref_names = STRING_LIST_INIT_NODUP;
info.ref_names = &ref_names;
info.stale_refs_tail = &stale_refs;
- info.refs = refs;
- info.ref_count = ref_count;
+ info.rs = rs;
for (ref = fetch_map; ref; ref = ref->next)
string_list_append(&ref_names, ref->name);
string_list_sort(&ref_names);
{
char *dst;
- dst = apply_refspecs(remote->fetch, remote->fetch_refspec_nr, refname);
+ dst = apply_refspecs(&remote->fetch, refname);
if (!dst)
return -1; /* no tracking ref for refname at remote */
if (read_ref(dst, oid))
/* The store in which the refs are held. */
struct ref_store *refs;
+ /*
+ * Holds the paths of frequently used files.
+ */
+ struct path_cache cached_paths;
+
/*
* Path to the repository's graft file.
* Cannot be NULL after initialization.
extern void repo_set_worktree(struct repository *repo, const char *path);
extern void repo_set_hash_algo(struct repository *repo, int algo);
extern void initialize_the_repository(void);
+extern int repo_init(struct repository *r,
+ const char *gitdir,
+ const char *worktree);
extern int repo_submodule_init(struct repository *submodule,
struct repository *superproject,
const char *path);
#include "ll-merge.h"
#include "attr.h"
#include "pathspec.h"
+ #include "object-store.h"
#include "sha1-lookup.h"
#define RESOLVED 0
static void read_rr(struct string_list *rr)
{
struct strbuf buf = STRBUF_INIT;
- FILE *in = fopen_or_warn(git_path_merge_rr(), "r");
+ FILE *in = fopen_or_warn(git_path_merge_rr(the_repository), "r");
if (!in)
return;
return ret;
}
-static struct lock_file index_lock;
-
static void update_paths(struct string_list *update)
{
+ struct lock_file index_lock = LOCK_INIT;
int i;
hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR);
if (flags & RERERE_READONLY)
fd = 0;
else
- fd = hold_lock_file_for_update(&write_lock, git_path_merge_rr(),
+ fd = hold_lock_file_for_update(&write_lock,
+ git_path_merge_rr(the_repository),
LOCK_DIE_ON_ERROR);
read_rr(merge_rr);
return fd;
rmdir(rerere_path(id, NULL));
}
}
- unlink_or_warn(git_path_merge_rr());
+ unlink_or_warn(git_path_merge_rr(the_repository));
rollback_lock_file(&write_lock);
}
#include "cache.h"
+ #include "object-store.h"
#include "tag.h"
#include "blob.h"
#include "tree.h"
static const char *term_bad;
static const char *term_good;
+implement_shared_commit_slab(revision_sources, char *);
+
void show_object_with_name(FILE *out, struct object *obj, const char *name)
{
const char *p;
{
struct tree_desc desc;
struct name_entry entry;
- struct object *obj = &tree->object;
- if (!has_object_file(&obj->oid))
+ if (parse_tree_gently(tree, 1) < 0)
return;
- if (parse_tree(tree) < 0)
- die("bad tree %s", oid_to_hex(&obj->oid));
init_tree_desc(&desc, tree->buffer, tree->size);
while (tree_entry(&desc, &entry)) {
mark_tree_contents_uninteresting(tree);
}
-void mark_parents_uninteresting(struct commit *commit)
+struct commit_stack {
+ struct commit **items;
+ size_t nr, alloc;
+};
+#define COMMIT_STACK_INIT { NULL, 0, 0 }
+
+static void commit_stack_push(struct commit_stack *stack, struct commit *commit)
{
- struct commit_list *parents = NULL, *l;
+ ALLOC_GROW(stack->items, stack->nr + 1, stack->alloc);
+ stack->items[stack->nr++] = commit;
+}
- for (l = commit->parents; l; l = l->next)
- commit_list_insert(l->item, &parents);
+static struct commit *commit_stack_pop(struct commit_stack *stack)
+{
+ return stack->nr ? stack->items[--stack->nr] : NULL;
+}
- while (parents) {
- struct commit *commit = pop_commit(&parents);
+static void commit_stack_clear(struct commit_stack *stack)
+{
+ FREE_AND_NULL(stack->items);
+ stack->nr = stack->alloc = 0;
+}
- while (commit) {
- /*
- * A missing commit is ok iff its parent is marked
- * uninteresting.
- *
- * We just mark such a thing parsed, so that when
- * it is popped next time around, we won't be trying
- * to parse it and get an error.
- */
- if (!commit->object.parsed &&
- !has_object_file(&commit->object.oid))
- commit->object.parsed = 1;
+static void mark_one_parent_uninteresting(struct commit *commit,
+ struct commit_stack *pending)
+{
+ struct commit_list *l;
- if (commit->object.flags & UNINTERESTING)
- break;
+ if (commit->object.flags & UNINTERESTING)
+ return;
+ commit->object.flags |= UNINTERESTING;
+
+ /*
+ * Normally we haven't parsed the parent
+ * yet, so we won't have a parent of a parent
+ * here. However, it may turn out that we've
+ * reached this commit some other way (where it
+ * wasn't uninteresting), in which case we need
+ * to mark its parents recursively too.
+ */
+ for (l = commit->parents; l; l = l->next)
+ commit_stack_push(pending, l->item);
+}
- commit->object.flags |= UNINTERESTING;
+void mark_parents_uninteresting(struct commit *commit)
+{
+ struct commit_stack pending = COMMIT_STACK_INIT;
+ struct commit_list *l;
- /*
- * Normally we haven't parsed the parent
- * yet, so we won't have a parent of a parent
- * here. However, it may turn out that we've
- * reached this commit some other way (where it
- * wasn't uninteresting), in which case we need
- * to mark its parents recursively too..
- */
- if (!commit->parents)
- break;
+ for (l = commit->parents; l; l = l->next)
+ mark_one_parent_uninteresting(l->item, &pending);
- for (l = commit->parents->next; l; l = l->next)
- commit_list_insert(l->item, &parents);
- commit = commit->parents->item;
- }
- }
+ while (pending.nr > 0)
+ mark_one_parent_uninteresting(commit_stack_pop(&pending),
+ &pending);
+
+ commit_stack_clear(&pending);
}
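/*
 * Editor's note: mark_parents_uninteresting() now drives the walk with an
 * explicit, growable commit_stack instead of a temporary commit_list, so
 * there is no longer one list-node allocation per parent visited. A
 * minimal sketch of the helper API, with visit() as a hypothetical
 * callback:
 *
 *	struct commit_stack pending = COMMIT_STACK_INIT;
 *
 *	commit_stack_push(&pending, start);
 *	while (pending.nr > 0)
 *		visit(commit_stack_pop(&pending));
 *	commit_stack_clear(&pending);
 */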
static void add_pending_object_with_path(struct rev_info *revs,
*/
if (object->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *)object;
+
if (parse_commit(commit) < 0)
die("unable to parse commit %s", name);
if (flags & UNINTERESTING) {
mark_parents_uninteresting(commit);
revs->limited = 1;
}
- if (revs->show_source && !commit->util)
- commit->util = xstrdup(name);
+ if (revs->sources) {
+ char **slot = revision_sources_at(revs->sources, commit);
+
+ if (!*slot)
+ *slot = xstrdup(name);
+ }
return commit;
}
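/*
 * Editor's note: the source name now lives in a "revision_sources" commit
 * slab (declared with implement_shared_commit_slab() above) instead of the
 * commit->util pointer. A hypothetical caller that wants per-commit source
 * annotations might set it up like this:
 *
 *	static struct revision_sources sources;
 *
 *	init_revision_sources(&sources);
 *	revs.sources = &sources;
 */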
static int rev_compare_tree(struct rev_info *revs,
struct commit *parent, struct commit *commit)
{
- struct tree *t1 = parent->tree;
- struct tree *t2 = commit->tree;
+ struct tree *t1 = get_commit_tree(parent);
+ struct tree *t2 = get_commit_tree(commit);
if (!t1)
return REV_TREE_NEW;
static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit)
{
int retval;
- struct tree *t1 = commit->tree;
+ struct tree *t1 = get_commit_tree(commit);
if (!t1)
return 0;
if (!revs->prune)
return;
- if (!commit->tree)
+ if (!get_commit_tree(commit))
return;
if (!commit->parents) {
}
return -1;
}
- if (revs->show_source && !p->util)
- p->util = commit->util;
+ if (revs->sources) {
+ char **slot = revision_sources_at(revs->sources, p);
+
+ if (!*slot)
+ *slot = *revision_sources_at(revs->sources, commit);
+ }
p->object.flags |= left_flag;
if (!(p->object.flags & SEEN)) {
p->object.flags |= SEEN;
const char *arg = argv[0];
const char *optarg;
int argcount;
+ const unsigned hexsz = the_hash_algo->hexsz;
/* pseudo revision arguments */
if (!strcmp(arg, "--all") || !strcmp(arg, "--branches") ||
revs->abbrev = strtoul(optarg, NULL, 10);
if (revs->abbrev < MINIMUM_ABBREV)
revs->abbrev = MINIMUM_ABBREV;
- else if (revs->abbrev > 40)
- revs->abbrev = 40;
+ else if (revs->abbrev > hexsz)
+ revs->abbrev = hexsz;
} else if (!strcmp(arg, "--abbrev-commit")) {
revs->abbrev_commit = 1;
revs->abbrev_commit_given = 1;
revs->ignore_missing = 1;
} else if (!strcmp(arg, "--exclude-promisor-objects")) {
if (fetch_if_missing)
- die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+ BUG("exclude_promisor_objects can only be used when fetch_if_missing is 0");
revs->exclude_promisor_objects = 1;
} else {
int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
* supported right now, so stick to single worktree.
*/
if (!revs->single_worktree)
- die("BUG: --single-worktree cannot be used together with submodule");
+ BUG("--single-worktree cannot be used together with submodule");
refs = get_submodule_ref_store(submodule);
} else
refs = get_main_ref_store(the_repository);
{
if (commit->object.flags & SHOWN)
return commit_ignore;
- if (revs->unpacked && has_sha1_pack(commit->object.oid.hash))
+ if (revs->unpacked && has_object_pack(&commit->object.oid))
return commit_ignore;
if (commit->object.flags & UNINTERESTING)
return commit_ignore;
#include "config.h"
#include "lockfile.h"
#include "dir.h"
+ #include "object-store.h"
#include "object.h"
#include "commit.h"
#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "utf8.h"
#include "cache-tree.h"
#include "diff.h"
#include "hashmap.h"
#include "notes-utils.h"
#include "sigchain.h"
+#include "unpack-trees.h"
+#include "worktree.h"
+#include "oidmap.h"
+#include "oidset.h"
+#include "commit-slab.h"
+#include "alias.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
* previous commit and from the first squash/fixup commit are written
* to it. The commit message for each subsequent squash/fixup commit
* is appended to the file as it is processed.
- *
- * The first line of the file is of the form
- * # This is a combination of $count commits.
- * where $count is the number of commits whose messages have been
- * written to the file so far (including the initial "pick" commit).
- * Each time that a commit message is processed, this line is read and
- * updated. It is deleted just before the combined commit is made.
*/
static GIT_PATH_FUNC(rebase_path_squash_msg, "rebase-merge/message-squash")
/*
* commit without opening the editor.)
*/
static GIT_PATH_FUNC(rebase_path_fixup_msg, "rebase-merge/message-fixup")
+/*
+ * This file contains the list of fixup/squash commands that have been
+ * accumulated into message-fixup or message-squash so far.
+ */
+static GIT_PATH_FUNC(rebase_path_current_fixups, "rebase-merge/current-fixups")
/*
* A script to set the GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, and
* GIT_AUTHOR_DATE that will be used for the commit that is currently
static GIT_PATH_FUNC(rebase_path_rewritten_list, "rebase-merge/rewritten-list")
static GIT_PATH_FUNC(rebase_path_rewritten_pending,
"rebase-merge/rewritten-pending")
+
+/*
+ * The path of the file containing the OID of the "squash onto" commit, i.e.
+ * the dummy commit used for `reset [new root]`.
+ */
+static GIT_PATH_FUNC(rebase_path_squash_onto, "rebase-merge/squash-onto")
+
+/*
+ * The path of the file listing refs that need to be deleted after the rebase
+ * finishes. This is used by the `label` command to record the need for cleanup.
+ */
+static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete")
+
/*
* The following files are written by git-rebase just after parsing the
* command-line (and are only consumed, not modified, by the sequencer).
static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt")
static GIT_PATH_FUNC(rebase_path_orig_head, "rebase-merge/orig-head")
static GIT_PATH_FUNC(rebase_path_verbose, "rebase-merge/verbose")
+static GIT_PATH_FUNC(rebase_path_signoff, "rebase-merge/signoff")
static GIT_PATH_FUNC(rebase_path_head_name, "rebase-merge/head-name")
static GIT_PATH_FUNC(rebase_path_onto, "rebase-merge/onto")
static GIT_PATH_FUNC(rebase_path_autostash, "rebase-merge/autostash")
warning(_("invalid commit message cleanup mode '%s'"),
s);
+ free((char *)s);
return status;
}
int sequencer_remove_state(struct replay_opts *opts)
{
- struct strbuf dir = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
int i;
+ if (is_rebase_i(opts) &&
+ strbuf_read_file(&buf, rebase_path_refs_to_delete(), 0) > 0) {
+ char *p = buf.buf;
+ while (*p) {
+ char *eol = strchr(p, '\n');
+ if (eol)
+ *eol = '\0';
+ if (delete_ref("(rebase -i) cleanup", p, NULL, 0) < 0)
+ warning(_("could not delete '%s'"), p);
+ if (!eol)
+ break;
+ p = eol + 1;
+ }
+ }
+
free(opts->gpg_sign);
free(opts->strategy);
for (i = 0; i < opts->xopts_nr; i++)
free(opts->xopts[i]);
free(opts->xopts);
+ strbuf_release(&opts->current_fixups);
- strbuf_addstr(&dir, get_dir(opts));
- remove_dir_recursively(&dir, 0);
- strbuf_release(&dir);
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, get_dir(opts));
+ remove_dir_recursively(&buf, 0);
+ strbuf_release(&buf);
return 0;
}
* (typically rebase --interactive) wants to take care
* of the commit itself so remove CHERRY_PICK_HEAD
*/
- unlink(git_path_cherry_pick_head());
+ unlink(git_path_cherry_pick_head(the_repository));
return;
}
if (msg_fd < 0)
return error_errno(_("could not lock '%s'"), filename);
if (write_in_full(msg_fd, buf, len) < 0) {
+ error_errno(_("could not write to '%s'"), filename);
rollback_lock_file(&msg_file);
- return error_errno(_("could not write to '%s'"), filename);
+ return -1;
}
if (append_eol && write(msg_fd, "\n", 1) < 0) {
+ error_errno(_("could not write eol to '%s'"), filename);
rollback_lock_file(&msg_file);
- return error_errno(_("could not write eol to '%s'"), filename);
+ return -1;
}
if (commit_lock_file(&msg_file) < 0)
return error(_("failed to finalize '%s'"), filename);
transaction = ref_transaction_begin(&err);
if (!transaction ||
ref_transaction_update(transaction, "HEAD",
- to, unborn ? &null_oid : from,
+ to, unborn && !is_rebase_i(opts) ?
+ &null_oid : from,
0, sb.buf, &err) ||
ref_transaction_commit(transaction, &err)) {
ref_transaction_free(transaction);
o.show_rename_progress = 1;
head_tree = parse_tree_indirect(head);
- next_tree = next ? next->tree : empty_tree();
- base_tree = base ? base->tree : empty_tree();
+ next_tree = next ? get_commit_tree(next) : empty_tree();
+ base_tree = base ? get_commit_tree(base) : empty_tree();
for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
parse_merge_opt(&o, *xopt);
return !clean;
}
+static struct object_id *get_cache_tree_oid(void)
+{
+ if (!active_cache_tree)
+ active_cache_tree = cache_tree();
+
+ if (!cache_tree_fully_valid(active_cache_tree))
+ if (cache_tree_update(&the_index, 0)) {
+ error(_("unable to update cache tree"));
+ return NULL;
+ }
+
+ return &active_cache_tree->oid;
+}
+
static int is_index_unchanged(void)
{
- struct object_id head_oid;
+ struct object_id head_oid, *cache_tree_oid;
struct commit *head_commit;
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
if (parse_commit(head_commit))
return -1;
- if (!active_cache_tree)
- active_cache_tree = cache_tree();
-
- if (!cache_tree_fully_valid(active_cache_tree))
- if (cache_tree_update(&the_index, 0))
- return error(_("unable to update cache tree"));
+ if (!(cache_tree_oid = get_cache_tree_oid()))
+ return -1;
- return !oidcmp(&active_cache_tree->oid,
- &head_commit->tree->object.oid);
+ return !oidcmp(cache_tree_oid, get_commit_tree_oid(head_commit));
}
static int write_author_script(const char *message)
return NULL;
}
+/* Read author-script and return an ident line (author <email> timestamp) */
+static const char *read_author_ident(struct strbuf *buf)
+{
+ const char *keys[] = {
+ "GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE="
+ };
+ char *in, *out, *eol;
+ int i = 0, len;
+
+ if (strbuf_read_file(buf, rebase_path_author_script(), 256) <= 0)
+ return NULL;
+
+ /* dequote values and construct ident line in-place */
+ for (in = out = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
+ if (!skip_prefix(in, keys[i], (const char **)&in)) {
+ warning("could not parse '%s' (looking for '%s'",
+ rebase_path_author_script(), keys[i]);
+ return NULL;
+ }
+
+ eol = strchrnul(in, '\n');
+ *eol = '\0';
+ sq_dequote(in);
+ len = strlen(in);
+
+ if (i > 0) /* separate values by spaces */
+ *(out++) = ' ';
+ if (i == 1) /* email needs to be surrounded by <...> */
+ *(out++) = '<';
+ memmove(out, in, len);
+ out += len;
+ if (i == 1) /* email needs to be surrounded by <...> */
+ *(out++) = '>';
+ in = eol + 1;
+ }
+
+ if (i < 3) {
+ warning("could not parse '%s' (looking for '%s')",
+ rebase_path_author_script(), keys[i]);
+ return NULL;
+ }
+
+ buf->len = out - buf->buf;
+ return buf->buf;
+}
+
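/*
 * Editor's illustration (not part of the patch): given an author-script
 * such as
 *
 *	GIT_AUTHOR_NAME='A U Thor'
 *	GIT_AUTHOR_EMAIL='author@example.com'
 *	GIT_AUTHOR_DATE='@1234567890 +0000'
 *
 * read_author_ident() rewrites the buffer in place and returns the ident
 * line
 *
 *	A U Thor <author@example.com> @1234567890 +0000
 */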
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
#define AMEND_MSG (1<<2)
#define CLEANUP_MSG (1<<3)
#define VERIFY_MSG (1<<4)
+#define CREATE_ROOT_COMMIT (1<<5)
/*
* If we are cherry-pick, and if the merge did not result in
struct child_process cmd = CHILD_PROCESS_INIT;
const char *value;
+ if ((flags & CREATE_ROOT_COMMIT) && !(flags & AMEND_MSG)) {
+ struct strbuf msg = STRBUF_INIT, script = STRBUF_INIT;
+ const char *author = is_rebase_i(opts) ?
+ read_author_ident(&script) : NULL;
+ struct object_id root_commit, *cache_tree_oid;
+ int res = 0;
+
+ if (!defmsg)
+ BUG("root commit without message");
+
+ if (!(cache_tree_oid = get_cache_tree_oid()))
+ res = -1;
+
+ if (!res)
+ res = strbuf_read_file(&msg, defmsg, 0);
+
+ if (res <= 0)
+ res = error_errno(_("could not read '%s'"), defmsg);
+ else
+ res = commit_tree(msg.buf, msg.len, cache_tree_oid,
+ NULL, &root_commit, author,
+ opts->gpg_sign);
+
+ strbuf_release(&msg);
+ strbuf_release(&script);
+ if (!res) {
+ update_ref(NULL, "CHERRY_PICK_HEAD", &root_commit, NULL,
+ REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR);
+ res = update_ref(NULL, "HEAD", &root_commit, NULL, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ }
+ return res < 0 ? error(_("writing root commit")) : 0;
+ }
+
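/*
 * Editor's note: the block above handles the new CREATE_ROOT_COMMIT case.
 * Instead of spawning "git commit", it writes the commit directly with
 * commit_tree() from the current cache tree, using the author recorded in
 * the rebase author-script, and then points CHERRY_PICK_HEAD and HEAD at
 * the newly created root commit.
 */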
cmd.git_cmd = 1;
if (is_rebase_i(opts)) {
argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
if (defmsg)
argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
+ else if (!(flags & EDIT_MSG))
+ argv_array_pushl(&cmd.args, "-C", "HEAD", NULL);
if ((flags & CLEANUP_MSG))
argv_array_push(&cmd.args, "--cleanup=strip");
if ((flags & EDIT_MSG))
}
if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
- ¤t_head->tree->object.oid :
- &empty_tree_oid, &tree)) {
+ get_commit_tree_oid(current_head) :
+ the_hash_algo->empty_tree, &tree)) {
res = 1; /* run 'git commit' to display error message */
goto out;
}
goto out;
}
+ reset_ident_date();
+
if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
oid, author, opts->gpg_sign, extra)) {
res = error(_("failed to write commit object"));
{
int res = 1;
- if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG) &&
+ !(flags & CREATE_ROOT_COMMIT)) {
struct object_id oid;
struct strbuf sb = STRBUF_INIT;
&oid);
strbuf_release(&sb);
if (!res) {
- unlink(git_path_cherry_pick_head());
- unlink(git_path_merge_msg());
+ unlink(git_path_cherry_pick_head(the_repository));
+ unlink(git_path_merge_msg(the_repository));
if (!is_rebase_i(opts))
print_commit_summary(NULL, &oid,
SUMMARY_SHOW_AUTHOR_DATE);
if (parse_commit(parent))
return error(_("could not parse parent commit %s"),
oid_to_hex(&parent->object.oid));
- ptree_oid = &parent->tree->object.oid;
+ ptree_oid = get_commit_tree_oid(parent);
} else {
ptree_oid = the_hash_algo->empty_tree; /* commit is root */
}
- return !oidcmp(ptree_oid, &commit->tree->object.oid);
+ return !oidcmp(ptree_oid, get_commit_tree_oid(commit));
}
/*
TODO_SQUASH,
/* commands that do something else than handling a single commit */
TODO_EXEC,
+ TODO_LABEL,
+ TODO_RESET,
+ TODO_MERGE,
/* commands that do nothing but are counted for reporting progress */
TODO_NOOP,
TODO_DROP,
{ 'f', "fixup" },
{ 's', "squash" },
{ 'x', "exec" },
+ { 'l', "label" },
+ { 't', "reset" },
+ { 'm', "merge" },
{ 0, "noop" },
{ 'd', "drop" },
{ 0, NULL }
return command == TODO_FIXUP || command == TODO_SQUASH;
}
+/* Does this command create a (non-merge) commit? */
+static int is_pick_or_similar(enum todo_command command)
+{
+ switch (command) {
+ case TODO_PICK:
+ case TODO_REVERT:
+ case TODO_EDIT:
+ case TODO_REWORD:
+ case TODO_FIXUP:
+ case TODO_SQUASH:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static int update_squash_messages(enum todo_command command,
struct commit *commit, struct replay_opts *opts)
{
struct strbuf buf = STRBUF_INIT;
- int count, res;
+ int res;
const char *message, *body;
- if (file_exists(rebase_path_squash_msg())) {
+ if (opts->current_fixup_count > 0) {
struct strbuf header = STRBUF_INIT;
- char *eol, *p;
+ char *eol;
- if (strbuf_read_file(&buf, rebase_path_squash_msg(), 2048) <= 0)
+ if (strbuf_read_file(&buf, rebase_path_squash_msg(), 9) <= 0)
return error(_("could not read '%s'"),
rebase_path_squash_msg());
- p = buf.buf + 1;
- eol = strchrnul(buf.buf, '\n');
- if (buf.buf[0] != comment_line_char ||
- (p += strcspn(p, "0123456789\n")) == eol)
- return error(_("unexpected 1st line of squash message:"
- "\n\n\t%.*s"),
- (int)(eol - buf.buf), buf.buf);
- count = strtol(p, NULL, 10);
-
- if (count < 1)
- return error(_("invalid 1st line of squash message:\n"
- "\n\t%.*s"),
- (int)(eol - buf.buf), buf.buf);
+ eol = buf.buf[0] != comment_line_char ?
+ buf.buf : strchrnul(buf.buf, '\n');
strbuf_addf(&header, "%c ", comment_line_char);
- strbuf_addf(&header,
- _("This is a combination of %d commits."), ++count);
+ strbuf_addf(&header, _("This is a combination of %d commits."),
+ opts->current_fixup_count + 2);
strbuf_splice(&buf, 0, eol - buf.buf, header.buf, header.len);
strbuf_release(&header);
} else {
rebase_path_fixup_msg());
}
- count = 2;
strbuf_addf(&buf, "%c ", comment_line_char);
- strbuf_addf(&buf, _("This is a combination of %d commits."),
- count);
+ strbuf_addf(&buf, _("This is a combination of %d commits."), 2);
strbuf_addf(&buf, "\n%c ", comment_line_char);
strbuf_addstr(&buf, _("This is the 1st commit message:"));
strbuf_addstr(&buf, "\n\n");
if (command == TODO_SQUASH) {
unlink(rebase_path_fixup_msg());
strbuf_addf(&buf, "\n%c ", comment_line_char);
- strbuf_addf(&buf, _("This is the commit message #%d:"), count);
+ strbuf_addf(&buf, _("This is the commit message #%d:"),
+ ++opts->current_fixup_count);
strbuf_addstr(&buf, "\n\n");
strbuf_addstr(&buf, body);
} else if (command == TODO_FIXUP) {
strbuf_addf(&buf, "\n%c ", comment_line_char);
strbuf_addf(&buf, _("The commit message #%d will be skipped:"),
- count);
+ ++opts->current_fixup_count);
strbuf_addstr(&buf, "\n\n");
strbuf_add_commented_lines(&buf, body, strlen(body));
} else
res = write_message(buf.buf, buf.len, rebase_path_squash_msg(), 0);
strbuf_release(&buf);
+
+ if (!res) {
+ strbuf_addf(&opts->current_fixups, "%s%s %s",
+ opts->current_fixups.len ? "\n" : "",
+ command_to_string(command),
+ oid_to_hex(&commit->object.oid));
+ res = write_message(opts->current_fixups.buf,
+ opts->current_fixups.len,
+ rebase_path_current_fixups(), 0);
+ }
+
return res;
}
struct replay_opts *opts, int final_fixup)
{
unsigned int flags = opts->edit ? EDIT_MSG : 0;
- const char *msg_file = opts->edit ? NULL : git_path_merge_msg();
+ const char *msg_file = opts->edit ? NULL : git_path_merge_msg(the_repository);
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
return error(_("your index file is unmerged."));
} else {
unborn = get_oid("HEAD", &head);
- if (unborn)
+ /* Do we want to generate a root commit? */
+ if (is_pick_or_similar(command) && opts->have_squash_onto &&
+ !oidcmp(&head, &opts->squash_onto)) {
+ if (is_fixup(command))
+ return error(_("cannot fixup root commit"));
+ flags |= CREATE_ROOT_COMMIT;
+ unborn = 1;
+ } else if (unborn)
oidcpy(&head, the_hash_algo->empty_tree);
- if (index_differs_from(unborn ? EMPTY_TREE_SHA1_HEX : "HEAD",
+ if (index_differs_from(unborn ? empty_tree_oid_hex() : "HEAD",
NULL, 0))
return error_dirty_index(opts);
}
flags |= CLEANUP_MSG;
msg_file = rebase_path_fixup_msg();
} else {
- const char *dest = git_path_squash_msg();
+ const char *dest = git_path_squash_msg(the_repository);
unlink(dest);
if (copy_file(dest, rebase_path_squash_msg(), 0666))
return error(_("could not rename '%s' to '%s'"),
rebase_path_squash_msg(), dest);
- unlink(git_path_merge_msg());
+ unlink(git_path_merge_msg(the_repository));
msg_file = dest;
flags |= EDIT_MSG;
}
}
- if (opts->signoff)
+ if (opts->signoff && !is_fixup(command))
append_signoff(&msgbuf, 0, 0);
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
res = do_recursive_merge(base, next, base_label, next_label,
&head, &msgbuf, opts);
if (res < 0)
- return res;
+ goto leave;
+
res |= write_message(msgbuf.buf, msgbuf.len,
- git_path_merge_msg(), 0);
+ git_path_merge_msg(the_repository), 0);
} else {
struct commit_list *common = NULL;
struct commit_list *remotes = NULL;
res = write_message(msgbuf.buf, msgbuf.len,
- git_path_merge_msg(), 0);
+ git_path_merge_msg(the_repository), 0);
commit_list_insert(base, &common);
commit_list_insert(next, &remotes);
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
unlink(rebase_path_squash_msg());
+ unlink(rebase_path_current_fixups());
+ strbuf_reset(&opts->current_fixups);
+ opts->current_fixup_count = 0;
}
leave:
return 0;
}
+enum todo_item_flags {
+ TODO_EDIT_MERGE_MSG = 1
+};
+
struct todo_item {
enum todo_command command;
struct commit *commit;
+ unsigned int flags;
const char *arg;
int arg_len;
size_t offset_in_buf;
char *end_of_object_name;
int i, saved, status, padding;
+ item->flags = 0;
+
/* left-trim */
bol += strspn(bol, " \t");
return error(_("missing arguments for %s"),
command_to_string(item->command));
- if (item->command == TODO_EXEC) {
+ if (item->command == TODO_EXEC || item->command == TODO_LABEL ||
+ item->command == TODO_RESET) {
item->commit = NULL;
item->arg = bol;
item->arg_len = (int)(eol - bol);
return 0;
}
+ if (item->command == TODO_MERGE) {
+ if (skip_prefix(bol, "-C", &bol))
+ bol += strspn(bol, " \t");
+ else if (skip_prefix(bol, "-c", &bol)) {
+ bol += strspn(bol, " \t");
+ item->flags |= TODO_EDIT_MERGE_MSG;
+ } else {
+ item->flags |= TODO_EDIT_MERGE_MSG;
+ item->commit = NULL;
+ item->arg = bol;
+ item->arg_len = (int)(eol - bol);
+ return 0;
+ }
+ }
+
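/*
 * Editor's illustration (not part of the patch): the three accepted forms
 * of the new "merge" instruction, as parsed by the block above:
 *
 *	merge -C <commit> <label>   reuse the message of <commit>
 *	merge -c <commit> <label>   reuse it, but open the editor
 *	merge <label>               no original merge commit; open the editor
 */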
end_of_object_name = (char *) bol + strcspn(bol, " \t\n");
saved = *end_of_object_name;
*end_of_object_name = '\0';
return count;
}
+static int get_item_line_offset(struct todo_list *todo_list, int index)
+{
+ return index < todo_list->nr ?
+ todo_list->items[index].offset_in_buf : todo_list->buf.len;
+}
+
+static const char *get_item_line(struct todo_list *todo_list, int index)
+{
+ return todo_list->buf.buf + get_item_line_offset(todo_list, index);
+}
+
+static int get_item_line_length(struct todo_list *todo_list, int index)
+{
+ return get_item_line_offset(todo_list, index + 1)
+ - get_item_line_offset(todo_list, index);
+}
+
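/*
 * Editor's illustration (not part of the patch): copying the text of todo
 * item i without assuming NUL termination, using the helpers above:
 *
 *	strbuf_add(&out, get_item_line(todo_list, i),
 *		   get_item_line_length(todo_list, i));
 *
 * For the last item, get_item_line_offset(todo_list, i + 1) falls back to
 * todo_list->buf.len, so the length runs to the end of the buffer.
 */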
static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
{
int fd;
if (file_exists(rebase_path_verbose()))
opts->verbose = 1;
+ if (file_exists(rebase_path_signoff())) {
+ opts->allow_ff = 0;
+ opts->signoff = 1;
+ }
+
read_strategy_opts(opts, &buf);
strbuf_release(&buf);
+ if (read_oneliner(&opts->current_fixups,
+ rebase_path_current_fixups(), 1)) {
+ const char *p = opts->current_fixups.buf;
+ opts->current_fixup_count = 1;
+ while ((p = strchr(p, '\n'))) {
+ opts->current_fixup_count++;
+ p++;
+ }
+ }
+
+ if (read_oneliner(&buf, rebase_path_squash_onto(), 0)) {
+ if (get_oid_hex(buf.buf, &opts->squash_onto) < 0)
+ return error(_("unusable squash-onto"));
+ opts->have_squash_onto = 1;
+ }
+
return 0;
}
written = write_in_full(fd, buf.buf, buf.len);
strbuf_release(&buf);
if (written < 0) {
+ error_errno(_("could not write to '%s'"), git_path_head_file());
rollback_lock_file(&head_lock);
- return error_errno(_("could not write to '%s'"),
- git_path_head_file());
+ return -1;
}
if (commit_lock_file(&head_lock) < 0)
return error(_("failed to finalize '%s'"), git_path_head_file());
{
struct object_id head_oid;
- if (!file_exists(git_path_cherry_pick_head()) &&
- !file_exists(git_path_revert_head()))
+ if (!file_exists(git_path_cherry_pick_head(the_repository)) &&
+ !file_exists(git_path_revert_head(the_repository)))
return error(_("no cherry-pick or revert in progress"));
if (read_ref_full("HEAD", 0, &head_oid, NULL))
return error(_("cannot resolve HEAD"));
fd = hold_lock_file_for_update(&todo_lock, todo_path, 0);
if (fd < 0)
return error_errno(_("could not lock '%s'"), todo_path);
- offset = next < todo_list->nr ?
- todo_list->items[next].offset_in_buf : todo_list->buf.len;
+ offset = get_item_line_offset(todo_list, next);
if (write_in_full(fd, todo_list->buf.buf + offset,
todo_list->buf.len - offset) < 0)
return error_errno(_("could not write to '%s'"), todo_path);
if (commit_lock_file(&todo_lock) < 0)
return error(_("failed to finalize '%s'"), todo_path);
- if (is_rebase_i(opts)) {
- const char *done_path = rebase_path_done();
- int fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
- int prev_offset = !next ? 0 :
- todo_list->items[next - 1].offset_in_buf;
+ if (is_rebase_i(opts) && next > 0) {
+ const char *done = rebase_path_done();
+ int fd = open(done, O_CREAT | O_WRONLY | O_APPEND, 0666);
+ int ret = 0;
- if (fd >= 0 && offset > prev_offset &&
- write_in_full(fd, todo_list->buf.buf + prev_offset,
- offset - prev_offset) < 0) {
- close(fd);
- return error_errno(_("could not write to '%s'"),
- done_path);
- }
- if (fd >= 0)
- close(fd);
+ if (fd < 0)
+ return 0;
+ if (write_in_full(fd, get_item_line(todo_list, next - 1),
+ get_item_line_length(todo_list, next - 1))
+ < 0)
+ ret = error_errno(_("could not write to '%s'"), done);
+ if (close(fd) < 0)
+ ret = error_errno(_("failed to finalize '%s'"), done);
+ return ret;
}
return 0;
}
static int error_failed_squash(struct commit *commit,
struct replay_opts *opts, int subject_len, const char *subject)
{
- if (rename(rebase_path_squash_msg(), rebase_path_message()))
- return error(_("could not rename '%s' to '%s'"),
+ if (copy_file(rebase_path_message(), rebase_path_squash_msg(), 0666))
+ return error(_("could not copy '%s' to '%s'"),
rebase_path_squash_msg(), rebase_path_message());
- unlink(git_path_merge_msg());
- if (copy_file(git_path_merge_msg(), rebase_path_message(), 0666))
- unlink(rebase_path_fixup_msg());
+ unlink(git_path_merge_msg(the_repository));
+ if (copy_file(git_path_merge_msg(the_repository), rebase_path_message(), 0666))
return error(_("could not copy '%s' to '%s'"),
- rebase_path_message(), git_path_merge_msg());
+ rebase_path_message(),
+ git_path_merge_msg(the_repository));
return error_with_patch(commit, subject, subject_len, opts, 1, 0);
}
return status;
}
- ret = write_message(body, len, git_path_merge_msg(), 0);
+static int safe_append(const char *filename, const char *fmt, ...)
+{
+ va_list ap;
+ struct lock_file lock = LOCK_INIT;
+ int fd = hold_lock_file_for_update(&lock, filename,
+ LOCK_REPORT_ON_ERROR);
+ struct strbuf buf = STRBUF_INIT;
+
+ if (fd < 0)
+ return -1;
+
+ if (strbuf_read_file(&buf, filename, 0) < 0 && errno != ENOENT) {
+ error_errno(_("could not read '%s'"), filename);
+ rollback_lock_file(&lock);
+ return -1;
+ }
+ strbuf_complete(&buf, '\n');
+ va_start(ap, fmt);
+ strbuf_vaddf(&buf, fmt, ap);
+ va_end(ap);
+
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
+ error_errno(_("could not write to '%s'"), filename);
+ strbuf_release(&buf);
+ rollback_lock_file(&lock);
+ return -1;
+ }
+ if (commit_lock_file(&lock) < 0) {
+ strbuf_release(&buf);
+ rollback_lock_file(&lock);
+ return error(_("failed to finalize '%s'"), filename);
+ }
+
+ strbuf_release(&buf);
+ return 0;
+}
+
+static int do_label(const char *name, int len)
+{
+ struct ref_store *refs = get_main_ref_store(the_repository);
+ struct ref_transaction *transaction;
+ struct strbuf ref_name = STRBUF_INIT, err = STRBUF_INIT;
+ struct strbuf msg = STRBUF_INIT;
+ int ret = 0;
+ struct object_id head_oid;
+
+ if (len == 1 && *name == '#')
+ return error("Illegal label name: '%.*s'", len, name);
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+ strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
+
+ transaction = ref_store_transaction_begin(refs, &err);
+ if (!transaction) {
+ error("%s", err.buf);
+ ret = -1;
+ } else if (get_oid("HEAD", &head_oid)) {
+ error(_("could not read HEAD"));
+ ret = -1;
+ } else if (ref_transaction_update(transaction, ref_name.buf, &head_oid,
+ NULL, 0, msg.buf, &err) < 0 ||
+ ref_transaction_commit(transaction, &err)) {
+ error("%s", err.buf);
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ strbuf_release(&msg);
+
+ if (!ret)
+ ret = safe_append(rebase_path_refs_to_delete(),
+ "%s\n", ref_name.buf);
+ strbuf_release(&ref_name);
+
+ return ret;
+}
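
For orientation, an editor's sketch (not part of the patch) of how the new todo commands fit together, with do_label() above creating the refs that do_reset() and do_merge() below consume; the label name 'feature' is made up:

#   label feature    ->  create refs/rewritten/feature at the current HEAD and
#                        record that ref in refs-to-delete for cleanup afterwards
#   reset feature    ->  reset HEAD, index and worktree to refs/rewritten/feature
#   merge -C <oid> feature
#                    ->  merge refs/rewritten/feature (falling back to 'feature'
#                        itself) into HEAD, reusing the message of commit <oid>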
+
+static const char *reflog_message(struct replay_opts *opts,
+ const char *sub_action, const char *fmt, ...);
+
+static int do_reset(const char *name, int len, struct replay_opts *opts)
+{
+ struct strbuf ref_name = STRBUF_INIT;
+ struct object_id oid;
+ struct lock_file lock = LOCK_INIT;
+ struct tree_desc desc;
+ struct tree *tree;
+ struct unpack_trees_options unpack_tree_opts;
+ int ret = 0, i;
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0)
+ return -1;
+
+ if (len == 10 && !strncmp("[new root]", name, len)) {
+ if (!opts->have_squash_onto) {
+ const char *hex;
+ if (commit_tree("", 0, the_hash_algo->empty_tree,
+ NULL, &opts->squash_onto,
+ NULL, NULL))
+ return error(_("writing fake root commit"));
+ opts->have_squash_onto = 1;
+ hex = oid_to_hex(&opts->squash_onto);
+ if (write_message(hex, strlen(hex),
+ rebase_path_squash_onto(), 0))
+ return error(_("writing squash-onto"));
+ }
+ oidcpy(&oid, &opts->squash_onto);
+ } else {
+ /* Determine the length of the label */
+ for (i = 0; i < len; i++)
+ if (isspace(name[i]))
+ len = i;
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+ if (get_oid(ref_name.buf, &oid) &&
+ get_oid(ref_name.buf + strlen("refs/rewritten/"), &oid)) {
+ error(_("could not read '%s'"), ref_name.buf);
+ rollback_lock_file(&lock);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+ }
+
+ memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
+ setup_unpack_trees_porcelain(&unpack_tree_opts, "reset");
+ unpack_tree_opts.head_idx = 1;
+ unpack_tree_opts.src_index = &the_index;
+ unpack_tree_opts.dst_index = &the_index;
+ unpack_tree_opts.fn = oneway_merge;
+ unpack_tree_opts.merge = 1;
+ unpack_tree_opts.update = 1;
+
+ if (read_cache_unmerged()) {
+ rollback_lock_file(&lock);
+ strbuf_release(&ref_name);
+ return error_resolve_conflict(_(action_name(opts)));
+ }
+
+ if (!fill_tree_descriptor(&desc, &oid)) {
+ error(_("failed to find tree of %s"), oid_to_hex(&oid));
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+
+ if (unpack_trees(1, &desc, &unpack_tree_opts)) {
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+
+ tree = parse_tree_indirect(&oid);
+ prime_cache_tree(&the_index, tree);
+
+ if (write_locked_index(&the_index, &lock, COMMIT_LOCK) < 0)
+ ret = error(_("could not write index"));
+ free((void *)desc.buffer);
+
+ if (!ret)
+ ret = update_ref(reflog_message(opts, "reset", "'%.*s'",
+ len, name), "HEAD", &oid,
+ NULL, 0, UPDATE_REFS_MSG_ON_ERR);
+
+ strbuf_release(&ref_name);
+ return ret;
+}
+
+static int do_merge(struct commit *commit, const char *arg, int arg_len,
+ int flags, struct replay_opts *opts)
+{
+ int run_commit_flags = (flags & TODO_EDIT_MERGE_MSG) ?
+ EDIT_MSG | VERIFY_MSG : 0;
+ struct strbuf ref_name = STRBUF_INIT;
+ struct commit *head_commit, *merge_commit, *i;
+ struct commit_list *bases, *j, *reversed = NULL;
+ struct merge_options o;
+ int merge_arg_len, oneline_offset, can_fast_forward, ret;
+ static struct lock_file lock;
+ const char *p;
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) {
+ ret = -1;
+ goto leave_merge;
+ }
+
+ head_commit = lookup_commit_reference_by_name("HEAD");
+ if (!head_commit) {
+ ret = error(_("cannot merge without a current revision"));
+ goto leave_merge;
+ }
+
+ oneline_offset = arg_len;
+ merge_arg_len = strcspn(arg, " \t\n");
+ p = arg + merge_arg_len;
+ p += strspn(p, " \t\n");
+ if (*p == '#' && (!p[1] || isspace(p[1]))) {
+ p += 1 + strspn(p + 1, " \t\n");
+ oneline_offset = p - arg;
+ } else if (p - arg < arg_len)
+ BUG("octopus merges are not supported yet: '%s'", p);
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", merge_arg_len, arg);
+ merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ if (!merge_commit) {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(&ref_name, 0, strlen("refs/rewritten/"), "", 0);
+ merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ }
+
+ if (!merge_commit) {
+ ret = error(_("could not resolve '%s'"), ref_name.buf);
+ goto leave_merge;
+ }
+
+ if (opts->have_squash_onto &&
+ !oidcmp(&head_commit->object.oid, &opts->squash_onto)) {
+ /*
+ * When the user tells us to "merge" something into a
+ * "[new root]", let's simply fast-forward to the merge head.
+ */
+ rollback_lock_file(&lock);
+ ret = fast_forward_to(&merge_commit->object.oid,
+ &head_commit->object.oid, 0, opts);
+ goto leave_merge;
+ }
+
+ if (commit) {
+ const char *message = get_commit_buffer(commit, NULL);
+ const char *body;
+ int len;
+
+ if (!message) {
+ ret = error(_("could not get commit message of '%s'"),
+ oid_to_hex(&commit->object.oid));
+ goto leave_merge;
+ }
+ write_author_script(message);
+ find_commit_subject(message, &body);
+ len = strlen(body);
- git_path_merge_msg());
++ ret = write_message(body, len, git_path_merge_msg(the_repository), 0);
+ unuse_commit_buffer(commit, message);
+ if (ret) {
+ error_errno(_("could not write '%s'"),
- ret = write_message(p, len, git_path_merge_msg(), 0);
++ git_path_merge_msg(the_repository));
+ goto leave_merge;
+ }
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ int len;
+
+ strbuf_addf(&buf, "author %s", git_author_info(0));
+ write_author_script(buf.buf);
+ strbuf_reset(&buf);
+
+ if (oneline_offset < arg_len) {
+ p = arg + oneline_offset;
+ len = arg_len - oneline_offset;
+ } else {
+ strbuf_addf(&buf, "Merge branch '%.*s'",
+ merge_arg_len, arg);
+ p = buf.buf;
+ len = buf.len;
+ }
+
- git_path_merge_msg());
++ ret = write_message(p, len, git_path_merge_msg(the_repository), 0);
+ strbuf_release(&buf);
+ if (ret) {
+ error_errno(_("could not write '%s'"),
- git_path_merge_head(), 0);
- write_message("no-ff", 5, git_path_merge_mode(), 0);
++ git_path_merge_msg(the_repository));
+ goto leave_merge;
+ }
+ }
+
+ /*
+ * If HEAD is not identical to the first parent of the original merge
+ * commit, we cannot fast-forward.
+ */
+ can_fast_forward = opts->allow_ff && commit && commit->parents &&
+ !oidcmp(&commit->parents->item->object.oid,
+ &head_commit->object.oid);
+
+ /*
+ * If the merge head is different from the original one, we cannot
+ * fast-forward.
+ */
+ if (can_fast_forward) {
+ struct commit_list *second_parent = commit->parents->next;
+
+ if (second_parent && !second_parent->next &&
+ oidcmp(&merge_commit->object.oid,
+ &second_parent->item->object.oid))
+ can_fast_forward = 0;
+ }
+
+ if (can_fast_forward && commit->parents->next &&
+ !commit->parents->next->next &&
+ !oidcmp(&commit->parents->next->item->object.oid,
+ &merge_commit->object.oid)) {
+ rollback_lock_file(&lock);
+ ret = fast_forward_to(&commit->object.oid,
+ &head_commit->object.oid, 0, opts);
+ goto leave_merge;
+ }
+
+ write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
- ret = !!run_git_commit(git_path_merge_msg(), opts,
++ git_path_merge_head(the_repository), 0);
++ write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
+
+ bases = get_merge_bases(head_commit, merge_commit);
+ if (bases && !oidcmp(&merge_commit->object.oid,
+ &bases->item->object.oid)) {
+ ret = 0;
+ /* skip merging an ancestor of HEAD */
+ goto leave_merge;
+ }
+
+ for (j = bases; j; j = j->next)
+ commit_list_insert(j->item, &reversed);
+ free_commit_list(bases);
+
+ read_cache();
+ init_merge_options(&o);
+ o.branch1 = "HEAD";
+ o.branch2 = ref_name.buf;
+ o.buffer_output = 2;
+
+ ret = merge_recursive(&o, head_commit, merge_commit, reversed, &i);
+ if (ret <= 0)
+ fputs(o.obuf.buf, stdout);
+ strbuf_release(&o.obuf);
+ if (ret < 0) {
+ error(_("could not even attempt to merge '%.*s'"),
+ merge_arg_len, arg);
+ goto leave_merge;
+ }
+ /*
+ * The return value of merge_recursive() is 1 on clean, and 0 on
+ * unclean merge.
+ *
+ * Let's reverse that, so that do_merge() returns 0 upon success and
+ * 1 upon failed merge (keeping the return value -1 for the cases where
+ * we will want to reschedule the `merge` command).
+ */
+ ret = !ret;
+
+ if (active_cache_changed &&
+ write_locked_index(&the_index, &lock, COMMIT_LOCK)) {
+ ret = error(_("merge: Unable to write new index file"));
+ goto leave_merge;
+ }
+
+ rollback_lock_file(&lock);
+ if (ret)
+ rerere(opts->allow_rerere_auto);
+ else
+ /*
+ * In case of problems, we now want to return a positive
+ * value (a negative one would indicate that the `merge`
+ * command needs to be rescheduled).
+ */
++ ret = !!run_git_commit(git_path_merge_msg(the_repository), opts,
+ run_commit_flags);
+
+leave_merge:
+ strbuf_release(&ref_name);
+ rollback_lock_file(&lock);
+ return ret;
+}
+
static int is_final_fixup(struct todo_list *todo_list)
{
int i = todo_list->current;
return buf.buf;
}
+static const char rescheduled_advice[] =
+N_("Could not execute the todo command\n"
+"\n"
+" %.*s"
+"\n"
+"It has been rescheduled; To edit the command before continuing, please\n"
+"edit the todo list first:\n"
+"\n"
+" git rebase --edit-todo\n"
+" git rebase --continue\n");
+
static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
{
- int res = 0;
+ int res = 0, reschedule = 0;
setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
if (opts->allow_ff)
opts, is_final_fixup(todo_list));
if (is_rebase_i(opts) && res < 0) {
/* Reschedule */
+ advise(_(rescheduled_advice),
+ get_item_line_length(todo_list,
+ todo_list->current),
+ get_item_line(todo_list,
+ todo_list->current));
todo_list->current--;
if (save_todo(todo_list, opts))
return -1;
intend_to_amend();
return error_failed_squash(item->commit, opts,
item->arg_len, item->arg);
- } else if (res && is_rebase_i(opts))
+ } else if (res && is_rebase_i(opts) && item->commit)
return res | error_with_patch(item->commit,
item->arg, item->arg_len, opts, res,
item->command == TODO_REWORD);
/* `current` will be incremented below */
todo_list->current = -1;
}
+ } else if (item->command == TODO_LABEL) {
+ if ((res = do_label(item->arg, item->arg_len)))
+ reschedule = 1;
+ } else if (item->command == TODO_RESET) {
+ if ((res = do_reset(item->arg, item->arg_len, opts)))
+ reschedule = 1;
+ } else if (item->command == TODO_MERGE) {
+ if ((res = do_merge(item->commit,
+ item->arg, item->arg_len,
+ item->flags, opts)) < 0)
+ reschedule = 1;
+ else if (item->commit)
+ record_in_rewritten(&item->commit->object.oid,
+ peek_command(todo_list, 1));
+ if (res > 0)
+ /* failed with merge conflicts */
+ return error_with_patch(item->commit,
+ item->arg,
+ item->arg_len, opts,
+ res, 0);
} else if (!is_noop(item->command))
return error(_("unknown command %d"), item->command);
+ if (reschedule) {
+ advise(_(rescheduled_advice),
+ get_item_line_length(todo_list,
+ todo_list->current),
+ get_item_line(todo_list, todo_list->current));
+ todo_list->current--;
+ if (save_todo(todo_list, opts))
+ return -1;
+ if (item->commit)
+ return error_with_patch(item->commit,
+ item->arg,
+ item->arg_len, opts,
+ res, 0);
+ }
+
todo_list->current++;
if (res)
return res;
{
const char *argv[] = { "commit", NULL };
- if (!file_exists(git_path_cherry_pick_head()) &&
- !file_exists(git_path_revert_head()))
+ if (!file_exists(git_path_cherry_pick_head(the_repository)) &&
+ !file_exists(git_path_revert_head(the_repository)))
return error(_("no cherry-pick or revert in progress"));
return run_command_v_opt(argv, RUN_GIT_CMD);
}
-static int commit_staged_changes(struct replay_opts *opts)
+static int commit_staged_changes(struct replay_opts *opts,
+ struct todo_list *todo_list)
{
unsigned int flags = ALLOW_EMPTY | EDIT_MSG;
+ unsigned int final_fixup = 0, is_clean;
if (has_unstaged_changes(1))
return error(_("cannot rebase: You have unstaged changes."));
- if (!has_uncommitted_changes(0)) {
- const char *cherry_pick_head = git_path_cherry_pick_head(the_repository);
- if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
- return error(_("could not remove CHERRY_PICK_HEAD"));
- return 0;
- }
+ is_clean = !has_uncommitted_changes(0);
if (file_exists(rebase_path_amend())) {
struct strbuf rev = STRBUF_INIT;
if (get_oid_hex(rev.buf, &to_amend))
return error(_("invalid contents: '%s'"),
rebase_path_amend());
- if (oidcmp(&head, &to_amend))
+ if (!is_clean && oidcmp(&head, &to_amend))
return error(_("\nYou have uncommitted changes in your "
"working tree. Please, commit them\n"
"first and then run 'git rebase "
"--continue' again."));
+ /*
+ * When skipping a failed fixup/squash, we need to edit the
+ * commit message, the current fixup list and count, and if it
+ * was the last fixup/squash in the chain, we need to clean up
+ * the commit message and if there was a squash, let the user
+ * edit it.
+ */
+ if (is_clean && !oidcmp(&head, &to_amend) &&
+ opts->current_fixup_count > 0 &&
+ file_exists(rebase_path_stopped_sha())) {
+ const char *p = opts->current_fixups.buf;
+ int len = opts->current_fixups.len;
+
+ opts->current_fixup_count--;
+ if (!len)
+ BUG("Incorrect current_fixups:\n%s", p);
+ while (len && p[len - 1] != '\n')
+ len--;
+ strbuf_setlen(&opts->current_fixups, len);
+ if (write_message(p, len, rebase_path_current_fixups(),
+ 0) < 0)
+ return error(_("could not write file: '%s'"),
+ rebase_path_current_fixups());
+
+ /*
+ * If a fixup/squash in a fixup/squash chain failed, the
+ * commit message is already correct, no need to commit
+ * it again.
+ *
+ * Only if it is the final command in the fixup/squash
+ * chain, and only if the chain is longer than a single
+ * fixup/squash command (which was just skipped), do we
+ * actually need to re-commit with a cleaned up commit
+ * message.
+ */
+ if (opts->current_fixup_count > 0 &&
+ !is_fixup(peek_command(todo_list, 0))) {
+ final_fixup = 1;
+ /*
+ * If there was not a single "squash" in the
+ * chain, we only need to clean up the commit
+ * message, no need to bother the user with
+ * opening the commit message in the editor.
+ */
+ if (!starts_with(p, "squash ") &&
+ !strstr(p, "\nsquash "))
+ flags = (flags & ~EDIT_MSG) | CLEANUP_MSG;
+ } else if (is_fixup(peek_command(todo_list, 0))) {
+ /*
+ * We need to update the squash message to skip
+ * the latest commit message.
+ */
+ struct commit *commit;
+ const char *path = rebase_path_squash_msg();
+
+ if (parse_head(&commit) ||
+ !(p = get_commit_buffer(commit, NULL)) ||
+ write_message(p, strlen(p), path, 0)) {
+ unuse_commit_buffer(commit, p);
+ return error(_("could not write file: "
+ "'%s'"), path);
+ }
+ unuse_commit_buffer(commit, p);
+ }
+ }
strbuf_release(&rev);
flags |= AMEND_MSG;
}
- if (run_git_commit(rebase_path_message(), opts, flags))
+ if (is_clean) {
- const char *cherry_pick_head = git_path_cherry_pick_head();
++ const char *cherry_pick_head = git_path_cherry_pick_head(the_repository);
+
+ if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
+ return error(_("could not remove CHERRY_PICK_HEAD"));
+ if (!final_fixup)
+ return 0;
+ }
+
+ if (run_git_commit(final_fixup ? NULL : rebase_path_message(),
+ opts, flags))
return error(_("could not commit staged changes."));
unlink(rebase_path_amend());
+ if (final_fixup) {
+ unlink(rebase_path_fixup_msg());
+ unlink(rebase_path_squash_msg());
+ }
+ if (opts->current_fixup_count > 0) {
+ /*
+ * Whether final fixup or not, we just cleaned up the commit
+ * message...
+ */
+ unlink(rebase_path_current_fixups());
+ strbuf_reset(&opts->current_fixups);
+ opts->current_fixup_count = 0;
+ }
return 0;
}
if (read_and_refresh_cache(opts))
return -1;
+ if (read_populate_opts(opts))
+ return -1;
if (is_rebase_i(opts)) {
- if (commit_staged_changes(opts))
+ if ((res = read_populate_todo(&todo_list, opts)))
+ goto release_todo_list;
+ if (commit_staged_changes(opts, &todo_list))
return -1;
} else if (!file_exists(get_todo_path(opts)))
return continue_single_pick();
- if (read_populate_opts(opts))
- return -1;
- if ((res = read_populate_todo(&todo_list, opts)))
+ else if ((res = read_populate_todo(&todo_list, opts)))
goto release_todo_list;
if (!is_rebase_i(opts)) {
/* Verify that the conflict has been resolved */
- if (file_exists(git_path_cherry_pick_head()) ||
- file_exists(git_path_revert_head())) {
+ if (file_exists(git_path_cherry_pick_head(the_repository)) ||
+ file_exists(git_path_revert_head(the_repository))) {
res = continue_single_pick();
if (res)
goto release_todo_list;
strbuf_release(&sob);
}
+struct labels_entry {
+ struct hashmap_entry entry;
+ char label[FLEX_ARRAY];
+};
+
+static int labels_cmp(const void *fndata, const struct labels_entry *a,
+ const struct labels_entry *b, const void *key)
+{
+ return key ? strcmp(a->label, key) : strcmp(a->label, b->label);
+}
+
+struct string_entry {
+ struct oidmap_entry entry;
+ char string[FLEX_ARRAY];
+};
+
+struct label_state {
+ struct oidmap commit2label;
+ struct hashmap labels;
+ struct strbuf buf;
+};
+
+static const char *label_oid(struct object_id *oid, const char *label,
+ struct label_state *state)
+{
+ struct labels_entry *labels_entry;
+ struct string_entry *string_entry;
+ struct object_id dummy;
+ size_t len;
+ int i;
+
+ string_entry = oidmap_get(&state->commit2label, oid);
+ if (string_entry)
+ return string_entry->string;
+
+ /*
+ * For "uninteresting" commits, i.e. commits that are not to be
+ * rebased, and which can therefore not be labeled, we use a unique
+ * abbreviation of the commit name. This is slightly more complicated
+ * than calling find_unique_abbrev() because we also need to make
+ * sure that the abbreviation does not conflict with any other
+ * label.
+ *
+	 * We disallow "interesting" commits from being labeled by a string
+	 * that is a valid full-length hash, to ensure that we can always
+	 * find an abbreviation for any uninteresting commit's name that
+	 * does not clash with any other label.
+ */
+ if (!label) {
+ char *p;
+
+ strbuf_reset(&state->buf);
+ strbuf_grow(&state->buf, GIT_SHA1_HEXSZ);
+ label = p = state->buf.buf;
+
+ find_unique_abbrev_r(p, oid, default_abbrev);
+
+ /*
+ * We may need to extend the abbreviated hash so that there is
+ * no conflicting label.
+ */
+ if (hashmap_get_from_hash(&state->labels, strihash(p), p)) {
+ size_t i = strlen(p) + 1;
+
+ oid_to_hex_r(p, oid);
+ for (; i < GIT_SHA1_HEXSZ; i++) {
+ char save = p[i];
+ p[i] = '\0';
+ if (!hashmap_get_from_hash(&state->labels,
+ strihash(p), p))
+ break;
+ p[i] = save;
+ }
+ }
+ } else if (((len = strlen(label)) == the_hash_algo->hexsz &&
+ !get_oid_hex(label, &dummy)) ||
+ (len == 1 && *label == '#') ||
+ hashmap_get_from_hash(&state->labels,
+ strihash(label), label)) {
+ /*
+ * If the label already exists, or if the label is a valid full
+ * OID, or the label is a '#' (which we use as a separator
+ * between merge heads and oneline), we append a dash and a
+ * number to make it unique.
+ */
+ struct strbuf *buf = &state->buf;
+
+ strbuf_reset(buf);
+ strbuf_add(buf, label, len);
+
+ for (i = 2; ; i++) {
+ strbuf_setlen(buf, len);
+ strbuf_addf(buf, "-%d", i);
+ if (!hashmap_get_from_hash(&state->labels,
+ strihash(buf->buf),
+ buf->buf))
+ break;
+ }
+
+ label = buf->buf;
+ }
+
+ FLEX_ALLOC_STR(labels_entry, label, label);
+ hashmap_entry_init(labels_entry, strihash(label));
+ hashmap_add(&state->labels, labels_entry);
+
+ FLEX_ALLOC_STR(string_entry, string, label);
+ oidcpy(&string_entry->entry.oid, oid);
+ oidmap_put(&state->commit2label, string_entry);
+
+ return string_entry->string;
+}
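
A few concrete outcomes of this naming scheme, purely as an editor's illustration (the oids and the hashmap state are hypothetical):

/*
 *   label_oid(oid1, "feature", state)  -> "feature"     (first use of the name)
 *   label_oid(oid2, "feature", state)  -> "feature-2"   (name already taken)
 *   label_oid(oid3, "#", state)        -> "#-2"         ('#' is reserved as the
 *                                                        merge-head/oneline separator)
 *   label_oid(oid4, NULL, state)       -> "fedcba9"     (unique abbreviation of oid4,
 *                                                        lengthened until it is unique)
 */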
+
+static int make_script_with_merges(struct pretty_print_context *pp,
+ struct rev_info *revs, FILE *out,
+ unsigned flags)
+{
+ int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
+ int rebase_cousins = flags & TODO_LIST_REBASE_COUSINS;
+ struct strbuf buf = STRBUF_INIT, oneline = STRBUF_INIT;
+ struct strbuf label = STRBUF_INIT;
+ struct commit_list *commits = NULL, **tail = &commits, *iter;
+ struct commit_list *tips = NULL, **tips_tail = &tips;
+ struct commit *commit;
+ struct oidmap commit2todo = OIDMAP_INIT;
+ struct string_entry *entry;
+ struct oidset interesting = OIDSET_INIT, child_seen = OIDSET_INIT,
+ shown = OIDSET_INIT;
+ struct label_state state = { OIDMAP_INIT, { NULL }, STRBUF_INIT };
+
+ int abbr = flags & TODO_LIST_ABBREVIATE_CMDS;
+ const char *cmd_pick = abbr ? "p" : "pick",
+ *cmd_label = abbr ? "l" : "label",
+ *cmd_reset = abbr ? "t" : "reset",
+ *cmd_merge = abbr ? "m" : "merge";
+
+ oidmap_init(&commit2todo, 0);
+ oidmap_init(&state.commit2label, 0);
+ hashmap_init(&state.labels, (hashmap_cmp_fn) labels_cmp, NULL, 0);
+ strbuf_init(&state.buf, 32);
+
+ if (revs->cmdline.nr && (revs->cmdline.rev[0].flags & BOTTOM)) {
+ struct object_id *oid = &revs->cmdline.rev[0].item->oid;
+ FLEX_ALLOC_STR(entry, string, "onto");
+ oidcpy(&entry->entry.oid, oid);
+ oidmap_put(&state.commit2label, entry);
+ }
+
+ /*
+ * First phase:
+ * - get onelines for all commits
+ * - gather all branch tips (i.e. 2nd or later parents of merges)
+ * - label all branch tips
+ */
+ while ((commit = get_revision(revs))) {
+ struct commit_list *to_merge;
+ int is_octopus;
+ const char *p1, *p2;
+ struct object_id *oid;
+ int is_empty;
+
+ tail = &commit_list_insert(commit, tail)->next;
+ oidset_insert(&interesting, &commit->object.oid);
+
+ is_empty = is_original_commit_empty(commit);
+ if (!is_empty && (commit->object.flags & PATCHSAME))
+ continue;
+
+ strbuf_reset(&oneline);
+ pretty_print_commit(pp, commit, &oneline);
+
+ to_merge = commit->parents ? commit->parents->next : NULL;
+ if (!to_merge) {
+ /* non-merge commit: easy case */
+ strbuf_reset(&buf);
+ if (!keep_empty && is_empty)
+ strbuf_addf(&buf, "%c ", comment_line_char);
+ strbuf_addf(&buf, "%s %s %s", cmd_pick,
+ oid_to_hex(&commit->object.oid),
+ oneline.buf);
+
+ FLEX_ALLOC_STR(entry, string, buf.buf);
+ oidcpy(&entry->entry.oid, &commit->object.oid);
+ oidmap_put(&commit2todo, entry);
+
+ continue;
+ }
+
+ is_octopus = to_merge && to_merge->next;
+
+ if (is_octopus)
+ BUG("Octopus merges not yet supported");
+
+ /* Create a label */
+ strbuf_reset(&label);
+ if (skip_prefix(oneline.buf, "Merge ", &p1) &&
+ (p1 = strchr(p1, '\'')) &&
+ (p2 = strchr(++p1, '\'')))
+ strbuf_add(&label, p1, p2 - p1);
+ else if (skip_prefix(oneline.buf, "Merge pull request ",
+ &p1) &&
+ (p1 = strstr(p1, " from ")))
+ strbuf_addstr(&label, p1 + strlen(" from "));
+ else
+ strbuf_addbuf(&label, &oneline);
+
+ for (p1 = label.buf; *p1; p1++)
+ if (isspace(*p1))
+ *(char *)p1 = '-';
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s -C %s",
+ cmd_merge, oid_to_hex(&commit->object.oid));
+
+ /* label the tip of merged branch */
+ oid = &to_merge->item->object.oid;
+ strbuf_addch(&buf, ' ');
+
+ if (!oidset_contains(&interesting, oid))
+ strbuf_addstr(&buf, label_oid(oid, NULL, &state));
+ else {
+ tips_tail = &commit_list_insert(to_merge->item,
+ tips_tail)->next;
+
+ strbuf_addstr(&buf, label_oid(oid, label.buf, &state));
+ }
+ strbuf_addf(&buf, " # %s", oneline.buf);
+
+ FLEX_ALLOC_STR(entry, string, buf.buf);
+ oidcpy(&entry->entry.oid, &commit->object.oid);
+ oidmap_put(&commit2todo, entry);
+ }
+
+ /*
+ * Second phase:
+ * - label branch points
+ * - add HEAD to the branch tips
+ */
+ for (iter = commits; iter; iter = iter->next) {
+ struct commit_list *parent = iter->item->parents;
+ for (; parent; parent = parent->next) {
+ struct object_id *oid = &parent->item->object.oid;
+ if (!oidset_contains(&interesting, oid))
+ continue;
+ if (!oidset_contains(&child_seen, oid))
+ oidset_insert(&child_seen, oid);
+ else
+ label_oid(oid, "branch-point", &state);
+ }
+
+		/* Add HEAD as implicit "tip of branch" */
+ if (!iter->next)
+ tips_tail = &commit_list_insert(iter->item,
+ tips_tail)->next;
+ }
+
+ /*
+ * Third phase: output the todo list. This is a bit tricky, as we
+ * want to avoid jumping back and forth between revisions. To
+ * accomplish that goal, we walk backwards from the branch tips,
+ * gathering commits not yet shown, reversing the list on the fly,
+ * then outputting that list (labeling revisions as needed).
+ */
+ fprintf(out, "%s onto\n", cmd_label);
+ for (iter = tips; iter; iter = iter->next) {
+ struct commit_list *list = NULL, *iter2;
+
+ commit = iter->item;
+ if (oidset_contains(&shown, &commit->object.oid))
+ continue;
+ entry = oidmap_get(&state.commit2label, &commit->object.oid);
+
+ if (entry)
+ fprintf(out, "\n# Branch %s\n", entry->string);
+ else
+ fprintf(out, "\n");
+
+ while (oidset_contains(&interesting, &commit->object.oid) &&
+ !oidset_contains(&shown, &commit->object.oid)) {
+ commit_list_insert(commit, &list);
+ if (!commit->parents) {
+ commit = NULL;
+ break;
+ }
+ commit = commit->parents->item;
+ }
+
+ if (!commit)
+ fprintf(out, "%s %s\n", cmd_reset,
+ rebase_cousins ? "onto" : "[new root]");
+ else {
+ const char *to = NULL;
+
+ entry = oidmap_get(&state.commit2label,
+ &commit->object.oid);
+ if (entry)
+ to = entry->string;
+ else if (!rebase_cousins)
+ to = label_oid(&commit->object.oid, NULL,
+ &state);
+
+ if (!to || !strcmp(to, "onto"))
+ fprintf(out, "%s onto\n", cmd_reset);
+ else {
+ strbuf_reset(&oneline);
+ pretty_print_commit(pp, commit, &oneline);
+ fprintf(out, "%s %s # %s\n",
+ cmd_reset, to, oneline.buf);
+ }
+ }
+
+ for (iter2 = list; iter2; iter2 = iter2->next) {
+ struct object_id *oid = &iter2->item->object.oid;
+ entry = oidmap_get(&commit2todo, oid);
+ /* only show if not already upstream */
+ if (entry)
+ fprintf(out, "%s\n", entry->string);
+ entry = oidmap_get(&state.commit2label, oid);
+ if (entry)
+ fprintf(out, "%s %s\n",
+ cmd_label, entry->string);
+ oidset_insert(&shown, oid);
+ }
+
+ free_commit_list(list);
+ }
+
+ free_commit_list(commits);
+ free_commit_list(tips);
+
+ strbuf_release(&label);
+ strbuf_release(&oneline);
+ strbuf_release(&buf);
+
+ oidmap_free(&commit2todo, 1);
+ oidmap_free(&state.commit2label, 1);
+ hashmap_free(&state.labels, 1);
+ strbuf_release(&state.buf);
+
+ return 0;
+}
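
To make the three phases concrete: for a history in which a two-commit branch 'feature' was forked from the onto commit and later merged back, the generated todo list would look roughly like this (an editor's illustration with made-up, shortened hashes; the real output uses full object names):

label onto

# Branch feature
reset onto
pick 1111111 feature: add frotz
pick 2222222 feature: add xyzzy
label feature

reset onto
pick 3333333 work on the main branch
merge -C 4444444 feature # Merge branch 'feature'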
+
int sequencer_make_script(FILE *out, int argc, const char **argv,
unsigned flags)
{
struct commit *commit;
int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick";
+ int rebase_merges = flags & TODO_LIST_REBASE_MERGES;
init_revisions(&revs, NULL);
revs.verbose_header = 1;
- revs.max_parents = 1;
- revs.cherry_pick = 1;
+ if (!rebase_merges)
+ revs.max_parents = 1;
+ revs.cherry_mark = 1;
revs.limited = 1;
revs.reverse = 1;
revs.right_only = 1;
if (prepare_revision_walk(&revs) < 0)
return error(_("make_script: error preparing revisions"));
+ if (rebase_merges)
+ return make_script_with_merges(&pp, &revs, out, flags);
+
while ((commit = get_revision(&revs))) {
+ int is_empty = is_original_commit_empty(commit);
+
+ if (!is_empty && (commit->object.flags & PATCHSAME))
+ continue;
strbuf_reset(&buf);
- if (!keep_empty && is_original_commit_empty(commit))
+ if (!keep_empty && is_empty)
strbuf_addf(&buf, "%c ", comment_line_char);
strbuf_addf(&buf, "%s %s ", insn,
oid_to_hex(&commit->object.oid));
short_commit_name(item->commit) :
oid_to_hex(&item->commit->object.oid);
+ if (item->command == TODO_MERGE) {
+ if (item->flags & TODO_EDIT_MERGE_MSG)
+ strbuf_addstr(&buf, " -c");
+ else
+ strbuf_addstr(&buf, " -C");
+ }
+
strbuf_addf(&buf, " %s", oid);
}
+
/* add all the rest */
if (!item->arg_len)
strbuf_addch(&buf, '\n');
return CHECK_IGNORE;
}
+define_commit_slab(commit_seen, unsigned char);
/*
* Check if the user dropped some commits by mistake
* Behaviour determined by rebase.missingCommitsCheck.
struct todo_list todo_list = TODO_LIST_INIT;
struct strbuf missing = STRBUF_INIT;
int advise_to_edit_todo = 0, res = 0, i;
+ struct commit_seen commit_seen;
+
+ init_commit_seen(&commit_seen);
strbuf_addstr(&todo_file, rebase_path_todo());
if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
for (i = 0; i < todo_list.nr; i++) {
struct commit *commit = todo_list.items[i].commit;
if (commit)
- commit->util = (void *)1;
+ *commit_seen_at(&commit_seen, commit) = 1;
}
todo_list_release(&todo_list);
for (i = todo_list.nr - 1; i >= 0; i--) {
struct todo_item *item = todo_list.items + i;
struct commit *commit = item->commit;
- if (commit && !commit->util) {
+ if (commit && !*commit_seen_at(&commit_seen, commit)) {
strbuf_addf(&missing, " - %s %.*s\n",
short_commit_name(commit),
item->arg_len, item->arg);
- commit->util = (void *)1;
+ *commit_seen_at(&commit_seen, commit) = 1;
}
}
"The possible behaviours are: ignore, warn, error.\n\n"));
leave_check:
+ clear_commit_seen(&commit_seen);
strbuf_release(&todo_file);
todo_list_release(&todo_list);
oid = &item->commit->object.oid;
}
if (i > 0) {
- int offset = i < todo_list.nr ?
- todo_list.items[i].offset_in_buf : todo_list.buf.len;
+ int offset = get_item_line_offset(&todo_list, i);
const char *done_path = rebase_path_done();
fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
return key ? strcmp(a->subject, key) : strcmp(a->subject, b->subject);
}
+define_commit_slab(commit_todo_item, struct todo_item *);
+
/*
* Rearrange the todo list that has both "pick commit-id msg" and "pick
* commit-id fixup!/squash! msg" in it so that the latter is put immediately
struct hashmap subject2item;
int res = 0, rearranged = 0, *next, *tail, i;
char **subjects;
+ struct commit_todo_item commit_todo;
if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
return -1;
return -1;
}
+ init_commit_todo_item(&commit_todo);
/*
* The hashmap maps onelines to the respective todo list index.
*
struct subject2item_entry *entry;
next[i] = tail[i] = -1;
- if (item->command >= TODO_EXEC) {
+ if (!item->commit || item->command == TODO_DROP) {
subjects[i] = NULL;
continue;
}
if (is_fixup(item->command)) {
todo_list_release(&todo_list);
+ clear_commit_todo_item(&commit_todo);
return error(_("the script was already rearranged."));
}
- item->commit->util = item;
+ *commit_todo_item_at(&commit_todo, item->commit) = item;
parse_commit(item->commit);
commit_buffer = get_commit_buffer(item->commit, NULL);
else if (!strchr(p, ' ') &&
(commit2 =
lookup_commit_reference_by_name(p)) &&
- commit2->util)
+ *commit_todo_item_at(&commit_todo, commit2))
/* found by commit name */
- i2 = (struct todo_item *)commit2->util
+ i2 = *commit_todo_item_at(&commit_todo, commit2)
- todo_list.items;
else {
/* copy can be a prefix of the commit subject */
continue;
while (cur >= 0) {
- int offset = todo_list.items[cur].offset_in_buf;
- int end_offset = cur + 1 < todo_list.nr ?
- todo_list.items[cur + 1].offset_in_buf :
- todo_list.buf.len;
- char *bol = todo_list.buf.buf + offset;
- char *eol = todo_list.buf.buf + end_offset;
+ const char *bol =
+ get_item_line(&todo_list, cur);
+ const char *eol =
+ get_item_line(&todo_list, cur + 1);
/* replace 'pick', by 'fixup' or 'squash' */
command = todo_list.items[cur].command;
hashmap_free(&subject2item, 1);
todo_list_release(&todo_list);
+ clear_commit_todo_item(&commit_todo);
return res;
}
#include "cache.h"
+ #include "repository.h"
#include "tempfile.h"
#include "lockfile.h"
+ #include "object-store.h"
#include "commit.h"
#include "tag.h"
#include "pkt-line.h"
#include "commit-slab.h"
#include "revision.h"
#include "list-objects.h"
+#include "commit-slab.h"
+ #include "repository.h"
- static int is_shallow = -1;
- static struct stat_validity shallow_stat;
- static char *alternate_shallow_file;
-
- void set_alternate_shallow_file(const char *path, int override)
+ void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
- if (is_shallow != -1)
+ if (r->parsed_objects->is_shallow != -1)
- die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
+ BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
- if (alternate_shallow_file && !override)
+ if (r->parsed_objects->alternate_shallow_file && !override)
return;
- free(alternate_shallow_file);
- alternate_shallow_file = xstrdup_or_null(path);
+ free(r->parsed_objects->alternate_shallow_file);
+ r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}
- int register_shallow(const struct object_id *oid)
+ int register_shallow(struct repository *r, const struct object_id *oid)
{
struct commit_graft *graft =
xmalloc(sizeof(struct commit_graft));
graft->nr_parent = -1;
if (commit && commit->object.parsed)
commit->parents = NULL;
- return register_commit_graft(graft, 0);
+ return register_commit_graft(r, graft, 0);
}
- int is_repository_shallow(void)
+ int is_repository_shallow(struct repository *r)
{
FILE *fp;
char buf[1024];
- const char *path = alternate_shallow_file;
+ const char *path = r->parsed_objects->alternate_shallow_file;
- if (is_shallow >= 0)
- return is_shallow;
+ if (r->parsed_objects->is_shallow >= 0)
+ return r->parsed_objects->is_shallow;
if (!path)
- path = git_path_shallow();
+ path = git_path_shallow(r);
/*
* fetch-pack sets '--shallow-file ""' as an indicator that no
* shallow file should be used. We could just open it and it
* will likely fail. But let's do an explicit check instead.
*/
if (!*path || (fp = fopen(path, "r")) == NULL) {
- stat_validity_clear(&shallow_stat);
- is_shallow = 0;
- return is_shallow;
+ stat_validity_clear(r->parsed_objects->shallow_stat);
+ r->parsed_objects->is_shallow = 0;
+ return r->parsed_objects->is_shallow;
}
- stat_validity_update(&shallow_stat, fileno(fp));
- is_shallow = 1;
+ stat_validity_update(r->parsed_objects->shallow_stat, fileno(fp));
+ r->parsed_objects->is_shallow = 1;
while (fgets(buf, sizeof(buf), fp)) {
struct object_id oid;
if (get_oid_hex(buf, &oid))
die("bad shallow line: %s", buf);
- register_shallow(&oid);
+ register_shallow(r, &oid);
}
fclose(fp);
- return is_shallow;
+ return r->parsed_objects->is_shallow;
}
+/*
+ * TODO: use "int" elemtype instead of "int *" when/if commit-slab
+ * supports a "valid" flag.
+ */
+define_commit_slab(commit_depth, int *);
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
int shallow_flag, int not_shallow_flag)
{
struct object_array stack = OBJECT_ARRAY_INIT;
struct commit *commit = NULL;
struct commit_graft *graft;
+ struct commit_depth depths;
+ init_commit_depth(&depths);
while (commit || i < heads->nr || stack.nr) {
struct commit_list *p;
if (!commit) {
if (i < heads->nr) {
+ int **depth_slot;
commit = (struct commit *)
deref_tag(heads->objects[i++].item, NULL, 0);
if (!commit || commit->object.type != OBJ_COMMIT) {
commit = NULL;
continue;
}
- if (!commit->util)
- commit->util = xmalloc(sizeof(int));
- *(int *)commit->util = 0;
+ depth_slot = commit_depth_at(&depths, commit);
+ if (!*depth_slot)
+ *depth_slot = xmalloc(sizeof(int));
+ **depth_slot = 0;
cur_depth = 0;
} else {
commit = (struct commit *)
object_array_pop(&stack);
- cur_depth = *(int *)commit->util;
+ cur_depth = **commit_depth_at(&depths, commit);
}
}
parse_commit_or_die(commit);
cur_depth++;
if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
- (is_repository_shallow() && !commit->parents &&
- (graft = lookup_commit_graft(&commit->object.oid)) != NULL &&
+ (is_repository_shallow(the_repository) && !commit->parents &&
+ (graft = lookup_commit_graft(the_repository, &commit->object.oid)) != NULL &&
graft->nr_parent < 0)) {
commit_list_insert(commit, &result);
commit->object.flags |= shallow_flag;
}
commit->object.flags |= not_shallow_flag;
for (p = commit->parents, commit = NULL; p; p = p->next) {
- if (!p->item->util) {
- int *pointer = xmalloc(sizeof(int));
- p->item->util = pointer;
- *pointer = cur_depth;
+ int **depth_slot = commit_depth_at(&depths, p->item);
+ if (!*depth_slot) {
+ *depth_slot = xmalloc(sizeof(int));
+ **depth_slot = cur_depth;
} else {
- int *pointer = p->item->util;
- if (cur_depth >= *pointer)
+ if (cur_depth >= **depth_slot)
continue;
- *pointer = cur_depth;
+ **depth_slot = cur_depth;
}
if (p->next)
add_object_array(&p->item->object,
NULL, &stack);
else {
commit = p->item;
- cur_depth = *(int *)commit->util;
+ cur_depth = **commit_depth_at(&depths, commit);
}
}
}
+ for (i = 0; i < depths.slab_count; i++) {
+ int j;
+
+ for (j = 0; j < depths.slab_size; j++)
+ free(depths.slab[i][j]);
+ }
+ clear_commit_depth(&depths);
return result;
}
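
The commit_depth conversion above, like the commit_seen and commit_todo_item conversions earlier in this section, replaces the old catch-all `commit->util` pointer with a commit slab. A minimal sketch of that pattern, relying only on the commit-slab.h macros these files already include (the slab and function names below are made up for illustration):

#include "commit.h"
#include "commit-slab.h"

/* declares struct mark_slab plus init_mark_slab(), mark_slab_at() and clear_mark_slab() */
define_commit_slab(mark_slab, int);

static void mark_one(struct commit *c)
{
	struct mark_slab marks;

	init_mark_slab(&marks);
	*mark_slab_at(&marks, c) = 1;	/* was: c->util = (void *)1; */
	if (*mark_slab_at(&marks, c))	/* was: if (c->util) ... */
		; /* slots are zero-initialized, so unvisited commits read as 0 */
	clear_mark_slab(&marks);	/* frees the slab's storage, not the commits */
}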
*/
clear_object_flags(both_flags);
- is_repository_shallow(); /* make sure shallows are read */
+ is_repository_shallow(the_repository); /* make sure shallows are read */
init_revisions(&revs, NULL);
save_commit_buffer = 0;
die("revision walk setup failed");
traverse_commit_list(&revs, show_commit, NULL, ¬_shallow_list);
+ if (!not_shallow_list)
+ die("no commits selected for shallow requests");
+
/* Mark all reachable commits as NOT_SHALLOW */
for (p = not_shallow_list; p; p = p->next)
p->item->object.flags |= not_shallow_flag;
return result;
}
- static void check_shallow_file_for_update(void)
+ static void check_shallow_file_for_update(struct repository *r)
{
- if (is_shallow == -1)
+ if (r->parsed_objects->is_shallow == -1)
- die("BUG: shallow must be initialized by now");
+ BUG("shallow must be initialized by now");
- if (!stat_validity_check(&shallow_stat, git_path_shallow()))
+ if (!stat_validity_check(r->parsed_objects->shallow_stat, git_path_shallow(the_repository)))
die("shallow file has changed since we read it");
}
struct strbuf sb = STRBUF_INIT;
int fd;
- fd = hold_lock_file_for_update(shallow_lock, git_path_shallow(),
+ fd = hold_lock_file_for_update(shallow_lock,
+ git_path_shallow(the_repository),
LOCK_DIE_ON_ERROR);
- check_shallow_file_for_update();
+ check_shallow_file_for_update(the_repository);
if (write_shallow_commits(&sb, 0, extra)) {
if (write_in_full(fd, sb.buf, sb.len) < 0)
die_errno("failed to write to %s",
void advertise_shallow_grafts(int fd)
{
- if (!is_repository_shallow())
+ if (!is_repository_shallow(the_repository))
return;
for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
}
*/
void prune_shallow(int show_only)
{
- static struct lock_file shallow_lock;
+ struct lock_file shallow_lock = LOCK_INIT;
struct strbuf sb = STRBUF_INIT;
int fd;
strbuf_release(&sb);
return;
}
- fd = hold_lock_file_for_update(&shallow_lock, git_path_shallow(),
+ fd = hold_lock_file_for_update(&shallow_lock,
+ git_path_shallow(the_repository),
LOCK_DIE_ON_ERROR);
- check_shallow_file_for_update();
+ check_shallow_file_for_update(the_repository);
if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
if (write_in_full(fd, sb.buf, sb.len) < 0)
die_errno("failed to write to %s",
get_lock_file_path(&shallow_lock));
commit_lock_file(&shallow_lock);
} else {
- unlink(git_path_shallow());
+ unlink(git_path_shallow(the_repository));
rollback_lock_file(&shallow_lock);
}
strbuf_release(&sb);
for (i = 0; i < sa->nr; i++) {
if (has_object_file(sa->oid + i)) {
struct commit_graft *graft;
- graft = lookup_commit_graft(&sa->oid[i]);
+ graft = lookup_commit_graft(the_repository,
+ &sa->oid[i]);
if (graft && graft->nr_parent < 0)
continue;
info->ours[info->nr_ours++] = i;
void *p;
if (!info->pool_count || size > info->end - info->free) {
if (size > POOL_SIZE)
- die("BUG: pool size too small for %d in paint_alloc()",
+ BUG("pool size too small for %d in paint_alloc()",
size);
info->pool_count++;
REALLOC_ARRAY(info->pools, info->pool_count);
#include "submodule-config.h"
#include "submodule.h"
#include "strbuf.h"
+ #include "object-store.h"
#include "parse-options.h"
/*
const struct submodule_entry *b = entry_or_key;
return strcmp(a->config->path, b->config->path) ||
- hashcmp(a->config->gitmodules_sha1, b->config->gitmodules_sha1);
+ oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static int config_name_cmp(const void *unused_cmp_data,
const struct submodule_entry *b = entry_or_key;
return strcmp(a->config->name, b->config->name) ||
- hashcmp(a->config->gitmodules_sha1, b->config->gitmodules_sha1);
+ oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static struct submodule_cache *submodule_cache_alloc(void)
free(cache);
}
-static unsigned int hash_sha1_string(const unsigned char *sha1,
- const char *string)
+static unsigned int hash_oid_string(const struct object_id *oid,
+ const char *string)
{
- return memhash(sha1, 20) + strhash(string);
+ return memhash(oid->hash, the_hash_algo->rawsz) + strhash(string);
}
static void cache_put_path(struct submodule_cache *cache,
struct submodule *submodule)
{
- unsigned int hash = hash_sha1_string(submodule->gitmodules_sha1,
- submodule->path);
+ unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
+ submodule->path);
struct submodule_entry *e = xmalloc(sizeof(*e));
hashmap_entry_init(e, hash);
e->config = submodule;
static void cache_remove_path(struct submodule_cache *cache,
struct submodule *submodule)
{
- unsigned int hash = hash_sha1_string(submodule->gitmodules_sha1,
- submodule->path);
+ unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
+ submodule->path);
struct submodule_entry e;
struct submodule_entry *removed;
hashmap_entry_init(&e, hash);
static void cache_add(struct submodule_cache *cache,
struct submodule *submodule)
{
- unsigned int hash = hash_sha1_string(submodule->gitmodules_sha1,
- submodule->name);
+ unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
+ submodule->name);
struct submodule_entry *e = xmalloc(sizeof(*e));
hashmap_entry_init(e, hash);
e->config = submodule;
}
static const struct submodule *cache_lookup_path(struct submodule_cache *cache,
- const unsigned char *gitmodules_sha1, const char *path)
+ const struct object_id *gitmodules_oid, const char *path)
{
struct submodule_entry *entry;
- unsigned int hash = hash_sha1_string(gitmodules_sha1, path);
+ unsigned int hash = hash_oid_string(gitmodules_oid, path);
struct submodule_entry key;
struct submodule key_config;
- hashcpy(key_config.gitmodules_sha1, gitmodules_sha1);
+ oidcpy(&key_config.gitmodules_oid, gitmodules_oid);
key_config.path = path;
hashmap_entry_init(&key, hash);
}
static struct submodule *cache_lookup_name(struct submodule_cache *cache,
- const unsigned char *gitmodules_sha1, const char *name)
+ const struct object_id *gitmodules_oid, const char *name)
{
struct submodule_entry *entry;
- unsigned int hash = hash_sha1_string(gitmodules_sha1, name);
+ unsigned int hash = hash_oid_string(gitmodules_oid, name);
struct submodule_entry key;
struct submodule key_config;
- hashcpy(key_config.gitmodules_sha1, gitmodules_sha1);
+ oidcpy(&key_config.gitmodules_oid, gitmodules_oid);
key_config.name = name;
hashmap_entry_init(&key, hash);
return NULL;
}
+int check_submodule_name(const char *name)
+{
+ /* Disallow empty names */
+ if (!*name)
+ return -1;
+
+ /*
+ * Look for '..' as a path component. Check both '/' and '\\' as
+ * separators rather than is_dir_sep(), because we want the name rules
+ * to be consistent across platforms.
+ */
+ goto in_component; /* always start inside component */
+ while (*name) {
+ char c = *name++;
+ if (c == '/' || c == '\\') {
+in_component:
+ if (name[0] == '.' && name[1] == '.' &&
+ (!name[2] || name[2] == '/' || name[2] == '\\'))
+ return -1;
+ }
+ }
+
+ return 0;
+}
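
Since only a full '..' path component (with either separator) is rejected, the rule plays out as follows; an editor's illustration, not part of the patch:

/*
 *   check_submodule_name("plugins/hooks")   ->  0   (ok)
 *   check_submodule_name("dotdot..name")    ->  0   ('..' is not a full component)
 *   check_submodule_name("")                -> -1   (empty name)
 *   check_submodule_name("..")              -> -1
 *   check_submodule_name("sub/../other")    -> -1
 *   check_submodule_name("sub\\..\\other")  -> -1   (i.e. sub\..\other; backslashes
 *                                                    count on every platform)
 */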
+
static int name_and_item_from_var(const char *var, struct strbuf *name,
struct strbuf *item)
{
return 0;
strbuf_add(name, subsection, subsection_len);
+ if (check_submodule_name(name->buf) < 0) {
+ warning(_("ignoring suspicious submodule name: %s"), name->buf);
+ strbuf_release(name);
+ return 0;
+ }
+
strbuf_addstr(item, key);
return 1;
}
static struct submodule *lookup_or_create_by_name(struct submodule_cache *cache,
- const unsigned char *gitmodules_sha1, const char *name)
+ const struct object_id *gitmodules_oid, const char *name)
{
struct submodule *submodule;
struct strbuf name_buf = STRBUF_INIT;
- submodule = cache_lookup_name(cache, gitmodules_sha1, name);
+ submodule = cache_lookup_name(cache, gitmodules_oid, name);
if (submodule)
return submodule;
submodule->branch = NULL;
submodule->recommend_shallow = -1;
- hashcpy(submodule->gitmodules_sha1, gitmodules_sha1);
+ oidcpy(&submodule->gitmodules_oid, gitmodules_oid);
cache_add(cache, submodule);
return parse_push_recurse(opt, arg, 1);
}
-static void warn_multiple_config(const unsigned char *treeish_name,
+static void warn_multiple_config(const struct object_id *treeish_name,
const char *name, const char *option)
{
const char *commit_string = "WORKTREE";
if (treeish_name)
- commit_string = sha1_to_hex(treeish_name);
+ commit_string = oid_to_hex(treeish_name);
warning("%s:.gitmodules, multiple configurations found for "
"'submodule.%s.%s'. Skipping second one!",
commit_string, name, option);
struct parse_config_parameter {
struct submodule_cache *cache;
- const unsigned char *treeish_name;
- const unsigned char *gitmodules_sha1;
+ const struct object_id *treeish_name;
+ const struct object_id *gitmodules_oid;
int overwrite;
};
return 0;
submodule = lookup_or_create_by_name(me->cache,
- me->gitmodules_sha1,
+ me->gitmodules_oid,
name.buf);
if (!strcmp(item.buf, "path")) {
}
} else if (!strcmp(item.buf, "fetchrecursesubmodules")) {
/* when parsing worktree configurations we can die early */
- int die_on_error = is_null_sha1(me->gitmodules_sha1);
+ int die_on_error = is_null_oid(me->gitmodules_oid);
if (!me->overwrite &&
submodule->fetch_recurse != RECURSE_SUBMODULES_NONE)
warn_multiple_config(me->treeish_name, submodule->name,
switch (lookup_type) {
case lookup_name:
- submodule = cache_lookup_name(cache, oid.hash, key);
+ submodule = cache_lookup_name(cache, &oid, key);
break;
case lookup_path:
- submodule = cache_lookup_path(cache, oid.hash, key);
+ submodule = cache_lookup_path(cache, &oid, key);
break;
}
if (submodule)
/* fill the submodule config into the cache */
parameter.cache = cache;
- parameter.treeish_name = treeish_name->hash;
- parameter.gitmodules_sha1 = oid.hash;
+ parameter.treeish_name = treeish_name;
+ parameter.gitmodules_oid = &oid;
parameter.overwrite = 0;
git_config_from_mem(parse_config, CONFIG_ORIGIN_SUBMODULE_BLOB, rev.buf,
config, config_size, ¶meter);
switch (lookup_type) {
case lookup_name:
- return cache_lookup_name(cache, oid.hash, key);
+ return cache_lookup_name(cache, &oid, key);
case lookup_path:
- return cache_lookup_path(cache, oid.hash, key);
+ return cache_lookup_path(cache, &oid, key);
default:
return NULL;
}
parameter.cache = repo->submodule_cache;
parameter.treeish_name = NULL;
- parameter.gitmodules_sha1 = null_sha1;
+ parameter.gitmodules_oid = &null_oid;
parameter.overwrite = 1;
return parse_config(var, value, ¶meter);
repo_read_gitmodules(repo);
}
-const struct submodule *submodule_from_name(const struct object_id *treeish_name,
+const struct submodule *submodule_from_name(struct repository *r,
+ const struct object_id *treeish_name,
const char *name)
{
- gitmodules_read_check(the_repository);
- return config_from(the_repository->submodule_cache, treeish_name, name, lookup_name);
+ gitmodules_read_check(r);
+ return config_from(r->submodule_cache, treeish_name, name, lookup_name);
}
-const struct submodule *submodule_from_path(const struct object_id *treeish_name,
+const struct submodule *submodule_from_path(struct repository *r,
+ const struct object_id *treeish_name,
const char *path)
{
- gitmodules_read_check(the_repository);
- return config_from(the_repository->submodule_cache, treeish_name, path, lookup_path);
-}
-
-const struct submodule *submodule_from_cache(struct repository *repo,
- const struct object_id *treeish_name,
- const char *key)
-{
- gitmodules_read_check(repo);
- return config_from(repo->submodule_cache, treeish_name,
- key, lookup_path);
+ gitmodules_read_check(r);
+ return config_from(r->submodule_cache, treeish_name, path, lookup_path);
}
-void submodule_free(void)
+void submodule_free(struct repository *r)
{
- if (the_repository->submodule_cache)
- submodule_cache_clear(the_repository->submodule_cache);
+ if (r->submodule_cache)
+ submodule_cache_clear(r->submodule_cache);
}
#include "tree-walk.h"
#include "unpack-trees.h"
#include "dir.h"
+ #include "object-store.h"
#include "tree.h"
#include "pathspec.h"
static int update_tree_entry_internal(struct tree_desc *desc, struct strbuf *err)
{
const void *buf = desc->buffer;
- const unsigned char *end = desc->entry.oid->hash + 20;
+ const unsigned char *end = desc->entry.oid->hash + the_hash_algo->rawsz;
unsigned long size = desc->size;
unsigned long len = end - (const unsigned char *)buf;
struct dir_state {
void *tree;
unsigned long size;
- unsigned char sha1[20];
+ struct object_id oid;
};
static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned *mode)
* See the code for enum follow_symlink_result for a description of
* the return values.
*/
-enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_sha1, const char *name, unsigned char *result, struct strbuf *result_path, unsigned *mode)
+enum follow_symlinks_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned *mode)
{
int retval = MISSING_OBJECT;
struct dir_state *parents = NULL;
init_tree_desc(&t, NULL, 0UL);
strbuf_addstr(&namebuf, name);
- hashcpy(current_tree_oid.hash, tree_sha1);
+ oidcpy(¤t_tree_oid, tree_oid);
while (1) {
int find_result;
ALLOC_GROW(parents, parents_nr + 1, parents_alloc);
parents[parents_nr].tree = tree;
parents[parents_nr].size = size;
- hashcpy(parents[parents_nr].sha1, root.hash);
+ oidcpy(&parents[parents_nr].oid, &root);
parents_nr++;
if (namebuf.buf[0] == '\0') {
- hashcpy(result, root.hash);
+ oidcpy(result, &root);
retval = FOUND;
goto done;
}
/* We could end up here via a symlink to dir/.. */
if (namebuf.buf[0] == '\0') {
- hashcpy(result, parents[parents_nr - 1].sha1);
+ oidcpy(result, &parents[parents_nr - 1].oid);
retval = FOUND;
goto done;
}
if (S_ISDIR(*mode)) {
if (!remainder) {
- hashcpy(result, current_tree_oid.hash);
+ oidcpy(result, ¤t_tree_oid);
retval = FOUND;
goto done;
}
1 + first_slash - namebuf.buf);
} else if (S_ISREG(*mode)) {
if (!remainder) {
- hashcpy(result, current_tree_oid.hash);
+ oidcpy(result, ¤t_tree_oid);
retval = FOUND;
} else {
retval = NOT_DIR;
#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
+ #include "object-store.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
oid_to_hex(entry.oid),
base->buf, entry.path);
- oidcpy(&oid, &commit->tree->object.oid);
+ oidcpy(&oid, get_commit_tree_oid(commit));
}
else
continue;
if (obj->type == OBJ_TREE)
return (struct tree *) obj;
else if (obj->type == OBJ_COMMIT)
- obj = &(((struct commit *) obj)->tree->object);
+ obj = &(get_commit_tree(((struct commit *)obj))->object);
else if (obj->type == OBJ_TAG)
obj = ((struct tag *) obj)->tagged;
else
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
+#include "argv-array.h"
#include "repository.h"
#include "config.h"
#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
+ #include "object-store.h"
#include "fetch-object.h"
/*
const char **msgs = opts->msgs;
const char *msg;
+ argv_array_init(&opts->msgs_to_free);
+
if (!strcmp(cmd, "checkout"))
msg = advice_commit_before_merge
? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
"Please commit your changes or stash them before you %s.")
: _("Your local changes to the following files would be overwritten by %s:\n%%s");
msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
- xstrfmt(msg, cmd, cmd);
+ argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);
msgs[ERROR_NOT_UPTODATE_DIR] =
_("Updating the following directories would lose untracked files in them:\n%s");
? _("The following untracked working tree files would be removed by %s:\n%%s"
"Please move or remove them before you %s.")
: _("The following untracked working tree files would be removed by %s:\n%%s");
- msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);
+ msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] =
+ argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);
if (!strcmp(cmd, "checkout"))
msg = advice_commit_before_merge
? _("The following untracked working tree files would be overwritten by %s:\n%%s"
"Please move or remove them before you %s.")
: _("The following untracked working tree files would be overwritten by %s:\n%%s");
- msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);
+ msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =
+ argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);
/*
* Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
opts->unpack_rejects[i].strdup_strings = 1;
}
+void clear_unpack_trees_porcelain(struct unpack_trees_options *opts)
+{
+ argv_array_clear(&opts->msgs_to_free);
+ memset(opts->msgs, 0, sizeof(opts->msgs));
+}
+
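(Aside, illustrative only: the pairing intended by the two helpers above. setup_unpack_trees_porcelain() now formats its messages into opts->msgs_to_free, and clear_unpack_trees_porcelain() releases them; everything besides those two calls below is elided or assumed, not taken from this diff.)

	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	setup_unpack_trees_porcelain(&opts, "checkout");
	/*
	 * ... set opts.fn, opts.src_index/dst_index, then call
	 * unpack_trees() as usual ...
	 */
	clear_unpack_trees_porcelain(&opts);	/* frees msgs_to_free, zeroes msgs[] */
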
static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
unsigned int set, unsigned int clear)
{
if (!state && ce->ce_flags & CE_WT_REMOVE) {
repo_read_gitmodules(the_repository);
} else if (state && (ce->ce_flags & CE_UPDATE)) {
- submodule_free();
+ submodule_free(the_repository);
checkout_entry(ce, state, NULL);
repo_read_gitmodules(the_repository);
}
if (ce->ce_flags & CE_UPDATE) {
if (ce->ce_flags & CE_WT_REMOVE)
- die("BUG: both update and delete flags are set on %s",
+ BUG("both update and delete flags are set on %s",
ce->name);
display_progress(progress, ++cnt);
ce->ce_flags &= ~CE_UPDATE;
o->result.timestamp.sec = o->src_index->timestamp.sec;
o->result.timestamp.nsec = o->src_index->timestamp.nsec;
o->result.version = o->src_index->version;
- o->result.split_index = o->src_index->split_index;
- if (o->result.split_index)
+ if (!o->src_index->split_index) {
+ o->result.split_index = NULL;
+ } else if (o->src_index == o->dst_index) {
+ /*
+ * o->dst_index (and thus o->src_index) will be discarded
+ * and overwritten with o->result at the end of this function,
+ * so just use src_index's split_index to avoid having to
+ * create a new one.
+ */
+ o->result.split_index = o->src_index->split_index;
o->result.split_index->refcount++;
- hashcpy(o->result.sha1, o->src_index->sha1);
+ } else {
+ o->result.split_index = init_split_index(&o->result);
+ }
+ oidcpy(&o->result.oid, &o->src_index->oid);
o->merge_size = len;
mark_all_ce_unused(o->src_index);
}
}
- o->src_index = NULL;
ret = check_updates(o) ? (-2) : 0;
if (o->dst_index) {
if (!ret) {
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
- move_index_extensions(&o->result, o->dst_index);
+ move_index_extensions(&o->result, o->src_index);
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
discard_index(&o->result);
}
+ o->src_index = NULL;
done:
clear_exclude_list(&el);
add_rejected_path(o, error_type, ce->name);
}
-static int verify_uptodate(const struct cache_entry *ce,
- struct unpack_trees_options *o)
+int verify_uptodate(const struct cache_entry *ce,
+ struct unpack_trees_options *o)
{
if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
return 0;
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
+ #include "object-store.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
-#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "sigchain.h"
#include "version.h"
#include "string-list.h"
-#include "parse-options.h"
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
#include "quote.h"
-
-static const char * const upload_pack_usage[] = {
- N_("git upload-pack [<options>] <dir>"),
- NULL
-};
+#include "upload-pack.h"
+#include "serve.h"
/* Remember to update object flag allocation in object.h */
#define THEY_HAVE (1u << 11)
* otherwise maximum packet size (up to 65520 bytes).
*/
static int use_sideband;
-static int advertise_refs;
static int stateless_rpc;
static const char *pack_objects_hook;
break;
default:
got_common = 1;
- memcpy(last_hex, oid_to_hex(&oid), 41);
+ oid_to_hex_r(last_hex, &oid);
if (multi_ack == 2)
packet_write_fmt(1, "ACK %s common\n", last_hex);
else if (multi_ack)
"rev-list", "--stdin", NULL,
};
struct object *o;
- char namebuf[42]; /* ^ + SHA-1 + LF */
+ char namebuf[GIT_MAX_HEXSZ + 2]; /* ^ + hash + LF */
int i;
cmd->argv = argv;
struct child_process cmd = CHILD_PROCESS_INIT;
int i;
struct object *o;
- char namebuf[42]; /* ^ + SHA-1 + LF */
+ char namebuf[GIT_MAX_HEXSZ + 2]; /* ^ + hash + LF */
+ const unsigned hexsz = the_hash_algo->hexsz;
if (do_reachable_revlist(&cmd, src, reachable) < 0)
return -1;
- while ((i = read_in_full(cmd.out, namebuf, 41)) == 41) {
+ while ((i = read_in_full(cmd.out, namebuf, hexsz + 1)) == hexsz + 1) {
struct object_id sha1;
+ const char *p;
- if (namebuf[40] != '\n' || get_oid_hex(namebuf, &sha1))
+ if (parse_oid_hex(namebuf, &sha1, &p) || *p != '\n')
break;
o = lookup_object(sha1.hash);
if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
packet_write_fmt(1, "shallow %s",
oid_to_hex(&object->oid));
- register_shallow(&object->oid);
+ register_shallow(the_repository, &object->oid);
shallow_nr++;
}
result = result->next;
add_object_array(object, NULL, &extra_edge_obj);
}
/* make sure commit traversal conforms to client */
- register_shallow(&object->oid);
+ register_shallow(the_repository, &object->oid);
}
}
static void deepen(int depth, int deepen_relative,
struct object_array *shallows)
{
- if (depth == INFINITE_DEPTH && !is_repository_shallow()) {
+ if (depth == INFINITE_DEPTH && !is_repository_shallow(the_repository)) {
int i;
for (i = 0; i < shallows->nr; i++) {
}
send_unshallow(shallows);
- packet_flush(1);
}
static void deepen_by_rev_list(int ac, const char **av,
send_shallow(result);
free_commit_list(result);
send_unshallow(shallows);
- packet_flush(1);
+}
+
+/* Returns 1 if a shallow list is sent or 0 otherwise */
+static int send_shallow_list(int depth, int deepen_rev_list,
+ timestamp_t deepen_since,
+ struct string_list *deepen_not,
+ struct object_array *shallows)
+{
+ int ret = 0;
+
+ if (depth > 0 && deepen_rev_list)
+ die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
+ if (depth > 0) {
+ deepen(depth, deepen_relative, shallows);
+ ret = 1;
+ } else if (deepen_rev_list) {
+ struct argv_array av = ARGV_ARRAY_INIT;
+ int i;
+
+ argv_array_push(&av, "rev-list");
+ if (deepen_since)
+ argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
+ if (deepen_not->nr) {
+ argv_array_push(&av, "--not");
+ for (i = 0; i < deepen_not->nr; i++) {
+ struct string_list_item *s = deepen_not->items + i;
+ argv_array_push(&av, s->string);
+ }
+ argv_array_push(&av, "--not");
+ }
+ for (i = 0; i < want_obj.nr; i++) {
+ struct object *o = want_obj.objects[i].item;
+ argv_array_push(&av, oid_to_hex(&o->oid));
+ }
+ deepen_by_rev_list(av.argc, av.argv, shallows);
+ argv_array_clear(&av);
+ ret = 1;
+ } else {
+ if (shallows->nr > 0) {
+ int i;
+ for (i = 0; i < shallows->nr; i++)
+ register_shallow(the_repository,
+ &shallows->objects[i].item->oid);
+ }
+ }
+
+ shallow_nr += shallows->nr;
+ return ret;
+}
+
+static int process_shallow(const char *line, struct object_array *shallows)
+{
+ const char *arg;
+ if (skip_prefix(line, "shallow ", &arg)) {
+ struct object_id oid;
+ struct object *object;
+ if (get_oid_hex(arg, &oid))
+ die("invalid shallow line: %s", line);
+ object = parse_object(&oid);
+ if (!object)
+ return 1;
+ if (object->type != OBJ_COMMIT)
+ die("invalid shallow object %s", oid_to_hex(&oid));
+ if (!(object->flags & CLIENT_SHALLOW)) {
+ object->flags |= CLIENT_SHALLOW;
+ add_object_array(object, NULL, shallows);
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen(const char *line, int *depth)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen ", &arg)) {
+ char *end = NULL;
+ *depth = (int)strtol(arg, &end, 0);
+ if (!end || *end || *depth <= 0)
+ die("Invalid deepen: %s", line);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen_since(const char *line, timestamp_t *deepen_since, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-since ", &arg)) {
+ char *end = NULL;
+ *deepen_since = parse_timestamp(arg, &end, 0);
+ if (!end || *end || !*deepen_since ||
+ /* revisions.c's max_age -1 is special */
+ *deepen_since == -1)
+ die("Invalid deepen-since: %s", line);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static int process_deepen_not(const char *line, struct string_list *deepen_not, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-not ", &arg)) {
+ char *ref = NULL;
+ struct object_id oid;
+ if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+ die("git upload-pack: ambiguous deepen-not: %s", line);
+ string_list_append(deepen_not, ref);
+ free(ref);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
}
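
(Aside: the request lines these four helpers accept, sketched with placeholder values in angle brackets; the literal keywords come from the fetch protocol, the values are not taken from this diff.)

	shallow <object-id>        handled by process_shallow(), collected into 'shallows'
	deepen <positive-depth>    handled by process_deepen()
	deepen-since <unix-time>   handled by process_deepen_since(), turns on deepen_rev_list
	deepen-not <ref>           handled by process_deepen_not(), turns on deepen_rev_list
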
static void receive_needs(void)
if (!line)
break;
- if (skip_prefix(line, "shallow ", &arg)) {
- struct object_id oid;
- struct object *object;
- if (get_oid_hex(arg, &oid))
- die("invalid shallow line: %s", line);
- object = parse_object(&oid);
- if (!object)
- continue;
- if (object->type != OBJ_COMMIT)
- die("invalid shallow object %s", oid_to_hex(&oid));
- if (!(object->flags & CLIENT_SHALLOW)) {
- object->flags |= CLIENT_SHALLOW;
- add_object_array(object, NULL, &shallows);
- }
+ if (process_shallow(line, &shallows))
continue;
- }
- if (skip_prefix(line, "deepen ", &arg)) {
- char *end = NULL;
- depth = strtol(arg, &end, 0);
- if (!end || *end || depth <= 0)
- die("Invalid deepen: %s", line);
+ if (process_deepen(line, &depth))
continue;
- }
- if (skip_prefix(line, "deepen-since ", &arg)) {
- char *end = NULL;
- deepen_since = parse_timestamp(arg, &end, 0);
- if (!end || *end || !deepen_since ||
- /* revisions.c's max_age -1 is special */
- deepen_since == -1)
- die("Invalid deepen-since: %s", line);
- deepen_rev_list = 1;
+ if (process_deepen_since(line, &deepen_since, &deepen_rev_list))
continue;
- }
- if (skip_prefix(line, "deepen-not ", &arg)) {
- char *ref = NULL;
- struct object_id oid;
- if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
- die("git upload-pack: ambiguous deepen-not: %s", line);
- string_list_append(&deepen_not, ref);
- free(ref);
- deepen_rev_list = 1;
+ if (process_deepen_not(line, &deepen_not, &deepen_rev_list))
continue;
- }
+
if (skip_prefix(line, "filter ", &arg)) {
if (!filter_capability_requested)
die("git upload-pack: filtering capability not negotiated");
parse_list_objects_filter(&filter_options, arg);
continue;
}
+
if (!skip_prefix(line, "want ", &arg) ||
- get_oid_hex(arg, &oid_buf))
+ parse_oid_hex(arg, &oid_buf, &features))
die("git upload-pack: protocol error, "
- "expected to get sha, not '%s'", line);
-
- features = arg + 40;
+ "expected to get object ID, not '%s'", line);
if (parse_feature_request(features, "deepen-relative"))
deepen_relative = 1;
if (depth == 0 && !deepen_rev_list && shallows.nr == 0)
return;
- if (depth > 0 && deepen_rev_list)
- die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
- if (depth > 0)
- deepen(depth, deepen_relative, &shallows);
- else if (deepen_rev_list) {
- struct argv_array av = ARGV_ARRAY_INIT;
- int i;
-
- argv_array_push(&av, "rev-list");
- if (deepen_since)
- argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
- if (deepen_not.nr) {
- argv_array_push(&av, "--not");
- for (i = 0; i < deepen_not.nr; i++) {
- struct string_list_item *s = deepen_not.items + i;
- argv_array_push(&av, s->string);
- }
- argv_array_push(&av, "--not");
- }
- for (i = 0; i < want_obj.nr; i++) {
- struct object *o = want_obj.objects[i].item;
- argv_array_push(&av, oid_to_hex(&o->oid));
- }
- deepen_by_rev_list(av.argc, av.argv, &shallows);
- argv_array_clear(&av);
- }
- else
- if (shallows.nr > 0) {
- int i;
- for (i = 0; i < shallows.nr; i++)
- register_shallow(the_repository,
- &shallows.objects[i].item->oid);
- }
- shallow_nr += shallows.nr;
+ if (send_shallow_list(depth, deepen_rev_list, deepen_since,
+ &deepen_not, &shallows))
+ packet_flush(1);
object_array_clear(&shallows);
}
return 0;
}
-static void upload_pack(void)
-{
- struct string_list symref = STRING_LIST_INIT_DUP;
-
- head_ref_namespaced(find_symref, &symref);
-
- if (advertise_refs || !stateless_rpc) {
- reset_timeout();
- head_ref_namespaced(send_ref, &symref);
- for_each_namespaced_ref(send_ref, &symref);
- advertise_shallow_grafts(1);
- packet_flush(1);
- } else {
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
- }
- string_list_clear(&symref, 1);
- if (advertise_refs)
- return;
-
- receive_needs();
- if (want_obj.nr) {
- get_common_commits();
- create_pack_file();
- }
-}
-
static int upload_pack_config(const char *var, const char *value, void *unused)
{
if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
return parse_hide_refs_config(var, value, "uploadpack");
}
-int cmd_main(int argc, const char **argv)
+void upload_pack(struct upload_pack_options *options)
{
- const char *dir;
- int strict = 0;
- struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
- N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-refs", &advertise_refs,
- N_("exit immediately after initial ref advertisement")),
- OPT_BOOL(0, "strict", &strict,
- N_("do not try <directory>/.git/ if <directory> is no Git directory")),
- OPT_INTEGER(0, "timeout", &timeout,
- N_("interrupt transfer after <n> seconds of inactivity")),
- OPT_END()
- };
+ struct string_list symref = STRING_LIST_INIT_DUP;
+
+ stateless_rpc = options->stateless_rpc;
+ timeout = options->timeout;
+ daemon_mode = options->daemon_mode;
- packet_trace_identity("upload-pack");
- check_replace_refs = 0;
+ git_config(upload_pack_config, NULL);
+
+ head_ref_namespaced(find_symref, &symref);
- argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+ if (options->advertise_refs || !stateless_rpc) {
+ reset_timeout();
+ head_ref_namespaced(send_ref, &symref);
+ for_each_namespaced_ref(send_ref, &symref);
+ advertise_shallow_grafts(1);
+ packet_flush(1);
+ } else {
+ head_ref_namespaced(check_ref, NULL);
+ for_each_namespaced_ref(check_ref, NULL);
+ }
+ string_list_clear(&symref, 1);
+ if (options->advertise_refs)
+ return;
- if (argc != 1)
- usage_with_options(upload_pack_usage, options);
+ receive_needs();
+ if (want_obj.nr) {
+ get_common_commits();
+ create_pack_file();
+ }
+}
- if (timeout)
- daemon_mode = 1;
+struct upload_pack_data {
+ struct object_array wants;
+ struct oid_array haves;
- setup_path();
+ struct object_array shallows;
+ struct string_list deepen_not;
+ int depth;
+ timestamp_t deepen_since;
+ int deepen_rev_list;
+ int deepen_relative;
- dir = argv[0];
+ unsigned stateless_rpc : 1;
- if (!enter_repo(dir, strict))
- die("'%s' does not appear to be a git repository", dir);
+ unsigned use_thin_pack : 1;
+ unsigned use_ofs_delta : 1;
+ unsigned no_progress : 1;
+ unsigned use_include_tag : 1;
+ unsigned done : 1;
+};
+
+static void upload_pack_data_init(struct upload_pack_data *data)
+{
+ struct object_array wants = OBJECT_ARRAY_INIT;
+ struct oid_array haves = OID_ARRAY_INIT;
+ struct object_array shallows = OBJECT_ARRAY_INIT;
+ struct string_list deepen_not = STRING_LIST_INIT_DUP;
+
+ memset(data, 0, sizeof(*data));
+ data->wants = wants;
+ data->haves = haves;
+ data->shallows = shallows;
+ data->deepen_not = deepen_not;
+}
+
+static void upload_pack_data_clear(struct upload_pack_data *data)
+{
+ object_array_clear(&data->wants);
+ oid_array_clear(&data->haves);
+ object_array_clear(&data->shallows);
+ string_list_clear(&data->deepen_not, 0);
+}
+
+static int parse_want(const char *line)
+{
+ const char *arg;
+ if (skip_prefix(line, "want ", &arg)) {
+ struct object_id oid;
+ struct object *o;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: protocol error, "
+ "expected to get oid, not '%s'", line);
+
+ o = parse_object(&oid);
+ if (!o) {
+ packet_write_fmt(1,
+ "ERR upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ die("git upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ }
+
+ if (!(o->flags & WANTED)) {
+ o->flags |= WANTED;
+ add_object_array(o, NULL, &want_obj);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int parse_have(const char *line, struct oid_array *haves)
+{
+ const char *arg;
+ if (skip_prefix(line, "have ", &arg)) {
+ struct object_id oid;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: expected SHA1 object, got '%s'", arg);
+ oid_array_append(haves, &oid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void process_args(struct packet_reader *request,
+ struct upload_pack_data *data)
+{
+ while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+ const char *arg = request->line;
+ const char *p;
+
+ /* process want */
+ if (parse_want(arg))
+ continue;
+ /* process have line */
+ if (parse_have(arg, &data->haves))
+ continue;
+
+ /* process args like thin-pack */
+ if (!strcmp(arg, "thin-pack")) {
+ use_thin_pack = 1;
+ continue;
+ }
+ if (!strcmp(arg, "ofs-delta")) {
+ use_ofs_delta = 1;
+ continue;
+ }
+ if (!strcmp(arg, "no-progress")) {
+ no_progress = 1;
+ continue;
+ }
+ if (!strcmp(arg, "include-tag")) {
+ use_include_tag = 1;
+ continue;
+ }
+ if (!strcmp(arg, "done")) {
+ data->done = 1;
+ continue;
+ }
+
+ /* Shallow related arguments */
+ if (process_shallow(arg, &data->shallows))
+ continue;
+ if (process_deepen(arg, &data->depth))
+ continue;
+ if (process_deepen_since(arg, &data->deepen_since,
+ &data->deepen_rev_list))
+ continue;
+ if (process_deepen_not(arg, &data->deepen_not,
+ &data->deepen_rev_list))
+ continue;
+ if (!strcmp(arg, "deepen-relative")) {
+ data->deepen_relative = 1;
+ continue;
+ }
+
+ if (allow_filter && skip_prefix(arg, "filter ", &p)) {
+ parse_list_objects_filter(&filter_options, p);
+ continue;
+ }
+
+ /* ignore unknown lines maybe? */
+ die("unexpected line: '%s'", arg);
+ }
+}
+
+static int process_haves(struct oid_array *haves, struct oid_array *common)
+{
+ int i;
+
+ /* Process haves */
+ for (i = 0; i < haves->nr; i++) {
+ const struct object_id *oid = &haves->oid[i];
+ struct object *o;
+ int we_knew_they_have = 0;
+
+ if (!has_object_file(oid))
+ continue;
+
+ oid_array_append(common, oid);
+
+ o = parse_object(oid);
+ if (!o)
+ die("oops (%s)", oid_to_hex(oid));
+ if (o->type == OBJ_COMMIT) {
+ struct commit_list *parents;
+ struct commit *commit = (struct commit *)o;
+ if (o->flags & THEY_HAVE)
+ we_knew_they_have = 1;
+ else
+ o->flags |= THEY_HAVE;
+ if (!oldest_have || (commit->date < oldest_have))
+ oldest_have = commit->date;
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next)
+ parents->item->object.flags |= THEY_HAVE;
+ }
+ if (!we_knew_they_have)
+ add_object_array(o, NULL, &have_obj);
+ }
+
+ return 0;
+}
+
+static int send_acks(struct oid_array *acks, struct strbuf *response)
+{
+ int i;
+
+ packet_buf_write(response, "acknowledgments\n");
+
+ /* Send Acks */
+ if (!acks->nr)
+ packet_buf_write(response, "NAK\n");
+
+ for (i = 0; i < acks->nr; i++) {
+ packet_buf_write(response, "ACK %s\n",
+ oid_to_hex(&acks->oid[i]));
+ }
+
+ if (ok_to_give_up()) {
+ /* Send Ready */
+ packet_buf_write(response, "ready\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_haves_and_send_acks(struct upload_pack_data *data)
+{
+ struct oid_array common = OID_ARRAY_INIT;
+ struct strbuf response = STRBUF_INIT;
+ int ret = 0;
+
+ process_haves(&data->haves, &common);
+ if (data->done) {
+ ret = 1;
+ } else if (send_acks(&common, &response)) {
+ packet_buf_delim(&response);
+ ret = 1;
+ } else {
+ /* Add Flush */
+ packet_buf_flush(&response);
+ ret = 0;
+ }
+
+ /* Send response */
+ write_or_die(1, response.buf, response.len);
+ strbuf_release(&response);
+
+ oid_array_clear(&data->haves);
+ oid_array_clear(&common);
+ return ret;
+}
+
+static void send_shallow_info(struct upload_pack_data *data)
+{
+ /* No shallow info needs to be sent */
+ if (!data->depth && !data->deepen_rev_list && !data->shallows.nr &&
+ !is_repository_shallow(the_repository))
+ return;
+
+ packet_write_fmt(1, "shallow-info\n");
+
+ if (!send_shallow_list(data->depth, data->deepen_rev_list,
+ data->deepen_since, &data->deepen_not,
+ &data->shallows) &&
+ is_repository_shallow(the_repository))
+ deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows);
+
+ packet_delim(1);
+}
+
+enum fetch_state {
+ FETCH_PROCESS_ARGS = 0,
+ FETCH_SEND_ACKS,
+ FETCH_SEND_PACK,
+ FETCH_DONE,
+};
+
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request)
+{
+ enum fetch_state state = FETCH_PROCESS_ARGS;
+ struct upload_pack_data data;
git_config(upload_pack_config, NULL);
- switch (determine_protocol_version_server()) {
- case protocol_v1:
- /*
- * v1 is just the original protocol with a version string,
- * so just fall through after writing the version string.
- */
- if (advertise_refs || !stateless_rpc)
- packet_write_fmt(1, "version 1\n");
-
- /* fallthrough */
- case protocol_v0:
- upload_pack();
- break;
- case protocol_unknown_version:
- BUG("unknown protocol version");
+ upload_pack_data_init(&data);
+ use_sideband = LARGE_PACKET_MAX;
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_PROCESS_ARGS:
+ process_args(request, &data);
+
+ if (!want_obj.nr) {
+ /*
+ * Request didn't contain any 'want' lines,
+ * guess they didn't want anything.
+ */
+ state = FETCH_DONE;
+ } else if (data.haves.nr) {
+ /*
+ * Request had 'have' lines, so let's ACK them.
+ */
+ state = FETCH_SEND_ACKS;
+ } else {
+ /*
+ * Request had 'want's but no 'have's so we can
+ * immediately go to construct and send a pack.
+ */
+ state = FETCH_SEND_PACK;
+ }
+ break;
+ case FETCH_SEND_ACKS:
+ if (process_haves_and_send_acks(&data))
+ state = FETCH_SEND_PACK;
+ else
+ state = FETCH_DONE;
+ break;
+ case FETCH_SEND_PACK:
+ send_shallow_info(&data);
+
+ packet_write_fmt(1, "packfile\n");
+ create_pack_file();
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
}
+ upload_pack_data_clear(&data);
return 0;
}
+
+int upload_pack_advertise(struct repository *r,
+ struct strbuf *value)
+{
+ if (value) {
+ int allow_filter_value;
+ strbuf_addstr(value, "shallow");
+ if (!repo_config_get_bool(the_repository,
+ "uploadpack.allowfilter",
+ &allow_filter_value) &&
+ allow_filter_value)
+ strbuf_addstr(value, " filter");
+ }
+ return 1;
+}
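
(Aside, illustrative only: upload_pack_v2() and upload_pack_advertise() match the advertise/command callback pair used by the protocol v2 dispatcher in serve.c. A table entry wiring them up might look like the sketch below; treat the exact table name and layout as an assumption rather than part of this diff.)

	static struct protocol_capability capabilities[] = {
		/* ... */
		{ "fetch", upload_pack_advertise, upload_pack_v2 },
		/* ... */
	};
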
#include "cache.h"
#include "walker.h"
+ #include "object-store.h"
#include "commit.h"
#include "tree.h"
#include "tree-walk.h"
static int process_commit(struct walker *walker, struct commit *commit)
{
+ struct commit_list *parents;
+
if (parse_commit(commit))
return -1;
walker_say(walker, "walk %s\n", oid_to_hex(&commit->object.oid));
- if (walker->get_tree) {
- if (process(walker, &commit->tree->object))
+ if (process(walker, &get_commit_tree(commit)->object))
+ return -1;
+
+ for (parents = commit->parents; parents; parents = parents->next) {
+ if (process(walker, &parents->item->object))
return -1;
- if (!walker->get_all)
- walker->get_tree = 0;
- }
- if (walker->get_history) {
- struct commit_list *parents = commit->parents;
- for (; parents; parents = parents->next) {
- if (process(walker, &parents->item->object))
- return -1;
- }
}
+
return 0;
}
s->show_stash = 0;
s->ahead_behind_flags = AHEAD_BEHIND_UNSPECIFIED;
s->display_comment_prefix = 0;
+ s->detect_rename = -1;
+ s->rename_score = -1;
+ s->rename_limit = -1;
}
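
(Aside, illustrative only: with detect_rename, rename_limit and rename_score initialized to -1, a value of -1 now means "inherit the diffopt default". A hypothetical caller could override rename detection as sketched below; the config key and wiring are assumptions, not part of this diff.)

	struct wt_status s;
	int use_renames = -1;

	wt_status_prepare(&s);
	if (!git_config_get_bool("status.renames", &use_renames))
		s.detect_rename = use_renames ? DIFF_DETECT_RENAME : 0;
	/* leaving rename_limit/rename_score at -1 keeps the diffopt defaults */
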
static void wt_longstatus_print_unmerged_header(struct wt_status *s)
case 7:
return _("both modified:");
default:
- die("BUG: unhandled unmerged status %x", stagemask);
+ BUG("unhandled unmerged status %x", stagemask);
}
}
status = d->worktree_status;
break;
default:
- die("BUG: unhandled change_type %d in wt_longstatus_print_change_data",
+ BUG("unhandled change_type %d in wt_longstatus_print_change_data",
change_type);
}
status_printf(s, color(WT_STATUS_HEADER, s), "\t");
what = wt_status_diff_status_string(status);
if (!what)
- die("BUG: unhandled diff status %c", status);
+ BUG("unhandled diff status %c", status);
len = label_width - utf8_strwidth(what);
assert(len >= 0);
if (one_name != two_name)
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
if (d->rename_status)
- die("BUG: multiple renames on the same target? how?");
+ BUG("multiple renames on the same target? how?");
d->rename_source = xstrdup(p->one->path);
d->rename_score = p->score * 100 / MAX_SCORE;
d->rename_status = p->status;
break;
default:
- die("BUG: unhandled diff-files status '%c'", p->status);
+ BUG("unhandled diff-files status '%c'", p->status);
break;
}
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
if (d->rename_status)
- die("BUG: multiple renames on the same target? how?");
+ BUG("multiple renames on the same target? how?");
d->rename_source = xstrdup(p->one->path);
d->rename_score = p->score * 100 / MAX_SCORE;
d->rename_status = p->status;
break;
default:
- die("BUG: unhandled diff-index status '%c'", p->status);
+ BUG("unhandled diff-index status '%c'", p->status);
break;
}
}
}
rev.diffopt.format_callback = wt_status_collect_changed_cb;
rev.diffopt.format_callback_data = s;
+ rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
+ rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
+ rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
copy_pathspec(&rev.prune_data, &s->pathspec);
run_diff_files(&rev, 0);
}
init_revisions(&rev, NULL);
memset(&opt, 0, sizeof(opt));
- opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference;
+ opt.def = s->is_initial ? empty_tree_oid_hex() : s->reference;
setup_revisions(0, NULL, &rev, &opt);
rev.diffopt.flags.override_submodule_config = 1;
rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
rev.diffopt.format_callback = wt_status_collect_updated_cb;
rev.diffopt.format_callback_data = s;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
- rev.diffopt.rename_limit = 200;
- rev.diffopt.break_opt = 0;
+ rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
+ rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
+ rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
copy_pathspec(&rev.prune_data, &s->pathspec);
run_diff_index(&rev, 1);
}
rev.diffopt.ita_invisible_in_index = 1;
memset(&opt, 0, sizeof(opt));
- opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference;
+ opt.def = s->is_initial ? empty_tree_oid_hex() : s->reference;
setup_revisions(0, NULL, &rev, &opt);
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+ rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
+ rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
+ rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
rev.diffopt.file = s->fp;
rev.diffopt.close_file = 0;
/*
status_printf_ln(s, color,
_(" (use \"git rebase --abort\" to check out the original branch)"));
}
- } else if (state->rebase_in_progress || !stat(git_path_merge_msg(), &st)) {
+ } else if (state->rebase_in_progress || !stat(git_path_merge_msg(the_repository), &st)) {
print_rebase_state(s, state, color);
if (s->hints)
status_printf_ln(s, color,
struct stat st;
struct object_id oid;
- if (!stat(git_path_merge_head(), &st)) {
+ if (!stat(git_path_merge_head(the_repository), &st)) {
state->merge_in_progress = 1;
} else if (wt_status_check_rebase(NULL, state)) {
; /* all set */
- } else if (!stat(git_path_cherry_pick_head(), &st) &&
+ } else if (!stat(git_path_cherry_pick_head(the_repository), &st) &&
!get_oid("CHERRY_PICK_HEAD", &oid)) {
state->cherry_pick_in_progress = 1;
oidcpy(&state->cherry_pick_head_oid, &oid);
}
wt_status_check_bisect(NULL, state);
- if (!stat(git_path_revert_head(), &st) &&
+ if (!stat(git_path_revert_head(the_repository), &st) &&
!get_oid("REVERT_HEAD", &oid)) {
state->revert_in_progress = 1;
oidcpy(&state->revert_head_oid, &oid);
case 6: key = "AA"; break; /* both added */
case 7: key = "UU"; break; /* both modified */
default:
- die("BUG: unhandled unmerged status %x", d->stagemask);
+ BUG("unhandled unmerged status %x", d->stagemask);
}
/*
sum |= (1 << (stage - 1));
}
if (sum != d->stagemask)
- die("BUG: observed stagemask 0x%x != expected stagemask 0x%x", sum, d->stagemask);
+ BUG("observed stagemask 0x%x != expected stagemask 0x%x", sum, d->stagemask);
if (s->null_termination)
path_index = it->string;
wt_porcelain_v2_print(s);
break;
case STATUS_FORMAT_UNSPECIFIED:
- die("BUG: finalize_deferred_config() should have been called");
+ BUG("finalize_deferred_config() should have been called");
break;
case STATUS_FORMAT_NONE:
case STATUS_FORMAT_LONG: