The in-core repository instances are passed through more codepaths.
* sb/more-repo-in-api: (23 commits)
t/helper/test-repository: celebrate independence from the_repository
path.h: make REPO_GIT_PATH_FUNC repository agnostic
commit: prepare free_commit_buffer and release_commit_memory for any repo
commit-graph: convert remaining functions to handle any repo
submodule: don't add submodule as odb for push
submodule: use submodule repos for object lookup
pretty: prepare format_commit_message to handle arbitrary repositories
commit: prepare logmsg_reencode to handle arbitrary repositories
commit: prepare repo_unuse_commit_buffer to handle any repo
commit: prepare get_commit_buffer to handle any repo
commit-reach: prepare in_merge_bases[_many] to handle any repo
commit-reach: prepare get_merge_bases to handle any repo
commit-reach.c: allow get_merge_bases_many_0 to handle any repo
commit-reach.c: allow remove_redundant to handle any repo
commit-reach.c: allow merge_bases_many to handle any repo
commit-reach.c: allow paint_down_to_common to handle any repo
commit: allow parse_commit* to handle any repo
object: parse_object to honor its repository argument
object-store: prepare has_{sha1, object}_file to handle any repo
object-store: prepare read_object_file to deal with any repo
...
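
The pattern running through the series: functions that used to operate implicitly on the_repository gain an explicit "struct repository *" parameter under a repo_*() name, and the old names are kept as compatibility macros that pass the_repository. As a rough sketch of the resulting calling convention (not part of the series itself; "r" is assumed to be some already-initialized repository such as a submodule's, and error handling is trimmed):

	static void show_commit_subject(struct repository *r, struct commit *c)
	{
		unsigned long size;
		const void *buf;

		if (repo_parse_commit(r, c))
			return;		/* parse via r's object store */
		buf = repo_get_commit_buffer(r, c, &size);
		/* ... inspect the raw commit buffer ... */
		repo_unuse_commit_buffer(r, c, buf);
	}

Callers that still want the_repository keep compiling unchanged: unless NO_THE_REPOSITORY_COMPATIBILITY_MACROS is defined, parse_commit(), get_commit_buffer() and unuse_commit_buffer() expand to the repo_*() variants with the_repository filled in, as the header hunks below show.
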
#include "packfile.h"
#include "object-store.h"
#include "run-command.h"
+#include "worktree.h"
#define REACHABLE 0x0001
#define SEEN 0x0002
static int keep_cache_objects;
static struct fsck_options fsck_walk_options = FSCK_OPTIONS_DEFAULT;
static struct fsck_options fsck_obj_options = FSCK_OPTIONS_DEFAULT;
-static struct object_id head_oid;
-static const char *head_points_at;
static int errors_found;
static int write_lost_and_found;
static int verbose;
static const char *describe_object(struct object *obj)
{
- static struct strbuf buf = STRBUF_INIT;
- char *name = name_objects ?
- lookup_decoration(fsck_walk_options.object_names, obj) : NULL;
+ static struct strbuf bufs[] = {
+ STRBUF_INIT, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT
+ };
+ static int b = 0;
+ struct strbuf *buf;
+ char *name = NULL;
- strbuf_reset(&buf);
- strbuf_addstr(&buf, oid_to_hex(&obj->oid));
+ if (name_objects)
+ name = lookup_decoration(fsck_walk_options.object_names, obj);
+
+ buf = bufs + b;
+ b = (b + 1) % ARRAY_SIZE(bufs);
+ strbuf_reset(buf);
+ strbuf_addstr(buf, oid_to_hex(&obj->oid));
if (name)
- strbuf_addf(&buf, " (%s)", name);
+ strbuf_addf(buf, " (%s)", name);
- return buf.buf;
+ return buf->buf;
}
static const char *printable_type(struct object *obj)
ret = type_name(obj->type);
if (!ret)
- ret = "unknown";
+ ret = _("unknown");
return ret;
}
return git_default_config(var, value, cb);
}
-static void objreport(struct object *obj, const char *msg_type,
- const char *err)
-{
- fprintf(stderr, "%s in %s %s: %s\n",
- msg_type, printable_type(obj), describe_object(obj), err);
-}
-
static int objerror(struct object *obj, const char *err)
{
errors_found |= ERROR_OBJECT;
- objreport(obj, "error", err);
+ /* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("error in %s %s: %s"),
+ printable_type(obj), describe_object(obj), err);
return -1;
}
static int fsck_error_func(struct fsck_options *o,
struct object *obj, int type, const char *message)
{
- objreport(obj, (type == FSCK_WARN) ? "warning" : "error", message);
- return (type == FSCK_WARN) ? 0 : 1;
+ switch (type) {
+ case FSCK_WARN:
+ /* TRANSLATORS: e.g. warning in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("warning in %s %s: %s"),
+ printable_type(obj), describe_object(obj), message);
+ return 0;
+ case FSCK_ERROR:
+ /* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("error in %s %s: %s"),
+ printable_type(obj), describe_object(obj), message);
+ return 1;
+ default:
+ BUG("%d (FSCK_IGNORE?) should never trigger this callback", type);
+ }
}
static struct object_array pending;
*/
if (!obj) {
/* ... these references to parent->fld are safe here */
- printf("broken link from %7s %s\n",
- printable_type(parent), describe_object(parent));
- printf("broken link from %7s %s\n",
- (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
+ printf_ln(_("broken link from %7s %s"),
+ printable_type(parent), describe_object(parent));
+ printf_ln(_("broken link from %7s %s"),
+ (type == OBJ_ANY ? _("unknown") : type_name(type)),
+ _("unknown"));
errors_found |= ERROR_REACHABLE;
return 1;
}
if (type != OBJ_ANY && obj->type != type)
/* ... and the reference to parent is safe here */
- objerror(parent, "wrong object type in link");
+ objerror(parent, _("wrong object type in link"));
if (obj->flags & REACHABLE)
return 0;
if (!(obj->flags & HAS_OBJ)) {
if (parent && !has_object_file(&obj->oid)) {
- printf("broken link from %7s %s\n",
- printable_type(parent), describe_object(parent));
- printf(" to %7s %s\n",
- printable_type(obj), describe_object(obj));
+ printf_ln(_("broken link from %7s %s\n"
+ " to %7s %s"),
+ printable_type(parent),
+ describe_object(parent),
+ printable_type(obj),
+ describe_object(obj));
errors_found |= ERROR_REACHABLE;
}
return 1;
return;
if (has_object_pack(&obj->oid))
return; /* it is in pack - forget about it */
- printf("missing %s %s\n", printable_type(obj),
- describe_object(obj));
+ printf_ln(_("missing %s %s"), printable_type(obj),
+ describe_object(obj));
errors_found |= ERROR_REACHABLE;
return;
}
* since this is something that is prunable.
*/
if (show_unreachable) {
- printf("unreachable %s %s\n", printable_type(obj),
- describe_object(obj));
+ printf_ln(_("unreachable %s %s"), printable_type(obj),
+ describe_object(obj));
return;
}
*/
if (!(obj->flags & USED)) {
if (show_dangling)
- printf("dangling %s %s\n", printable_type(obj),
- describe_object(obj));
+ printf_ln(_("dangling %s %s"), printable_type(obj),
+ describe_object(obj));
if (write_lost_and_found) {
char *filename = git_pathdup("lost-found/%s/%s",
obj->type == OBJ_COMMIT ? "commit" : "other",
FILE *f;
if (safe_create_leading_directories_const(filename)) {
- error("Could not create lost-found");
+ error(_("could not create lost-found"));
free(filename);
return;
}
f = xfopen(filename, "w");
if (obj->type == OBJ_BLOB) {
if (stream_blob_to_fd(fileno(f), &obj->oid, NULL, 1))
- die_errno("Could not write '%s'", filename);
+ die_errno(_("could not write '%s'"), filename);
} else
fprintf(f, "%s\n", describe_object(obj));
if (fclose(f))
- die_errno("Could not finish '%s'",
+ die_errno(_("could not finish '%s'"),
filename);
free(filename);
}
static void check_object(struct object *obj)
{
if (verbose)
- fprintf(stderr, "Checking %s\n", describe_object(obj));
+ fprintf_ln(stderr, _("Checking %s"), describe_object(obj));
if (obj->flags & REACHABLE)
check_reachable_object(obj);
/* Look up all the requirements, warn about missing objects.. */
max = get_max_object_index();
if (verbose)
- fprintf(stderr, "Checking connectivity (%d objects)\n", max);
+ fprintf_ln(stderr, _("Checking connectivity (%d objects)"), max);
for (i = 0; i < max; i++) {
struct object *obj = get_indexed_object(i);
obj->flags |= SEEN;
if (verbose)
- fprintf(stderr, "Checking %s %s\n",
- printable_type(obj), describe_object(obj));
+ fprintf_ln(stderr, _("Checking %s %s"),
+ printable_type(obj), describe_object(obj));
if (fsck_walk(obj, NULL, &fsck_obj_options))
- objerror(obj, "broken links");
+ objerror(obj, _("broken links"));
err = fsck_object(obj, buffer, size, &fsck_obj_options);
if (err)
goto out;
struct commit *commit = (struct commit *) obj;
if (!commit->parents && show_root)
- printf("root %s\n", describe_object(&commit->object));
+ printf_ln(_("root %s"),
+ describe_object(&commit->object));
}
if (obj->type == OBJ_TAG) {
struct tag *tag = (struct tag *) obj;
if (show_tags && tag->tagged) {
- printf("tagged %s %s", printable_type(tag->tagged),
- describe_object(tag->tagged));
- printf(" (%s) in %s\n", tag->tag,
- describe_object(&tag->object));
+ printf_ln(_("tagged %s %s (%s) in %s"),
+ printable_type(tag->tagged),
+ describe_object(tag->tagged),
+ tag->tag,
+ describe_object(&tag->object));
}
}
if (obj->type == OBJ_TREE)
free_tree_buffer((struct tree *)obj);
if (obj->type == OBJ_COMMIT)
- free_commit_buffer((struct commit *)obj);
+ free_commit_buffer(the_repository->parsed_objects,
+ (struct commit *)obj);
return err;
}
eaten);
if (!obj) {
errors_found |= ERROR_OBJECT;
- return error("%s: object corrupt or missing", oid_to_hex(oid));
+ return error(_("%s: object corrupt or missing"),
+ oid_to_hex(oid));
}
obj->flags &= ~(REACHABLE | SEEN);
obj->flags |= HAS_OBJ;
obj->flags |= USED;
mark_object_reachable(obj);
} else if (!is_promisor_object(oid)) {
- error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
+ error(_("%s: invalid reflog entry %s"),
+ refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
}
}
const char *refname = cb_data;
if (verbose)
- fprintf(stderr, "Checking reflog %s->%s\n",
- oid_to_hex(ooid), oid_to_hex(noid));
+ fprintf_ln(stderr, _("Checking reflog %s->%s"),
+ oid_to_hex(ooid), oid_to_hex(noid));
fsck_handle_reflog_oid(refname, ooid, 0);
fsck_handle_reflog_oid(refname, noid, timestamp);
static int fsck_handle_reflog(const char *logname, const struct object_id *oid,
int flag, void *cb_data)
{
- for_each_reflog_ent(logname, fsck_handle_reflog_ent, (void *)logname);
+ struct strbuf refname = STRBUF_INIT;
+
+ strbuf_worktree_ref(cb_data, &refname, logname);
+ for_each_reflog_ent(refname.buf, fsck_handle_reflog_ent, refname.buf);
+ strbuf_release(&refname);
return 0;
}
default_refs++;
return 0;
}
- error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
+ error(_("%s: invalid sha1 pointer %s"),
+ refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
return 0;
}
if (obj->type != OBJ_COMMIT && is_branch(refname)) {
- error("%s: not a commit", refname);
+ error(_("%s: not a commit"), refname);
errors_found |= ERROR_REFS;
}
default_refs++;
return 0;
}
+static int fsck_head_link(const char *head_ref_name,
+ const char **head_points_at,
+ struct object_id *head_oid);
+
static void get_default_heads(void)
{
- if (head_points_at && !is_null_oid(&head_oid))
- fsck_handle_ref("HEAD", &head_oid, 0, NULL);
+ struct worktree **worktrees, **p;
+ const char *head_points_at;
+ struct object_id head_oid;
+
for_each_rawref(fsck_handle_ref, NULL);
- if (include_reflogs)
- for_each_reflog(fsck_handle_reflog, NULL);
+
+ worktrees = get_worktrees(0);
+ for (p = worktrees; *p; p++) {
+ struct worktree *wt = *p;
+ struct strbuf ref = STRBUF_INIT;
+
+ strbuf_worktree_ref(wt, &ref, "HEAD");
+ fsck_head_link(ref.buf, &head_points_at, &head_oid);
+ if (head_points_at && !is_null_oid(&head_oid))
+ fsck_handle_ref(ref.buf, &head_oid, 0, NULL);
+ strbuf_release(&ref);
+
+ if (include_reflogs)
+ refs_for_each_reflog(get_worktree_ref_store(wt),
+ fsck_handle_reflog, wt);
+ }
+ free_worktrees(worktrees);
/*
* Not having any default heads isn't really fatal, but
* "show_unreachable" flag.
*/
if (!default_refs) {
- fprintf(stderr, "notice: No default references\n");
+ fprintf_ln(stderr, _("notice: No default references"));
show_unreachable = 0;
}
}
if (read_loose_object(path, oid, &type, &size, &contents) < 0) {
errors_found |= ERROR_OBJECT;
- error("%s: object corrupt or missing: %s",
+ error(_("%s: object corrupt or missing: %s"),
oid_to_hex(oid), path);
return 0; /* keep checking other objects */
}
if (!obj) {
errors_found |= ERROR_OBJECT;
- error("%s: object could not be parsed: %s",
+ error(_("%s: object could not be parsed: %s"),
oid_to_hex(oid), path);
if (!eaten)
free(contents);
static int fsck_cruft(const char *basename, const char *path, void *data)
{
if (!starts_with(basename, "tmp_obj_"))
- fprintf(stderr, "bad sha1 file: %s\n", path);
+ fprintf_ln(stderr, _("bad sha1 file: %s"), path);
return 0;
}
struct progress *progress = NULL;
if (verbose)
- fprintf(stderr, "Checking object directory\n");
+ fprintf_ln(stderr, _("Checking object directory"));
if (show_progress)
progress = start_progress(_("Checking object directories"), 256);
stop_progress(&progress);
}
-static int fsck_head_link(void)
+static int fsck_head_link(const char *head_ref_name,
+ const char **head_points_at,
+ struct object_id *head_oid)
{
int null_is_error = 0;
if (verbose)
- fprintf(stderr, "Checking HEAD link\n");
+ fprintf_ln(stderr, _("Checking %s link"), head_ref_name);
- head_points_at = resolve_ref_unsafe("HEAD", 0, &head_oid, NULL);
- if (!head_points_at) {
+ *head_points_at = resolve_ref_unsafe(head_ref_name, 0, head_oid, NULL);
+ if (!*head_points_at) {
errors_found |= ERROR_REFS;
- return error("Invalid HEAD");
+ return error(_("invalid %s"), head_ref_name);
}
- if (!strcmp(head_points_at, "HEAD"))
+ if (!strcmp(*head_points_at, head_ref_name))
/* detached HEAD */
null_is_error = 1;
- else if (!starts_with(head_points_at, "refs/heads/")) {
+ else if (!starts_with(*head_points_at, "refs/heads/")) {
errors_found |= ERROR_REFS;
- return error("HEAD points to something strange (%s)",
- head_points_at);
+ return error(_("%s points to something strange (%s)"),
+ head_ref_name, *head_points_at);
}
- if (is_null_oid(&head_oid)) {
+ if (is_null_oid(head_oid)) {
if (null_is_error) {
errors_found |= ERROR_REFS;
- return error("HEAD: detached HEAD points at nothing");
+ return error(_("%s: detached HEAD points at nothing"),
+ head_ref_name);
}
- fprintf(stderr, "notice: HEAD points to an unborn branch (%s)\n",
- head_points_at + 11);
+ fprintf_ln(stderr,
+ _("notice: %s points to an unborn branch (%s)"),
+ head_ref_name, *head_points_at + 11);
}
return 0;
}
int err = 0;
if (verbose)
- fprintf(stderr, "Checking cache tree\n");
+ fprintf_ln(stderr, _("Checking cache tree"));
if (0 <= it->entry_count) {
struct object *obj = parse_object(the_repository, &it->oid);
if (!obj) {
- error("%s: invalid sha1 pointer in cache-tree",
+ error(_("%s: invalid sha1 pointer in cache-tree"),
oid_to_hex(&it->oid));
errors_found |= ERROR_REFS;
return 1;
obj, xstrdup(":"));
mark_object_reachable(obj);
if (obj->type != OBJ_TREE)
- err |= objerror(obj, "non-tree in cache-tree");
+ err |= objerror(obj, _("non-tree in cache-tree"));
}
for (i = 0; i < it->subtree_nr; i++)
err |= fsck_cache_tree(it->down[i]->cache_tree);
int cmd_fsck(int argc, const char **argv, const char *prefix)
{
int i;
- struct alternate_object_database *alt;
+ struct object_directory *odb;
/* fsck knows how to handle missing promisor objects */
fetch_if_missing = 0;
git_config(fsck_config, NULL);
- fsck_head_link();
if (connectivity_only) {
for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
} else {
- struct alternate_object_database *alt_odb_list;
-
- fsck_object_dir(get_object_directory());
-
prepare_alt_odb(the_repository);
- alt_odb_list = the_repository->objects->alt_odb_list;
- for (alt = alt_odb_list; alt; alt = alt->next)
- fsck_object_dir(alt->path);
+ for (odb = the_repository->objects->odb; odb; odb = odb->next)
+ fsck_object_dir(odb->path);
if (check_full) {
struct packed_git *p;
for (p = get_all_packs(the_repository); p;
p = p->next) {
/* verify gives error messages itself */
- if (verify_pack(p, fsck_obj_buffer,
+ if (verify_pack(the_repository,
+ p, fsck_obj_buffer,
progress, count))
errors_found |= ERROR_PACK;
count += p->num_objects;
if (!obj || !(obj->flags & HAS_OBJ)) {
if (is_promisor_object(&oid))
continue;
- error("%s: object missing", oid_to_hex(&oid));
+ error(_("%s: object missing"), oid_to_hex(&oid));
errors_found |= ERROR_OBJECT;
continue;
}
mark_object_reachable(obj);
continue;
}
- error("invalid parameter: expected sha1, got '%s'", arg);
+ error(_("invalid parameter: expected sha1, got '%s'"), arg);
errors_found |= ERROR_OBJECT;
}
struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
const char *verify_argv[] = { "commit-graph", "verify", NULL, NULL, NULL };
- commit_graph_verify.argv = verify_argv;
- commit_graph_verify.git_cmd = 1;
- if (run_command(&commit_graph_verify))
- errors_found |= ERROR_COMMIT_GRAPH;
-
prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+ for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ child_process_init(&commit_graph_verify);
+ commit_graph_verify.argv = verify_argv;
+ commit_graph_verify.git_cmd = 1;
verify_argv[2] = "--object-dir";
- verify_argv[3] = alt->path;
+ verify_argv[3] = odb->path;
if (run_command(&commit_graph_verify))
errors_found |= ERROR_COMMIT_GRAPH;
}
struct child_process midx_verify = CHILD_PROCESS_INIT;
const char *midx_argv[] = { "multi-pack-index", "verify", NULL, NULL, NULL };
- midx_verify.argv = midx_argv;
- midx_verify.git_cmd = 1;
- if (run_command(&midx_verify))
- errors_found |= ERROR_COMMIT_GRAPH;
-
prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+ for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ child_process_init(&midx_verify);
+ midx_verify.argv = midx_argv;
+ midx_verify.git_cmd = 1;
midx_argv[2] = "--object-dir";
- midx_argv[3] = alt->path;
+ midx_argv[3] = odb->path;
if (run_command(&midx_verify))
errors_found |= ERROR_COMMIT_GRAPH;
}
{
struct line_opt_callback_data *data = option->value;
+ BUG_ON_OPT_NEG(unset);
+
if (!arg)
return -1;
rev->diffopt.filter || rev->diffopt.flags.follow_renames)
rev->always_show_header = 0;
- if (source) {
+ if (source || w.source) {
init_revision_sources(&revision_sources);
rev->sources = &revision_sources;
}
* We may show a given commit multiple times when
* walking the reflogs.
*/
- free_commit_buffer(commit);
+ free_commit_buffer(the_repository->parsed_objects,
+ commit);
free_commit_list(commit->parents);
commit->parents = NULL;
}
diff_get_color_opt(&rev.diffopt, DIFF_COMMIT),
name,
diff_get_color_opt(&rev.diffopt, DIFF_RESET));
- read_tree_recursive((struct tree *)o, "", 0, 0, &match_all,
- show_tree_object, rev.diffopt.file);
+ read_tree_recursive(the_repository, (struct tree *)o, "",
+ 0, 0, &match_all, show_tree_object,
+ rev.diffopt.file);
rev.shown_one = 1;
break;
case OBJ_COMMIT:
memcpy(&opts, &rev->diffopt, sizeof(opts));
opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
- opts.stat_width = MAIL_DEFAULT_WRAP;
-
diff_setup_done(&opts);
diff_tree_oid(get_commit_tree_oid(origin),
}
if (rev->rdiff1) {
+ /*
+ * Pass minimum required diff-options to range-diff; others
+ * can be added later if deemed desirable.
+ */
+ struct diff_options opts;
+ diff_setup(&opts);
+ opts.file = rev->diffopt.file;
+ opts.use_color = rev->diffopt.use_color;
+ diff_setup_done(&opts);
fprintf_ln(rev->diffopt.file, "%s", rev->rdiff_title);
show_range_diff(rev->rdiff1, rev->rdiff2,
- rev->creation_factor, 1, &rev->diffopt);
+ rev->creation_factor, 1, &opts);
}
}
static int keep_callback(const struct option *opt, const char *arg, int unset)
{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
((struct rev_info *)opt->value)->total = -1;
keep_subject = 1;
return 0;
static int subject_prefix_callback(const struct option *opt, const char *arg,
int unset)
{
+ BUG_ON_OPT_NEG(unset);
subject_prefix = 1;
((struct rev_info *)opt->value)->subject_prefix = arg;
return 0;
static int rfc_callback(const struct option *opt, const char *arg, int unset)
{
+ BUG_ON_OPT_NEG(unset);
+ BUG_ON_OPT_ARG(arg);
return subject_prefix_callback(opt, "RFC PATCH", unset);
}
static int numbered_callback(const struct option *opt, const char *arg,
int unset)
{
+ BUG_ON_OPT_ARG(arg);
*(int *)opt->value = numbered_cmdline_opt = unset ? 0 : 1;
if (unset)
auto_number = 0;
static int no_numbered_callback(const struct option *opt, const char *arg,
int unset)
{
+ BUG_ON_OPT_NEG(unset);
return numbered_callback(opt, arg, 1);
}
int unset)
{
const char **dir = (const char **)opt->value;
+ BUG_ON_OPT_NEG(unset);
if (*dir)
die(_("Two output directories?"));
*dir = arg;
PARSE_OPT_NOARG, numbered_callback },
{ OPTION_CALLBACK, 'N', "no-numbered", &numbered, NULL,
N_("use [PATCH] even with multiple patches"),
- PARSE_OPT_NOARG, no_numbered_callback },
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, no_numbered_callback },
OPT_BOOL('s', "signoff", &do_signoff, N_("add Signed-off-by:")),
OPT_BOOL(0, "stdout", &use_stdout,
N_("print patches to standard out")),
open_next_file(rev.numbered_files ? NULL : commit, NULL, &rev, quiet))
die(_("Failed to create output files"));
shown = log_tree_commit(&rev, commit);
- free_commit_buffer(commit);
+ free_commit_buffer(the_repository->parsed_objects,
+ commit);
/* We put one extra blank line between formatted
* patches and this flag is used by log-tree code
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
+#include "object.h"
#include "object-store.h"
#include "pack.h"
#include "pack-bitmap.h"
free_commit_list(commit->parents);
commit->parents = NULL;
}
- free_commit_buffer(commit);
+ free_commit_buffer(the_repository->parsed_objects,
+ commit);
}
static inline void finish_object__ma(struct object *obj)
*/
switch (arg_missing_action) {
case MA_ERROR:
- die("missing blob object '%s'", oid_to_hex(&obj->oid));
+ die("missing %s object '%s'",
+ type_name(obj->type), oid_to_hex(&obj->oid));
return;
case MA_ALLOW_ANY:
case MA_ALLOW_PROMISOR:
if (is_promisor_object(&obj->oid))
return;
- die("unexpected missing blob object '%s'",
- oid_to_hex(&obj->oid));
+ die("unexpected missing %s object '%s'",
+ type_name(obj->type), oid_to_hex(&obj->oid));
return;
default:
static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) {
+ if (!has_object_file(&obj->oid)) {
finish_object__ma(obj);
return 1;
}
{
struct rev_info revs;
struct rev_list_info info;
+ struct setup_revision_opt s_r_opt = {
+ .allow_exclude_promisor_objects = 1,
+ };
int i;
int bisect_list = 0;
int bisect_show_vars = 0;
repo_init_revisions(the_repository, &revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
+ revs.do_not_die_on_missing_tree = 1;
/*
* Scan the argument list before invoking setup_revisions(), so that we
}
}
- argc = setup_revisions(argc, argv, &revs, NULL);
+ argc = setup_revisions(argc, argv, &revs, &s_r_opt);
memset(&info, 0, sizeof(info));
info.revs = &revs;
#define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
#define GRAPH_CHUNKID_LARGEEDGES 0x45444745 /* "EDGE" */
-#define GRAPH_DATA_WIDTH 36
+#define GRAPH_DATA_WIDTH (the_hash_algo->rawsz + 16)
#define GRAPH_VERSION_1 0x1
#define GRAPH_VERSION GRAPH_VERSION_1
-#define GRAPH_OID_VERSION_SHA1 1
-#define GRAPH_OID_LEN_SHA1 GIT_SHA1_RAWSZ
-#define GRAPH_OID_VERSION GRAPH_OID_VERSION_SHA1
-#define GRAPH_OID_LEN GRAPH_OID_LEN_SHA1
-
#define GRAPH_OCTOPUS_EDGES_NEEDED 0x80000000
-#define GRAPH_PARENT_MISSING 0x7fffffff
#define GRAPH_EDGE_LAST_MASK 0x7fffffff
#define GRAPH_PARENT_NONE 0x70000000
#define GRAPH_FANOUT_SIZE (4 * 256)
#define GRAPH_CHUNKLOOKUP_WIDTH 12
#define GRAPH_MIN_SIZE (GRAPH_HEADER_SIZE + 4 * GRAPH_CHUNKLOOKUP_WIDTH \
- + GRAPH_FANOUT_SIZE + GRAPH_OID_LEN)
+ + GRAPH_FANOUT_SIZE + the_hash_algo->rawsz)
char *get_commit_graph_filename(const char *obj_dir)
{
return xstrfmt("%s/info/commit-graph", obj_dir);
}
+static uint8_t oid_version(void)
+{
+ return 1;
+}
+
static struct commit_graph *alloc_commit_graph(void)
{
struct commit_graph *g = xcalloc(1, sizeof(*g));
}
hash_version = *(unsigned char*)(data + 5);
- if (hash_version != GRAPH_OID_VERSION) {
+ if (hash_version != oid_version()) {
error(_("hash version %X does not match version %X"),
- hash_version, GRAPH_OID_VERSION);
+ hash_version, oid_version());
goto cleanup_fail;
}
graph = alloc_commit_graph();
- graph->hash_len = GRAPH_OID_LEN;
+ graph->hash_len = the_hash_algo->rawsz;
graph->num_chunks = *(unsigned char*)(data + 6);
graph->graph_fd = fd;
graph->data = graph_map;
chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
- if (chunk_offset > graph_size - GIT_MAX_RAWSZ) {
+ if (chunk_offset > graph_size - the_hash_algo->rawsz) {
error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
(uint32_t)chunk_offset);
goto cleanup_fail;
*/
static int prepare_commit_graph(struct repository *r)
{
- struct alternate_object_database *alt;
- char *obj_dir;
+ struct object_directory *odb;
int config_value;
if (r->objects->commit_graph_attempted)
if (!commit_graph_compatible(r))
return 0;
- obj_dir = r->objects->objectdir;
- prepare_commit_graph_one(r, obj_dir);
prepare_alt_odb(r);
- for (alt = r->objects->alt_odb_list;
- !r->objects->commit_graph && alt;
- alt = alt->next)
- prepare_commit_graph_one(r, alt->path);
+ for (odb = r->objects->odb;
+ !r->objects->commit_graph && odb;
+ odb = odb->next)
+ prepare_commit_graph_one(r, odb->path);
return !!r->objects->commit_graph;
}
g->chunk_oid_lookup, g->hash_len, pos);
}
- static struct commit_list **insert_parent_or_die(struct commit_graph *g,
+ static struct commit_list **insert_parent_or_die(struct repository *r,
+ struct commit_graph *g,
uint64_t pos,
struct commit_list **pptr)
{
die("invalid parent position %"PRIu64, pos);
hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
- c = lookup_commit(the_repository, &oid);
+ c = lookup_commit(r, &oid);
if (!c)
die(_("could not find commit %s"), oid_to_hex(&oid));
c->graph_pos = pos;
item->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
}
- static int fill_commit_in_graph(struct commit *item, struct commit_graph *g, uint32_t pos)
+ static int fill_commit_in_graph(struct repository *r,
+ struct commit *item,
+ struct commit_graph *g, uint32_t pos)
{
uint32_t edge_value;
uint32_t *parent_data_ptr;
edge_value = get_be32(commit_data + g->hash_len);
if (edge_value == GRAPH_PARENT_NONE)
return 1;
- pptr = insert_parent_or_die(g, edge_value, pptr);
+ pptr = insert_parent_or_die(r, g, edge_value, pptr);
edge_value = get_be32(commit_data + g->hash_len + 4);
if (edge_value == GRAPH_PARENT_NONE)
return 1;
if (!(edge_value & GRAPH_OCTOPUS_EDGES_NEEDED)) {
- pptr = insert_parent_or_die(g, edge_value, pptr);
+ pptr = insert_parent_or_die(r, g, edge_value, pptr);
return 1;
}
4 * (uint64_t)(edge_value & GRAPH_EDGE_LAST_MASK));
do {
edge_value = get_be32(parent_data_ptr);
- pptr = insert_parent_or_die(g,
+ pptr = insert_parent_or_die(r, g,
edge_value & GRAPH_EDGE_LAST_MASK,
pptr);
parent_data_ptr++;
}
}
- static int parse_commit_in_graph_one(struct commit_graph *g, struct commit *item)
+ static int parse_commit_in_graph_one(struct repository *r,
+ struct commit_graph *g,
+ struct commit *item)
{
uint32_t pos;
return 1;
if (find_commit_in_graph(item, g, &pos))
- return fill_commit_in_graph(item, g, pos);
+ return fill_commit_in_graph(r, item, g, pos);
return 0;
}
{
if (!prepare_commit_graph(r))
return 0;
- return parse_commit_in_graph_one(r->objects->commit_graph, item);
+ return parse_commit_in_graph_one(r, r->objects->commit_graph, item);
}
void load_commit_graph_info(struct repository *r, struct commit *item)
fill_commit_graph_info(item, r->objects->commit_graph, pos);
}
- static struct tree *load_tree_for_commit(struct commit_graph *g, struct commit *c)
+ static struct tree *load_tree_for_commit(struct repository *r,
+ struct commit_graph *g,
+ struct commit *c)
{
struct object_id oid;
const unsigned char *commit_data = g->chunk_commit_data +
GRAPH_DATA_WIDTH * (c->graph_pos);
hashcpy(oid.hash, commit_data);
- c->maybe_tree = lookup_tree(the_repository, &oid);
+ c->maybe_tree = lookup_tree(r, &oid);
return c->maybe_tree;
}
- static struct tree *get_commit_tree_in_graph_one(struct commit_graph *g,
+ static struct tree *get_commit_tree_in_graph_one(struct repository *r,
+ struct commit_graph *g,
const struct commit *c)
{
if (c->maybe_tree)
if (c->graph_pos == COMMIT_NOT_FROM_GRAPH)
BUG("get_commit_tree_in_graph_one called from non-commit-graph commit");
- return load_tree_for_commit(g, (struct commit *)c);
+ return load_tree_for_commit(r, g, (struct commit *)c);
}
struct tree *get_commit_tree_in_graph(struct repository *r, const struct commit *c)
{
- return get_commit_tree_in_graph_one(r->objects->commit_graph, c);
+ return get_commit_tree_in_graph_one(r, r->objects->commit_graph, c);
}
static void write_graph_chunk_fanout(struct hashfile *f,
commit_to_sha1);
if (edge_value < 0)
- edge_value = GRAPH_PARENT_MISSING;
+ BUG("missing parent %s for commit %s",
+ oid_to_hex(&parent->item->object.oid),
+ oid_to_hex(&(*list)->object.oid));
}
hashwrite_be32(f, edge_value);
nr_commits,
commit_to_sha1);
if (edge_value < 0)
- edge_value = GRAPH_PARENT_MISSING;
+ BUG("missing parent %s for commit %s",
+ oid_to_hex(&parent->item->object.oid),
+ oid_to_hex(&(*list)->object.oid));
}
hashwrite_be32(f, edge_value);
commit_to_sha1);
if (edge_value < 0)
- edge_value = GRAPH_PARENT_MISSING;
+ BUG("missing parent %s for commit %s",
+ oid_to_hex(&parent->item->object.oid),
+ oid_to_hex(&(*list)->object.oid));
else if (!parent->next)
edge_value |= GRAPH_LAST_EDGE;
static void close_reachable(struct packed_oid_list *oids, int report_progress)
{
- int i;
+ int i, j;
struct commit *commit;
struct progress *progress = NULL;
- int j = 0;
if (report_progress)
progress = start_delayed_progress(
- _("Annotating commits in commit graph"), 0);
+ _("Loading known commits in commit graph"), j = 0);
for (i = 0; i < oids->nr; i++) {
display_progress(progress, ++j);
commit = lookup_commit(the_repository, &oids->list[i]);
if (commit)
commit->object.flags |= UNINTERESTING;
}
+ stop_progress(&progress);
/*
* As this loop runs, oids->nr may grow, but not more
* than the number of missing commits in the reachable
* closure.
*/
+ if (report_progress)
+ progress = start_delayed_progress(
+ _("Expanding reachable commits in commit graph"), j = 0);
for (i = 0; i < oids->nr; i++) {
display_progress(progress, ++j);
commit = lookup_commit(the_repository, &oids->list[i]);
if (commit && !parse_commit(commit))
add_missing_parents(oids, commit);
}
+ stop_progress(&progress);
+ if (report_progress)
+ progress = start_delayed_progress(
+ _("Clearing commit marks in commit graph"), j = 0);
for (i = 0; i < oids->nr; i++) {
display_progress(progress, ++j);
commit = lookup_commit(the_repository, &oids->list[i]);
int num_extra_edges;
struct commit_list *parent;
struct progress *progress = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!commit_graph_compatible(the_repository))
return;
count_distinct++;
}
- if (count_distinct >= GRAPH_PARENT_MISSING)
+ if (count_distinct >= GRAPH_EDGE_LAST_MASK)
die(_("the commit graph format cannot write %d commits"), count_distinct);
commits.nr = 0;
}
num_chunks = num_extra_edges ? 4 : 3;
- if (commits.nr >= GRAPH_PARENT_MISSING)
+ if (commits.nr >= GRAPH_EDGE_LAST_MASK)
die(_("too many commits to write graph"));
compute_generation_numbers(&commits, report_progress);
hashwrite_be32(f, GRAPH_SIGNATURE);
hashwrite_u8(f, GRAPH_VERSION);
- hashwrite_u8(f, GRAPH_OID_VERSION);
+ hashwrite_u8(f, oid_version());
hashwrite_u8(f, num_chunks);
hashwrite_u8(f, 0); /* unused padding byte */
chunk_offsets[0] = 8 + (num_chunks + 1) * GRAPH_CHUNKLOOKUP_WIDTH;
chunk_offsets[1] = chunk_offsets[0] + GRAPH_FANOUT_SIZE;
- chunk_offsets[2] = chunk_offsets[1] + GRAPH_OID_LEN * commits.nr;
- chunk_offsets[3] = chunk_offsets[2] + (GRAPH_OID_LEN + 16) * commits.nr;
+ chunk_offsets[2] = chunk_offsets[1] + hashsz * commits.nr;
+ chunk_offsets[3] = chunk_offsets[2] + (hashsz + 16) * commits.nr;
chunk_offsets[4] = chunk_offsets[3] + 4 * num_extra_edges;
for (i = 0; i <= num_chunks; i++) {
}
write_graph_chunk_fanout(f, commits.list, commits.nr);
- write_graph_chunk_oids(f, GRAPH_OID_LEN, commits.list, commits.nr);
- write_graph_chunk_data(f, GRAPH_OID_LEN, commits.list, commits.nr);
+ write_graph_chunk_oids(f, hashsz, commits.list, commits.nr);
+ write_graph_chunk_data(f, hashsz, commits.list, commits.nr);
write_graph_chunk_large_edges(f, commits.list, commits.nr);
close_commit_graph(the_repository);
}
graph_commit = lookup_commit(r, &cur_oid);
- if (!parse_commit_in_graph_one(g, graph_commit))
+ if (!parse_commit_in_graph_one(r, g, graph_commit))
graph_report("failed to parse %s from commit-graph",
oid_to_hex(&cur_oid));
}
continue;
}
- if (!oideq(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
+ if (!oideq(&get_commit_tree_in_graph_one(r, g, graph_commit)->object.oid,
get_commit_tree_oid(odb_commit)))
graph_report("root tree OID for commit %s in commit-graph is %s != %s",
oid_to_hex(&cur_oid),
}
/* all input commits in one and twos[] must have been parsed! */
- static struct commit_list *paint_down_to_common(struct commit *one, int n,
+ static struct commit_list *paint_down_to_common(struct repository *r,
+ struct commit *one, int n,
struct commit **twos,
int min_generation)
{
parents = parents->next;
if ((p->object.flags & flags) == flags)
continue;
- if (parse_commit(p))
+ if (repo_parse_commit(r, p))
return NULL;
p->object.flags |= flags;
prio_queue_put(&queue, p);
return result;
}
- static struct commit_list *merge_bases_many(struct commit *one, int n, struct commit **twos)
+ static struct commit_list *merge_bases_many(struct repository *r,
+ struct commit *one, int n,
+ struct commit **twos)
{
struct commit_list *list = NULL;
struct commit_list *result = NULL;
return commit_list_insert(one, &result);
}
- if (parse_commit(one))
+ if (repo_parse_commit(r, one))
return NULL;
for (i = 0; i < n; i++) {
- if (parse_commit(twos[i]))
+ if (repo_parse_commit(r, twos[i]))
return NULL;
}
- list = paint_down_to_common(one, n, twos, 0);
+ list = paint_down_to_common(r, one, n, twos, 0);
while (list) {
struct commit *commit = pop_commit(&list);
return ret;
}
- static int remove_redundant(struct commit **array, int cnt)
+ static int remove_redundant(struct repository *r, struct commit **array, int cnt)
{
/*
* Some commit in the array may be an ancestor of
ALLOC_ARRAY(filled_index, cnt - 1);
for (i = 0; i < cnt; i++)
- parse_commit(array[i]);
+ repo_parse_commit(r, array[i]);
for (i = 0; i < cnt; i++) {
struct commit_list *common;
uint32_t min_generation = array[i]->generation;
if (array[j]->generation < min_generation)
min_generation = array[j]->generation;
}
- common = paint_down_to_common(array[i], filled, work,
- min_generation);
+ common = paint_down_to_common(r, array[i], filled,
+ work, min_generation);
if (array[i]->object.flags & PARENT2)
redundant[i] = 1;
for (j = 0; j < filled; j++)
return filled;
}
- static struct commit_list *get_merge_bases_many_0(struct commit *one,
+ static struct commit_list *get_merge_bases_many_0(struct repository *r,
+ struct commit *one,
int n,
struct commit **twos,
int cleanup)
struct commit_list *result;
int cnt, i;
- result = merge_bases_many(one, n, twos);
+ result = merge_bases_many(r, one, n, twos);
for (i = 0; i < n; i++) {
if (one == twos[i])
return result;
clear_commit_marks(one, all_flags);
clear_commit_marks_many(n, twos, all_flags);
- cnt = remove_redundant(rslt, cnt);
+ cnt = remove_redundant(r, rslt, cnt);
result = NULL;
for (i = 0; i < cnt; i++)
commit_list_insert_by_date(rslt[i], &result);
return result;
}
- struct commit_list *get_merge_bases_many(struct commit *one,
- int n,
- struct commit **twos)
+ struct commit_list *repo_get_merge_bases_many(struct repository *r,
+ struct commit *one,
+ int n,
+ struct commit **twos)
{
- return get_merge_bases_many_0(one, n, twos, 1);
+ return get_merge_bases_many_0(r, one, n, twos, 1);
}
- struct commit_list *get_merge_bases_many_dirty(struct commit *one,
- int n,
- struct commit **twos)
+ struct commit_list *repo_get_merge_bases_many_dirty(struct repository *r,
+ struct commit *one,
+ int n,
+ struct commit **twos)
{
- return get_merge_bases_many_0(one, n, twos, 0);
+ return get_merge_bases_many_0(r, one, n, twos, 0);
}
- struct commit_list *get_merge_bases(struct commit *one, struct commit *two)
+ struct commit_list *repo_get_merge_bases(struct repository *r,
+ struct commit *one,
+ struct commit *two)
{
- return get_merge_bases_many_0(one, 1, &two, 1);
+ return get_merge_bases_many_0(r, one, 1, &two, 1);
}
/*
/*
* Is "commit" an ancestor of one of the "references"?
*/
- int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference)
+ int repo_in_merge_bases_many(struct repository *r, struct commit *commit,
+ int nr_reference, struct commit **reference)
{
struct commit_list *bases;
int ret = 0, i;
uint32_t min_generation = GENERATION_NUMBER_INFINITY;
- if (parse_commit(commit))
+ if (repo_parse_commit(r, commit))
return ret;
for (i = 0; i < nr_reference; i++) {
- if (parse_commit(reference[i]))
+ if (repo_parse_commit(r, reference[i]))
return ret;
if (reference[i]->generation < min_generation)
min_generation = reference[i]->generation;
if (commit->generation > min_generation)
return ret;
- bases = paint_down_to_common(commit, nr_reference, reference, commit->generation);
+ bases = paint_down_to_common(r, commit,
+ nr_reference, reference,
+ commit->generation);
if (commit->object.flags & PARENT2)
ret = 1;
clear_commit_marks(commit, all_flags);
/*
* Is "commit" an ancestor of (i.e. reachable from) the "reference"?
*/
- int in_merge_bases(struct commit *commit, struct commit *reference)
+ int repo_in_merge_bases(struct repository *r,
+ struct commit *commit,
+ struct commit *reference)
{
- return in_merge_bases_many(commit, 1, &reference);
+ return repo_in_merge_bases_many(r, commit, 1, &reference);
}
struct commit_list *reduce_heads(struct commit_list *heads)
p->item->object.flags &= ~STALE;
}
}
- num_head = remove_redundant(array, num_head);
+ num_head = remove_redundant(the_repository, array, num_head);
for (i = 0; i < num_head; i++)
tail = &commit_list_insert(array[i], tail)->next;
free(array);
static int compare_commits_by_gen(const void *_a, const void *_b)
{
- const struct commit *a = (const struct commit *)_a;
- const struct commit *b = (const struct commit *)_b;
+ const struct commit *a = *(const struct commit * const *)_a;
+ const struct commit *b = *(const struct commit * const *)_b;
if (a->generation < b->generation)
return -1;
while (stack) {
struct commit_list *parent;
- if (stack->item->object.flags & with_flag) {
+ if (stack->item->object.flags & (with_flag | RESULT)) {
pop_commit(&stack);
+ if (stack)
+ stack->item->object.flags |= RESULT;
continue;
}
object_array_clear(&from_objs);
return result;
}
+
+struct commit_list *get_reachable_subset(struct commit **from, int nr_from,
+ struct commit **to, int nr_to,
+ unsigned int reachable_flag)
+{
+ struct commit **item;
+ struct commit *current;
+ struct commit_list *found_commits = NULL;
+ struct commit **to_last = to + nr_to;
+ struct commit **from_last = from + nr_from;
+ uint32_t min_generation = GENERATION_NUMBER_INFINITY;
+ int num_to_find = 0;
+
+ struct prio_queue queue = { compare_commits_by_gen_then_commit_date };
+
+ for (item = to; item < to_last; item++) {
+ struct commit *c = *item;
+
+ parse_commit(c);
+ if (c->generation < min_generation)
+ min_generation = c->generation;
+
+ if (!(c->object.flags & PARENT1)) {
+ c->object.flags |= PARENT1;
+ num_to_find++;
+ }
+ }
+
+ for (item = from; item < from_last; item++) {
+ struct commit *c = *item;
+ if (!(c->object.flags & PARENT2)) {
+ c->object.flags |= PARENT2;
+ parse_commit(c);
+
+ prio_queue_put(&queue, *item);
+ }
+ }
+
+ while (num_to_find && (current = prio_queue_get(&queue)) != NULL) {
+ struct commit_list *parents;
+
+ if (current->object.flags & PARENT1) {
+ current->object.flags &= ~PARENT1;
+ current->object.flags |= reachable_flag;
+ commit_list_insert(current, &found_commits);
+ num_to_find--;
+ }
+
+ for (parents = current->parents; parents; parents = parents->next) {
+ struct commit *p = parents->item;
+
+ parse_commit(p);
+
+ if (p->generation < min_generation)
+ continue;
+
+ if (p->object.flags & PARENT2)
+ continue;
+
+ p->object.flags |= PARENT2;
+ prio_queue_put(&queue, p);
+ }
+ }
+
+ clear_commit_marks_many(nr_to, to, PARENT1);
+ clear_commit_marks_many(nr_from, from, PARENT2);
+
+ return found_commits;
+}
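
As a reviewer's aid, a minimal sketch of how the new helper is meant to be called (the from/to arrays and the MY_REACHABLE bit are hypothetical and only illustrate the contract; the PARENT1/PARENT2 caveat is documented in the header hunk below):

	/*
	 * "from" and "to" are caller-owned commit arrays whose objects
	 * must not have PARENT1/PARENT2 set on entry; MY_REACHABLE is a
	 * flag bit owned by the caller.
	 */
	struct commit_list *reachable, *p;

	reachable = get_reachable_subset(from, nr_from, to, nr_to,
					 MY_REACHABLE);
	for (p = reachable; p; p = p->next)
		; /* p->item is a "to" commit reachable from some "from" commit */
	free_commit_list(reachable);
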
-#ifndef __COMMIT_REACH_H__
-#define __COMMIT_REACH_H__
+#ifndef COMMIT_REACH_H
+#define COMMIT_REACH_H
+#include "commit.h"
#include "commit-slab.h"
-struct commit;
struct commit_list;
-struct contains_cache;
struct ref_filter;
+struct object_id;
+struct object_array;
- struct commit_list *get_merge_bases_many(struct commit *one,
- int n,
- struct commit **twos);
- struct commit_list *get_merge_bases_many_dirty(struct commit *one,
- int n,
- struct commit **twos);
- struct commit_list *get_merge_bases(struct commit *one, struct commit *two);
- struct commit_list *get_octopus_merge_bases(struct commit_list *in);
-
+ struct commit_list *repo_get_merge_bases(struct repository *r,
+ struct commit *rev1,
+ struct commit *rev2);
+ struct commit_list *repo_get_merge_bases_many(struct repository *r,
+ struct commit *one, int n,
+ struct commit **twos);
/* To be used only when object flags after this call no longer matter */
- struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos);
+ struct commit_list *repo_get_merge_bases_many_dirty(struct repository *r,
+ struct commit *one, int n,
+ struct commit **twos);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define get_merge_bases(r1, r2) repo_get_merge_bases(the_repository, r1, r2)
+ #define get_merge_bases_many(one, n, two) repo_get_merge_bases_many(the_repository, one, n, two)
+ #define get_merge_bases_many_dirty(one, n, twos) repo_get_merge_bases_many_dirty(the_repository, one, n, twos)
+ #endif
+
+ struct commit_list *get_octopus_merge_bases(struct commit_list *in);
int is_descendant_of(struct commit *commit, struct commit_list *with_commit);
- int in_merge_bases_many(struct commit *commit, int nr_reference, struct commit **reference);
- int in_merge_bases(struct commit *commit, struct commit *reference);
+ int repo_in_merge_bases(struct repository *r,
+ struct commit *commit,
+ struct commit *reference);
+ int repo_in_merge_bases_many(struct repository *r,
+ struct commit *commit,
+ int nr_reference, struct commit **reference);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define in_merge_bases(c1, c2) repo_in_merge_bases(the_repository, c1, c2)
+ #define in_merge_bases_many(c1, n, cs) repo_in_merge_bases_many(the_repository, c1, n, cs)
+ #endif
/*
* Takes a list of commits and returns a new list where those
int can_all_from_reach(struct commit_list *from, struct commit_list *to,
int commit_date_cutoff);
+
+/*
+ * Return a list of commits containing the commits in the 'to' array
+ * that are reachable from at least one commit in the 'from' array.
+ * Also add the given 'flag' to each of the commits in the returned list.
+ *
+ * This method uses the PARENT1 and PARENT2 flags during its operation,
+ * so be sure these flags are not set before calling the method.
+ */
+struct commit_list *get_reachable_subset(struct commit **from, int nr_from,
+ struct commit **to, int nr_to,
+ unsigned int reachable_flag);
+
#endif
#include "sha1-lookup.h"
#include "wt-status.h"
#include "advice.h"
+#include "refs.h"
+#include "commit-reach.h"
static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
return v->buffer;
}
- const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
+ const void *repo_get_commit_buffer(struct repository *r,
+ const struct commit *commit,
+ unsigned long *sizep)
{
- const void *ret = get_cached_commit_buffer(the_repository, commit, sizep);
+ const void *ret = get_cached_commit_buffer(r, commit, sizep);
if (!ret) {
enum object_type type;
unsigned long size;
- ret = read_object_file(&commit->object.oid, &type, &size);
+ ret = repo_read_object_file(r, &commit->object.oid, &type, &size);
if (!ret)
die("cannot read commit object %s",
oid_to_hex(&commit->object.oid));
return ret;
}
- void unuse_commit_buffer(const struct commit *commit, const void *buffer)
+ void repo_unuse_commit_buffer(struct repository *r,
+ const struct commit *commit,
+ const void *buffer)
{
struct commit_buffer *v = buffer_slab_peek(
- the_repository->parsed_objects->buffer_slab, commit);
+ r->parsed_objects->buffer_slab, commit);
if (!(v && v->buffer == buffer))
free((void *)buffer);
}
- void free_commit_buffer(struct commit *commit)
+ void free_commit_buffer(struct parsed_object_pool *pool, struct commit *commit)
{
struct commit_buffer *v = buffer_slab_peek(
- the_repository->parsed_objects->buffer_slab, commit);
+ pool->buffer_slab, commit);
if (v) {
FREE_AND_NULL(v->buffer);
v->size = 0;
return &get_commit_tree(commit)->object.oid;
}
- void release_commit_memory(struct commit *c)
+ void release_commit_memory(struct parsed_object_pool *pool, struct commit *c)
{
c->maybe_tree = NULL;
c->index = 0;
- free_commit_buffer(c);
+ free_commit_buffer(pool, c);
free_commit_list(c->parents);
- /* TODO: what about commit->util? */
c->object.parsed = 0;
}
return 0;
}
- int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph)
+ int repo_parse_commit_internal(struct repository *r,
+ struct commit *item,
+ int quiet_on_missing,
+ int use_commit_graph)
{
enum object_type type;
void *buffer;
return -1;
if (item->object.parsed)
return 0;
- if (use_commit_graph && parse_commit_in_graph(the_repository, item))
+ if (use_commit_graph && parse_commit_in_graph(r, item))
return 0;
- buffer = read_object_file(&item->object.oid, &type, &size);
+ buffer = repo_read_object_file(r, &item->object.oid, &type, &size);
if (!buffer)
return quiet_on_missing ? -1 :
error("Could not read %s",
oid_to_hex(&item->object.oid));
}
- ret = parse_commit_buffer(the_repository, item, buffer, size, 0);
+ ret = parse_commit_buffer(r, item, buffer, size, 0);
if (save_commit_buffer && !ret) {
- set_commit_buffer(the_repository, item, buffer, size);
+ set_commit_buffer(r, item, buffer, size);
return 0;
}
free(buffer);
return ret;
}
- int parse_commit_gently(struct commit *item, int quiet_on_missing)
+ int repo_parse_commit_gently(struct repository *r,
+ struct commit *item, int quiet_on_missing)
{
- return parse_commit_internal(item, quiet_on_missing, 1);
+ return repo_parse_commit_internal(r, item, quiet_on_missing, 1);
}
void parse_commit_or_die(struct commit *item)
/* count number of children that have not been emitted */
define_commit_slab(indegree_slab, int);
-/* record author-date for each commit object */
define_commit_slab(author_date_slab, timestamp_t);
-static void record_author_date(struct author_date_slab *author_date,
- struct commit *commit)
+void record_author_date(struct author_date_slab *author_date,
+ struct commit *commit)
{
const char *buffer = get_commit_buffer(commit, NULL);
struct ident_split ident;
unuse_commit_buffer(commit, buffer);
}
-static int compare_commits_by_author_date(const void *a_, const void *b_,
- void *cb_data)
+int compare_commits_by_author_date(const void *a_, const void *b_,
+ void *cb_data)
{
const struct commit *a = a_, *b = b_;
struct author_date_slab *author_date = cb_data;
clear_author_date_slab(&author_date);
}
+struct rev_collect {
+ struct commit **commit;
+ int nr;
+ int alloc;
+ unsigned int initial : 1;
+};
+
+static void add_one_commit(struct object_id *oid, struct rev_collect *revs)
+{
+ struct commit *commit;
+
+ if (is_null_oid(oid))
+ return;
+
+ commit = lookup_commit(the_repository, oid);
+ if (!commit ||
+ (commit->object.flags & TMP_MARK) ||
+ parse_commit(commit))
+ return;
+
+ ALLOC_GROW(revs->commit, revs->nr + 1, revs->alloc);
+ revs->commit[revs->nr++] = commit;
+ commit->object.flags |= TMP_MARK;
+}
+
+static int collect_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *ident, timestamp_t timestamp,
+ int tz, const char *message, void *cbdata)
+{
+ struct rev_collect *revs = cbdata;
+
+ if (revs->initial) {
+ revs->initial = 0;
+ add_one_commit(ooid, revs);
+ }
+ add_one_commit(noid, revs);
+ return 0;
+}
+
+struct commit *get_fork_point(const char *refname, struct commit *commit)
+{
+ struct object_id oid;
+ struct rev_collect revs;
+ struct commit_list *bases;
+ int i;
+ struct commit *ret = NULL;
+
+ memset(&revs, 0, sizeof(revs));
+ revs.initial = 1;
+ for_each_reflog_ent(refname, collect_one_reflog_ent, &revs);
+
+ if (!revs.nr && !get_oid(refname, &oid))
+ add_one_commit(&oid, &revs);
+
+ for (i = 0; i < revs.nr; i++)
+ revs.commit[i]->object.flags &= ~TMP_MARK;
+
+ bases = get_merge_bases_many(commit, revs.nr, revs.commit);
+
+ /*
+ * There should be one and only one merge base, when we found
+ * a common ancestor among reflog entries.
+ */
+ if (!bases || bases->next)
+ goto cleanup_return;
+
+ /* And the found one must be one of the reflog entries */
+ for (i = 0; i < revs.nr; i++)
+ if (&bases->item->object == &revs.commit[i]->object)
+ break; /* found */
+ if (revs.nr <= i)
+ goto cleanup_return;
+
+ ret = bases->item;
+
+cleanup_return:
+ free_commit_list(bases);
+ return ret;
+}
+
static const char gpg_sig_header[] = "gpgsig";
static const int gpg_sig_header_len = sizeof(gpg_sig_header) - 1;
return ret;
}
+void verify_merge_signature(struct commit *commit, int verbosity)
+{
+ char hex[GIT_MAX_HEXSZ + 1];
+ struct signature_check signature_check;
+ memset(&signature_check, 0, sizeof(signature_check));
+
+ check_commit_signature(commit, &signature_check);
+ find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV);
+ switch (signature_check.result) {
+ case 'G':
+ break;
+ case 'U':
+ die(_("Commit %s has an untrusted GPG signature, "
+ "allegedly by %s."), hex, signature_check.signer);
+ case 'B':
+ die(_("Commit %s has a bad GPG signature "
+ "allegedly by %s."), hex, signature_check.signer);
+ default: /* 'N' */
+ die(_("Commit %s does not have a GPG signature."), hex);
+ }
+ if (verbosity >= 0 && signature_check.result == 'G')
+ printf(_("Commit %s has a good GPG signature by %s\n"),
+ hex, signature_check.signer);
+
+ signature_check_clear(&signature_check);
+}
void append_merge_tag_headers(struct commit_list *parents,
struct commit_extra_header ***tail)
#include "gpg-interface.h"
#include "string-list.h"
#include "pretty.h"
+#include "commit-slab.h"
#define COMMIT_NOT_FROM_GRAPH 0xFFFFFFFF
#define GENERATION_NUMBER_INFINITY 0xFFFFFFFF
struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name);
int parse_commit_buffer(struct repository *r, struct commit *item, const void *buffer, unsigned long size, int check_graph);
- int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph);
- int parse_commit_gently(struct commit *item, int quiet_on_missing);
- static inline int parse_commit(struct commit *item)
+ int repo_parse_commit_internal(struct repository *r, struct commit *item,
+ int quiet_on_missing, int use_commit_graph);
+ int repo_parse_commit_gently(struct repository *r,
+ struct commit *item,
+ int quiet_on_missing);
+ static inline int repo_parse_commit(struct repository *r, struct commit *item)
{
- return parse_commit_gently(item, 0);
+ return repo_parse_commit_gently(r, item, 0);
}
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define parse_commit_internal(item, quiet, use) repo_parse_commit_internal(the_repository, item, quiet, use)
+ #define parse_commit_gently(item, quiet) repo_parse_commit_gently(the_repository, item, quiet)
+ #define parse_commit(item) repo_parse_commit(the_repository, item)
+ #endif
+
void parse_commit_or_die(struct commit *item);
struct buffer_slab;
* from disk. The resulting memory should not be modified, and must be given
* to unuse_commit_buffer when the caller is done.
*/
- const void *get_commit_buffer(const struct commit *, unsigned long *size);
+ const void *repo_get_commit_buffer(struct repository *r,
+ const struct commit *,
+ unsigned long *size);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define get_commit_buffer(c, s) repo_get_commit_buffer(the_repository, c, s)
+ #endif
/*
 * Tell the commit subsystem that we are done with a particular commit buffer.
* from an earlier call to get_commit_buffer. The buffer may or may not be
* freed by this call; callers should not access the memory afterwards.
*/
- void unuse_commit_buffer(const struct commit *, const void *buffer);
+ void repo_unuse_commit_buffer(struct repository *r,
+ const struct commit *,
+ const void *buffer);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define unuse_commit_buffer(c, b) repo_unuse_commit_buffer(the_repository, c, b)
+ #endif
/*
* Free any cached object buffer associated with the commit.
*/
- void free_commit_buffer(struct commit *);
+ void free_commit_buffer(struct parsed_object_pool *pool, struct commit *);
struct tree *get_commit_tree(const struct commit *);
struct object_id *get_commit_tree_oid(const struct commit *);
* Release memory related to a commit, including the parent list and
* any cached object buffer.
*/
- void release_commit_memory(struct commit *c);
+ void release_commit_memory(struct parsed_object_pool *pool, struct commit *c);
/*
* Disassociate any cached object buffer from the commit, but do not free it.
extern const char *logmsg_reencode(const struct commit *commit,
char **commit_encoding,
const char *output_encoding);
+ const char *repo_logmsg_reencode(struct repository *r,
+ const struct commit *commit,
+ char **commit_encoding,
+ const char *output_encoding);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define logmsg_reencode(c, enc, out) repo_logmsg_reencode(the_repository, c, enc, out)
+ #endif
+
extern const char *skip_blank_lines(const char *msg);
/** Removes the first commit from a list sorted by date, and adds all
void prepare_commit_graft(struct repository *r);
struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid);
+struct commit *get_fork_point(const char *refname, struct commit *commit);
+
/* largest positive number a signed 32-bit integer can contain */
#define INFINITE_DEPTH 0x7fffffff
uint32_t **used,
int *ref_status);
extern int delayed_reachability_test(struct shallow_info *si, int c);
-extern void prune_shallow(int show_only);
+#define PRUNE_SHOW_ONLY 1
+#define PRUNE_QUICK 2
+extern void prune_shallow(unsigned options);
extern struct trace_key trace_shallow;
extern int interactive_add(int argc, const char **argv, const char *prefix, int patch);
*/
extern int check_commit_signature(const struct commit *commit, struct signature_check *sigc);
+/* record author-date for each commit object */
+struct author_date_slab;
+void record_author_date(struct author_date_slab *author_date,
+ struct commit *commit);
+
+int compare_commits_by_author_date(const void *a_, const void *b_, void *unused);
+
+/*
+ * Verify a single commit with check_commit_signature() and die() if it is not
+ * a good signature. This isn't really suitable for general use, but is a
+ * helper to implement consistent logic for pull/merge --verify-signatures.
+ */
+void verify_merge_signature(struct commit *commit, int verbose);
+
int compare_commits_by_commit_date(const void *a_, const void *b_, void *unused);
int compare_commits_by_gen_then_commit_date(const void *a_, const void *b_, void *unused);
#include "sha1-array.h"
#include "strbuf.h"
-struct alternate_object_database {
- struct alternate_object_database *next;
-
- /* see alt_scratch_buf() */
- struct strbuf scratch;
- size_t base_len;
+struct object_directory {
+ struct object_directory *next;
/*
- * Used to store the results of readdir(3) calls when searching
- * for unique abbreviated hashes. This cache is never
- * invalidated, thus it's racy and not necessarily accurate.
- * That's fine for its purpose; don't use it for tasks requiring
- * greater accuracy!
+ * Used to store the results of readdir(3) calls when we are OK
+ * sacrificing accuracy due to races for speed. That includes
+ * object existence with OBJECT_INFO_QUICK, as well as
+ * our search for unique abbreviated hashes. Don't use it for tasks
+ * requiring greater accuracy!
+ *
+ * Be sure to call odb_load_loose_cache() before using.
*/
char loose_objects_subdir_seen[256];
- struct oid_array loose_objects_cache;
+ struct oid_array loose_objects_cache[256];
/*
* Path to the alternative object store. If this is a relative path,
* it is relative to the current working directory.
*/
- char path[FLEX_ARRAY];
+ char *path;
};
+
void prepare_alt_odb(struct repository *r);
char *compute_alternate_path(const char *path, struct strbuf *err);
-typedef int alt_odb_fn(struct alternate_object_database *, void *);
+typedef int alt_odb_fn(struct object_directory *, void *);
int foreach_alt_odb(alt_odb_fn, void*);
-/*
- * Allocate a "struct alternate_object_database" but do _not_ actually
- * add it to the list of alternates.
- */
-struct alternate_object_database *alloc_alt_odb(const char *dir);
-
/*
* Add the directory to the on-disk alternates file; the new entry will also
* take effect in the current process.
void add_to_alternates_memory(const char *dir);
/*
- * Returns a scratch strbuf pre-filled with the alternate object directory,
- * including a trailing slash, which can be used to access paths in the
- * alternate. Always use this over direct access to alt->scratch, as it
- * cleans up any previous use of the scratch buffer.
+ * Populate and return the loose object cache array corresponding to the
+ * given object ID.
*/
-struct strbuf *alt_scratch_buf(struct alternate_object_database *alt);
+struct oid_array *odb_loose_cache(struct object_directory *odb,
+ const struct object_id *oid);
+
+/* Empty the loose object cache for the specified object directory. */
+void odb_clear_loose_cache(struct object_directory *odb);
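
odb_loose_cache() fills the per-subdirectory cache on demand and odb_clear_loose_cache() discards it. A caller sketch, patterned after the quick_has_loose() helper that appears later in this series:

#include "cache.h"
#include "object-store.h"
#include "sha1-array.h"

/*
 * Sketch: check the racy loose-object cache of every object directory
 * for 'oid', then drop the caches again.
 */
static int loose_cache_contains(struct repository *r, const struct object_id *oid)
{
	struct object_directory *odb;
	int found = 0;

	prepare_alt_odb(r);
	for (odb = r->objects->odb; odb; odb = odb->next)
		if (oid_array_lookup(odb_loose_cache(odb, oid), oid) >= 0)
			found = 1;

	for (odb = r->objects->odb; odb; odb = odb->next)
		odb_clear_loose_cache(odb);

	return found;
}
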
struct packed_git {
struct packed_git *next;
struct raw_object_store {
/*
- * Path to the repository's object store.
- * Cannot be NULL after initialization.
+ * Set of all object directories; the main directory is first (and
+ * cannot be NULL after initialization). Subsequent directories are
+ * alternates.
*/
- char *objectdir;
+ struct object_directory *odb;
+ struct object_directory **odb_tail;
+ int loaded_alternates;
- /* Path to extra alternate object database if not NULL */
+ /*
+ * A list of alternate object directories loaded from the environment;
+ * this should not generally need to be accessed directly, but will
+ * populate the "odb" list when prepare_alt_odb() is run.
+ */
char *alternate_db;
- struct alternate_object_database *alt_odb_list;
- struct alternate_object_database **alt_odb_tail;
-
/*
* Objects that should be substituted by other objects
* (see git-replace(1)).
* Put in `buf` the name of the file in the local object database that
* would be used to store a loose object with the specified sha1.
*/
-void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1);
+const char *loose_object_path(struct repository *r, struct strbuf *buf, const unsigned char *sha1);
void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size);
- extern void *read_object_file_extended(const struct object_id *oid,
+ extern void *read_object_file_extended(struct repository *r,
+ const struct object_id *oid,
enum object_type *type,
unsigned long *size, int lookup_replace);
- static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
+ static inline void *repo_read_object_file(struct repository *r,
+ const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size)
{
- return read_object_file_extended(oid, type, size, 1);
+ return read_object_file_extended(r, oid, type, size, 1);
}
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define read_object_file(oid, type, size) repo_read_object_file(the_repository, oid, type, size)
+ #endif
/* Read and unpack an object file into memory, write memory to an object file */
int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
* object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass
* nonzero flags to also set other flags.
*/
- extern int has_sha1_file_with_flags(const unsigned char *sha1, int flags);
- static inline int has_sha1_file(const unsigned char *sha1)
+ int repo_has_sha1_file_with_flags(struct repository *r,
+ const unsigned char *sha1, int flags);
+ static inline int repo_has_sha1_file(struct repository *r,
+ const unsigned char *sha1)
{
- return has_sha1_file_with_flags(sha1, 0);
+ return repo_has_sha1_file_with_flags(r, sha1, 0);
}
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define has_sha1_file_with_flags(sha1, flags) repo_has_sha1_file_with_flags(the_repository, sha1, flags)
+ #define has_sha1_file(sha1) repo_has_sha1_file(the_repository, sha1)
+ #endif
+
/* Same as the above, except for struct object_id. */
- extern int has_object_file(const struct object_id *oid);
- extern int has_object_file_with_flags(const struct object_id *oid, int flags);
+ int repo_has_object_file(struct repository *r, const struct object_id *oid);
+ int repo_has_object_file_with_flags(struct repository *r,
+ const struct object_id *oid, int flags);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define has_object_file(oid) repo_has_object_file(the_repository, oid)
+ #define has_object_file_with_flags(oid, flags) repo_has_object_file_with_flags(the_repository, oid, flags)
+ #endif
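
With the compatibility macros guarded by NO_THE_REPOSITORY_COMPATIBILITY_MACROS, existing callers keep the old names bound to the_repository while new code passes an explicit repository. A sketch (the helper name is illustrative, not from the diff):

#include "cache.h"
#include "repository.h"
#include "object-store.h"

/*
 * Sketch: read a blob out of an arbitrary repository (for example an
 * open submodule handle) instead of implicitly using the_repository.
 */
static void *read_blob_from(struct repository *r, const struct object_id *oid,
			    unsigned long *size)
{
	enum object_type type;
	void *buf;

	if (!repo_has_object_file(r, oid))
		return NULL;

	buf = repo_read_object_file(r, oid, &type, size);
	if (buf && type != OBJ_BLOB) {
		free(buf);
		return NULL;
	}
	return buf;
}
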
/*
* Return true iff an alternate object database has a loose object
if (obj && obj->parsed)
return obj;
- if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
- (!obj && has_object_file(oid) &&
+ if ((obj && obj->type == OBJ_BLOB && repo_has_object_file(r, oid)) ||
+ (!obj && repo_has_object_file(r, oid) &&
oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
if (check_object_signature(repl, NULL, 0, NULL) < 0) {
error(_("sha1 mismatch %s"), oid_to_hex(oid));
return lookup_object(r, oid->hash);
}
- buffer = read_object_file(oid, &type, &size);
+ buffer = repo_read_object_file(r, oid, &type, &size);
if (buffer) {
if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
free(buffer);
return o;
}
-static void free_alt_odb(struct alternate_object_database *alt)
+static void free_object_directory(struct object_directory *odb)
{
- strbuf_release(&alt->scratch);
- oid_array_clear(&alt->loose_objects_cache);
- free(alt);
+ free(odb->path);
+ odb_clear_loose_cache(odb);
+ free(odb);
}
-static void free_alt_odbs(struct raw_object_store *o)
+static void free_object_directories(struct raw_object_store *o)
{
- while (o->alt_odb_list) {
- struct alternate_object_database *next;
+ while (o->odb) {
+ struct object_directory *next;
- next = o->alt_odb_list->next;
- free_alt_odb(o->alt_odb_list);
- o->alt_odb_list = next;
+ next = o->odb->next;
+ free_object_directory(o->odb);
+ o->odb = next;
}
}
void raw_object_store_clear(struct raw_object_store *o)
{
- FREE_AND_NULL(o->objectdir);
FREE_AND_NULL(o->alternate_db);
oidmap_free(o->replace_map, 1);
o->commit_graph = NULL;
o->commit_graph_attempted = 0;
- free_alt_odbs(o);
- o->alt_odb_tail = NULL;
+ free_object_directories(o);
+ o->odb_tail = NULL;
+ o->loaded_alternates = 0;
INIT_LIST_HEAD(&o->packed_git_mru);
close_all_packs(o);
if (obj->type == OBJ_TREE)
free_tree_buffer((struct tree*)obj);
else if (obj->type == OBJ_COMMIT)
- release_commit_memory((struct commit*)obj);
+ release_commit_memory(o, (struct commit*)obj);
else if (obj->type == OBJ_TAG)
release_tag_memory((struct tag*)obj);
}
static int check_packed_git_idx(const char *path, struct packed_git *p)
{
void *idx_map;
- struct pack_idx_header *hdr;
size_t idx_size;
- uint32_t version, nr, i, *index;
- int fd = git_open(path);
+ int fd = git_open(path), ret;
struct stat st;
const unsigned int hashsz = the_hash_algo->rawsz;
idx_map = xmmap(NULL, idx_size, PROT_READ, MAP_PRIVATE, fd, 0);
close(fd);
- hdr = idx_map;
+ ret = load_idx(path, hashsz, idx_map, idx_size, p);
+
+ if (ret)
+ munmap(idx_map, idx_size);
+
+ return ret;
+}
+
+int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
+ size_t idx_size, struct packed_git *p)
+{
+ struct pack_idx_header *hdr = idx_map;
+ uint32_t version, nr, i, *index;
+
+ if (idx_size < 4 * 256 + hashsz + hashsz)
+ return error("index file %s is too small", path);
+ if (idx_map == NULL)
+ return error("empty data");
+
if (hdr->idx_signature == htonl(PACK_IDX_SIGNATURE)) {
version = ntohl(hdr->idx_version);
- if (version < 2 || version > 2) {
- munmap(idx_map, idx_size);
+ if (version < 2 || version > 2)
return error("index file %s is version %"PRIu32
" and is not supported by this binary"
" (try upgrading GIT to a newer version)",
path, version);
- }
} else
version = 1;
index += 2; /* skip index header */
for (i = 0; i < 256; i++) {
uint32_t n = ntohl(index[i]);
- if (n < nr) {
- munmap(idx_map, idx_size);
+ if (n < nr)
return error("non-monotonic index %s", path);
- }
nr = n;
}
* - hash of the packfile
* - file checksum
*/
- if (idx_size != 4*256 + nr * (hashsz + 4) + hashsz + hashsz) {
- munmap(idx_map, idx_size);
+ if (idx_size != 4 * 256 + nr * (hashsz + 4) + hashsz + hashsz)
return error("wrong index v1 file size in %s", path);
- }
} else if (version == 2) {
/*
* Minimum size:
unsigned long max_size = min_size;
if (nr)
max_size += (nr - 1)*8;
- if (idx_size < min_size || idx_size > max_size) {
- munmap(idx_map, idx_size);
+ if (idx_size < min_size || idx_size > max_size)
return error("wrong index v2 file size in %s", path);
- }
if (idx_size != min_size &&
/*
* make sure we can deal with large pack offsets.
* 31-bit signed offset won't be enough, neither
* 32-bit unsigned one will be.
*/
- (sizeof(off_t) <= 4)) {
- munmap(idx_map, idx_size);
+ (sizeof(off_t) <= 4))
return error("pack too large for current definition of off_t in %s", path);
- }
}
p->index_version = version;
BUG("want to close pack marked 'do-not-close'");
else
close_pack(p);
+
+ if (o->multi_pack_index) {
+ close_midx(o->multi_pack_index);
+ o->multi_pack_index = NULL;
+ }
}
/*
static void prepare_packed_git(struct repository *r)
{
- struct alternate_object_database *alt;
+ struct object_directory *odb;
if (r->objects->packed_git_initialized)
return;
- prepare_multi_pack_index_one(r, r->objects->objectdir, 1);
- prepare_packed_git_one(r, r->objects->objectdir, 1);
+
prepare_alt_odb(r);
- for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
- prepare_multi_pack_index_one(r, alt->path, 0);
- prepare_packed_git_one(r, alt->path, 0);
+ for (odb = r->objects->odb; odb; odb = odb->next) {
+ int local = (odb == r->objects->odb);
+ prepare_multi_pack_index_one(r, odb->path, local);
+ prepare_packed_git_one(r, odb->path, local);
}
rearrange_packed_git(r);
void reprepare_packed_git(struct repository *r)
{
+ struct object_directory *odb;
+
+ for (odb = r->objects->odb; odb; odb = odb->next)
+ odb_clear_loose_cache(odb);
+
r->objects->approximate_object_count_valid = 0;
r->objects->packed_git_initialized = 0;
prepare_packed_git(r);
void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1)
{
unsigned i;
+ const unsigned hashsz = the_hash_algo->rawsz;
for (i = 0; i < p->num_bad_objects; i++)
- if (hasheq(sha1, p->bad_object_sha1 + GIT_SHA1_RAWSZ * i))
+ if (hasheq(sha1, p->bad_object_sha1 + hashsz * i))
return;
p->bad_object_sha1 = xrealloc(p->bad_object_sha1,
st_mult(GIT_MAX_RAWSZ,
st_add(p->num_bad_objects, 1)));
- hashcpy(p->bad_object_sha1 + GIT_SHA1_RAWSZ * p->num_bad_objects, sha1);
+ hashcpy(p->bad_object_sha1 + hashsz * p->num_bad_objects, sha1);
p->num_bad_objects++;
}
- const struct packed_git *has_packed_and_bad(const unsigned char *sha1)
+ const struct packed_git *has_packed_and_bad(struct repository *r,
+ const unsigned char *sha1)
{
struct packed_git *p;
unsigned i;
- for (p = the_repository->objects->packed_git; p; p = p->next)
+ for (p = r->objects->packed_git; p; p = p->next)
for (i = 0; i < p->num_bad_objects; i++)
if (hasheq(sha1,
p->bad_object_sha1 + the_hash_algo->rawsz * i))
*/
return 0;
while (tree_entry_gently(&desc, &entry))
- oidset_insert(set, entry.oid);
+ oidset_insert(set, &entry.oid);
} else if (obj->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *) obj;
struct commit_list *parents = commit->parents;
off_t offset, struct object_info *);
extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
- extern const struct packed_git *has_packed_and_bad(const unsigned char *sha1);
+ extern const struct packed_git *has_packed_and_bad(struct repository *r, const unsigned char *sha1);
/*
* Iff a pack file in the given repository contains the object named by sha1,
*/
extern int is_promisor_object(const struct object_id *oid);
+/*
+ * Expose a function for fuzz testing.
+ *
+ * load_idx() parses a block of memory as a packfile index and puts the results
+ * into a struct packed_git.
+ *
+ * This function should not be used directly. It is exposed here only so that we
+ * have a convenient entry-point for fuzz testing. For real uses, you should
+ * probably use open_pack_index() or parse_pack_index() instead.
+ */
+extern int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
+ size_t idx_size, struct packed_git *p);
+
#endif
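
Given the entry point above, a libFuzzer-style driver can feed arbitrary bytes straight into the index parser. Roughly like the following sketch (the fuzz target actually shipped in the tree may differ in detail):

#include "object-store.h"
#include "packfile.h"

int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
{
	struct packed_git p;

	memset(&p, 0, sizeof(p));
	/* Any parse error is reported via error() and simply ignored here. */
	load_idx("fuzz-input", GIT_SHA1_RAWSZ, (void *)data, size, &p);
	return 0;
}
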
return strbuf_detach(&tmp, NULL);
}
- const char *logmsg_reencode(const struct commit *commit,
- char **commit_encoding,
- const char *output_encoding)
+ const char *repo_logmsg_reencode(struct repository *r,
+ const struct commit *commit,
+ char **commit_encoding,
+ const char *output_encoding)
{
static const char *utf8 = "UTF-8";
const char *use_encoding;
char *encoding;
- const char *msg = get_commit_buffer(commit, NULL);
+ const char *msg = repo_get_commit_buffer(r, commit, NULL);
char *out;
if (!output_encoding || !*output_encoding) {
* the cached copy from get_commit_buffer, we need to duplicate it
* to avoid munging the cached copy.
*/
- if (msg == get_cached_commit_buffer(the_repository, commit, NULL))
+ if (msg == get_cached_commit_buffer(r, commit, NULL))
out = xstrdup(msg);
else
out = (char *)msg;
*/
out = reencode_string(msg, output_encoding, use_encoding);
if (out)
- unuse_commit_buffer(commit, msg);
+ repo_unuse_commit_buffer(r, commit, msg);
}
/*
struct commit_list *p;
const char *arg;
int ch;
+ char **slot;
/* these are independent of the commit */
switch (placeholder[0]) {
load_ref_decorations(NULL, DECORATE_SHORT_REFS);
format_decorations_extended(sb, commit, c->auto_color, "", ", ", "");
return 1;
+ case 'S': /* tag/branch like --source */
+ if (!(c->pretty_ctx->rev && c->pretty_ctx->rev->sources))
+ return 0;
+ slot = revision_sources_at(c->pretty_ctx->rev->sources, commit);
+ if (!(slot && *slot))
+ return 0;
+ strbuf_addstr(sb, *slot);
+ return 1;
case 'g': /* reflog info */
switch(placeholder[1]) {
case 'd': /* reflog selector */
if (c->signature_check.key)
strbuf_addstr(sb, c->signature_check.key);
break;
+ case 'F':
+ if (c->signature_check.fingerprint)
+ strbuf_addstr(sb, c->signature_check.fingerprint);
+ break;
+ case 'P':
+ if (c->signature_check.primary_key_fingerprint)
+ strbuf_addstr(sb, c->signature_check.primary_key_fingerprint);
+ break;
default:
return 0;
}
case 'N':
w->notes = 1;
break;
+ case 'S':
+ w->source = 1;
+ break;
}
return 0;
}
strbuf_release(&dummy);
}
- void format_commit_message(const struct commit *commit,
- const char *format, struct strbuf *sb,
- const struct pretty_print_context *pretty_ctx)
+ void repo_format_commit_message(struct repository *r,
+ const struct commit *commit,
+ const char *format, struct strbuf *sb,
+ const struct pretty_print_context *pretty_ctx)
{
struct format_commit_context context;
const char *output_enc = pretty_ctx->output_encoding;
* convert a commit message to UTF-8 first
* as far as 'format_commit_item' assumes it in UTF-8
*/
- context.message = logmsg_reencode(commit,
- &context.commit_encoding,
- utf8);
+ context.message = repo_logmsg_reencode(r, commit,
+ &context.commit_encoding,
+ utf8);
strbuf_expand(sb, format, format_commit_item, &context);
rewrap_message_tail(sb, &context, 0, 0, 0);
}
free(context.commit_encoding);
- unuse_commit_buffer(commit, context.message);
+ repo_unuse_commit_buffer(r, commit, context.message);
}
static void pp_header(struct pretty_print_context *pp,
struct userformat_want {
unsigned notes:1;
+ unsigned source:1;
};
/* Set the flag "w->notes" if there is placeholder %N in "fmt". */
* Put the result to "sb".
* Please use this function for custom formats.
*/
- void format_commit_message(const struct commit *commit,
+ void repo_format_commit_message(struct repository *r,
+ const struct commit *commit,
const char *format, struct strbuf *sb,
const struct pretty_print_context *context);
+ #ifndef NO_THE_REPOSITORY_COMPATIBILITY_MACROS
+ #define format_commit_message(c, f, s, con) \
+ repo_format_commit_message(the_repository, c, f, s, con)
+ #endif
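
repo_format_commit_message() lets pretty-printing run against commits that live in another repository, which is what the submodule diff code later in this series relies on. A minimal sketch using only standard placeholders:

#include "cache.h"
#include "commit.h"
#include "pretty.h"

/*
 * Sketch: render an abbreviated hash and subject for a commit that lives
 * in an arbitrary repository 'r' (e.g. an open submodule handle).
 */
static void show_subject(struct repository *r, struct commit *commit,
			 struct strbuf *out)
{
	struct pretty_print_context ctx = {0};

	ctx.output_encoding = get_log_output_encoding();
	repo_format_commit_message(r, commit, "%h %s", out, &ctx);
}
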
/*
* Parse given arguments from "arg", check it for correctness and
#define EMPTY_TREE_SHA1_BIN_LITERAL \
"\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60" \
"\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04"
+#define EMPTY_TREE_SHA256_BIN_LITERAL \
+ "\x6e\xf1\x9b\x41\x22\x5c\x53\x69\xf1\xc1" \
+ "\x04\xd4\x5d\x8d\x85\xef\xa9\xb0\x57\xb5" \
+ "\x3b\x14\xb4\xb9\xb9\x39\xdd\x74\xde\xcc" \
+ "\x53\x21"
#define EMPTY_BLOB_SHA1_BIN_LITERAL \
"\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
"\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
+#define EMPTY_BLOB_SHA256_BIN_LITERAL \
+ "\x47\x3a\x0f\x4c\x3b\xe8\xa9\x36\x81\xa2" \
+ "\x67\xe3\xb1\xe9\xa7\xdc\xda\x11\x85\x43" \
+ "\x6f\xe1\x41\xf7\x74\x91\x20\xa3\x03\x72" \
+ "\x18\x13"
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
static const struct object_id empty_blob_oid = {
EMPTY_BLOB_SHA1_BIN_LITERAL
};
+static const struct object_id empty_tree_oid_sha256 = {
+ EMPTY_TREE_SHA256_BIN_LITERAL
+};
+static const struct object_id empty_blob_oid_sha256 = {
+ EMPTY_BLOB_SHA256_BIN_LITERAL
+};
static void git_hash_sha1_init(git_hash_ctx *ctx)
{
git_SHA1_Final(hash, &ctx->sha1);
}
+
+static void git_hash_sha256_init(git_hash_ctx *ctx)
+{
+ git_SHA256_Init(&ctx->sha256);
+}
+
+static void git_hash_sha256_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+ git_SHA256_Update(&ctx->sha256, data, len);
+}
+
+static void git_hash_sha256_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+ git_SHA256_Final(hash, &ctx->sha256);
+}
+
static void git_hash_unknown_init(git_hash_ctx *ctx)
{
BUG("trying to init unknown hash");
0x00000000,
0,
0,
+ 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
NULL,
},
{
- "sha-1",
+ "sha1",
/* "sha1", big-endian */
0x73686131,
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
+ GIT_SHA1_BLKSZ,
git_hash_sha1_init,
git_hash_sha1_update,
git_hash_sha1_final,
&empty_tree_oid,
&empty_blob_oid,
},
+ {
+ "sha256",
+ /* "s256", big-endian */
+ 0x73323536,
+ GIT_SHA256_RAWSZ,
+ GIT_SHA256_HEXSZ,
+ GIT_SHA256_BLKSZ,
+ git_hash_sha256_init,
+ git_hash_sha256_update,
+ git_hash_sha256_final,
+ &empty_tree_oid_sha256,
+ &empty_blob_oid_sha256,
+ }
};
const char *empty_tree_oid_hex(void)
return oid_to_hex_r(buf, the_hash_algo->empty_blob);
}
+int hash_algo_by_name(const char *name)
+{
+ int i;
+ if (!name)
+ return GIT_HASH_UNKNOWN;
+ for (i = 1; i < GIT_HASH_NALGOS; i++)
+ if (!strcmp(name, hash_algos[i].name))
+ return i;
+ return GIT_HASH_UNKNOWN;
+}
+
+int hash_algo_by_id(uint32_t format_id)
+{
+ int i;
+ for (i = 1; i < GIT_HASH_NALGOS; i++)
+ if (format_id == hash_algos[i].format_id)
+ return i;
+ return GIT_HASH_UNKNOWN;
+}
+
+
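
Together with the sha256 entry in hash_algos[], these lookup helpers let callers select an algorithm at run time and drive it through the per-algorithm function pointers. A sketch; the buffer and output storage are the caller's:

#include "cache.h"

/* Sketch: hash an arbitrary buffer with the algorithm named 'name'. */
static int hash_buffer(const char *name, const void *buf, size_t len,
		       unsigned char *out /* GIT_MAX_RAWSZ bytes */)
{
	int algo = hash_algo_by_name(name);	/* e.g. "sha1" or "sha256" */
	const struct git_hash_algo *algop;
	git_hash_ctx ctx;

	if (algo == GIT_HASH_UNKNOWN)
		return -1;

	algop = &hash_algos[algo];
	algop->init_fn(&ctx);
	algop->update_fn(&ctx, buf, len);
	algop->final_fn(out, &ctx);
	return 0;
}
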
/*
* This is meant to hold a *small* number of objects that you would
* want read_sha1_file() to be able to return, but yet you do not want
}
}
-void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1)
+static const char *odb_loose_path(struct object_directory *odb,
+ struct strbuf *buf,
+ const unsigned char *sha1)
{
- strbuf_addstr(buf, r->objects->objectdir);
+ strbuf_reset(buf);
+ strbuf_addstr(buf, odb->path);
strbuf_addch(buf, '/');
fill_sha1_path(buf, sha1);
+ return buf->buf;
}
-struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
+const char *loose_object_path(struct repository *r, struct strbuf *buf,
+ const unsigned char *sha1)
{
- strbuf_setlen(&alt->scratch, alt->base_len);
- return &alt->scratch;
-}
-
-static const char *alt_sha1_path(struct alternate_object_database *alt,
- const unsigned char *sha1)
-{
- struct strbuf *buf = alt_scratch_buf(alt);
- fill_sha1_path(buf, sha1);
- return buf->buf;
+ return odb_loose_path(r->objects->odb, buf, sha1);
}
/*
struct strbuf *path,
const char *normalized_objdir)
{
- struct alternate_object_database *alt;
+ struct object_directory *odb;
/* Detect cases where alternate disappeared */
if (!is_directory(path->buf)) {
* Prevent the common mistake of listing the same
* thing twice, or object directory itself.
*/
- for (alt = o->alt_odb_list; alt; alt = alt->next) {
- if (!fspathcmp(path->buf, alt->path))
+ for (odb = o->odb; odb; odb = odb->next) {
+ if (!fspathcmp(path->buf, odb->path))
return 0;
}
if (!fspathcmp(path->buf, normalized_objdir))
* Prepare alternate object database registry.
*
* The variable alt_odb_list points at the list of struct
- * alternate_object_database. The elements on this list come from
+ * object_directory. The elements on this list come from
* non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
* environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
* whose contents is similar to that environment variable but can be
static int link_alt_odb_entry(struct repository *r, const char *entry,
const char *relative_base, int depth, const char *normalized_objdir)
{
- struct alternate_object_database *ent;
+ struct object_directory *ent;
struct strbuf pathbuf = STRBUF_INIT;
if (!is_absolute_path(entry) && relative_base) {
return -1;
}
- ent = alloc_alt_odb(pathbuf.buf);
+ ent = xcalloc(1, sizeof(*ent));
+ ent->path = xstrdup(pathbuf.buf);
/* add the alternate entry */
- *r->objects->alt_odb_tail = ent;
- r->objects->alt_odb_tail = &(ent->next);
+ *r->objects->odb_tail = ent;
+ r->objects->odb_tail = &(ent->next);
ent->next = NULL;
/* recursively add alternates */
return;
}
- strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
+ strbuf_add_absolute_path(&objdirbuf, r->objects->odb->path);
if (strbuf_normalize_path(&objdirbuf) < 0)
die(_("unable to normalize object directory: %s"),
objdirbuf.buf);
free(path);
}
-struct alternate_object_database *alloc_alt_odb(const char *dir)
-{
- struct alternate_object_database *ent;
-
- FLEX_ALLOC_STR(ent, path, dir);
- strbuf_init(&ent->scratch, 0);
- strbuf_addf(&ent->scratch, "%s/", dir);
- ent->base_len = ent->scratch.len;
-
- return ent;
-}
-
void add_to_alternates_file(const char *reference)
{
struct lock_file lock = LOCK_INIT;
fprintf_or_die(out, "%s\n", reference);
if (commit_lock_file(&lock))
die_errno(_("unable to move new alternates file into place"));
- if (the_repository->objects->alt_odb_tail)
+ if (the_repository->objects->loaded_alternates)
link_alt_odb_entries(the_repository, reference,
'\n', NULL, 0);
}
int foreach_alt_odb(alt_odb_fn fn, void *cb)
{
- struct alternate_object_database *ent;
+ struct object_directory *ent;
int r = 0;
prepare_alt_odb(the_repository);
- for (ent = the_repository->objects->alt_odb_list; ent; ent = ent->next) {
+ for (ent = the_repository->objects->odb->next; ent; ent = ent->next) {
r = fn(ent, cb);
if (r)
break;
void prepare_alt_odb(struct repository *r)
{
- if (r->objects->alt_odb_tail)
+ if (r->objects->loaded_alternates)
return;
- r->objects->alt_odb_tail = &r->objects->alt_odb_list;
link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
- read_info_alternates(r, r->objects->objectdir, 0);
+ read_info_alternates(r, r->objects->odb->path, 0);
+ r->objects->loaded_alternates = 1;
}
/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
return 1;
}
-static int check_and_freshen_local(const struct object_id *oid, int freshen)
+static int check_and_freshen_odb(struct object_directory *odb,
+ const struct object_id *oid,
+ int freshen)
{
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- sha1_file_name(the_repository, &buf, oid->hash);
+ static struct strbuf path = STRBUF_INIT;
+ odb_loose_path(odb, &path, oid->hash);
+ return check_and_freshen_file(path.buf, freshen);
+}
- return check_and_freshen_file(buf.buf, freshen);
+static int check_and_freshen_local(const struct object_id *oid, int freshen)
+{
+ return check_and_freshen_odb(the_repository->objects->odb, oid, freshen);
}
static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen)
{
- struct alternate_object_database *alt;
+ struct object_directory *odb;
+
prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
- const char *path = alt_sha1_path(alt, oid->hash);
- if (check_and_freshen_file(path, freshen))
+ for (odb = the_repository->objects->odb->next; odb; odb = odb->next) {
+ if (check_and_freshen_odb(odb, oid, freshen))
return 1;
}
return 0;
return -1;
/* Generate the header */
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(obj_type), (uintmax_t)size) + 1;
/* Sha1.. */
the_hash_algo->init_fn(&c);
*
* The "path" out-parameter will give the path of the object we found (if any).
* Note that it may point to static storage and is only valid until another
- * call to sha1_file_name(), etc.
+ * call to stat_sha1_file().
*/
static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
struct stat *st, const char **path)
{
- struct alternate_object_database *alt;
+ struct object_directory *odb;
static struct strbuf buf = STRBUF_INIT;
- strbuf_reset(&buf);
- sha1_file_name(r, &buf, sha1);
- *path = buf.buf;
-
- if (!lstat(*path, st))
- return 0;
-
prepare_alt_odb(r);
- errno = ENOENT;
- for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
- *path = alt_sha1_path(alt, sha1);
+ for (odb = r->objects->odb; odb; odb = odb->next) {
+ *path = odb_loose_path(odb, &buf, sha1);
if (!lstat(*path, st))
return 0;
}
const unsigned char *sha1, const char **path)
{
int fd;
- struct alternate_object_database *alt;
- int most_interesting_errno;
+ struct object_directory *odb;
+ int most_interesting_errno = ENOENT;
static struct strbuf buf = STRBUF_INIT;
- strbuf_reset(&buf);
- sha1_file_name(r, &buf, sha1);
- *path = buf.buf;
-
- fd = git_open(*path);
- if (fd >= 0)
- return fd;
- most_interesting_errno = errno;
-
prepare_alt_odb(r);
- for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
- *path = alt_sha1_path(alt, sha1);
+ for (odb = r->objects->odb; odb; odb = odb->next) {
+ *path = odb_loose_path(odb, &buf, sha1);
fd = git_open(*path);
if (fd >= 0)
return fd;
+
if (most_interesting_errno == ENOENT)
most_interesting_errno = errno;
}
return -1;
}
+static int quick_has_loose(struct repository *r,
+ const unsigned char *sha1)
+{
+ struct object_id oid;
+ struct object_directory *odb;
+
+ hashcpy(oid.hash, sha1);
+
+ prepare_alt_odb(r);
+ for (odb = r->objects->odb; odb; odb = odb->next) {
+ if (oid_array_lookup(odb_loose_cache(odb, &oid), &oid) >= 0)
+ return 1;
+ }
+ return 0;
+}
+
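
quick_has_loose() above backs the OBJECT_INFO_QUICK path, so a racy-but-cheap existence probe now consults the per-subdirectory loose cache instead of lstat()ing every alternate. Callers opt in via the flag; a tiny sketch:

#include "cache.h"
#include "object-store.h"

/*
 * Sketch: a fast existence probe that tolerates races (e.g. when deciding
 * whether something needs to be fetched), as opposed to the accurate
 * default check.
 */
static int probably_have_object(struct repository *r, const struct object_id *oid)
{
	return repo_has_object_file_with_flags(r, oid, OBJECT_INFO_QUICK);
}
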
/*
* Map the loose object at "path" if it is not NULL, or the path found by
* searching for a loose object named "sha1".
if (!*size) {
/* mmap() is forbidden on empty files */
error(_("object file %s is empty"), path);
+ close(fd);
return NULL;
}
map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
const char *path;
struct stat st;
+ if (!oi->disk_sizep && (flags & OBJECT_INFO_QUICK))
+ return quick_has_loose(r, sha1) ? 0 : -1;
if (stat_sha1_file(r, sha1, &st, &path) < 0)
return -1;
if (oi->disk_sizep)
return type;
}
- static void *read_object(const unsigned char *sha1, enum object_type *type,
+ static void *read_object(struct repository *r,
+ const unsigned char *sha1,
+ enum object_type *type,
unsigned long *size)
{
struct object_id oid;
hashcpy(oid.hash, sha1);
- if (oid_object_info_extended(the_repository, &oid, &oi, 0) < 0)
+ if (oid_object_info_extended(r, &oid, &oi, 0) < 0)
return NULL;
return content;
}
* deal with them should arrange to call read_object() and give error
* messages themselves.
*/
- void *read_object_file_extended(const struct object_id *oid,
+ void *read_object_file_extended(struct repository *r,
+ const struct object_id *oid,
enum object_type *type,
unsigned long *size,
int lookup_replace)
const char *path;
struct stat st;
const struct object_id *repl = lookup_replace ?
- lookup_replace_object(the_repository, oid) : oid;
+ lookup_replace_object(r, oid) : oid;
errno = 0;
- data = read_object(repl->hash, type, size);
+ data = read_object(r, repl->hash, type, size);
if (data)
return data;
die(_("replacement %s not found for %s"),
oid_to_hex(repl), oid_to_hex(oid));
- if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
+ if (!stat_sha1_file(r, repl->hash, &st, &path))
die(_("loose object %s (stored in %s) is corrupt"),
oid_to_hex(repl), path);
- if ((p = has_packed_and_bad(repl->hash)) != NULL)
+ if ((p = has_packed_and_bad(r, repl->hash)) != NULL)
die(_("packed object %s (stored in %s) is corrupt"),
oid_to_hex(repl), p->pack_name);
git_hash_ctx c;
/* Generate the header */
- *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
+ *hdrlen = xsnprintf(hdr, *hdrlen, "%s %"PRIuMAX , type, (uintmax_t)len)+1;
/* Sha1.. */
the_hash_algo->init_fn(&c);
static struct strbuf tmp_file = STRBUF_INIT;
static struct strbuf filename = STRBUF_INIT;
- strbuf_reset(&filename);
- sha1_file_name(the_repository, &filename, oid->hash);
+ loose_object_path(the_repository, &filename, oid->hash);
fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (has_loose_object(oid))
return 0;
- buf = read_object(oid->hash, &type, &len);
+ buf = read_object(the_repository, oid->hash, &type, &len);
if (!buf)
return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %"PRIuMAX , type_name(type), (uintmax_t)len) + 1;
ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
return ret;
}
- int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
+ int repo_has_sha1_file_with_flags(struct repository *r,
+ const unsigned char *sha1, int flags)
{
struct object_id oid;
if (!startup_info->have_repository)
return 0;
hashcpy(oid.hash, sha1);
- return oid_object_info_extended(the_repository, &oid, NULL,
+ return oid_object_info_extended(r, &oid, NULL,
flags | OBJECT_INFO_SKIP_CACHED) >= 0;
}
- int has_object_file(const struct object_id *oid)
+ int repo_has_object_file(struct repository *r,
+ const struct object_id *oid)
{
- return has_sha1_file(oid->hash);
+ return repo_has_sha1_file(r, oid->hash);
}
- int has_object_file_with_flags(const struct object_id *oid, int flags)
+ int repo_has_object_file_with_flags(struct repository *r,
+ const struct object_id *oid, int flags)
{
- return has_sha1_file_with_flags(oid->hash, flags);
+ return repo_has_sha1_file_with_flags(r, oid->hash, flags);
}
static void check_tree(const void *buf, size_t size)
return r;
}
-struct loose_alt_odb_data {
- each_loose_object_fn *cb;
- void *data;
-};
+int for_each_loose_object(each_loose_object_fn cb, void *data,
+ enum for_each_object_flags flags)
+{
+ struct object_directory *odb;
+
+ prepare_alt_odb(the_repository);
+ for (odb = the_repository->objects->odb; odb; odb = odb->next) {
+ int r = for_each_loose_file_in_objdir(odb->path, cb, NULL,
+ NULL, data);
+ if (r)
+ return r;
-static int loose_from_alt_odb(struct alternate_object_database *alt,
- void *vdata)
+ if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
+ break;
+ }
+
+ return 0;
+}
+
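
for_each_loose_object() now walks the unified odb list directly, honoring FOR_EACH_OBJECT_LOCAL_ONLY by stopping after the first (local) entry. A small caller sketch, not taken from this series:

#include "cache.h"
#include "object-store.h"

/* Sketch: count every loose object reachable through the odb list. */
static int count_loose(const struct object_id *oid, const char *path, void *data)
{
	(*(unsigned long *)data)++;
	return 0;	/* non-zero would abort the iteration */
}

static unsigned long count_all_loose(void)
{
	unsigned long n = 0;

	for_each_loose_object(count_loose, &n, 0);
	return n;
}
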
+static int append_loose_object(const struct object_id *oid, const char *path,
+ void *data)
{
- struct loose_alt_odb_data *data = vdata;
+ oid_array_append(data, oid);
+ return 0;
+}
+
+struct oid_array *odb_loose_cache(struct object_directory *odb,
+ const struct object_id *oid)
+{
+ int subdir_nr = oid->hash[0];
struct strbuf buf = STRBUF_INIT;
- int r;
- strbuf_addstr(&buf, alt->path);
- r = for_each_loose_file_in_objdir_buf(&buf,
- data->cb, NULL, NULL,
- data->data);
+ if (subdir_nr < 0 ||
+ subdir_nr >= ARRAY_SIZE(odb->loose_objects_subdir_seen))
+ BUG("subdir_nr out of range");
+
+ if (odb->loose_objects_subdir_seen[subdir_nr])
+ return &odb->loose_objects_cache[subdir_nr];
+
+ strbuf_addstr(&buf, odb->path);
+ for_each_file_in_obj_subdir(subdir_nr, &buf,
+ append_loose_object,
+ NULL, NULL,
+ &odb->loose_objects_cache[subdir_nr]);
+ odb->loose_objects_subdir_seen[subdir_nr] = 1;
strbuf_release(&buf);
- return r;
+ return &odb->loose_objects_cache[subdir_nr];
}
-int for_each_loose_object(each_loose_object_fn cb, void *data,
- enum for_each_object_flags flags)
+void odb_clear_loose_cache(struct object_directory *odb)
{
- struct loose_alt_odb_data alt;
- int r;
-
- r = for_each_loose_file_in_objdir(get_object_directory(),
- cb, NULL, NULL, data);
- if (r)
- return r;
-
- if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
- return 0;
+ int i;
- alt.cb = cb;
- alt.data = data;
- return foreach_alt_odb(loose_from_alt_odb, &alt);
+ for (i = 0; i < ARRAY_SIZE(odb->loose_objects_cache); i++)
+ oid_array_clear(&odb->loose_objects_cache[i]);
+ memset(&odb->loose_objects_subdir_seen, 0,
+ sizeof(odb->loose_objects_subdir_seen));
}
static int check_stream_sha1(git_zstream *stream,
* see the comment in unpack_sha1_rest for details.
*/
while (total_read <= size &&
- (status == Z_OK || status == Z_BUF_ERROR)) {
+ (status == Z_OK ||
+ (status == Z_BUF_ERROR && !stream->avail_out))) {
stream->next_out = buf;
stream->avail_out = sizeof(buf);
if (size - total_read < stream->avail_out)
st->z_state = z_done;
break;
}
+
+ /*
+ * Unlike the loose object case, we do not have to worry here
+ * about running out of input bytes and spinning infinitely. If
+ * we get Z_BUF_ERROR due to too few input bytes, then we'll
+ * replenish them in the next use_pack() call when we loop. If
+ * we truly hit the end of the pack (i.e., because it's corrupt
+ * or truncated), then use_pack() catches that and will die().
+ */
if (status != Z_OK && status != Z_BUF_ERROR) {
git_inflate_end(&st->z);
st->z_state = z_error;
static open_method_decl(incore)
{
- st->u.incore.buf = read_object_file_extended(oid, type, &st->size, 0);
+ st->u.incore.buf = read_object_file_extended(the_repository, oid, type, &st->size, 0);
st->u.incore.read_ptr = 0;
st->vtbl = &incore_vtbl;
#include "commit-reach.h"
static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF;
-static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP;
static int initialized_fetch_ref_tips;
static struct oid_array ref_tips_before_fetch;
static struct oid_array ref_tips_after_fetch;
return 0;
}
+/*
+ * Check if the .gitmodules file is safe to write.
+ *
+ * Writing to the .gitmodules file requires that the file exists in the
+ * working tree or, if it doesn't, that a brand new .gitmodules file is going
+ * to be created (i.e. it's neither in the index nor in the current branch).
+ *
+ * It is not safe to write to .gitmodules if it's not in the working tree but
+ * it is in the index or in the current branch, because writing new values
+ * (and staging them) would blindly overwrite ALL the old content.
+ */
+int is_writing_gitmodules_ok(void)
+{
+ struct object_id oid;
+ return file_exists(GITMODULES_FILE) ||
+ (get_oid(GITMODULES_INDEX, &oid) < 0 && get_oid(GITMODULES_HEAD, &oid) < 0);
+}
+
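
A sketch of how a writer might combine this guard with config_set_in_gitmodules_file_gently(), the helper this patch switches to elsewhere; the wrapper function and message wording below are illustrative assumptions:

#include "cache.h"
#include "submodule.h"
#include "submodule-config.h"

/*
 * Sketch: refuse to rewrite .gitmodules when the file is tracked but
 * missing from the working tree, to avoid clobbering its old content.
 */
static void set_gitmodules_key_or_die(const char *key, const char *value)
{
	if (!is_writing_gitmodules_ok())
		die(_("please make sure that the .gitmodules file is in the working tree"));

	if (config_set_in_gitmodules_file_gently(key, value) < 0)
		die(_("could not update .gitmodules entry %s"), key);
}
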
/*
* Check if the .gitmodules file has unstaged modifications. This must be
* checked before allowing modifications to the .gitmodules file with the
{
struct strbuf entry = STRBUF_INIT;
const struct submodule *submodule;
+ int ret;
if (!file_exists(GITMODULES_FILE)) /* Do nothing without .gitmodules */
return -1;
strbuf_addstr(&entry, "submodule.");
strbuf_addstr(&entry, submodule->name);
strbuf_addstr(&entry, ".path");
- if (git_config_set_in_file_gently(GITMODULES_FILE, entry.buf, newpath) < 0) {
- /* Maybe the user already did that, don't error out here */
- warning(_("Could not update .gitmodules entry %s"), entry.buf);
- strbuf_release(&entry);
- return -1;
- }
+ ret = config_set_in_gitmodules_file_gently(entry.buf, newpath);
strbuf_release(&entry);
- return 0;
+ return ret;
}
/*
return prepare_revision_walk(rev);
}
- static void print_submodule_summary(struct rev_info *rev, struct diff_options *o)
+ static void print_submodule_summary(struct repository *r, struct rev_info *rev, struct diff_options *o)
{
static const char format[] = " %m %s";
struct strbuf sb = STRBUF_INIT;
ctx.date_mode = rev->date_mode;
ctx.output_encoding = get_log_output_encoding();
strbuf_setlen(&sb, 0);
- format_commit_message(commit, format, &sb, &ctx);
+ repo_format_commit_message(r, commit, format, &sb,
+ &ctx);
strbuf_addch(&sb, '\n');
if (commit->object.flags & SYMMETRIC_LEFT)
diff_emit_submodule_del(o, sb.buf);
DEFAULT_GIT_DIR_ENVIRONMENT);
}
- /* Helper function to display the submodule header line prior to the full
- * summary output. If it can locate the submodule objects directory it will
- * attempt to lookup both the left and right commits and put them into the
- * left and right pointers.
+static void prepare_submodule_repo_env_in_gitdir(struct argv_array *out)
+{
+ prepare_submodule_repo_env_no_git_dir(out);
+ argv_array_pushf(out, "%s=.", GIT_DIR_ENVIRONMENT);
+}
+
+ /*
+ * Initialize a repository struct for a submodule based on the provided 'path'.
+ *
+ * Unlike repo_submodule_init, this tolerates submodules not present
+ * in .gitmodules. This function exists only to preserve historical behavior.
+ *
+ * Returns the repository struct on success,
+ * NULL when the submodule is not present.
*/
- static void show_submodule_header(struct diff_options *o, const char *path,
+ static struct repository *open_submodule(const char *path)
+ {
+ struct strbuf sb = STRBUF_INIT;
+ struct repository *out = xmalloc(sizeof(*out));
+
+ if (submodule_to_gitdir(&sb, path) || repo_init(out, sb.buf, NULL)) {
+ strbuf_release(&sb);
+ free(out);
+ return NULL;
+ }
+
+ /* Mark it as a submodule */
+ out->submodule_prefix = xstrdup(path);
+
+ strbuf_release(&sb);
+ return out;
+ }
+
+ /*
+ * Helper function to display the submodule header line prior to the full
+ * summary output.
+ *
+ * If it can locate the submodule git directory it will create a repository
+ * handle for the submodule and lookup both the left and right commits and
+ * put them into the left and right pointers.
+ */
+ static void show_submodule_header(struct diff_options *o,
+ const char *path,
struct object_id *one, struct object_id *two,
unsigned dirty_submodule,
+ struct repository *sub,
struct commit **left, struct commit **right,
struct commit_list **merge_bases)
{
else if (is_null_oid(two))
message = "(submodule deleted)";
- if (add_submodule_odb(path)) {
+ if (!sub) {
if (!message)
message = "(commits not present)";
goto output_header;
* Attempt to lookup the commit references, and determine if this is
* a fast forward or fast backwards update.
*/
- *left = lookup_commit_reference(the_repository, one);
- *right = lookup_commit_reference(the_repository, two);
+ *left = lookup_commit_reference(sub, one);
+ *right = lookup_commit_reference(sub, two);
/*
* Warn about missing commits in the submodule project, but only if
(!is_null_oid(two) && !*right))
message = "(commits not present)";
- *merge_bases = get_merge_bases(*left, *right);
+ *merge_bases = repo_get_merge_bases(sub, *left, *right);
if (*merge_bases) {
if ((*merge_bases)->item == *left)
fast_forward = 1;
struct rev_info rev;
struct commit *left = NULL, *right = NULL;
struct commit_list *merge_bases = NULL;
+ struct repository *sub;
+ sub = open_submodule(path);
show_submodule_header(o, path, one, two, dirty_submodule,
- &left, &right, &merge_bases);
+ sub, &left, &right, &merge_bases);
/*
* If we don't have both a left and a right pointer, there is no
* reason to try and display a summary. The header line should contain
* all the information the user needs.
*/
- if (!left || !right)
+ if (!left || !right || !sub)
goto out;
/* Treat revision walker failure the same as missing commits */
goto out;
}
- print_submodule_summary(&rev, o);
+ print_submodule_summary(sub, &rev, o);
out:
if (merge_bases)
free_commit_list(merge_bases);
clear_commit_marks(left, ~0);
clear_commit_marks(right, ~0);
+ if (sub) {
+ repo_clear(sub);
+ free(sub);
+ }
}
void show_submodule_inline_diff(struct diff_options *o, const char *path,
struct commit_list *merge_bases = NULL;
struct child_process cp = CHILD_PROCESS_INIT;
struct strbuf sb = STRBUF_INIT;
+ struct repository *sub;
+ sub = open_submodule(path);
show_submodule_header(o, path, one, two, dirty_submodule,
- &left, &right, &merge_bases);
+ sub, &left, &right, &merge_bases);
/* We need a valid left and right commit to display a difference */
if (!(left || is_null_oid(one)) ||
clear_commit_marks(left, ~0);
if (right)
clear_commit_marks(right, ~0);
+ if (sub) {
+ repo_clear(sub);
+ free(sub);
+ }
}
int should_update_submodules(void)
}
struct collect_changed_submodules_cb_data {
+ struct repository *repo;
struct string_list *changed;
const struct object_id *commit_oid;
};
if (!S_ISGITLINK(p->two->mode))
continue;
- submodule = submodule_from_path(the_repository,
+ submodule = submodule_from_path(me->repo,
commit_oid, p->two->path);
if (submodule)
name = submodule->name;
name = default_name_or_path(p->two->path);
/* make sure name does not collide with existing one */
if (name)
- submodule = submodule_from_name(the_repository,
+ submodule = submodule_from_name(me->repo,
commit_oid, name);
if (submodule) {
warning("Submodule in commit %s at path: "
* have a corresponding 'struct oid_array' (in the 'util' field) which lists
* what the submodule pointers were updated to during the change.
*/
-static void collect_changed_submodules(struct index_state *istate,
+static void collect_changed_submodules(struct repository *r,
struct string_list *changed,
struct argv_array *argv)
{
struct rev_info rev;
const struct commit *commit;
- repo_init_revisions(the_repository, &rev, NULL);
+ repo_init_revisions(r, &rev, NULL);
setup_revisions(argv->argc, argv->argv, &rev, NULL);
if (prepare_revision_walk(&rev))
die("revision walk setup failed");
while ((commit = get_revision(&rev))) {
struct rev_info diff_rev;
struct collect_changed_submodules_cb_data data;
+ data.repo = r;
data.changed = changed;
data.commit_oid = &commit->object.oid;
- repo_init_revisions(the_repository, &diff_rev, NULL);
+ repo_init_revisions(r, &diff_rev, NULL);
diff_rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
diff_rev.diffopt.format_callback = collect_changed_submodules_cb;
diff_rev.diffopt.format_callback_data = &data;
}
struct has_commit_data {
+ struct repository *repo;
int result;
const char *path;
};
{
struct has_commit_data *cb = data;
- enum object_type type = oid_object_info(the_repository, oid, NULL);
+ enum object_type type = oid_object_info(cb->repo, oid, NULL);
switch (type) {
case OBJ_COMMIT:
}
}
-static int submodule_has_commits(const char *path, struct oid_array *commits)
+static int submodule_has_commits(struct repository *r,
+ const char *path,
+ struct oid_array *commits)
{
- struct has_commit_data has_commit = { 1, path };
+ struct has_commit_data has_commit = { r, 1, path };
/*
* Perform a cheap, but incorrect check for the existence of 'commits'.
return has_commit.result;
}
-static int submodule_needs_pushing(const char *path, struct oid_array *commits)
+static int submodule_needs_pushing(struct repository *r,
+ const char *path,
+ struct oid_array *commits)
{
- if (!submodule_has_commits(path, commits))
+ if (!submodule_has_commits(r, path, commits))
/*
* NOTE: We do consider it safe to return "no" here. The
* correct answer would be "We do not know" instead of
return 0;
}
-int find_unpushed_submodules(struct index_state *istate,
+int find_unpushed_submodules(struct repository *r,
struct oid_array *commits,
const char *remotes_name,
struct string_list *needs_pushing)
argv_array_push(&argv, "--not");
argv_array_pushf(&argv, "--remotes=%s", remotes_name);
- collect_changed_submodules(istate, &submodules, &argv);
+ collect_changed_submodules(r, &submodules, &argv);
for_each_string_list_item(name, &submodules) {
struct oid_array *commits = name->util;
const struct submodule *submodule;
const char *path = NULL;
- submodule = submodule_from_name(the_repository, &null_oid, name->string);
+ submodule = submodule_from_name(r, &null_oid, name->string);
if (submodule)
path = submodule->path;
else
if (!path)
continue;
- if (submodule_needs_pushing(path, commits))
+ if (submodule_needs_pushing(r, path, commits))
string_list_insert(needs_pushing, path);
}
const struct string_list *push_options,
int dry_run)
{
- if (add_submodule_odb(path))
- return 1;
-
if (for_each_remote_ref_submodule(path, has_remote, NULL) > 0) {
struct child_process cp = CHILD_PROCESS_INIT;
argv_array_push(&cp.args, "push");
die("process for submodule '%s' failed", path);
}
-int push_unpushed_submodules(struct index_state *istate,
+int push_unpushed_submodules(struct repository *r,
struct oid_array *commits,
const struct remote *remote,
const struct refspec *rs,
int i, ret = 1;
struct string_list needs_pushing = STRING_LIST_INIT_DUP;
- if (!find_unpushed_submodules(istate, commits,
+ if (!find_unpushed_submodules(r, commits,
remote->name, &needs_pushing))
return 1;
oid_array_append(&ref_tips_after_fetch, oid);
}
-static void calculate_changed_submodule_paths(struct index_state *istate)
+static void calculate_changed_submodule_paths(struct repository *r,
+ struct string_list *changed_submodule_names)
{
struct argv_array argv = ARGV_ARRAY_INIT;
- struct string_list changed_submodules = STRING_LIST_INIT_DUP;
- const struct string_list_item *name;
+ struct string_list_item *name;
/* No need to check if there are no submodules configured */
- if (!submodule_from_path(the_repository, NULL, NULL))
+ if (!submodule_from_path(r, NULL, NULL))
return;
argv_array_push(&argv, "--"); /* argv[0] program name */
* Collect all submodules (whether checked out or not) for which new
* commits have been recorded upstream in "changed_submodule_names".
*/
- collect_changed_submodules(istate, &changed_submodules, &argv);
+ collect_changed_submodules(r, changed_submodule_names, &argv);
- for_each_string_list_item(name, &changed_submodules) {
+ for_each_string_list_item(name, changed_submodule_names) {
struct oid_array *commits = name->util;
const struct submodule *submodule;
const char *path = NULL;
- submodule = submodule_from_name(the_repository, &null_oid, name->string);
+ submodule = submodule_from_name(r, &null_oid, name->string);
if (submodule)
path = submodule->path;
else
if (!path)
continue;
- if (!submodule_has_commits(path, commits))
- string_list_append(&changed_submodule_names, name->string);
+ if (submodule_has_commits(r, path, commits)) {
+ oid_array_clear(commits);
+ *name->string = '\0';
+ }
}
- free_submodules_oids(&changed_submodules);
+ string_list_remove_empty_items(changed_submodule_names, 1);
+
argv_array_clear(&argv);
oid_array_clear(&ref_tips_before_fetch);
oid_array_clear(&ref_tips_after_fetch);
initialized_fetch_ref_tips = 0;
}
-int submodule_touches_in_range(struct index_state *istate,
+int submodule_touches_in_range(struct repository *r,
struct object_id *excl_oid,
struct object_id *incl_oid)
{
int ret;
/* No need to check if there are no submodules configured */
- if (!submodule_from_path(the_repository, NULL, NULL))
+ if (!submodule_from_path(r, NULL, NULL))
return 0;
argv_array_push(&args, "--"); /* args[0] program name */
argv_array_push(&args, oid_to_hex(excl_oid));
}
- collect_changed_submodules(istate, &subs, &args);
+ collect_changed_submodules(r, &subs, &args);
ret = subs.nr;
argv_array_clear(&args);
int default_option;
int quiet;
int result;
+
+ struct string_list changed_submodule_names;
+
+ /* Pending fetches by OIDs */
+ struct fetch_task **oid_fetch_tasks;
+ int oid_fetch_tasks_nr, oid_fetch_tasks_alloc;
};
-#define SPF_INIT {0, ARGV_ARRAY_INIT, NULL, NULL, 0, 0, 0, 0}
+#define SPF_INIT {0, ARGV_ARRAY_INIT, NULL, NULL, 0, 0, 0, 0, \
+ STRING_LIST_INIT_DUP, \
+ NULL, 0, 0}
static int get_fetch_recurse_config(const struct submodule *submodule,
struct submodule_parallel_fetch *spf)
return spf->default_option;
}
+/*
+ * Fetch in progress (if callback data) or
+ * pending (if in oid_fetch_tasks in struct submodule_parallel_fetch)
+ */
+struct fetch_task {
+ struct repository *repo;
+ const struct submodule *sub;
+ unsigned free_sub : 1; /* Do we need to free the submodule? */
+
+ struct oid_array *commits; /* Ensure these commits are fetched */
+};
+
+/**
+ * When a submodule is not defined in .gitmodules, we cannot access it
+ * via the regular submodule-config. Create a fake submodule, which we can
+ * work on.
+ */
+static const struct submodule *get_non_gitmodules_submodule(const char *path)
+{
+ struct submodule *ret = NULL;
+ const char *name = default_name_or_path(path);
+
+ if (!name)
+ return NULL;
+
+ ret = xmalloc(sizeof(*ret));
+ memset(ret, 0, sizeof(*ret));
+ ret->path = name;
+ ret->name = name;
+
+ return (const struct submodule *) ret;
+}
+
+static struct fetch_task *fetch_task_create(struct repository *r,
+ const char *path)
+{
+ struct fetch_task *task = xmalloc(sizeof(*task));
+ memset(task, 0, sizeof(*task));
+
+ task->sub = submodule_from_path(r, &null_oid, path);
+ if (!task->sub) {
+ /*
+ * No entry in .gitmodules? Technically not a submodule,
+ * but historically we supported repositories that happen to be
+ * in-place where a gitlink is. Keep supporting them.
+ */
+ task->sub = get_non_gitmodules_submodule(path);
+ if (!task->sub) {
+ free(task);
+ return NULL;
+ }
+
+ task->free_sub = 1;
+ }
+
+ return task;
+}
+
+static void fetch_task_release(struct fetch_task *p)
+{
+ if (p->free_sub)
+ free((void*)p->sub);
+ p->free_sub = 0;
+ p->sub = NULL;
+
+ if (p->repo)
+ repo_clear(p->repo);
+ FREE_AND_NULL(p->repo);
+}
+
+static struct repository *get_submodule_repo_for(struct repository *r,
+ const struct submodule *sub)
+{
+ struct repository *ret = xmalloc(sizeof(*ret));
+
+ if (repo_submodule_init(ret, r, sub)) {
+ /*
+ * No entry in .gitmodules? Technically not a submodule,
+ * but historically we supported repositories that happen to be
+ * in-place where a gitlink is. Keep supporting them.
+ */
+ struct strbuf gitdir = STRBUF_INIT;
+ strbuf_repo_worktree_path(&gitdir, r, "%s/.git", sub->path);
+ if (repo_init(ret, gitdir.buf, NULL)) {
+ strbuf_release(&gitdir);
+ free(ret);
+ return NULL;
+ }
+ strbuf_release(&gitdir);
+ }
+
+ return ret;
+}
+
static int get_next_submodule(struct child_process *cp,
struct strbuf *err, void *data, void **task_cb)
{
- int ret = 0;
struct submodule_parallel_fetch *spf = data;
for (; spf->count < spf->r->index->cache_nr; spf->count++) {
- struct strbuf submodule_path = STRBUF_INIT;
- struct strbuf submodule_git_dir = STRBUF_INIT;
- struct strbuf submodule_prefix = STRBUF_INIT;
const struct cache_entry *ce = spf->r->index->cache[spf->count];
- const char *git_dir, *default_argv;
- const struct submodule *submodule;
- struct submodule default_submodule = SUBMODULE_INIT;
+ const char *default_argv;
+ struct fetch_task *task;
if (!S_ISGITLINK(ce->ce_mode))
continue;
- submodule = submodule_from_path(spf->r, &null_oid, ce->name);
- if (!submodule) {
- const char *name = default_name_or_path(ce->name);
- if (name) {
- default_submodule.path = default_submodule.name = name;
- submodule = &default_submodule;
- }
- }
+ task = fetch_task_create(spf->r, ce->name);
+ if (!task)
+ continue;
- switch (get_fetch_recurse_config(submodule, spf))
+ switch (get_fetch_recurse_config(task->sub, spf))
{
default:
case RECURSE_SUBMODULES_DEFAULT:
case RECURSE_SUBMODULES_ON_DEMAND:
- if (!submodule || !unsorted_string_list_lookup(&changed_submodule_names,
- submodule->name))
+ if (!task->sub ||
+ !string_list_lookup(
+ &spf->changed_submodule_names,
+ task->sub->name))
continue;
default_argv = "on-demand";
break;
continue;
}
- strbuf_repo_worktree_path(&submodule_path, spf->r, "%s", ce->name);
- strbuf_addf(&submodule_git_dir, "%s/.git", submodule_path.buf);
- strbuf_addf(&submodule_prefix, "%s%s/", spf->prefix, ce->name);
- git_dir = read_gitfile(submodule_git_dir.buf);
- if (!git_dir)
- git_dir = submodule_git_dir.buf;
- if (is_directory(git_dir)) {
+ task->repo = get_submodule_repo_for(spf->r, task->sub);
+ if (task->repo) {
+ struct strbuf submodule_prefix = STRBUF_INIT;
child_process_init(cp);
- cp->dir = strbuf_detach(&submodule_path, NULL);
- prepare_submodule_repo_env(&cp->env_array);
+ cp->dir = task->repo->gitdir;
+ prepare_submodule_repo_env_in_gitdir(&cp->env_array);
cp->git_cmd = 1;
if (!spf->quiet)
strbuf_addf(err, "Fetching submodule %s%s\n",
argv_array_pushv(&cp->args, spf->args.argv);
argv_array_push(&cp->args, default_argv);
argv_array_push(&cp->args, "--submodule-prefix");
+
+ strbuf_addf(&submodule_prefix, "%s%s/",
+ spf->prefix,
+ task->sub->path);
argv_array_push(&cp->args, submodule_prefix.buf);
- ret = 1;
- }
- strbuf_release(&submodule_path);
- strbuf_release(&submodule_git_dir);
- strbuf_release(&submodule_prefix);
- if (ret) {
+
spf->count++;
+ *task_cb = task;
+
+ strbuf_release(&submodule_prefix);
return 1;
+ } else {
+
+ fetch_task_release(task);
+ free(task);
+
+ /*
+ * An empty directory is normal,
+ * the submodule is not initialized
+ */
+ if (S_ISGITLINK(ce->ce_mode) &&
+ !is_empty_dir(ce->name)) {
+ spf->result = 1;
+ strbuf_addf(err,
+ _("Could not access submodule '%s'"),
+ ce->name);
+ }
}
}
+
+ if (spf->oid_fetch_tasks_nr) {
+ struct fetch_task *task =
+ spf->oid_fetch_tasks[spf->oid_fetch_tasks_nr - 1];
+ struct strbuf submodule_prefix = STRBUF_INIT;
+ spf->oid_fetch_tasks_nr--;
+
+ strbuf_addf(&submodule_prefix, "%s%s/",
+ spf->prefix, task->sub->path);
+
+ child_process_init(cp);
+ prepare_submodule_repo_env_in_gitdir(&cp->env_array);
+ cp->git_cmd = 1;
+ cp->dir = task->repo->gitdir;
+
+ argv_array_init(&cp->args);
+ argv_array_pushv(&cp->args, spf->args.argv);
+ argv_array_push(&cp->args, "on-demand");
+ argv_array_push(&cp->args, "--submodule-prefix");
+ argv_array_push(&cp->args, submodule_prefix.buf);
+
+ /* NEEDSWORK: have get_default_remote from submodule--helper */
+ argv_array_push(&cp->args, "origin");
+ oid_array_for_each_unique(task->commits,
+ append_oid_to_argv, &cp->args);
+
+ *task_cb = task;
+ strbuf_release(&submodule_prefix);
+ return 1;
+ }
+
return 0;
}
void *cb, void *task_cb)
{
struct submodule_parallel_fetch *spf = cb;
+ struct fetch_task *task = task_cb;
spf->result = 1;
+ fetch_task_release(task);
return 0;
}
+static int commit_missing_in_sub(const struct object_id *oid, void *data)
+{
+ struct repository *subrepo = data;
+
+ enum object_type type = oid_object_info(subrepo, oid, NULL);
+
+ return type != OBJ_COMMIT;
+}
+
static int fetch_finish(int retvalue, struct strbuf *err,
void *cb, void *task_cb)
{
struct submodule_parallel_fetch *spf = cb;
+ struct fetch_task *task = task_cb;
+
+ struct string_list_item *it;
+ struct oid_array *commits;
if (retvalue)
spf->result = 1;
+ if (!task || !task->sub)
+ BUG("callback cookie bogus");
+
+ /* Is this the second time we process this submodule? */
+ if (task->commits)
+ goto out;
+
+ it = string_list_lookup(&spf->changed_submodule_names, task->sub->name);
+ if (!it)
+ /* Could be an unchanged submodule, not contained in the list */
+ goto out;
+
+ commits = it->util;
+ oid_array_filter(commits,
+ commit_missing_in_sub,
+ task->repo);
+
+ /* Are there commits we want, but do not exist? */
+ if (commits->nr) {
+ task->commits = commits;
+ ALLOC_GROW(spf->oid_fetch_tasks,
+ spf->oid_fetch_tasks_nr + 1,
+ spf->oid_fetch_tasks_alloc);
+ spf->oid_fetch_tasks[spf->oid_fetch_tasks_nr] = task;
+ spf->oid_fetch_tasks_nr++;
+ return 0;
+ }
+
+out:
+ fetch_task_release(task);
+
return 0;
}
argv_array_push(&spf.args, "--recurse-submodules-default");
/* default value, "--submodule-prefix" and its value are added later */
- calculate_changed_submodule_paths(r->index);
+ calculate_changed_submodule_paths(r, &spf.changed_submodule_names);
+ string_list_sort(&spf.changed_submodule_names);
run_processes_parallel(max_parallel_jobs,
get_next_submodule,
fetch_start_failure,
argv_array_clear(&spf.args);
out:
- string_list_clear(&changed_submodule_names, 1);
+ free_submodules_oids(&spf.changed_submodule_names);
return spf.result;
}
return ret;
}
+void submodule_unset_core_worktree(const struct submodule *sub)
+{
+ char *config_path = xstrfmt("%s/modules/%s/config",
+ get_git_common_dir(), sub->name);
+
+ if (git_config_set_in_file_gently(config_path, "core.worktree", NULL))
+ warning(_("Could not unset core.worktree setting in submodule '%s'"),
+ sub->path);
+
+ free(config_path);
+}
+
static const char *get_super_prefix_or_empty(void)
{
const char *s = get_super_prefix();
if (is_empty_dir(path))
rmdir_or_warn(path);
+
+ submodule_unset_core_worktree(sub);
}
}
out: