From: Junio C Hamano Date: Wed, 11 Apr 2018 04:09:55 +0000 (+0900) Subject: Merge branch 'sb/object-store' X-Git-Tag: v2.18.0-rc0~144 X-Git-Url: https://git.lorimer.id.au/gitweb.git/diff_plain/cf0b1793ead9428d88e6592e624c7cb222913c58?ds=inline;hp=-c Merge branch 'sb/object-store' Refactoring the internal global data structure to make it possible to open multiple repositories, work with and then close them. Rerolled by Duy on top of a separate preliminary clean-up topic. The resulting structure of the topics looked very sensible. * sb/object-store: (27 commits) sha1_file: allow sha1_loose_object_info to handle arbitrary repositories sha1_file: allow map_sha1_file to handle arbitrary repositories sha1_file: allow map_sha1_file_1 to handle arbitrary repositories sha1_file: allow open_sha1_file to handle arbitrary repositories sha1_file: allow stat_sha1_file to handle arbitrary repositories sha1_file: allow sha1_file_name to handle arbitrary repositories sha1_file: add repository argument to sha1_loose_object_info sha1_file: add repository argument to map_sha1_file sha1_file: add repository argument to map_sha1_file_1 sha1_file: add repository argument to open_sha1_file sha1_file: add repository argument to stat_sha1_file sha1_file: add repository argument to sha1_file_name sha1_file: allow prepare_alt_odb to handle arbitrary repositories sha1_file: allow link_alt_odb_entries to handle arbitrary repositories sha1_file: add repository argument to prepare_alt_odb sha1_file: add repository argument to link_alt_odb_entries sha1_file: add repository argument to read_info_alternates sha1_file: add repository argument to link_alt_odb_entry sha1_file: add raw_object_store argument to alt_odb_usable pack: move approximate object count to object store ... --- cf0b1793ead9428d88e6592e624c7cb222913c58 diff --combined builtin/am.c index 1bcc3606c5,47beddbe24..9c82603f70 --- a/builtin/am.c +++ b/builtin/am.c @@@ -1011,7 -1011,6 +1011,7 @@@ static void am_setup(struct am_state *s if (mkdir(state->dir, 0777) < 0 && errno != EEXIST) die_errno(_("failed to create directory '%s'"), state->dir); + delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); if (split_mail(state, patch_format, paths, keep_cr) < 0) { am_destroy(state); @@@ -1111,7 -1110,6 +1111,7 @@@ static void am_next(struct am_state *st oidclr(&state->orig_commit); unlink(am_path(state, "original-commit")); + delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF); if (!get_oid("HEAD", &head)) write_state_text(state, "abort-safety", oid_to_hex(&head)); @@@ -1443,8 -1441,6 +1443,8 @@@ static int parse_mail_rebase(struct am_ oidcpy(&state->orig_commit, &commit_oid); write_state_text(state, "original-commit", oid_to_hex(&commit_oid)); + update_ref("am", "REBASE_HEAD", &commit_oid, + NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR); return 0; } @@@ -1550,7 -1546,7 +1550,7 @@@ static int fall_back_threeway(const str discard_cache(); read_cache_from(index_path); - if (write_index_as_tree(orig_tree.hash, &the_index, index_path, 0, NULL)) + if (write_index_as_tree(&orig_tree, &the_index, index_path, 0, NULL)) return error(_("Repository lacks necessary blobs to fall back on 3-way merge.")); say(state, stdout, _("Using index info to reconstruct a base tree...")); @@@ -1575,7 -1571,7 +1575,7 @@@ return error(_("Did you hand edit your patch?\n" "It does not apply to blobs recorded in its index.")); - if (write_index_as_tree(their_tree.hash, &the_index, index_path, 0, NULL)) + if (write_index_as_tree(&their_tree, &the_index, index_path, 0, NULL)) return error("could not 
write tree"); say(state, stdout, _("Falling back to patching base and 3-way merge...")); @@@ -1626,7 -1622,7 +1626,7 @@@ static void do_commit(const struct am_s if (run_hook_le(NULL, "pre-applypatch", NULL)) exit(1); - if (write_cache_as_tree(tree.hash, 0, NULL)) + if (write_cache_as_tree(&tree, 0, NULL)) die(_("git write-tree failed to write a tree")); if (!get_oid_commit("HEAD", &parent)) { @@@ -1645,8 -1641,8 +1645,8 @@@ setenv("GIT_COMMITTER_DATE", state->ignore_date ? "" : state->author_date, 1); - if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash, - author, state->sign_commit)) + if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit, + author, state->sign_commit)) die(_("failed to write commit object")); reflog_msg = getenv("GIT_REFLOG_ACTION"); @@@ -1835,7 -1831,8 +1835,7 @@@ static void am_run(struct am_state *sta git_config_get_bool("advice.amworkdir", &advice_amworkdir); if (advice_amworkdir) - printf_ln(_("The copy of the patch that failed is found in: %s"), - am_path(state, "patch")); + printf_ln(_("Use 'git am --show-current-patch' to see the failed patch")); die_user_resolve(state); } @@@ -1862,7 -1859,7 +1862,7 @@@ next */ if (!state->rebasing) { am_destroy(state); - close_all_packs(); + close_all_packs(the_repository->objects); run_command_v_opt(argv_gc_auto, RUN_GIT_CMD); } } @@@ -2004,7 -2001,7 +2004,7 @@@ static int clean_index(const struct obj if (fast_forward_to(head_tree, head_tree, 1)) return -1; - if (write_cache_as_tree(index.hash, 0, NULL)) + if (write_cache_as_tree(&index, 0, NULL)) return -1; index_tree = parse_tree_indirect(&index); @@@ -2124,34 -2121,6 +2124,34 @@@ static void am_abort(struct am_state *s am_destroy(state); } +static int show_patch(struct am_state *state) +{ + struct strbuf sb = STRBUF_INIT; + const char *patch_path; + int len; + + if (!is_null_oid(&state->orig_commit)) { + const char *av[4] = { "show", NULL, "--", NULL }; + char *new_oid_str; + int ret; + + av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit)); + ret = run_command_v_opt(av, RUN_GIT_CMD); + free(new_oid_str); + return ret; + } + + patch_path = am_path(state, msgnum(state)); + len = strbuf_read_file(&sb, patch_path, 0); + if (len < 0) + die_errno(_("failed to read '%s'"), patch_path); + + setup_pager(); + write_in_full(1, sb.buf, sb.len); + strbuf_release(&sb); + return 0; +} + /** * parse_options() callback that validates and sets opt->value to the * PATCH_FORMAT_* enum value corresponding to `arg`. @@@ -2180,9 -2149,7 +2180,9 @@@ enum resume_mode RESUME_APPLY, RESUME_RESOLVED, RESUME_SKIP, - RESUME_ABORT + RESUME_ABORT, + RESUME_QUIT, + RESUME_SHOW_PATCH }; static int git_am_config(const char *k, const char *v, void *cb) @@@ -2204,7 -2171,6 +2204,7 @@@ int cmd_am(int argc, const char **argv int patch_format = PATCH_FORMAT_UNKNOWN; enum resume_mode resume = RESUME_FALSE; int in_progress; + int ret = 0; const char * const usage[] = { N_("git am [] [( | )...]"), @@@ -2283,12 -2249,6 +2283,12 @@@ OPT_CMDMODE(0, "abort", &resume, N_("restore the original branch and abort the patching operation."), RESUME_ABORT), + OPT_CMDMODE(0, "quit", &resume, + N_("abort the patching operation but keep HEAD where it is."), + RESUME_QUIT), + OPT_CMDMODE(0, "show-current-patch", &resume, + N_("show the patch being applied."), + RESUME_SHOW_PATCH), OPT_BOOL(0, "committer-date-is-author-date", &state.committer_date_is_author_date, N_("lie about committer date")), @@@ -2357,7 -2317,7 +2357,7 @@@ * stray directories. 
*/ if (file_exists(state.dir) && !state.rebasing) { - if (resume == RESUME_ABORT) { + if (resume == RESUME_ABORT || resume == RESUME_QUIT) { am_destroy(&state); am_state_release(&state); return 0; @@@ -2399,18 -2359,11 +2399,18 @@@ case RESUME_ABORT: am_abort(&state); break; + case RESUME_QUIT: + am_rerere_clear(); + am_destroy(&state); + break; + case RESUME_SHOW_PATCH: + ret = show_patch(&state); + break; default: die("BUG: invalid resume value"); } am_state_release(&state); - return 0; + return ret; } diff --combined builtin/fetch.c index 8295f92b3e,a39e9d7b15..dcdfc66f09 --- a/builtin/fetch.c +++ b/builtin/fetch.c @@@ -39,10 -39,6 +39,10 @@@ static int fetch_prune_config = -1; /* static int prune = -1; /* unspecified */ #define PRUNE_BY_DEFAULT 0 /* do we prune by default? */ +static int fetch_prune_tags_config = -1; /* unspecified */ +static int prune_tags = -1; /* unspecified */ +#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */ + static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative; static int progress = -1; static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen; @@@ -70,11 -66,6 +70,11 @@@ static int git_fetch_config(const char return 0; } + if (!strcmp(k, "fetch.prunetags")) { + fetch_prune_tags_config = git_config_bool(k, v); + return 0; + } + if (!strcmp(k, "submodule.recurse")) { int r = git_config_bool(k, v) ? RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF; @@@ -126,7 -117,7 +126,7 @@@ static struct option builtin_fetch_opti N_("append to .git/FETCH_HEAD instead of overwriting")), OPT_STRING(0, "upload-pack", &upload_pack, N_("path"), N_("path to upload pack on remote end")), - OPT__FORCE(&force, N_("force overwrite of local branch")), + OPT__FORCE(&force, N_("force overwrite of local branch"), 0), OPT_BOOL('m', "multiple", &multiple, N_("fetch from multiple remotes")), OPT_SET_INT('t', "tags", &tags, @@@ -137,8 -128,6 +137,8 @@@ N_("number of submodules fetched in parallel")), OPT_BOOL('p', "prune", &prune, N_("prune remote-tracking branches no longer on remote")), + OPT_BOOL('P', "prune-tags", &prune_tags, + N_("prune local tags no longer on remote and clobber changed tags")), { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"), N_("control recursive fetching of submodules"), PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules }, @@@ -637,7 -626,7 +637,7 @@@ static int update_local_ref(struct ref struct branch *current_branch = branch_get(NULL); const char *pretty_ref = prettify_refname(ref->name); - type = sha1_object_info(ref->new_oid.hash, NULL); + type = oid_object_info(&ref->new_oid, NULL); if (type < 0) die(_("object %s not found"), oid_to_hex(&ref->new_oid)); @@@ -708,9 -697,9 +708,9 @@@ if (in_merge_bases(current, updated)) { struct strbuf quickref = STRBUF_INIT; int r; - strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV); + strbuf_add_unique_abbrev(&quickref, ¤t->object.oid, DEFAULT_ABBREV); strbuf_addstr(&quickref, ".."); - strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV); + strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV); if ((recurse_submodules != RECURSE_SUBMODULES_OFF) && (recurse_submodules != RECURSE_SUBMODULES_ON)) check_for_new_submodule_commits(&ref->new_oid); @@@ -723,9 -712,9 +723,9 @@@ } else if (force || ref->force) { struct strbuf quickref = STRBUF_INIT; int r; - strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV); + strbuf_add_unique_abbrev(&quickref, 
¤t->object.oid, DEFAULT_ABBREV); strbuf_addstr(&quickref, "..."); - strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV); + strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV); if ((recurse_submodules != RECURSE_SUBMODULES_OFF) && (recurse_submodules != RECURSE_SUBMODULES_ON)) check_for_new_submodule_commits(&ref->new_oid); @@@ -1231,8 -1220,6 +1231,8 @@@ static void add_options_to_argv(struct argv_array_push(argv, "--dry-run"); if (prune != -1) argv_array_push(argv, prune ? "--prune" : "--no-prune"); + if (prune_tags != -1) + argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags"); if (update_head_ok) argv_array_push(argv, "--update-head-ok"); if (force) @@@ -1336,15 -1323,12 +1336,15 @@@ static inline void fetch_one_setup_part return; } -static int fetch_one(struct remote *remote, int argc, const char **argv) +static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok) { static const char **refs = NULL; struct refspec *refspec; int ref_nr = 0; + int j = 0; int exit_code; + int maybe_prune_tags; + int remote_via_config = remote_is_configured(remote, 0); if (!remote) die(_("No remote repository specified. Please, specify either a URL or a\n" @@@ -1354,39 -1338,18 +1354,39 @@@ if (prune < 0) { /* no command line request */ - if (0 <= gtransport->remote->prune) - prune = gtransport->remote->prune; + if (0 <= remote->prune) + prune = remote->prune; else if (0 <= fetch_prune_config) prune = fetch_prune_config; else prune = PRUNE_BY_DEFAULT; } + if (prune_tags < 0) { + /* no command line request */ + if (0 <= remote->prune_tags) + prune_tags = remote->prune_tags; + else if (0 <= fetch_prune_tags_config) + prune_tags = fetch_prune_tags_config; + else + prune_tags = PRUNE_TAGS_BY_DEFAULT; + } + + maybe_prune_tags = prune_tags_ok && prune_tags; + if (maybe_prune_tags && remote_via_config) + add_prune_tags_to_fetch_refspec(remote); + + if (argc > 0 || (maybe_prune_tags && !remote_via_config)) { + size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1); + refs = xcalloc(nr_alloc, sizeof(const char *)); + if (maybe_prune_tags) { + refs[j++] = xstrdup("refs/tags/*:refs/tags/*"); + ref_nr++; + } + } + if (argc > 0) { - int j = 0; int i; - refs = xcalloc(st_add(argc, 1), sizeof(const char *)); for (i = 0; i < argc; i++) { if (!strcmp(argv[i], "tag")) { i++; @@@ -1396,8 -1359,9 +1396,8 @@@ argv[i], argv[i]); } else refs[j++] = argv[i]; + ref_nr++; } - refs[j] = NULL; - ref_nr = j; } sigchain_push_common(unlock_pack_on_signal); @@@ -1416,7 -1380,6 +1416,7 @@@ int cmd_fetch(int argc, const char **ar struct string_list list = STRING_LIST_INIT_DUP; struct remote *remote = NULL; int result = 0; + int prune_tags_ok = 1; struct argv_array argv_gc_auto = ARGV_ARRAY_INIT; packet_trace_identity("fetch"); @@@ -1483,7 -1446,6 +1483,7 @@@ } else { /* Zero or one remotes */ remote = remote_get(argv[0]); + prune_tags_ok = (argc == 1); argc--; argv++; } @@@ -1492,7 -1454,7 +1492,7 @@@ if (remote) { if (filter_options.choice || repository_format_partial_clone) fetch_one_setup_partial(remote); - result = fetch_one(remote, argc, argv); + result = fetch_one(remote, argc, argv, prune_tags_ok); } else { if (filter_options.choice) die(_("--filter can only be used with the remote configured in core.partialClone")); @@@ -1516,7 -1478,7 +1516,7 @@@ string_list_clear(&list, 0); - close_all_packs(); + close_all_packs(the_repository->objects); argv_array_pushl(&argv_gc_auto, "gc", "--auto", NULL); if (verbosity < 0) diff --combined builtin/fsck.c index 
0922558683,3ef25fab97..13c0a8048e --- a/builtin/fsck.c +++ b/builtin/fsck.c @@@ -1,5 -1,6 +1,6 @@@ #include "builtin.h" #include "cache.h" + #include "repository.h" #include "config.h" #include "commit.h" #include "tree.h" @@@ -16,6 -17,7 +17,7 @@@ #include "streaming.h" #include "decorate.h" #include "packfile.h" + #include "object-store.h" #define REACHABLE 0x0001 #define SEEN 0x0002 @@@ -65,12 -67,12 +67,12 @@@ static const char *printable_type(struc const char *ret; if (obj->type == OBJ_NONE) { - enum object_type type = sha1_object_info(obj->oid.hash, NULL); + enum object_type type = oid_object_info(&obj->oid, NULL); if (type > 0) object_as_type(obj, type, 0); } - ret = typename(obj->type); + ret = type_name(obj->type); if (!ret) ret = "unknown"; @@@ -137,7 -139,7 +139,7 @@@ static int mark_object(struct object *o printf("broken link from %7s %s\n", printable_type(parent), describe_object(parent)); printf("broken link from %7s %s\n", - (type == OBJ_ANY ? "unknown" : typename(type)), "unknown"); + (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown"); errors_found |= ERROR_REACHABLE; return 1; } @@@ -180,13 -182,7 +182,13 @@@ static void mark_object_reachable(struc static int traverse_one_object(struct object *obj) { - return fsck_walk(obj, obj, &fsck_walk_options); + int result = fsck_walk(obj, obj, &fsck_walk_options); + + if (obj->type == OBJ_TREE) { + struct tree *tree = (struct tree *)obj; + free_tree_buffer(tree); + } + return result; } static int traverse_reachable(void) @@@ -513,7 -509,7 +515,7 @@@ static struct object *parse_loose_objec unsigned long size; int eaten; - if (read_loose_object(path, oid->hash, &type, &size, &contents) < 0) + if (read_loose_object(path, oid, &type, &size, &contents) < 0) return NULL; if (!contents && type != OBJ_BLOB) @@@ -719,9 -715,12 +721,12 @@@ int cmd_fsck(int argc, const char **arg for_each_loose_object(mark_loose_for_connectivity, NULL, 0); for_each_packed_object(mark_packed_for_connectivity, NULL, 0); } else { + struct alternate_object_database *alt_odb_list; + fsck_object_dir(get_object_directory()); - prepare_alt_odb(); + prepare_alt_odb(the_repository); + alt_odb_list = the_repository->objects->alt_odb_list; for (alt = alt_odb_list; alt; alt = alt->next) fsck_object_dir(alt->path); @@@ -733,7 -732,8 +738,8 @@@ prepare_packed_git(); if (show_progress) { - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; + p = p->next) { if (open_pack_index(p)) continue; total += p->num_objects; @@@ -741,7 -741,8 +747,8 @@@ progress = start_progress(_("Checking objects"), total); } - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; + p = p->next) { /* verify gives error messages itself */ if (verify_pack(p, fsck_obj_buffer, progress, count)) diff --combined builtin/gc.c index f51e5a6500,b00238cd5d..cef38e5427 --- a/builtin/gc.c +++ b/builtin/gc.c @@@ -11,6 -11,7 +11,7 @@@ */ #include "builtin.h" + #include "repository.h" #include "config.h" #include "tempfile.h" #include "lockfile.h" @@@ -20,6 -21,7 +21,7 @@@ #include "argv-array.h" #include "commit.h" #include "packfile.h" + #include "object-store.h" #define FAILED_RUN "failed to run %s" @@@ -173,7 -175,7 +175,7 @@@ static int too_many_packs(void return 0; prepare_packed_git(); - for (cnt = 0, p = packed_git; p; p = p->next) { + for (cnt = 0, p = get_packed_git(the_repository); p; p = p->next) { if (!p->pack_local) continue; if (p->pack_keep) @@@ -360,11 -362,8 +362,11 @@@ int cmd_gc(int argc, const char **argv N_("prune 
unreferenced objects"), PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire }, OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")), - OPT_BOOL(0, "auto", &auto_gc, N_("enable auto-gc mode")), - OPT_BOOL(0, "force", &force, N_("force running gc even if there may be another gc running")), + OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"), + PARSE_OPT_NOCOMPLETE), + OPT_BOOL_F(0, "force", &force, + N_("force running gc even if there may be another gc running"), + PARSE_OPT_NOCOMPLETE), OPT_END() }; diff --combined builtin/grep.c index 668cb8050a,1e9cdbdf78..5f32d2ce84 --- a/builtin/grep.c +++ b/builtin/grep.c @@@ -22,6 -22,7 +22,7 @@@ #include "pathspec.h" #include "submodule.h" #include "submodule-config.h" + #include "object-store.h" static char const * const grep_usage[] = { N_("git grep [] [-e] [...] [[--] ...]"), @@@ -92,7 -93,8 +93,7 @@@ static pthread_cond_t cond_result static int skip_first_line; -static void add_work(struct grep_opt *opt, enum grep_source_type type, - const char *name, const char *path, const void *id) +static void add_work(struct grep_opt *opt, const struct grep_source *gs) { grep_lock(); @@@ -100,7 -102,7 +101,7 @@@ pthread_cond_wait(&cond_write, &grep_mutex); } - grep_source_init(&todo[todo_end].source, type, name, path, id); + todo[todo_end].source = *gs; if (opt->binary != GREP_BINARY_TEXT) grep_source_load_driver(&todo[todo_end].source); todo[todo_end].done = 0; @@@ -306,7 -308,7 +307,7 @@@ static void *lock_and_read_oid_file(con void *data; grep_read_lock(); - data = read_sha1_file(oid->hash, type, size); + data = read_object_file(oid, type, size); grep_read_unlock(); return data; } @@@ -316,7 -318,6 +317,7 @@@ static int grep_oid(struct grep_opt *op const char *path) { struct strbuf pathbuf = STRBUF_INIT; + struct grep_source gs; if (opt->relative && opt->prefix_length) { quote_path_relative(filename + tree_name_len, opt->prefix, &pathbuf); @@@ -325,22 -326,19 +326,22 @@@ strbuf_addstr(&pathbuf, filename); } + grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); + strbuf_release(&pathbuf); + #ifndef NO_PTHREADS if (num_threads) { - add_work(opt, GREP_SOURCE_OID, pathbuf.buf, path, oid); - strbuf_release(&pathbuf); + /* + * add_work() copies gs and thus assumes ownership of + * its fields, so do not call grep_source_clear() + */ + add_work(opt, &gs); return 0; } else #endif { - struct grep_source gs; int hit; - grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid); - strbuf_release(&pathbuf); hit = grep_source(opt, &gs); grep_source_clear(&gs); @@@ -351,29 -349,25 +352,29 @@@ static int grep_file(struct grep_opt *opt, const char *filename) { struct strbuf buf = STRBUF_INIT; + struct grep_source gs; if (opt->relative && opt->prefix_length) quote_path_relative(filename, opt->prefix, &buf); else strbuf_addstr(&buf, filename); + grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); + strbuf_release(&buf); + #ifndef NO_PTHREADS if (num_threads) { - add_work(opt, GREP_SOURCE_FILE, buf.buf, filename, filename); - strbuf_release(&buf); + /* + * add_work() copies gs and thus assumes ownership of + * its fields, so do not call grep_source_clear() + */ + add_work(opt, &gs); return 0; } else #endif { - struct grep_source gs; int hit; - grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename); - strbuf_release(&buf); hit = grep_source(opt, &gs); grep_source_clear(&gs); @@@ -439,7 -433,7 +440,7 @@@ static int grep_submodule(struct grep_o * object. 
*/ grep_read_lock(); - add_to_alternates_memory(submodule.objectdir); + add_to_alternates_memory(submodule.objects->objectdir); grep_read_unlock(); if (oid) { @@@ -452,7 -446,7 +453,7 @@@ object = parse_object_or_die(oid, oid_to_hex(oid)); grep_read_lock(); - data = read_object_with_reference(object->oid.hash, tree_type, + data = read_object_with_reference(&object->oid, tree_type, &size, NULL); grep_read_unlock(); @@@ -614,7 -608,7 +615,7 @@@ static int grep_object(struct grep_opt int hit, len; grep_read_lock(); - data = read_object_with_reference(obj->oid.hash, tree_type, + data = read_object_with_reference(&obj->oid, tree_type, &size, NULL); grep_read_unlock(); @@@ -634,7 -628,7 +635,7 @@@ free(data); return hit; } - die(_("unable to grep from object of type %s"), typename(obj->type)); + die(_("unable to grep from object of type %s"), type_name(obj->type)); } static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec, @@@ -839,9 -833,8 +840,9 @@@ int cmd_grep(int argc, const char **arg OPT_BOOL('L', "files-without-match", &opt.unmatch_name_only, N_("show only the names of files without match")), - OPT_BOOL('z', "null", &opt.null_following_name, - N_("print NUL after filenames")), + OPT_BOOL_F('z', "null", &opt.null_following_name, + N_("print NUL after filenames"), + PARSE_OPT_NOCOMPLETE), OPT_BOOL('c', "count", &opt.count, N_("show the number of matches instead of matching lines")), OPT__COLOR(&opt.color, N_("highlight matches")), @@@ -892,11 -885,9 +893,11 @@@ OPT_GROUP(""), { OPTION_STRING, 'O', "open-files-in-pager", &show_in_pager, N_("pager"), N_("show matching files in the pager"), - PARSE_OPT_OPTARG, NULL, (intptr_t)default_pager }, - OPT_BOOL(0, "ext-grep", &external_grep_allowed__ignored, - N_("allow calling of grep(1) (ignored by this build)")), + PARSE_OPT_OPTARG | PARSE_OPT_NOCOMPLETE, + NULL, (intptr_t)default_pager }, + OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored, + N_("allow calling of grep(1) (ignored by this build)"), + PARSE_OPT_NOCOMPLETE), OPT_END() }; diff --combined builtin/index-pack.c index 657a5dda06,1d6bc87b76..d81473e722 --- a/builtin/index-pack.c +++ b/builtin/index-pack.c @@@ -13,6 -13,7 +13,7 @@@ #include "streaming.h" #include "thread-utils.h" #include "packfile.h" + #include "object-store.h" static const char index_pack_usage[] = "git index-pack [-v] [-o ] [--keep | --keep=] [--verify] [--strict] ( | --stdin [--fix-thin] [])"; @@@ -49,7 -50,6 +50,7 @@@ struct thread_local int pack_fd; }; +/* Remember to update object flag allocation in object.h */ #define FLAG_LINK (1u<<20) #define FLAG_CHECKED (1u<<21) @@@ -59,7 -59,7 +60,7 @@@ struct ofs_delta_entry }; struct ref_delta_entry { - unsigned char sha1[20]; + struct object_id oid; int obj_no; }; @@@ -92,7 -92,7 +93,7 @@@ static unsigned int input_offset, input static off_t consumed_bytes; static off_t max_input_size; static unsigned deepest_delta; -static git_SHA_CTX input_ctx; +static git_hash_ctx input_ctx; static uint32_t input_crc32; static int input_fd, output_fd; static const char *curr_pack; @@@ -222,14 -222,14 +223,14 @@@ static unsigned check_object(struct obj if (!(obj->flags & FLAG_CHECKED)) { unsigned long size; - int type = sha1_object_info(obj->oid.hash, &size); + int type = oid_object_info(&obj->oid, &size); if (type <= 0) die(_("did not receive expected object %s"), oid_to_hex(&obj->oid)); if (type != obj->type) die(_("object %s: expected type %s, found %s"), oid_to_hex(&obj->oid), - typename(obj->type), typename(type)); + type_name(obj->type), 
type_name(type)); obj->flags |= FLAG_CHECKED; return 1; } @@@ -254,7 -254,7 +255,7 @@@ static void flush(void if (input_offset) { if (output_fd >= 0) write_or_die(output_fd, input_buffer, input_offset); - git_SHA1_Update(&input_ctx, input_buffer, input_offset); + the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset); memmove(input_buffer, input_buffer + input_offset, input_len); input_offset = 0; } @@@ -327,7 -327,7 +328,7 @@@ static const char *open_pack_file(cons output_fd = -1; nothread_data.pack_fd = input_fd; } - git_SHA1_Init(&input_ctx); + the_hash_algo->init_fn(&input_ctx); return pack_name; } @@@ -438,22 -438,22 +439,22 @@@ static int is_delta_type(enum object_ty } static void *unpack_entry_data(off_t offset, unsigned long size, - enum object_type type, unsigned char *sha1) + enum object_type type, struct object_id *oid) { static char fixed_buf[8192]; int status; git_zstream stream; void *buf; - git_SHA_CTX c; + git_hash_ctx c; char hdr[32]; int hdrlen; if (!is_delta_type(type)) { - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1; + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); } else - sha1 = NULL; + oid = NULL; if (type == OBJ_BLOB && size > big_file_threshold) buf = fixed_buf; else @@@ -470,8 -470,8 +471,8 @@@ stream.avail_in = input_len; status = git_inflate(&stream, 0); use(input_len - stream.avail_in); - if (sha1) - git_SHA1_Update(&c, last_out, stream.next_out - last_out); + if (oid) + the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out); if (buf == fixed_buf) { stream.next_out = buf; stream.avail_out = sizeof(fixed_buf); @@@ -480,15 -480,15 +481,15 @@@ if (stream.total_out != size || status != Z_STREAM_END) bad_object(offset, _("inflate returned %d"), status); git_inflate_end(&stream); - if (sha1) - git_SHA1_Final(sha1, &c); + if (oid) + the_hash_algo->final_fn(oid->hash, &c); return buf == fixed_buf ? 
NULL : buf; } static void *unpack_raw_entry(struct object_entry *obj, off_t *ofs_offset, - unsigned char *ref_sha1, - unsigned char *sha1) + struct object_id *ref_oid, + struct object_id *oid) { unsigned char *p; unsigned long size, c; @@@ -516,8 -516,8 +517,8 @@@ switch (obj->type) { case OBJ_REF_DELTA: - hashcpy(ref_sha1, fill(20)); - use(20); + hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz)); + use(the_hash_algo->rawsz); break; case OBJ_OFS_DELTA: p = fill(1); @@@ -547,7 -547,7 +548,7 @@@ } obj->hdr_size = consumed_bytes - obj->idx.offset; - data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1); + data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid); obj->idx.crc32 = input_crc32; return data; } @@@ -672,18 -672,18 +673,18 @@@ static void find_ofs_delta_children(off *last_index = last; } -static int compare_ref_delta_bases(const unsigned char *sha1, - const unsigned char *sha2, +static int compare_ref_delta_bases(const struct object_id *oid1, + const struct object_id *oid2, enum object_type type1, enum object_type type2) { int cmp = type1 - type2; if (cmp) return cmp; - return hashcmp(sha1, sha2); + return oidcmp(oid1, oid2); } -static int find_ref_delta(const unsigned char *sha1, enum object_type type) +static int find_ref_delta(const struct object_id *oid, enum object_type type) { int first = 0, last = nr_ref_deltas; @@@ -692,7 -692,7 +693,7 @@@ struct ref_delta_entry *delta = &ref_deltas[next]; int cmp; - cmp = compare_ref_delta_bases(sha1, delta->sha1, + cmp = compare_ref_delta_bases(oid, &delta->oid, type, objects[delta->obj_no].type); if (!cmp) return next; @@@ -705,11 -705,11 +706,11 @@@ return -first-1; } -static void find_ref_delta_children(const unsigned char *sha1, +static void find_ref_delta_children(const struct object_id *oid, int *first_index, int *last_index, enum object_type type) { - int first = find_ref_delta(sha1, type); + int first = find_ref_delta(oid, type); int last = first; int end = nr_ref_deltas - 1; @@@ -718,9 -718,9 +719,9 @@@ *last_index = -1; return; } - while (first > 0 && !hashcmp(ref_deltas[first - 1].sha1, sha1)) + while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid)) --first; - while (last < end && !hashcmp(ref_deltas[last + 1].sha1, sha1)) + while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid)) ++last; *first_index = first; *last_index = last; @@@ -772,7 -772,7 +773,7 @@@ static int check_collison(struct object memset(&data, 0, sizeof(data)); data.entry = entry; - data.st = open_istream(entry->idx.oid.hash, &type, &size, NULL); + data.st = open_istream(&entry->idx.oid, &type, &size, NULL); if (!data.st) return -1; if (size != entry->size || type != entry->type) @@@ -811,12 -811,12 +812,12 @@@ static void sha1_object(const void *dat enum object_type has_type; unsigned long has_size; read_lock(); - has_type = sha1_object_info(oid->hash, &has_size); + has_type = oid_object_info(oid, &has_size); if (has_type < 0) die(_("cannot read existing object info %s"), oid_to_hex(oid)); if (has_type != type || has_size != size) die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid)); - has_data = read_sha1_file(oid->hash, &has_type, &has_size); + has_data = read_object_file(oid, &has_type, &has_size); read_unlock(); if (!data) data = new_data = get_data_from_pack(obj_entry); @@@ -828,7 -828,7 +829,7 @@@ free(has_data); } - if (strict) { + if (strict || do_fsck_object) { read_lock(); if (type == OBJ_BLOB) { struct blob *blob = lookup_blob(oid); @@@ -850,11 -850,11 +851,11 @@@ obj = parse_object_buffer(oid, type, 
size, buf, &eaten); if (!obj) - die(_("invalid %s"), typename(type)); + die(_("invalid %s"), type_name(type)); if (do_fsck_object && fsck_object(obj, buf, size, &fsck_options)) die(_("Error in object")); - if (fsck_walk(obj, NULL, &fsck_options)) + if (strict && fsck_walk(obj, NULL, &fsck_options)) die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid)); if (obj->type == OBJ_TREE) { @@@ -959,8 -959,9 +960,8 @@@ static void resolve_delta(struct object free(delta_data); if (!result->data) bad_object(delta_obj->idx.offset, _("failed to apply delta")); - hash_sha1_file(result->data, result->size, - typename(delta_obj->real_type), - delta_obj->idx.oid.hash); + hash_object_file(result->data, result->size, + type_name(delta_obj->real_type), &delta_obj->idx.oid); sha1_object(result->data, NULL, result->size, delta_obj->real_type, &delta_obj->idx.oid); counter_lock(); @@@ -992,7 -993,7 +993,7 @@@ static struct base_data *find_unresolve struct base_data *prev_base) { if (base->ref_last == -1 && base->ofs_last == -1) { - find_ref_delta_children(base->obj->idx.oid.hash, + find_ref_delta_children(&base->obj->idx.oid, &base->ref_first, &base->ref_last, OBJ_REF_DELTA); @@@ -1076,7 -1077,7 +1077,7 @@@ static int compare_ref_delta_entry(cons const struct ref_delta_entry *delta_a = a; const struct ref_delta_entry *delta_b = b; - return hashcmp(delta_a->sha1, delta_b->sha1); + return oidcmp(&delta_a->oid, &delta_b->oid); } static void resolve_base(struct object_entry *obj) @@@ -1119,11 -1120,11 +1120,11 @@@ static void *threaded_second_pass(void * - calculate SHA1 of all non-delta objects; * - remember base (SHA1 or offset) for all deltas. */ -static void parse_pack_objects(unsigned char *sha1) +static void parse_pack_objects(unsigned char *hash) { int i, nr_delays = 0; struct ofs_delta_entry *ofs_delta = ofs_deltas; - unsigned char ref_delta_sha1[20]; + struct object_id ref_delta_oid; struct stat st; if (verbose) @@@ -1133,8 -1134,8 +1134,8 @@@ for (i = 0; i < nr_objects; i++) { struct object_entry *obj = &objects[i]; void *data = unpack_raw_entry(obj, &ofs_delta->offset, - ref_delta_sha1, - obj->idx.oid.hash); + &ref_delta_oid, + &obj->idx.oid); obj->real_type = obj->type; if (obj->type == OBJ_OFS_DELTA) { nr_ofs_deltas++; @@@ -1142,7 -1143,7 +1143,7 @@@ ofs_delta++; } else if (obj->type == OBJ_REF_DELTA) { ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc); - hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1); + oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid); ref_deltas[nr_ref_deltas].obj_no = i; nr_ref_deltas++; } else if (!data) { @@@ -1160,10 -1161,10 +1161,10 @@@ /* Check pack integrity */ flush(); - git_SHA1_Final(sha1, &input_ctx); - if (hashcmp(fill(20), sha1)) + the_hash_algo->final_fn(hash, &input_ctx); + if (hashcmp(fill(the_hash_algo->rawsz), hash)) die(_("pack is corrupted (SHA1 mismatch)")); - use(20); + use(the_hash_algo->rawsz); /* If input_fd is a file, we should have reached its end now. 
*/ if (fstat(input_fd, &st)) @@@ -1239,21 -1240,21 +1240,21 @@@ static void resolve_deltas(void /* * Third pass: * - append objects to convert thin pack to full pack if required - * - write the final 20-byte SHA-1 + * - write the final pack hash */ -static void fix_unresolved_deltas(struct sha1file *f); -static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1) +static void fix_unresolved_deltas(struct hashfile *f); +static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash) { if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) { stop_progress(&progress); - /* Flush remaining pack final 20-byte SHA1. */ + /* Flush remaining pack final hash. */ flush(); return; } if (fix_thin_pack) { - struct sha1file *f; - unsigned char read_sha1[20], tail_sha1[20]; + struct hashfile *f; + unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ]; struct strbuf msg = STRBUF_INIT; int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas; int nr_objects_initial = nr_objects; @@@ -1262,7 -1263,7 +1263,7 @@@ REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1); memset(objects + nr_objects + 1, 0, nr_unresolved * sizeof(*objects)); - f = sha1fd(output_fd, curr_pack); + f = hashfd(output_fd, curr_pack); fix_unresolved_deltas(f); strbuf_addf(&msg, Q_("completed with %d local object", "completed with %d local objects", @@@ -1270,12 -1271,12 +1271,12 @@@ nr_objects - nr_objects_initial); stop_progress_msg(&progress, msg.buf); strbuf_release(&msg); - sha1close(f, tail_sha1, 0); - hashcpy(read_sha1, pack_sha1); - fixup_pack_header_footer(output_fd, pack_sha1, + hashclose(f, tail_hash, 0); + hashcpy(read_hash, pack_hash); + fixup_pack_header_footer(output_fd, pack_hash, curr_pack, nr_objects, - read_sha1, consumed_bytes-20); - if (hashcmp(read_sha1, tail_sha1) != 0) + read_hash, consumed_bytes-the_hash_algo->rawsz); + if (hashcmp(read_hash, tail_hash) != 0) die(_("Unexpected tail checksum for %s " "(disk corruption?)"), curr_pack); } @@@ -1286,7 -1287,7 +1287,7 @@@ nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas); } -static int write_compressed(struct sha1file *f, void *in, unsigned int size) +static int write_compressed(struct hashfile *f, void *in, unsigned int size) { git_zstream stream; int status; @@@ -1300,7 -1301,7 +1301,7 @@@ stream.next_out = outbuf; stream.avail_out = sizeof(outbuf); status = git_deflate(&stream, Z_FINISH); - sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out); + hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out); } while (status == Z_OK); if (status != Z_STREAM_END) @@@ -1310,7 -1311,7 +1311,7 @@@ return size; } -static struct object_entry *append_obj_to_pack(struct sha1file *f, +static struct object_entry *append_obj_to_pack(struct hashfile *f, const unsigned char *sha1, void *buf, unsigned long size, enum object_type type) { @@@ -1327,7 -1328,7 +1328,7 @@@ } header[n++] = c; crc32_begin(f); - sha1write(f, header, n); + hashwrite(f, header, n); obj[0].size = size; obj[0].hdr_size = n; obj[0].type = type; @@@ -1335,7 -1336,7 +1336,7 @@@ obj[1].idx.offset = obj[0].idx.offset + n; obj[1].idx.offset += write_compressed(f, buf, size); obj[0].idx.crc32 = crc32_end(f); - sha1flush(f); + hashflush(f); hashcpy(obj->idx.oid.hash, sha1); return obj; } @@@ -1347,7 -1348,7 +1348,7 @@@ static int delta_pos_compare(const voi return a->obj_no - b->obj_no; } -static void fix_unresolved_deltas(struct sha1file *f) +static void fix_unresolved_deltas(struct hashfile *f) { struct ref_delta_entry 
**sorted_by_pos; int i; @@@ -1374,15 -1375,14 +1375,15 @@@ if (objects[d->obj_no].real_type != OBJ_REF_DELTA) continue; - base_obj->data = read_sha1_file(d->sha1, &type, &base_obj->size); + base_obj->data = read_object_file(&d->oid, &type, + &base_obj->size); if (!base_obj->data) continue; - if (check_sha1_signature(d->sha1, base_obj->data, - base_obj->size, typename(type))) - die(_("local object %s is corrupt"), sha1_to_hex(d->sha1)); - base_obj->obj = append_obj_to_pack(f, d->sha1, + if (check_object_signature(&d->oid, base_obj->data, + base_obj->size, type_name(type))) + die(_("local object %s is corrupt"), oid_to_hex(&d->oid)); + base_obj->obj = append_obj_to_pack(f, d->oid.hash, base_obj->data, base_obj->size, type); find_unresolved_deltas(base_obj); display_progress(progress, nr_resolved_deltas); @@@ -1404,7 -1404,7 +1405,7 @@@ static const char *derive_filename(cons } static void write_special_file(const char *suffix, const char *msg, - const char *pack_name, const unsigned char *sha1, + const char *pack_name, const unsigned char *hash, const char **report) { struct strbuf name_buf = STRBUF_INIT; @@@ -1415,7 -1415,7 +1416,7 @@@ if (pack_name) filename = derive_filename(pack_name, suffix, &name_buf); else - filename = odb_pack_name(&name_buf, sha1, suffix); + filename = odb_pack_name(&name_buf, hash, suffix); fd = odb_pack_keep(filename); if (fd < 0) { @@@ -1439,7 -1439,7 +1440,7 @@@ static void final(const char *final_pack_name, const char *curr_pack_name, const char *final_index_name, const char *curr_index_name, const char *keep_msg, const char *promisor_msg, - unsigned char *sha1) + unsigned char *hash) { const char *report = "pack"; struct strbuf pack_name = STRBUF_INIT; @@@ -1456,15 -1456,15 +1457,15 @@@ } if (keep_msg) - write_special_file("keep", keep_msg, final_pack_name, sha1, + write_special_file("keep", keep_msg, final_pack_name, hash, &report); if (promisor_msg) write_special_file("promisor", promisor_msg, final_pack_name, - sha1, NULL); + hash, NULL); if (final_pack_name != curr_pack_name) { if (!final_pack_name) - final_pack_name = odb_pack_name(&pack_name, sha1, "pack"); + final_pack_name = odb_pack_name(&pack_name, hash, "pack"); if (finalize_object_file(curr_pack_name, final_pack_name)) die(_("cannot store pack file")); } else if (from_stdin) @@@ -1472,18 -1472,18 +1473,18 @@@ if (final_index_name != curr_index_name) { if (!final_index_name) - final_index_name = odb_pack_name(&index_name, sha1, "idx"); + final_index_name = odb_pack_name(&index_name, hash, "idx"); if (finalize_object_file(curr_index_name, final_index_name)) die(_("cannot store index file")); } else chmod(final_index_name, 0444); if (!from_stdin) { - printf("%s\n", sha1_to_hex(sha1)); + printf("%s\n", sha1_to_hex(hash)); } else { struct strbuf buf = STRBUF_INIT; - strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1)); + strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash)); write_or_die(1, buf.buf, buf.len); strbuf_release(&buf); @@@ -1617,7 -1617,7 +1618,7 @@@ static void show_pack_info(int stat_onl continue; printf("%s %-6s %lu %lu %"PRIuMAX, oid_to_hex(&obj->idx.oid), - typename(obj->real_type), obj->size, + type_name(obj->real_type), obj->size, (unsigned long)(obj[1].idx.offset - obj->idx.offset), (uintmax_t)obj->idx.offset); if (is_delta_type(obj->type)) { @@@ -1654,7 -1654,7 +1655,7 @@@ int cmd_index_pack(int argc, const cha struct strbuf index_name_buf = STRBUF_INIT; struct pack_idx_entry **idx_objects; struct pack_idx_option opts; - unsigned char pack_sha1[20]; + unsigned char 
pack_hash[GIT_MAX_RAWSZ]; unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */ int report_end_of_input = 0; @@@ -1690,8 -1690,6 +1691,8 @@@ } else if (!strcmp(arg, "--check-self-contained-and-connected")) { strict = 1; check_self_contained_and_connected = 1; + } else if (!strcmp(arg, "--fsck-objects")) { + do_fsck_object = 1; } else if (!strcmp(arg, "--verify")) { verify = 1; } else if (!strcmp(arg, "--verify-stat")) { @@@ -1793,11 -1791,11 +1794,11 @@@ if (show_stat) obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat)); ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry)); - parse_pack_objects(pack_sha1); + parse_pack_objects(pack_hash); if (report_end_of_input) write_in_full(2, "\0", 1); resolve_deltas(); - conclude_pack(fix_thin_pack, curr_pack, pack_sha1); + conclude_pack(fix_thin_pack, curr_pack, pack_hash); free(ofs_deltas); free(ref_deltas); if (strict) @@@ -1809,14 -1807,14 +1810,14 @@@ ALLOC_ARRAY(idx_objects, nr_objects); for (i = 0; i < nr_objects; i++) idx_objects[i] = &objects[i].idx; - curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1); + curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash); free(idx_objects); if (!verify) final(pack_name, curr_pack, index_name, curr_index, keep_msg, promisor_msg, - pack_sha1); + pack_hash); else close(input_fd); free(objects); diff --combined builtin/merge.c index 8746c5e3e8,96d56cbdd2..9db5a2cf16 --- a/builtin/merge.c +++ b/builtin/merge.c @@@ -33,7 -33,6 +33,7 @@@ #include "sequencer.h" #include "string-list.h" #include "packfile.h" +#include "tag.h" #define DEFAULT_TWOHEAD (1<<0) #define DEFAULT_OCTOPUS (1<<1) @@@ -412,7 -411,7 +412,7 @@@ static void finish(struct commit *head_ * We ignore errors in 'gc --auto', since the * user should see them. */ - close_all_packs(); + close_all_packs(the_repository->objects); run_command_v_opt(argv_gc_auto, RUN_GIT_CMD); } } @@@ -521,7 -520,7 +521,7 @@@ static void merge_name(const char *remo if (desc && desc->obj && desc->obj->type == OBJ_TAG) { strbuf_addf(msg, "%s\t\t%s '%s'\n", oid_to_hex(&desc->obj->oid), - typename(desc->obj->type), + type_name(desc->obj->type), remote); goto cleanup; } @@@ -639,7 -638,7 +639,7 @@@ static int read_tree_trivial(struct obj static void write_tree_trivial(struct object_id *oid) { - if (write_cache_as_tree(oid->hash, 0, NULL)) + if (write_cache_as_tree(oid, 0, NULL)) die(_("git write-tree failed to write a tree")); } @@@ -652,9 -651,10 +652,9 @@@ static int try_merge_strategy(const cha hold_locked_index(&lock, LOCK_DIE_ON_ERROR); refresh_cache(REFRESH_QUIET); - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) return error(_("Unable to write index.")); - rollback_lock_file(&lock); if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree")) { int clean, x; @@@ -691,9 -691,10 +691,9 @@@ remoteheads->item, reversed, &result); if (clean < 0) exit(128); - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) die (_("unable to write %s"), get_index_file()); - rollback_lock_file(&lock); return clean ? 
0 : 1; } else { return try_merge_command(strategy, xopts_nr, xopts, @@@ -809,17 -810,18 +809,17 @@@ static int merge_trivial(struct commit hold_locked_index(&lock, LOCK_DIE_ON_ERROR); refresh_cache(REFRESH_QUIET); - if (active_cache_changed && - write_locked_index(&the_index, &lock, COMMIT_LOCK)) + if (write_locked_index(&the_index, &lock, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) return error(_("Unable to write index.")); - rollback_lock_file(&lock); write_tree_trivial(&result_tree); printf(_("Wonderful.\n")); pptr = commit_list_append(head, pptr); pptr = commit_list_append(remoteheads->item, pptr); prepare_to_commit(remoteheads); - if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents, - result_commit.hash, NULL, sign_commit)) + if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents, + &result_commit, NULL, sign_commit)) die(_("failed to write commit object")); finish(head, remoteheads, &result_commit, "In-index merge"); drop_save(); @@@ -843,8 -845,8 +843,8 @@@ static int finish_automerge(struct comm commit_list_insert(head, &parents); strbuf_addch(&merge_msg, '\n'); prepare_to_commit(remoteheads); - if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents, - result_commit.hash, NULL, sign_commit)) + if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents, + &result_commit, NULL, sign_commit)) die(_("failed to write commit object")); strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy); finish(head, remoteheads, &result_commit, buf.buf); @@@ -1123,43 -1125,6 +1123,43 @@@ static struct commit_list *collect_pare return remoteheads; } +static int merging_a_throwaway_tag(struct commit *commit) +{ + char *tag_ref; + struct object_id oid; + int is_throwaway_tag = 0; + + /* Are we merging a tag? */ + if (!merge_remote_util(commit) || + !merge_remote_util(commit)->obj || + merge_remote_util(commit)->obj->type != OBJ_TAG) + return is_throwaway_tag; + + /* + * Now we know we are merging a tag object. Are we downstream + * and following the tags from upstream? If so, we must have + * the tag object pointed at by "refs/tags/$T" where $T is the + * tagname recorded in the tag object. We want to allow such + * a "just to catch up" merge to fast-forward. + * + * Otherwise, we are playing an integrator's role, making a + * merge with a throw-away tag from a contributor with + * something like "git pull $contributor $signed_tag". + * We want to forbid such a merge from fast-forwarding + * by default; otherwise we would not keep the signature + * anywhere. 
+ */ + tag_ref = xstrfmt("refs/tags/%s", + ((struct tag *)merge_remote_util(commit)->obj)->tag); + if (!read_ref(tag_ref, &oid) && + !oidcmp(&oid, &merge_remote_util(commit)->obj->oid)) + is_throwaway_tag = 0; + else + is_throwaway_tag = 1; + free(tag_ref); + return is_throwaway_tag; +} + int cmd_merge(int argc, const char **argv, const char *prefix) { struct object_id result_tree, stash, head_oid; @@@ -1324,7 -1289,7 +1324,7 @@@ check_commit_signature(commit, &signature_check); - find_unique_abbrev_r(hex, commit->object.oid.hash, DEFAULT_ABBREV); + find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV); switch (signature_check.result) { case 'G': break; @@@ -1357,7 -1322,10 +1357,7 @@@ oid_to_hex(&commit->object.oid)); setenv(buf.buf, merge_remote_util(commit)->name, 1); strbuf_reset(&buf); - if (fast_forward != FF_ONLY && - merge_remote_util(commit) && - merge_remote_util(commit)->obj && - merge_remote_util(commit)->obj->type == OBJ_TAG) + if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit)) fast_forward = FF_NO; } @@@ -1417,9 -1385,9 +1417,9 @@@ if (verbosity >= 0) { printf(_("Updating %s..%s\n"), - find_unique_abbrev(head_commit->object.oid.hash, + find_unique_abbrev(&head_commit->object.oid, DEFAULT_ABBREV), - find_unique_abbrev(remoteheads->item->object.oid.hash, + find_unique_abbrev(&remoteheads->item->object.oid, DEFAULT_ABBREV)); } strbuf_addstr(&msg, "Fast-forward"); diff --combined builtin/pack-objects.c index e7e673266e,223f2d9fc0..89f49bb5f6 --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@@ -1,5 -1,6 +1,6 @@@ #include "builtin.h" #include "cache.h" + #include "repository.h" #include "config.h" #include "attr.h" #include "object.h" @@@ -28,6 -29,7 +29,7 @@@ #include "argv-array.h" #include "list.h" #include "packfile.h" + #include "object-store.h" static const char *pack_usage[] = { N_("git pack-objects --stdout [...] [< | < ]"), @@@ -122,10 -124,11 +124,10 @@@ static void *get_delta(struct object_en void *buf, *base_buf, *delta_buf; enum object_type type; - buf = read_sha1_file(entry->idx.oid.hash, &type, &size); + buf = read_object_file(&entry->idx.oid, &type, &size); if (!buf) die("unable to read %s", oid_to_hex(&entry->idx.oid)); - base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type, - &base_size); + base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size); if (!base_buf) die("unable to read %s", oid_to_hex(&entry->delta->idx.oid)); @@@ -163,7 -166,7 +165,7 @@@ static unsigned long do_compress(void * return stream.total_out; } -static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f, +static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, const struct object_id *oid) { git_zstream stream; @@@ -187,7 -190,7 +189,7 @@@ stream.next_out = obuf; stream.avail_out = sizeof(obuf); zret = git_deflate(&stream, readlen ? 0 : Z_FINISH); - sha1write(f, obuf, stream.next_out - obuf); + hashwrite(f, obuf, stream.next_out - obuf); olen += stream.next_out - obuf; } if (stream.avail_in) @@@ -232,7 -235,7 +234,7 @@@ static int check_pack_inflate(struct pa stream.total_in == len) ? 
0 : -1; } -static void copy_pack_data(struct sha1file *f, +static void copy_pack_data(struct hashfile *f, struct packed_git *p, struct pack_window **w_curs, off_t offset, @@@ -245,14 -248,14 +247,14 @@@ in = use_pack(p, w_curs, offset, &avail); if (avail > len) avail = (unsigned long)len; - sha1write(f, in, avail); + hashwrite(f, in, avail); offset += avail; len -= avail; } } /* Return 0 if we will bust the pack-size limit */ -static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry, +static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { unsigned long size, datalen; @@@ -266,10 -269,11 +268,10 @@@ if (!usable_delta) { if (entry->type == OBJ_BLOB && entry->size > big_file_threshold && - (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL) + (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) buf = NULL; else { - buf = read_sha1_file(entry->idx.oid.hash, &type, - &size); + buf = read_object_file(&entry->idx.oid, &type, &size); if (!buf) die(_("unable to read %s"), oid_to_hex(&entry->idx.oid)); @@@ -324,8 -328,8 +326,8 @@@ free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; } else if (type == OBJ_REF_DELTA) { /* @@@ -338,8 -342,8 +340,8 @@@ free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); + hashwrite(f, header, hdrlen); + hashwrite(f, entry->delta->idx.oid.hash, 20); hdrlen += 20; } else { if (limit && hdrlen + datalen + 20 >= limit) { @@@ -348,13 -352,13 +350,13 @@@ free(buf); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } if (st) { datalen = write_large_blob_data(st, f, &entry->idx.oid); close_istream(st); } else { - sha1write(f, buf, datalen); + hashwrite(f, buf, datalen); free(buf); } @@@ -362,7 -366,7 +364,7 @@@ } /* Return 0 if we will bust the pack-size limit */ -static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, +static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { struct packed_git *p = entry->in_pack; @@@ -413,8 -417,8 +415,8 @@@ unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; reused_delta++; } else if (type == OBJ_REF_DELTA) { @@@ -422,8 -426,8 +424,8 @@@ unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); + hashwrite(f, header, hdrlen); + hashwrite(f, entry->delta->idx.oid.hash, 20); hdrlen += 20; reused_delta++; } else { @@@ -431,7 -435,7 +433,7 @@@ unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } copy_pack_data(f, p, &w_curs, offset, datalen); unuse_pack(&w_curs); @@@ -440,7 -444,7 +442,7 @@@ } /* Return 0 if we will bust the pack-size limit */ -static off_t write_object(struct sha1file *f, +static off_t write_object(struct hashfile *f, struct object_entry *entry, off_t write_offset) { @@@ -513,7 -517,7 +515,7 @@@ enum write_one_status WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */ }; -static enum write_one_status write_one(struct sha1file *f, +static enum write_one_status 
write_one(struct hashfile *f, struct object_entry *e, off_t *offset) { @@@ -732,7 -736,7 +734,7 @@@ static struct object_entry **compute_wr return wo; } -static off_t write_reused_pack(struct sha1file *f) +static off_t write_reused_pack(struct hashfile *f) { unsigned char buffer[8192]; off_t to_write, total; @@@ -763,7 -767,7 +765,7 @@@ if (read_pack > to_write) read_pack = to_write; - sha1write(f, buffer, read_pack); + hashwrite(f, buffer, read_pack); to_write -= read_pack; /* @@@ -792,7 -796,7 +794,7 @@@ static const char no_split_warning[] = static void write_pack_file(void) { uint32_t i = 0, j; - struct sha1file *f; + struct hashfile *f; off_t offset; uint32_t nr_remaining = nr_result; time_t last_mtime = 0; @@@ -808,7 -812,7 +810,7 @@@ char *pack_tmp_name = NULL; if (pack_to_stdout) - f = sha1fd_throughput(1, "", progress_state); + f = hashfd_throughput(1, "", progress_state); else f = create_tmp_packfile(&pack_tmp_name); @@@ -835,11 -839,11 +837,11 @@@ * If so, rewrite it like in fast-import */ if (pack_to_stdout) { - sha1close(f, oid.hash, CSUM_CLOSE); + hashclose(f, oid.hash, CSUM_CLOSE); } else if (nr_written == nr_remaining) { - sha1close(f, oid.hash, CSUM_FSYNC); + hashclose(f, oid.hash, CSUM_FSYNC); } else { - int fd = sha1close(f, oid.hash, 0); + int fd = hashclose(f, oid.hash, 0); fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, nr_written, oid.hash, offset); close(fd); @@@ -1023,8 -1027,7 +1025,7 @@@ static int want_object_in_pack(const st if (want != -1) return want; } - - list_for_each(pos, &packed_git_mru) { + list_for_each(pos, get_packed_git_mru(the_repository)) { struct packed_git *p = list_entry(pos, struct packed_git, mru); off_t offset; @@@ -1042,7 -1045,8 +1043,8 @@@ } want = want_found_object(exclude, p); if (!exclude && want > 0) - list_move(&p->mru, &packed_git_mru); + list_move(&p->mru, + get_packed_git_mru(the_repository)); if (want != -1) return want; } @@@ -1188,7 -1192,7 +1190,7 @@@ static struct pbase_tree_cache *pbase_t /* Did not find one. Either we got a bogus request or * we need to read and perhaps cache. */ - data = read_sha1_file(oid->hash, &type, &size); + data = read_object_file(oid, &type, &size); if (!data) return NULL; if (type != OBJ_TREE) { @@@ -1349,7 -1353,7 +1351,7 @@@ static void add_preferred_base(struct o if (window <= num_preferred_base++) return; - data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash); + data = read_object_with_reference(oid, tree_type, &size, &tree_oid); if (!data) return; @@@ -1377,10 -1381,10 +1379,10 @@@ static void cleanup_preferred_base(void it = pbase_tree; pbase_tree = NULL; while (it) { - struct pbase_tree *this = it; - it = this->next; - free(this->pcache.tree_data); - free(this); + struct pbase_tree *tmp = it; + it = tmp->next; + free(tmp->pcache.tree_data); + free(tmp); } for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) { @@@ -1514,7 -1518,7 +1516,7 @@@ static void check_object(struct object_ unuse_pack(&w_curs); } - entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size); + entry->type = oid_object_info(&entry->idx.oid, &entry->size); /* * The error condition is checked in prepare_pack(). This is * to permit a missing preferred base object to be ignored @@@ -1576,7 -1580,8 +1578,7 @@@ static void drop_reused_delta(struct ob * And if that fails, the error will be recorded in entry->type * and dealt with in prepare_pack(). 
*/ - entry->type = sha1_object_info(entry->idx.oid.hash, - &entry->size); + entry->type = oid_object_info(&entry->idx.oid, &entry->size); } } @@@ -1868,7 -1873,8 +1870,7 @@@ static int try_delta(struct unpacked *t /* Load data if not already done */ if (!trg->data) { read_lock(); - trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type, - &sz); + trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz); read_unlock(); if (!trg->data) die("object %s cannot be read", @@@ -1881,7 -1887,8 +1883,7 @@@ } if (!src->data) { read_lock(); - src->data = read_sha1_file(src_entry->idx.oid.hash, &type, - &sz); + src->data = read_object_file(&src_entry->idx.oid, &type, &sz); read_unlock(); if (!src->data) { if (src_entry->preferred_base) { @@@ -2544,7 -2551,6 +2546,7 @@@ static void read_object_list_from_stdin } } +/* Remember to update object flag allocation in object.h */ #define OBJECT_ADDED (1u<<20) static void show_commit(struct commit *commit, void *data) @@@ -2669,7 -2675,7 +2671,7 @@@ static void add_objects_in_unpacked_pac memset(&in_pack, 0, sizeof(in_pack)); - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; p = p->next) { struct object_id oid; struct object *o; @@@ -2704,7 -2710,7 +2706,7 @@@ static int add_loose_object(const struct object_id *oid, const char *path, void *data) { - enum object_type type = sha1_object_info(oid->hash, NULL); + enum object_type type = oid_object_info(oid, NULL); if (type < 0) { warning("loose object at %s could not be examined", path); @@@ -2732,7 -2738,8 +2734,8 @@@ static int has_sha1_pack_kept_or_nonloc static struct packed_git *last_found = (void *)1; struct packed_git *p; - p = (last_found != (void *)1) ? last_found : packed_git; + p = (last_found != (void *)1) ? last_found : + get_packed_git(the_repository); while (p) { if ((!p->pack_local || p->pack_keep) && @@@ -2741,7 -2748,7 +2744,7 @@@ return 1; } if (p == last_found) - p = packed_git; + p = get_packed_git(the_repository); else p = p->next; if (p == last_found) @@@ -2777,7 -2784,7 +2780,7 @@@ static void loosen_unused_packed_object uint32_t i; struct object_id oid; - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; p = p->next) { if (!p->pack_local || p->pack_keep) continue; @@@ -2789,7 -2796,7 +2792,7 @@@ if (!packlist_find(&to_pack, oid.hash, NULL) && !has_sha1_pack_kept_or_nonlocal(&oid) && !loosened_object_can_be_discarded(&oid, p->mtime)) - if (force_object_loose(oid.hash, p->mtime)) + if (force_object_loose(&oid, p->mtime)) die("unable to force loose object"); } } @@@ -3148,7 -3155,7 +3151,7 @@@ int cmd_pack_objects(int argc, const ch prepare_packed_git(); if (ignore_packed_keep) { struct packed_git *p; - for (p = packed_git; p; p = p->next) + for (p = get_packed_git(the_repository); p; p = p->next) if (p->pack_local && p->pack_keep) break; if (!p) /* no keep-able packs found */ @@@ -3161,7 -3168,7 +3164,7 @@@ * also covers non-local objects */ struct packed_git *p; - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; p = p->next) { if (!p->pack_local) { have_non_local_packs = 1; break; diff --combined builtin/pack-redundant.c index 991e1bb76f,b5b007e706..f060b941b5 --- a/builtin/pack-redundant.c +++ b/builtin/pack-redundant.c @@@ -7,7 -7,9 +7,9 @@@ */ #include "builtin.h" + #include "repository.h" #include "packfile.h" + #include "object-store.h" #define BLKSIZE 512 @@@ -48,17 -50,17 +50,17 @@@ static inline void llist_item_put(struc static inline struct llist_item 
*llist_item_get(void) { - struct llist_item *new; + struct llist_item *new_item; if ( free_nodes ) { - new = free_nodes; + new_item = free_nodes; free_nodes = free_nodes->next; } else { int i = 1; - ALLOC_ARRAY(new, BLKSIZE); + ALLOC_ARRAY(new_item, BLKSIZE); for (; i < BLKSIZE; i++) - llist_item_put(&new[i]); + llist_item_put(&new_item[i]); } - return new; + return new_item; } static void llist_free(struct llist *list) @@@ -80,26 -82,26 +82,26 @@@ static inline void llist_init(struct ll static struct llist * llist_copy(struct llist *list) { struct llist *ret; - struct llist_item *new, *old, *prev; + struct llist_item *new_item, *old_item, *prev; llist_init(&ret); if ((ret->size = list->size) == 0) return ret; - new = ret->front = llist_item_get(); - new->sha1 = list->front->sha1; + new_item = ret->front = llist_item_get(); + new_item->sha1 = list->front->sha1; - old = list->front->next; - while (old) { - prev = new; - new = llist_item_get(); - prev->next = new; - new->sha1 = old->sha1; - old = old->next; + old_item = list->front->next; + while (old_item) { + prev = new_item; + new_item = llist_item_get(); + prev->next = new_item; + new_item->sha1 = old_item->sha1; + old_item = old_item->next; } - new->next = NULL; - ret->back = new; + new_item->next = NULL; + ret->back = new_item; return ret; } @@@ -108,24 -110,24 +110,24 @@@ static inline struct llist_item *llist_ struct llist_item *after, const unsigned char *sha1) { - struct llist_item *new = llist_item_get(); - new->sha1 = sha1; - new->next = NULL; + struct llist_item *new_item = llist_item_get(); + new_item->sha1 = sha1; + new_item->next = NULL; if (after != NULL) { - new->next = after->next; - after->next = new; + new_item->next = after->next; + after->next = new_item; if (after == list->back) - list->back = new; + list->back = new_item; } else {/* insert in front */ if (list->size == 0) - list->back = new; + list->back = new_item; else - new->next = list->front; - list->front = new; + new_item->next = list->front; + list->front = new_item; } list->size++; - return new; + return new_item; } static inline struct llist_item *llist_insert_back(struct llist *list, @@@ -571,7 -573,7 +573,7 @@@ static struct pack_list * add_pack(stru static struct pack_list * add_pack_file(const char *filename) { - struct packed_git *p = packed_git; + struct packed_git *p = get_packed_git(the_repository); if (strlen(filename) < 40) die("Bad pack filename: %s", filename); @@@ -586,7 -588,7 +588,7 @@@ static void load_all(void) { - struct packed_git *p = packed_git; + struct packed_git *p = get_packed_git(the_repository); while (p) { add_pack(p); diff --combined builtin/receive-pack.c index 2bf7f2d1a3,1a298a6711..19428ef97d --- a/builtin/receive-pack.c +++ b/builtin/receive-pack.c @@@ -69,7 -69,7 +69,7 @@@ static int sent_capabilities static int shallow_update; static const char *alt_shallow_file; static struct strbuf push_cert = STRBUF_INIT; -static unsigned char push_cert_sha1[20]; +static struct object_id push_cert_oid; static struct signature_check sigcheck; static const char *push_cert_nonce; static const char *cert_nonce_seed; @@@ -633,9 -633,8 +633,9 @@@ static void prepare_push_cert_sha1(stru int bogs /* beginning_of_gpg_sig */; already_done = 1; - if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1)) - hashclr(push_cert_sha1); + if (write_object_file(push_cert.buf, push_cert.len, "blob", + &push_cert_oid)) + oidclr(&push_cert_oid); memset(&sigcheck, '\0', sizeof(sigcheck)); sigcheck.result = 'N'; @@@ -656,9 -655,9 +656,9 @@@ 
strbuf_release(&gpg_status); nonce_status = check_nonce(push_cert.buf, bogs); } - if (!is_null_sha1(push_cert_sha1)) { + if (!is_null_oid(&push_cert_oid)) { argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s", - sha1_to_hex(push_cert_sha1)); + oid_to_hex(&push_cert_oid)); argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s", sigcheck.signer ? sigcheck.signer : ""); argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s", @@@ -1242,11 -1241,11 +1242,11 @@@ static void check_aliased_update(struc rp_error("refusing inconsistent update between symref '%s' (%s..%s) and" " its target '%s' (%s..%s)", cmd->ref_name, - find_unique_abbrev(cmd->old_oid.hash, DEFAULT_ABBREV), - find_unique_abbrev(cmd->new_oid.hash, DEFAULT_ABBREV), + find_unique_abbrev(&cmd->old_oid, DEFAULT_ABBREV), + find_unique_abbrev(&cmd->new_oid, DEFAULT_ABBREV), dst_cmd->ref_name, - find_unique_abbrev(dst_cmd->old_oid.hash, DEFAULT_ABBREV), - find_unique_abbrev(dst_cmd->new_oid.hash, DEFAULT_ABBREV)); + find_unique_abbrev(&dst_cmd->old_oid, DEFAULT_ABBREV), + find_unique_abbrev(&dst_cmd->new_oid, DEFAULT_ABBREV)); cmd->error_string = dst_cmd->error_string = "inconsistent aliased update"; @@@ -2027,7 -2026,7 +2027,7 @@@ int cmd_receive_pack(int argc, const ch proc.git_cmd = 1; proc.argv = argv_gc_auto; - close_all_packs(); + close_all_packs(the_repository->objects); if (!start_command(&proc)) { if (use_sideband) copy_to_sideband(proc.err, -1, NULL); diff --combined builtin/submodule--helper.c index 6ba8587b6d,6d8e002be7..a2327c98b0 --- a/builtin/submodule--helper.c +++ b/builtin/submodule--helper.c @@@ -16,6 -16,7 +16,7 @@@ #include "revision.h" #include "diffcore.h" #include "diff.h" + #include "object-store.h" #define OPT_QUIET (1 << 0) #define OPT_CACHED (1 << 1) @@@ -1019,7 -1020,7 +1020,7 @@@ static int module_deinit(int argc, cons struct option module_deinit_options[] = { OPT__QUIET(&quiet, N_("Suppress submodule status output")), - OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes")), + OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes"), 0), OPT_BOOL(0, "all", &all, N_("Unregister all submodules")), OPT_END() }; @@@ -1042,7 -1043,7 +1043,7 @@@ die(_("Use '--all' if you really want to deinitialize all submodules")); if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0) - BUG("module_list_compute should not choke on empty pathspec"); + return 1; info.prefix = prefix; if (quiet) diff --combined cache.h index 6e45c1b537,720664e394..bbaf5c349a --- a/cache.h +++ b/cache.h @@@ -16,6 -16,31 +16,6 @@@ #include "sha1-array.h" #include "repository.h" -#ifndef platform_SHA_CTX -/* - * platform's underlying implementation of SHA-1; could be OpenSSL, - * blk_SHA, Apple CommonCrypto, etc... Note that including - * SHA1_HEADER may have already defined platform_SHA_CTX for our - * own implementations like block-sha1 and ppc-sha1, so we list - * the default for OpenSSL compatible SHA-1 implementations here. 
- */ -#define platform_SHA_CTX SHA_CTX -#define platform_SHA1_Init SHA1_Init -#define platform_SHA1_Update SHA1_Update -#define platform_SHA1_Final SHA1_Final -#endif - -#define git_SHA_CTX platform_SHA_CTX -#define git_SHA1_Init platform_SHA1_Init -#define git_SHA1_Update platform_SHA1_Update -#define git_SHA1_Final platform_SHA1_Final - -#ifdef SHA1_MAX_BLOCK_SIZE -#include "compat/sha1-chunked.h" -#undef git_SHA1_Update -#define git_SHA1_Update git_SHA1_Update_Chunked -#endif - #include typedef struct git_zstream { z_stream z; @@@ -599,7 -624,6 +599,7 @@@ extern int read_index_unmerged(struct i /* For use with `write_locked_index()`. */ #define COMMIT_LOCK (1 << 0) +#define SKIP_IF_UNCHANGED (1 << 1) /* * Write the index while holding an already-taken lock. Close the lock, @@@ -616,9 -640,6 +616,9 @@@ * With `COMMIT_LOCK`, the lock is always committed or rolled back. * Without it, the lock is closed, but neither committed nor rolled * back. + * + * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing + * is written (and the lock is rolled back if `COMMIT_LOCK` is given). */ extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags); @@@ -940,12 -961,6 +940,6 @@@ extern void check_repository_format(voi #define DATA_CHANGED 0x0020 #define TYPE_CHANGED 0x0040 - /* - * Put in `buf` the name of the file in the local object database that - * would be used to store a loose object with the specified sha1. - */ - extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1); - /* * Return an abbreviated sha1 unique within this repository's object database. * The result will be at least `len` characters long, and will be NUL @@@ -955,14 -970,14 +949,14 @@@ * more calls to find_unique_abbrev are made. * * The `_r` variant writes to a buffer supplied by the caller, which must be at - * least `GIT_SHA1_HEXSZ + 1` bytes. The return value is the number of bytes + * least `GIT_MAX_HEXSZ + 1` bytes. The return value is the number of bytes * written (excluding the NUL terminator). * * Note that while this version avoids the static buffer, it is not fully * reentrant, as it calls into other non-reentrant git code. 
*/ -extern const char *find_unique_abbrev(const unsigned char *sha1, int len); -extern int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len); +extern const char *find_unique_abbrev(const struct object_id *oid, int len); +extern int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len); extern const unsigned char null_sha1[GIT_MAX_RAWSZ]; extern const struct object_id null_oid; @@@ -1011,7 -1026,7 +1005,7 @@@ static inline void hashclr(unsigned cha static inline void oidclr(struct object_id *oid) { - hashclr(oid->hash); + memset(oid->hash, 0, GIT_MAX_RAWSZ); } @@@ -1029,6 -1044,8 +1023,6 @@@ extern const struct object_id empty_tre "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \ "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91" extern const struct object_id empty_blob_oid; -#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash) - static inline int is_empty_blob_sha1(const unsigned char *sha1) { @@@ -1189,19 -1206,19 +1183,19 @@@ extern char *xdg_config_home(const cha */ extern char *xdg_cache_home(const char *filename); -extern void *read_sha1_file_extended(const unsigned char *sha1, - enum object_type *type, - unsigned long *size, int lookup_replace); -static inline void *read_sha1_file(const unsigned char *sha1, enum object_type *type, unsigned long *size) +extern void *read_object_file_extended(const struct object_id *oid, + enum object_type *type, + unsigned long *size, int lookup_replace); +static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size) { - return read_sha1_file_extended(sha1, type, size, 1); + return read_object_file_extended(oid, type, size, 1); } /* * This internal function is only declared here for the benefit of * lookup_replace_object(). Please do not call it directly. */ -extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1); +extern const struct object_id *do_lookup_replace_object(const struct object_id *oid); /* * If object sha1 should be replaced, return the replacement object's @@@ -1209,50 -1226,38 +1203,49 @@@ * either sha1 or a pointer to a permanently-allocated value. When * object replacement is suppressed, always return sha1. 
*/ -static inline const unsigned char *lookup_replace_object(const unsigned char *sha1) +static inline const struct object_id *lookup_replace_object(const struct object_id *oid) { if (!check_replace_refs) - return sha1; - return do_lookup_replace_object(sha1); + return oid; + return do_lookup_replace_object(oid); } -/* Read and unpack a sha1 file into memory, write memory to a sha1 file */ -extern int sha1_object_info(const unsigned char *, unsigned long *); -extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1); -extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1); -extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags); -extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *); -extern int force_object_loose(const unsigned char *sha1, time_t mtime); +/* Read and unpack an object file into memory, write memory to an object file */ +extern int oid_object_info(const struct object_id *, unsigned long *); + +extern int hash_object_file(const void *buf, unsigned long len, + const char *type, struct object_id *oid); + +extern int write_object_file(const void *buf, unsigned long len, + const char *type, struct object_id *oid); + +extern int hash_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags); + +extern int pretend_object_file(void *, unsigned long, enum object_type, + struct object_id *oid); + +extern int force_object_loose(const struct object_id *oid, time_t mtime); + extern int git_open_cloexec(const char *name, int flags); #define git_open(name) git_open_cloexec(name, O_RDONLY) - extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size); extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz); extern int parse_sha1_header(const char *hdr, unsigned long *sizep); -extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type); +extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type); extern int finalize_object_file(const char *tmpfile, const char *filename); /* - * Open the loose object at path, check its sha1, and return the contents, + * Open the loose object at path, check its hash, and return the contents, * type, and size. If the object is a blob, then "contents" may return NULL, * to allow streaming of large blobs. * * Returns 0 on success, negative on error (details may be written to stderr). 
*/ int read_loose_object(const char *path, - const unsigned char *expected_sha1, + const struct object_id *expected_oid, enum object_type *type, unsigned long *size, void **contents); @@@ -1279,7 -1284,7 +1272,7 @@@ extern int has_object_file_with_flags(c */ extern int has_loose_object_nonlocal(const unsigned char *sha1); -extern void assert_sha1_type(const unsigned char *sha1, enum object_type expect); +extern void assert_oid_type(const struct object_id *oid, enum object_type expect); /* Helper to check and "touch" a file */ extern int check_and_freshen_file(const char *fn, int freshen); @@@ -1435,10 -1440,10 +1428,10 @@@ extern int df_name_compare(const char * extern int name_compare(const char *name1, size_t len1, const char *name2, size_t len2); extern int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2); -extern void *read_object_with_reference(const unsigned char *sha1, +extern void *read_object_with_reference(const struct object_id *oid, const char *required_type, unsigned long *size, - unsigned char *sha1_ret); + struct object_id *oid_ret); extern struct object *peel_to_type(const char *name, int namelen, struct object *o, enum object_type); @@@ -1564,57 -1569,6 +1557,6 @@@ extern int has_dirs_only_path(const cha extern void schedule_dir_for_removal(const char *name, int len); extern void remove_scheduled_dirs(void); - extern struct alternate_object_database { - struct alternate_object_database *next; - - /* see alt_scratch_buf() */ - struct strbuf scratch; - size_t base_len; - - /* - * Used to store the results of readdir(3) calls when searching - * for unique abbreviated hashes. This cache is never - * invalidated, thus it's racy and not necessarily accurate. - * That's fine for its purpose; don't use it for tasks requiring - * greater accuracy! - */ - char loose_objects_subdir_seen[256]; - struct oid_array loose_objects_cache; - - char path[FLEX_ARRAY]; - } *alt_odb_list; - extern void prepare_alt_odb(void); - extern char *compute_alternate_path(const char *path, struct strbuf *err); - typedef int alt_odb_fn(struct alternate_object_database *, void *); - extern int foreach_alt_odb(alt_odb_fn, void*); - - /* - * Allocate a "struct alternate_object_database" but do _not_ actually - * add it to the list of alternates. - */ - struct alternate_object_database *alloc_alt_odb(const char *dir); - - /* - * Add the directory to the on-disk alternates file; the new entry will also - * take effect in the current process. - */ - extern void add_to_alternates_file(const char *dir); - - /* - * Add the directory to the in-memory list of alternates (along with any - * recursive alternates it points to), but do not modify the on-disk alternates - * file. - */ - extern void add_to_alternates_memory(const char *dir); - - /* - * Returns a scratch strbuf pre-filled with the alternate object directory, - * including a trailing slash, which can be used to access paths in the - * alternate. Always use this over direct access to alt->scratch, as it - * cleans up any previous use of the scratch buffer. 
- */ - extern struct strbuf *alt_scratch_buf(struct alternate_object_database *alt); - struct pack_window { struct pack_window *next; unsigned char *base; @@@ -1624,35 -1578,6 +1566,6 @@@ unsigned int inuse_cnt; }; - extern struct packed_git { - struct packed_git *next; - struct list_head mru; - struct pack_window *windows; - off_t pack_size; - const void *index_data; - size_t index_size; - uint32_t num_objects; - uint32_t num_bad_objects; - unsigned char *bad_object_sha1; - int index_version; - time_t mtime; - int pack_fd; - unsigned pack_local:1, - pack_keep:1, - freshened:1, - do_not_close:1, - pack_promisor:1; - unsigned char sha1[20]; - struct revindex_entry *revindex; - /* something like ".git/objects/pack/xxxxx.pack" */ - char pack_name[FLEX_ARRAY]; /* more */ - } *packed_git; - - /* - * A most-recently-used ordered version of the packed_git list. - */ - extern struct list_head packed_git_mru; - struct pack_entry { off_t offset; unsigned char sha1[20]; @@@ -1665,7 -1590,7 +1578,7 @@@ * usual "XXXXXX" trailer, and the resulting filename is written into the * "template" buffer. Returns the open descriptor. */ -extern int odb_mkstemp(struct strbuf *template, const char *pattern); +extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern); /* * Create a pack .keep file named "name" (which should generally be the output @@@ -1736,7 -1661,7 +1649,7 @@@ struct object_info unsigned long *sizep; off_t *disk_sizep; unsigned char *delta_base_sha1; - struct strbuf *typename; + struct strbuf *type_name; void **contentp; /* Response */ @@@ -1777,9 -1702,7 +1690,9 @@@ #define OBJECT_INFO_SKIP_CACHED 4 /* Do not retry packed storage after checking packed and loose storage */ #define OBJECT_INFO_QUICK 8 -extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags); +/* Do not check loose object */ +#define OBJECT_INFO_IGNORE_LOOSE 16 +extern int oid_object_info_extended(const struct object_id *, struct object_info *, unsigned flags); /* * Set this to 0 to prevent sha1_object_info_extended() from fetching missing diff --combined environment.c index 21565c3c52,93c9fbb0ba..39b3d906c8 --- a/environment.c +++ b/environment.c @@@ -14,6 -14,7 +14,7 @@@ #include "fmt-merge-msg.h" #include "commit.h" #include "argv-array.h" + #include "object-store.h" int trust_executable_bit = 1; int trust_ctime = 1; @@@ -101,7 -102,7 +102,7 @@@ int ignore_untracked_cache_config /* This is set by setup_git_dir_gently() and/or git_default_config() */ char *git_work_tree_cfg; -static char *namespace; +static char *git_namespace; static const char *super_prefix; @@@ -184,8 -185,8 +185,8 @@@ void setup_git_env(const char *git_dir free(git_replace_ref_base); git_replace_ref_base = xstrdup(replace_ref_base ? 
replace_ref_base : "refs/replace/"); - free(namespace); - namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT)); + free(git_namespace); + git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT)); shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT); if (shallow_file) set_alternate_shallow_file(shallow_file, 0); @@@ -219,9 -220,9 +220,9 @@@ const char *get_git_common_dir(void const char *get_git_namespace(void) { - if (!namespace) + if (!git_namespace) BUG("git environment hasn't been setup"); - return namespace; + return git_namespace; } const char *strip_namespace(const char *namespaced_ref) @@@ -270,12 -271,12 +271,12 @@@ const char *get_git_work_tree(void char *get_object_directory(void) { - if (!the_repository->objectdir) + if (!the_repository->objects->objectdir) BUG("git environment hasn't been setup"); - return the_repository->objectdir; + return the_repository->objects->objectdir; } -int odb_mkstemp(struct strbuf *template, const char *pattern) +int odb_mkstemp(struct strbuf *temp_filename, const char *pattern) { int fd; /* @@@ -283,16 -284,16 +284,16 @@@ * restrictive except to remove write permission. */ int mode = 0444; - git_path_buf(template, "objects/%s", pattern); - fd = git_mkstemp_mode(template->buf, mode); + git_path_buf(temp_filename, "objects/%s", pattern); + fd = git_mkstemp_mode(temp_filename->buf, mode); if (0 <= fd) return fd; /* slow path */ - /* some mkstemp implementations erase template on failure */ - git_path_buf(template, "objects/%s", pattern); - safe_create_leading_directories(template->buf); - return xmkstemp_mode(template->buf, mode); + /* some mkstemp implementations erase temp_filename on failure */ + git_path_buf(temp_filename, "objects/%s", pattern); + safe_create_leading_directories(temp_filename->buf); + return xmkstemp_mode(temp_filename->buf, mode); } int odb_pack_keep(const char *name) diff --combined fast-import.c index a2e8b1d763,b3492fce5c..b2f6d6d583 --- a/fast-import.c +++ b/fast-import.c @@@ -154,6 -154,7 +154,7 @@@ Format of STDIN stream #include "builtin.h" #include "cache.h" + #include "repository.h" #include "config.h" #include "lockfile.h" #include "object.h" @@@ -168,6 -169,7 +169,7 @@@ #include "dir.h" #include "run-command.h" #include "packfile.h" + #include "object-store.h" #define PACK_ID_BITS 16 #define MAX_PACK_ID ((1<pack_fd = pack_fd; p->do_not_close = 1; - pack_file = sha1fd(pack_fd, p->pack_name); + pack_file = hashfd(pack_fd, p->pack_name); hdr.hdr_signature = htonl(PACK_SIGNATURE); hdr.hdr_version = htonl(2); hdr.hdr_entries = 0; - sha1write(pack_file, &hdr, sizeof(hdr)); + hashwrite(pack_file, &hdr, sizeof(hdr)); pack_data = p; pack_size = sizeof(hdr); @@@ -1016,7 -1018,7 +1018,7 @@@ static void end_packfile(void struct tag *t; close_pack_windows(pack_data); - sha1close(pack_file, cur_pack_oid.hash, 0); + hashclose(pack_file, cur_pack_oid.hash, 0); fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1, pack_data->pack_name, object_count, cur_pack_oid.hash, pack_size); @@@ -1092,15 -1094,15 +1094,15 @@@ static int store_object unsigned char hdr[96]; struct object_id oid; unsigned long hdrlen, deltalen; - git_SHA_CTX c; + git_hash_ctx c; git_zstream s; hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu", - typename(type), (unsigned long)dat->len) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); - git_SHA1_Update(&c, dat->buf, dat->len); - git_SHA1_Final(oid.hash, &c); + type_name(type), (unsigned long)dat->len) + 1; + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, 
hdrlen); + the_hash_algo->update_fn(&c, dat->buf, dat->len); + the_hash_algo->final_fn(oid.hash, &c); if (oidout) oidcpy(oidout, &oid); @@@ -1110,7 -1112,8 +1112,8 @@@ if (e->idx.offset) { duplicate_count_by_type[type]++; return 1; - } else if (find_sha1_pack(oid.hash, packed_git)) { + } else if (find_sha1_pack(oid.hash, + get_packed_git(the_repository))) { e->type = type; e->pack_id = MAX_PACK_ID; e->idx.offset = 1; /* just not zero! */ @@@ -1118,13 -1121,11 +1121,13 @@@ return 1; } - if (last && last->data.buf && last->depth < max_depth && dat->len > 20) { + if (last && last->data.buf && last->depth < max_depth + && dat->len > the_hash_algo->rawsz) { + delta_count_attempts_by_type[type]++; delta = diff_delta(last->data.buf, last->data.len, dat->buf, dat->len, - &deltalen, dat->len - 20); + &deltalen, dat->len - the_hash_algo->rawsz); } else delta = NULL; @@@ -1182,23 -1183,23 +1185,23 @@@ hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr), OBJ_OFS_DELTA, deltalen); - sha1write(pack_file, hdr, hdrlen); + hashwrite(pack_file, hdr, hdrlen); pack_size += hdrlen; hdr[pos] = ofs & 127; while (ofs >>= 7) hdr[--pos] = 128 | (--ofs & 127); - sha1write(pack_file, hdr + pos, sizeof(hdr) - pos); + hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos); pack_size += sizeof(hdr) - pos; } else { e->depth = 0; hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr), type, dat->len); - sha1write(pack_file, hdr, hdrlen); + hashwrite(pack_file, hdr, hdrlen); pack_size += hdrlen; } - sha1write(pack_file, out, s.total_out); + hashwrite(pack_file, out, s.total_out); pack_size += s.total_out; e->idx.crc32 = crc32_end(pack_file); @@@ -1217,9 -1218,9 +1220,9 @@@ return 0; } -static void truncate_pack(struct sha1file_checkpoint *checkpoint) +static void truncate_pack(struct hashfile_checkpoint *checkpoint) { - if (sha1file_truncate(pack_file, checkpoint)) + if (hashfile_truncate(pack_file, checkpoint)) die_errno("cannot truncate pack to skip duplicate"); pack_size = checkpoint->offset; } @@@ -1233,9 -1234,9 +1236,9 @@@ static void stream_blob(uintmax_t len, struct object_id oid; unsigned long hdrlen; off_t offset; - git_SHA_CTX c; + git_hash_ctx c; git_zstream s; - struct sha1file_checkpoint checkpoint; + struct hashfile_checkpoint checkpoint; int status = Z_OK; /* Determine if we should auto-checkpoint. 
*/ @@@ -1243,13 -1244,13 +1246,13 @@@ || (pack_size + 60 + len) < pack_size) cycle_packfile(); - sha1file_checkpoint(pack_file, &checkpoint); + hashfile_checkpoint(pack_file, &checkpoint); offset = checkpoint.offset; hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1; - git_SHA1_Init(&c); - git_SHA1_Update(&c, out_buf, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, out_buf, hdrlen); crc32_begin(pack_file); @@@ -1267,7 -1268,7 +1270,7 @@@ if (!n && feof(stdin)) die("EOF in data (%" PRIuMAX " bytes remaining)", len); - git_SHA1_Update(&c, in_buf, n); + the_hash_algo->update_fn(&c, in_buf, n); s.next_in = in_buf; s.avail_in = n; len -= n; @@@ -1277,7 -1278,7 +1280,7 @@@ if (!s.avail_out || status == Z_STREAM_END) { size_t n = s.next_out - out_buf; - sha1write(pack_file, out_buf, n); + hashwrite(pack_file, out_buf, n); pack_size += n; s.next_out = out_buf; s.avail_out = out_sz; @@@ -1293,7 -1294,7 +1296,7 @@@ } } git_deflate_end(&s); - git_SHA1_Final(oid.hash, &c); + the_hash_algo->final_fn(oid.hash, &c); if (oidout) oidcpy(oidout, &oid); @@@ -1307,7 -1308,8 +1310,8 @@@ duplicate_count_by_type[OBJ_BLOB]++; truncate_pack(&checkpoint); - } else if (find_sha1_pack(oid.hash, packed_git)) { + } else if (find_sha1_pack(oid.hash, + get_packed_git(the_repository))) { e->type = OBJ_BLOB; e->pack_id = MAX_PACK_ID; e->idx.offset = 1; /* just not zero! */ @@@ -1352,25 -1354,25 +1356,25 @@@ static void *gfi_unpack_entry { enum object_type type; struct packed_git *p = all_packs[oe->pack_id]; - if (p == pack_data && p->pack_size < (pack_size + 20)) { + if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) { /* The object is stored in the packfile we are writing to * and we have modified it since the last time we scanned * back to read a previously written object. If an old - * window covered [p->pack_size, p->pack_size + 20) its + * window covered [p->pack_size, p->pack_size + rawsz) its * data is stale and is not valid. Closing all windows * and updating the packfile length ensures we can read * the newly written data. */ close_pack_windows(p); - sha1flush(pack_file); + hashflush(pack_file); - /* We have to offer 20 bytes additional on the end of + /* We have to offer rawsz bytes additional on the end of * the packfile as the core unpacker code assumes the * footer is present at the file end and must promise - * at least 20 bytes within any window it maps. But + * at least rawsz bytes within any window it maps. But * we don't actually create the footer here. 
*/ - p->pack_size = pack_size + 20; + p->pack_size = pack_size + the_hash_algo->rawsz; } return unpack_entry(p, oe->idx.offset, &type, sizep); } @@@ -1412,7 -1414,7 +1416,7 @@@ static void load_tree(struct tree_entr die("Can't load tree %s", oid_to_hex(oid)); } else { enum object_type type; - buf = read_sha1_file(oid->hash, &type, &size); + buf = read_object_file(oid, &type, &size); if (!buf || type != OBJ_TREE) die("Can't load tree %s", oid_to_hex(oid)); } @@@ -1913,7 -1915,7 +1917,7 @@@ static void read_marks(void die("corrupt mark line: %s", line); e = find_object(&oid); if (!e) { - enum object_type type = sha1_object_info(oid.hash, NULL); + enum object_type type = oid_object_info(&oid, NULL); if (type < 0) die("object not found: %s", oid_to_hex(&oid)); e = insert_object(&oid); @@@ -2206,7 -2208,7 +2210,7 @@@ static void construct_path_with_fanout( unsigned char fanout, char *path) { unsigned int i = 0, j = 0; - if (fanout >= 20) + if (fanout >= the_hash_algo->rawsz) die("Too large fanout (%u)", fanout); while (fanout) { path[i++] = hex_sha1[j++]; @@@ -2214,8 -2216,8 +2218,8 @@@ path[i++] = '/'; fanout--; } - memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j); - path[i + GIT_SHA1_HEXSZ - j] = '\0'; + memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j); + path[i + the_hash_algo->hexsz - j] = '\0'; } static uintmax_t do_change_note_fanout( @@@ -2423,7 -2425,7 +2427,7 @@@ static void file_change_m(const char *p else if (oe) { if (oe->type != OBJ_COMMIT) die("Not a commit (actually a %s): %s", - typename(oe->type), command_buf.buf); + type_name(oe->type), command_buf.buf); } /* * Accept the sha1 without checking; it expected to be in @@@ -2443,14 -2445,14 +2447,14 @@@ enum object_type expected = S_ISDIR(mode) ? OBJ_TREE: OBJ_BLOB; enum object_type type = oe ? oe->type : - sha1_object_info(oid.hash, NULL); + oid_object_info(&oid, NULL); if (type < 0) die("%s not found: %s", S_ISDIR(mode) ? 
"Tree" : "Blob", command_buf.buf); if (type != expected) die("Not a %s (actually a %s): %s", - typename(expected), typename(type), + type_name(expected), type_name(type), command_buf.buf); } @@@ -2583,9 -2585,8 +2587,9 @@@ static void note_change_n(const char *p oidcpy(&commit_oid, &commit_oe->idx.oid); } else if (!get_oid(p, &commit_oid)) { unsigned long size; - char *buf = read_object_with_reference(commit_oid.hash, - commit_type, &size, commit_oid.hash); + char *buf = read_object_with_reference(&commit_oid, + commit_type, &size, + &commit_oid); if (!buf || size < 46) die("Not a valid commit: %s", p); free(buf); @@@ -2602,14 -2603,14 +2606,14 @@@ } else if (oe) { if (oe->type != OBJ_BLOB) die("Not a blob (actually a %s): %s", - typename(oe->type), command_buf.buf); + type_name(oe->type), command_buf.buf); } else if (!is_null_oid(&oid)) { - enum object_type type = sha1_object_info(oid.hash, NULL); + enum object_type type = oid_object_info(&oid, NULL); if (type < 0) die("Blob not found: %s", command_buf.buf); if (type != OBJ_BLOB) die("Not a blob (actually a %s): %s", - typename(type), command_buf.buf); + type_name(type), command_buf.buf); } construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path); @@@ -2654,8 -2655,9 +2658,8 @@@ static void parse_from_existing(struct unsigned long size; char *buf; - buf = read_object_with_reference(b->oid.hash, - commit_type, &size, - b->oid.hash); + buf = read_object_with_reference(&b->oid, commit_type, &size, + &b->oid); parse_from_commit(b, buf, size); free(buf); } @@@ -2732,9 -2734,8 +2736,9 @@@ static struct hash_list *parse_merge(un oidcpy(&n->oid, &oe->idx.oid); } else if (!get_oid(from, &n->oid)) { unsigned long size; - char *buf = read_object_with_reference(n->oid.hash, - commit_type, &size, n->oid.hash); + char *buf = read_object_with_reference(&n->oid, + commit_type, + &size, &n->oid); if (!buf || size < 46) die("Not a valid commit: %s", from); free(buf); @@@ -2891,7 -2892,7 +2895,7 @@@ static void parse_new_tag(const char *a } else if (!get_oid(from, &oid)) { struct object_entry *oe = find_object(&oid); if (!oe) { - type = sha1_object_info(oid.hash, NULL); + type = oid_object_info(&oid, NULL); if (type < 0) die("Not a valid object: %s", from); } else @@@ -2917,7 -2918,7 +2921,7 @@@ "object %s\n" "type %s\n" "tag %s\n", - oid_to_hex(&oid), typename(type), t->name); + oid_to_hex(&oid), type_name(type), t->name); if (tagger) strbuf_addf(&new_data, "tagger %s\n", tagger); @@@ -2967,7 -2968,7 +2971,7 @@@ static void cat_blob(struct object_entr char *buf; if (!oe || oe->pack_id == MAX_PACK_ID) { - buf = read_sha1_file(oid->hash, &type, &size); + buf = read_object_file(oid, &type, &size); } else { type = oe->type; buf = gfi_unpack_entry(oe, &size); @@@ -2988,10 -2989,10 +2992,10 @@@ die("Can't read object %s", oid_to_hex(oid)); if (type != OBJ_BLOB) die("Object %s is a %s but a blob was expected.", - oid_to_hex(oid), typename(type)); + oid_to_hex(oid), type_name(type)); strbuf_reset(&line); strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid), - typename(type), size); + type_name(type), size); cat_blob_write(line.buf, line.len); strbuf_release(&line); cat_blob_write(buf, size); @@@ -3006,7 -3007,7 +3010,7 @@@ static void parse_get_mark(const char *p) { - struct object_entry *oe = oe; + struct object_entry *oe; char output[GIT_MAX_HEXSZ + 2]; /* get-mark SP LF */ @@@ -3023,7 -3024,7 +3027,7 @@@ static void parse_cat_blob(const char *p) { - struct object_entry *oe = oe; + struct object_entry *oe; struct object_id oid; /* cat-blob SP LF */ 
@@@ -3049,7 -3050,7 +3053,7 @@@ static struct object_entry *dereference unsigned long size; char *buf = NULL; if (!oe) { - enum object_type type = sha1_object_info(oid->hash, NULL); + enum object_type type = oid_object_info(oid, NULL); if (type < 0) die("object not found: %s", oid_to_hex(oid)); /* cache it! */ @@@ -3072,7 -3073,7 +3076,7 @@@ buf = gfi_unpack_entry(oe, &size); } else { enum object_type unused; - buf = read_sha1_file(oid->hash, &unused, &size); + buf = read_object_file(oid, &unused, &size); } if (!buf) die("Can't load object %s", oid_to_hex(oid)); diff --combined http-push.c index ff82b63133,97fe22a705..c0998fd763 --- a/http-push.c +++ b/http-push.c @@@ -12,6 -12,7 +12,7 @@@ #include "sigchain.h" #include "argv-array.h" #include "packfile.h" + #include "object-store.h" #ifdef EXPAT_NEEDS_XMLPARSE_H #include @@@ -361,8 -362,8 +362,8 @@@ static void start_put(struct transfer_r ssize_t size; git_zstream stream; - unpacked = read_sha1_file(request->obj->oid.hash, &type, &len); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1; + unpacked = read_object_file(&request->obj->oid, &type, &len); + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1; /* Set it up */ git_deflate_init(&stream, zlib_compression_level); diff --combined http-walker.c index f506f394ac,75d55d42a9..7cdfb2f24c --- a/http-walker.c +++ b/http-walker.c @@@ -1,10 -1,12 +1,12 @@@ #include "cache.h" + #include "repository.h" #include "commit.h" #include "walker.h" #include "http.h" #include "list.h" #include "transport.h" #include "packfile.h" + #include "object-store.h" struct alt_base { char *base; @@@ -22,7 -24,7 +24,7 @@@ enum object_request_state struct object_request { struct walker *walker; - unsigned char sha1[20]; + struct object_id oid; struct alt_base *repo; enum object_request_state state; struct http_object_request *req; @@@ -56,7 -58,7 +58,7 @@@ static void start_object_request(struc struct active_request_slot *slot; struct http_object_request *req; - req = new_http_object_request(obj_req->repo->base, obj_req->sha1); + req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash); if (req == NULL) { obj_req->state = ABORTED; return; @@@ -82,7 -84,7 +84,7 @@@ static void finish_object_request(struc return; if (obj_req->req->rename == 0) - walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1)); + walker_say(obj_req->walker, "got %s\n", oid_to_hex(&obj_req->oid)); } static void process_object_response(void *callback_data) @@@ -129,7 -131,7 +131,7 @@@ static int fill_active_slot(struct walk list_for_each_safe(pos, tmp, head) { obj_req = list_entry(pos, struct object_request, node); if (obj_req->state == WAITING) { - if (has_sha1_file(obj_req->sha1)) + if (has_sha1_file(obj_req->oid.hash)) obj_req->state = COMPLETE; else { start_object_request(walker, obj_req); @@@ -148,7 -150,7 +150,7 @@@ static void prefetch(struct walker *wal newreq = xmalloc(sizeof(*newreq)); newreq->walker = walker; - hashcpy(newreq->sha1, sha1); + hashcpy(newreq->oid.hash, sha1); newreq->repo = data->alt; newreq->state = WAITING; newreq->req = NULL; @@@ -481,13 -483,13 +483,13 @@@ static int fetch_object(struct walker * list_for_each(pos, head) { obj_req = list_entry(pos, struct object_request, node); - if (!hashcmp(obj_req->sha1, sha1)) + if (!hashcmp(obj_req->oid.hash, sha1)) break; } if (obj_req == NULL) return error("Couldn't find request for %s in the queue", hex); - if (has_sha1_file(obj_req->sha1)) { + if (has_sha1_file(obj_req->oid.hash)) { if (obj_req->req != 
NULL) abort_http_object_request(obj_req->req); abort_object_request(obj_req); @@@ -541,11 -543,11 +543,11 @@@ } else if (req->zret != Z_STREAM_END) { walker->corrupt_object_found++; ret = error("File %s (%s) corrupt", hex, req->url); - } else if (hashcmp(obj_req->sha1, req->real_sha1)) { + } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) { ret = error("File %s has bad hash", hex); } else if (req->rename < 0) { struct strbuf buf = STRBUF_INIT; - sha1_file_name(&buf, req->sha1); + sha1_file_name(the_repository, &buf, req->sha1); ret = error("unable to write sha1 filename %s", buf.buf); strbuf_release(&buf); } diff --combined http.c index a5bd5d62c2,4d613d5f6b..9304045b6c --- a/http.c +++ b/http.c @@@ -14,6 -14,7 +14,7 @@@ #include "packfile.h" #include "protocol.h" #include "string-list.h" + #include "object-store.h" static struct trace_key trace_curl = TRACE_KEY_INIT(CURL); static int trace_curl_data = 1; @@@ -69,9 -70,6 +70,9 @@@ static const char *ssl_key #if LIBCURL_VERSION_NUM >= 0x070908 static const char *ssl_capath; #endif +#if LIBCURL_VERSION_NUM >= 0x071304 +static const char *curl_no_proxy; +#endif #if LIBCURL_VERSION_NUM >= 0x072c00 static const char *ssl_pinnedkey; #endif @@@ -80,6 -78,7 +81,6 @@@ static long curl_low_speed_limit = -1 static long curl_low_speed_time = -1; static int curl_ftp_no_epsv; static const char *curl_http_proxy; -static const char *curl_no_proxy; static const char *http_proxy_authmethod; static struct { const char *name; @@@ -1262,14 -1261,14 +1263,14 @@@ static struct fill_chain *fill_cfg void add_fill_function(void *data, int (*fill)(void *)) { - struct fill_chain *new = xmalloc(sizeof(*new)); + struct fill_chain *new_fill = xmalloc(sizeof(*new_fill)); struct fill_chain **linkp = &fill_cfg; - new->data = data; - new->fill = fill; - new->next = NULL; + new_fill->data = data; + new_fill->fill = fill; + new_fill->next = NULL; while (*linkp) linkp = &(*linkp)->next; - *linkp = new; + *linkp = new_fill; } void fill_active_slots(void) @@@ -2248,7 -2247,7 +2249,7 @@@ struct http_object_request *new_http_ob hashcpy(freq->sha1, sha1); freq->localfile = -1; - sha1_file_name(&filename, sha1); + sha1_file_name(the_repository, &filename, sha1); snprintf(freq->tmpfile, sizeof(freq->tmpfile), "%s.temp", filename.buf); @@@ -2397,8 -2396,7 +2398,7 @@@ int finish_http_object_request(struct h unlink_or_warn(freq->tmpfile); return -1; } - - sha1_file_name(&filename, freq->sha1); + sha1_file_name(the_repository, &filename, freq->sha1); freq->rename = finalize_object_file(freq->tmpfile, filename.buf); strbuf_release(&filename); diff --combined object.c index 2c909385a7,4c2cf7ff5d..a0a756f24f --- a/object.c +++ b/object.c @@@ -4,6 -4,8 +4,8 @@@ #include "tree.h" #include "commit.h" #include "tag.h" + #include "object-store.h" + #include "packfile.h" static struct object **obj_hash; static int nr_objs, obj_hash_size; @@@ -26,7 -28,7 +28,7 @@@ static const char *object_type_strings[ "tag", /* OBJ_TAG = 4 */ }; -const char *typename(unsigned int type) +const char *type_name(unsigned int type) { if (type >= ARRAY_SIZE(object_type_strings)) return NULL; @@@ -166,7 -168,7 +168,7 @@@ void *object_as_type(struct object *obj if (!quiet) error("object %s is a %s, not a %s", oid_to_hex(&obj->oid), - typename(obj->type), typename(type)); + type_name(obj->type), type_name(type)); return NULL; } } @@@ -244,7 -246,7 +246,7 @@@ struct object *parse_object(const struc unsigned long size; enum object_type type; int eaten; - const unsigned char *repl = lookup_replace_object(oid->hash); + 
const struct object_id *repl = lookup_replace_object(oid); void *buffer; struct object *obj; @@@ -254,8 -256,8 +256,8 @@@ if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) || (!obj && has_object_file(oid) && - sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) { - if (check_sha1_signature(repl, NULL, 0, NULL) < 0) { + oid_object_info(oid, NULL) == OBJ_BLOB)) { + if (check_object_signature(repl, NULL, 0, NULL) < 0) { error("sha1 mismatch %s", oid_to_hex(oid)); return NULL; } @@@ -263,11 -265,11 +265,11 @@@ return lookup_object(oid->hash); } - buffer = read_sha1_file(oid->hash, &type, &size); + buffer = read_object_file(oid, &type, &size); if (buffer) { - if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) { + if (check_object_signature(repl, buffer, size, type_name(type)) < 0) { free(buffer); - error("sha1 mismatch %s", sha1_to_hex(repl)); + error("sha1 mismatch %s", oid_to_hex(repl)); return NULL; } @@@ -445,3 -447,43 +447,43 @@@ void clear_commit_marks_all(unsigned in obj->flags &= ~flags; } } + + struct raw_object_store *raw_object_store_new(void) + { + struct raw_object_store *o = xmalloc(sizeof(*o)); + + memset(o, 0, sizeof(*o)); + INIT_LIST_HEAD(&o->packed_git_mru); + return o; + } + + static void free_alt_odb(struct alternate_object_database *alt) + { + strbuf_release(&alt->scratch); + oid_array_clear(&alt->loose_objects_cache); + free(alt); + } + + static void free_alt_odbs(struct raw_object_store *o) + { + while (o->alt_odb_list) { + struct alternate_object_database *next; + + next = o->alt_odb_list->next; + free_alt_odb(o->alt_odb_list); + o->alt_odb_list = next; + } + } + + void raw_object_store_clear(struct raw_object_store *o) + { + FREE_AND_NULL(o->objectdir); + FREE_AND_NULL(o->alternate_db); + + free_alt_odbs(o); + o->alt_odb_tail = NULL; + + INIT_LIST_HEAD(&o->packed_git_mru); + close_all_packs(o); + o->packed_git = NULL; + } diff --combined pack-check.c index d0591dd5e8,2378f25999..385d964bdd --- a/pack-check.c +++ b/pack-check.c @@@ -3,6 -3,7 +3,7 @@@ #include "pack-revindex.h" #include "progress.h" #include "packfile.h" + #include "object-store.h" struct idx_entry { off_t offset; @@@ -41,7 -42,7 +42,7 @@@ int check_pack_crc(struct packed_git *p } while (len); index_crc = p->index_data; - index_crc += 2 + 256 + p->num_objects * (20/4) + nr; + index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr; return data_crc != ntohl(*index_crc); } @@@ -54,7 -55,7 +55,7 @@@ static int verify_packfile(struct packe { off_t index_size = p->index_size; const unsigned char *index_base = p->index_data; - git_SHA_CTX ctx; + git_hash_ctx ctx; unsigned char hash[GIT_MAX_RAWSZ], *pack_sig; off_t offset = 0, pack_sig_ofs = 0; uint32_t nr_objects, i; @@@ -64,24 -65,24 +65,24 @@@ if (!is_pack_valid(p)) return error("packfile %s cannot be accessed", p->pack_name); - git_SHA1_Init(&ctx); + the_hash_algo->init_fn(&ctx); do { unsigned long remaining; unsigned char *in = use_pack(p, w_curs, offset, &remaining); offset += remaining; if (!pack_sig_ofs) - pack_sig_ofs = p->pack_size - 20; + pack_sig_ofs = p->pack_size - the_hash_algo->rawsz; if (offset > pack_sig_ofs) remaining -= (unsigned int)(offset - pack_sig_ofs); - git_SHA1_Update(&ctx, in, remaining); + the_hash_algo->update_fn(&ctx, in, remaining); } while (offset < pack_sig_ofs); - git_SHA1_Final(hash, &ctx); + the_hash_algo->final_fn(hash, &ctx); pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL); if (hashcmp(hash, pack_sig)) - err = error("%s SHA1 checksum mismatch", + err = error("%s pack checksum mismatch", 
p->pack_name); - if (hashcmp(index_base + index_size - 40, pack_sig)) - err = error("%s SHA1 does not match its index", + if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig)) + err = error("%s pack checksum does not match its index", p->pack_name); unuse_pack(w_curs); @@@ -126,7 -127,7 +127,7 @@@ if (type == OBJ_BLOB && big_file_threshold <= size) { /* - * Let check_sha1_signature() check it with + * Let check_object_signature() check it with * the streaming interface; no point slurping * the data in-core only to discard. */ @@@ -141,7 -142,7 +142,7 @@@ err = error("cannot unpack %s from %s at offset %"PRIuMAX"", oid_to_hex(entries[i].oid.oid), p->pack_name, (uintmax_t)entries[i].offset); - else if (check_sha1_signature(entries[i].oid.hash, data, size, typename(type))) + else if (check_object_signature(entries[i].oid.oid, data, size, type_name(type))) err = error("packed %s from %s is corrupt", oid_to_hex(entries[i].oid.oid), p->pack_name); else if (fn) { @@@ -165,8 -166,8 +166,8 @@@ int verify_pack_index(struct packed_gi { off_t index_size; const unsigned char *index_base; - git_SHA_CTX ctx; - unsigned char sha1[20]; + git_hash_ctx ctx; + unsigned char hash[GIT_MAX_RAWSZ]; int err = 0; if (open_pack_index(p)) @@@ -175,11 -176,11 +176,11 @@@ index_base = p->index_data; /* Verify SHA1 sum of the index file */ - git_SHA1_Init(&ctx); - git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20)); - git_SHA1_Final(sha1, &ctx); - if (hashcmp(sha1, index_base + index_size - 20)) - err = error("Packfile index for %s SHA1 mismatch", + the_hash_algo->init_fn(&ctx); + the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz)); + the_hash_algo->final_fn(hash, &ctx); + if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz)) + err = error("Packfile index for %s hash mismatch", p->pack_name); return err; } diff --combined packfile.c index 69d3afda6c,d087eacc06..8e8b743910 --- a/packfile.c +++ b/packfile.c @@@ -1,6 -1,7 +1,7 @@@ #include "cache.h" #include "list.h" #include "pack.h" + #include "repository.h" #include "dir.h" #include "mergesort.h" #include "packfile.h" @@@ -13,6 -14,7 +14,7 @@@ #include "tag.h" #include "tree-walk.h" #include "tree.h" + #include "object-store.h" char *odb_pack_name(struct strbuf *buf, const unsigned char *sha1, @@@ -44,8 -46,6 +46,6 @@@ static unsigned int pack_open_fds static unsigned int pack_max_fds; static size_t peak_pack_mapped; static size_t pack_mapped; - struct packed_git *packed_git; - LIST_HEAD(packed_git_mru); #define SZ_FMT PRIuMAX static inline uintmax_t sz_fmt(size_t s) { return s; } @@@ -245,7 -245,7 +245,7 @@@ static int unuse_one_window(struct pack if (current) scan_windows(current, &lru_p, &lru_w, &lru_l); - for (p = packed_git; p; p = p->next) + for (p = the_repository->objects->packed_git; p; p = p->next) scan_windows(p, &lru_p, &lru_w, &lru_l); if (lru_p) { munmap(lru_w->base, lru_w->len); @@@ -311,11 -311,11 +311,11 @@@ static void close_pack(struct packed_gi close_pack_index(p); } - void close_all_packs(void) + void close_all_packs(struct raw_object_store *o) { struct packed_git *p; - for (p = packed_git; p; p = p->next) + for (p = o->packed_git; p; p = p->next) if (p->do_not_close) die("BUG: want to close pack marked 'do-not-close'"); else @@@ -383,7 -383,7 +383,7 @@@ static int close_one_pack(void struct pack_window *mru_w = NULL; int accept_windows_inuse = 1; - for (p = packed_git; p; p = p->next) { + for (p = the_repository->objects->packed_git; p; p = p->next) { if 
(p->pack_fd == -1) continue; find_lru_pack(p, &lru_p, &mru_w, &accept_windows_inuse); @@@ -685,8 -685,8 +685,8 @@@ void install_packed_git(struct packed_g if (pack->pack_fd != -1) pack_open_fds++; - pack->next = packed_git; - packed_git = pack; + pack->next = the_repository->objects->packed_git; + the_repository->objects->packed_git = pack; } void (*report_garbage)(unsigned seen_bits, const char *path); @@@ -768,7 -768,8 +768,8 @@@ static void prepare_packed_git_one(cha base_len = path.len; if (strip_suffix_mem(path.buf, &base_len, ".idx")) { /* Don't reopen a pack we already have. */ - for (p = packed_git; p; p = p->next) { + for (p = the_repository->objects->packed_git; p; + p = p->next) { size_t len; if (strip_suffix(p->pack_name, ".pack", &len) && len == base_len && @@@ -802,8 -803,6 +803,6 @@@ strbuf_release(&path); } - static int approximate_object_count_valid; - /* * Give a fast, rough count of the number of objects in the repository. This * ignores loose objects completely. If you have a lot of them, then either @@@ -813,19 -812,20 +812,20 @@@ */ unsigned long approximate_object_count(void) { - static unsigned long count; - if (!approximate_object_count_valid) { + if (!the_repository->objects->approximate_object_count_valid) { + unsigned long count; struct packed_git *p; prepare_packed_git(); count = 0; - for (p = packed_git; p; p = p->next) { + for (p = the_repository->objects->packed_git; p; p = p->next) { if (open_pack_index(p)) continue; count += p->num_objects; } + the_repository->objects->approximate_object_count = count; } - return count; + return the_repository->objects->approximate_object_count; } static void *get_next_packed_git(const void *p) @@@ -868,43 -868,53 +868,53 @@@ static int sort_pack(const void *a_, co static void rearrange_packed_git(void) { - packed_git = llist_mergesort(packed_git, get_next_packed_git, - set_next_packed_git, sort_pack); + the_repository->objects->packed_git = llist_mergesort( + the_repository->objects->packed_git, get_next_packed_git, + set_next_packed_git, sort_pack); } static void prepare_packed_git_mru(void) { struct packed_git *p; - INIT_LIST_HEAD(&packed_git_mru); + INIT_LIST_HEAD(&the_repository->objects->packed_git_mru); - for (p = packed_git; p; p = p->next) - list_add_tail(&p->mru, &packed_git_mru); + for (p = the_repository->objects->packed_git; p; p = p->next) + list_add_tail(&p->mru, &the_repository->objects->packed_git_mru); } - static int prepare_packed_git_run_once = 0; void prepare_packed_git(void) { struct alternate_object_database *alt; - if (prepare_packed_git_run_once) + if (the_repository->objects->packed_git_initialized) return; prepare_packed_git_one(get_object_directory(), 1); - prepare_alt_odb(); - for (alt = alt_odb_list; alt; alt = alt->next) + prepare_alt_odb(the_repository); + for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) prepare_packed_git_one(alt->path, 0); rearrange_packed_git(); prepare_packed_git_mru(); - prepare_packed_git_run_once = 1; + the_repository->objects->packed_git_initialized = 1; } void reprepare_packed_git(void) { - approximate_object_count_valid = 0; - prepare_packed_git_run_once = 0; + the_repository->objects->approximate_object_count_valid = 0; + the_repository->objects->packed_git_initialized = 0; prepare_packed_git(); } + struct packed_git *get_packed_git(struct repository *r) + { + return r->objects->packed_git; + } + + struct list_head *get_packed_git_mru(struct repository *r) + { + return &r->objects->packed_git_mru; + } + unsigned long 
unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep) { @@@ -1013,7 -1023,7 +1023,7 @@@ const struct packed_git *has_packed_and struct packed_git *p; unsigned i; - for (p = packed_git; p; p = p->next) + for (p = the_repository->objects->packed_git; p; p = p->next) for (i = 0; i < p->num_bad_objects; i++) if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i)) return p; @@@ -1095,13 -1105,13 +1105,13 @@@ static int retry_bad_packed_offset(stru { int type; struct revindex_entry *revidx; - const unsigned char *sha1; + struct object_id oid; revidx = find_pack_revindex(p, obj_offset); if (!revidx) return OBJ_BAD; - sha1 = nth_packed_object_sha1(p, revidx->nr); - mark_bad_packed_object(p, sha1); - type = sha1_object_info(sha1, NULL); + nth_packed_object_oid(&oid, p, revidx->nr); + mark_bad_packed_object(p, oid.hash); + type = oid_object_info(&oid, NULL); if (type <= OBJ_NONE) return OBJ_BAD; return type; @@@ -1361,16 -1371,16 +1371,16 @@@ int packed_object_info(struct packed_gi *oi->disk_sizep = revidx[1].offset - obj_offset; } - if (oi->typep || oi->typename) { + if (oi->typep || oi->type_name) { enum object_type ptot; ptot = packed_to_object_type(p, obj_offset, type, &w_curs, curpos); if (oi->typep) *oi->typep = ptot; - if (oi->typename) { - const char *tn = typename(ptot); + if (oi->type_name) { + const char *tn = type_name(ptot); if (tn) - strbuf_addstr(oi->typename, tn); + strbuf_addstr(oi->type_name, tn); } if (ptot < 0) { type = OBJ_BAD; @@@ -1452,7 -1462,7 +1462,7 @@@ struct unpack_entry_stack_ent unsigned long size; }; -static void *read_object(const unsigned char *sha1, enum object_type *type, +static void *read_object(const struct object_id *oid, enum object_type *type, unsigned long *size) { struct object_info oi = OBJECT_INFO_INIT; @@@ -1461,7 -1471,7 +1471,7 @@@ oi.sizep = size; oi.contentp = &content; - if (sha1_object_info_extended(sha1, &oi, 0) < 0) + if (oid_object_info_extended(oid, &oi, 0) < 0) return NULL; return content; } @@@ -1501,11 -1511,11 +1511,11 @@@ void *unpack_entry(struct packed_git *p struct revindex_entry *revidx = find_pack_revindex(p, obj_offset); off_t len = revidx[1].offset - obj_offset; if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) { - const unsigned char *sha1 = - nth_packed_object_sha1(p, revidx->nr); + struct object_id oid; + nth_packed_object_oid(&oid, p, revidx->nr); error("bad packed object CRC for %s", - sha1_to_hex(sha1)); - mark_bad_packed_object(p, sha1); + oid_to_hex(&oid)); + mark_bad_packed_object(p, oid.hash); data = NULL; goto out; } @@@ -1588,16 -1598,16 +1598,16 @@@ * of a corrupted pack, and is better than failing outright. 
*/ struct revindex_entry *revidx; - const unsigned char *base_sha1; + struct object_id base_oid; revidx = find_pack_revindex(p, obj_offset); if (revidx) { - base_sha1 = nth_packed_object_sha1(p, revidx->nr); + nth_packed_object_oid(&base_oid, p, revidx->nr); error("failed to read delta base object %s" " at offset %"PRIuMAX" from %s", - sha1_to_hex(base_sha1), (uintmax_t)obj_offset, + oid_to_hex(&base_oid), (uintmax_t)obj_offset, p->pack_name); - mark_bad_packed_object(p, base_sha1); - base = read_object(base_sha1, &type, &base_size); + mark_bad_packed_object(p, base_oid.hash); + base = read_object(&base_oid, &type, &base_size); external_base = base; } } @@@ -1654,29 -1664,6 +1664,29 @@@ out return data; } +int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result) +{ + const unsigned char *index_fanout = p->index_data; + const unsigned char *index_lookup; + int index_lookup_width; + + if (!index_fanout) + BUG("bsearch_pack called without a valid pack-index"); + + index_lookup = index_fanout + 4 * 256; + if (p->index_version == 1) { + index_lookup_width = 24; + index_lookup += 4; + } else { + index_lookup_width = 20; + index_fanout += 8; + index_lookup += 8; + } + + return bsearch_hash(oid->hash, (const uint32_t*)index_fanout, + index_lookup, index_lookup_width, result); +} + const unsigned char *nth_packed_object_sha1(struct packed_git *p, uint32_t n) { @@@ -1743,18 -1730,52 +1753,18 @@@ off_t nth_packed_object_offset(const st off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *p) { - const uint32_t *level1_ofs = p->index_data; const unsigned char *index = p->index_data; - unsigned hi, lo, stride; - static int debug_lookup = -1; - - if (debug_lookup < 0) - debug_lookup = !!getenv("GIT_DEBUG_LOOKUP"); + struct object_id oid; + uint32_t result; if (!index) { if (open_pack_index(p)) return 0; - level1_ofs = p->index_data; - index = p->index_data; - } - if (p->index_version > 1) { - level1_ofs += 2; - index += 8; - } - index += 4 * 256; - hi = ntohl(level1_ofs[*sha1]); - lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1])); - if (p->index_version > 1) { - stride = 20; - } else { - stride = 24; - index += 4; } - if (debug_lookup) - printf("%02x%02x%02x... 
lo %u hi %u nr %"PRIu32"\n", - sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects); - - while (lo < hi) { - unsigned mi = lo + (hi - lo) / 2; - int cmp = hashcmp(index + mi * stride, sha1); - - if (debug_lookup) - printf("lo %u hi %u rg %u mi %u\n", - lo, hi, hi - lo, mi); - if (!cmp) - return nth_packed_object_offset(p, mi); - if (cmp > 0) - hi = mi; - else - lo = mi+1; - } + hashcpy(oid.hash, sha1); + if (bsearch_pack(&oid, p, &result)) + return nth_packed_object_offset(p, result); return 0; } @@@ -1833,13 -1854,14 +1843,14 @@@ int find_pack_entry(const unsigned cha struct list_head *pos; prepare_packed_git(); - if (!packed_git) + if (!the_repository->objects->packed_git) return 0; - list_for_each(pos, &packed_git_mru) { + list_for_each(pos, &the_repository->objects->packed_git_mru) { struct packed_git *p = list_entry(pos, struct packed_git, mru); if (fill_pack_entry(sha1, e, p)) { - list_move(&p->mru, &packed_git_mru); + list_move(&p->mru, + &the_repository->objects->packed_git_mru); return 1; } } @@@ -1886,7 -1908,7 +1897,7 @@@ int for_each_packed_object(each_packed_ int pack_errors = 0; prepare_packed_git(); - for (p = packed_git; p; p = p->next) { + for (p = the_repository->objects->packed_git; p; p = p->next) { if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local) continue; if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) && @@@ -1917,7 -1939,7 +1928,7 @@@ static int add_promisor_object(const st /* * If this is a tree, commit, or tag, the objects it refers - * to are also promisor objects. (Blobs refer to no objects.) + * to are also promisor objects. (Blobs refer to no objects->) */ if (obj->type == OBJ_TREE) { struct tree *tree = (struct tree *)obj; diff --combined packfile.h index ec08cb2bb0,5b1ce00f84..a7abd602da --- a/packfile.h +++ b/packfile.h @@@ -38,6 -38,9 +38,9 @@@ extern void prepare_packed_git(void) extern void reprepare_packed_git(void); extern void install_packed_git(struct packed_git *pack); + struct packed_git *get_packed_git(struct repository *r); + struct list_head *get_packed_git_mru(struct repository *r); + /* * Give a rough count of objects in the repository. This sacrifices accuracy * for speed. @@@ -63,7 -66,7 +66,7 @@@ extern void close_pack_index(struct pac extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *); extern void close_pack_windows(struct packed_git *); - extern void close_all_packs(void); + extern void close_all_packs(struct raw_object_store *o); extern void unuse_pack(struct pack_window **); extern void clear_delta_base_cache(void); extern struct packed_git *add_packed_git(const char *path, size_t path_len, int local); @@@ -78,14 -81,6 +81,14 @@@ */ extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr); +/* + * Perform binary search on a pack-index for a given oid. Packfile is expected to + * have a valid pack-index. + * + * See 'bsearch_hash' for more information. + */ +int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result); + /* * Return the SHA-1 of the nth object within the specified packfile. * Open the index if it is not already open. 
The return value points diff --combined reachable.c index 404e1440e9,25cfd99d1c..a6ea33a5db --- a/reachable.c +++ b/reachable.c @@@ -11,6 -11,7 +11,7 @@@ #include "list-objects.h" #include "packfile.h" #include "worktree.h" + #include "object-store.h" struct connectivity_progress { struct progress *progress; @@@ -77,7 -78,7 +78,7 @@@ static void add_recent_object(const str * later processing, and the revision machinery expects * commits and tags to have been parsed. */ - type = sha1_object_info(oid->hash, NULL); + type = oid_object_info(oid, NULL); if (type < 0) die("unable to get object info for %s", oid_to_hex(oid)); @@@ -94,7 -95,7 +95,7 @@@ break; default: die("unknown object type for %s: %s", - oid_to_hex(oid), typename(type)); + oid_to_hex(oid), type_name(type)); } if (!obj) diff --combined sha1_file.c index aea9124a78,0989bbd948..aab3b58e03 --- a/sha1_file.c +++ b/sha1_file.c @@@ -22,6 -22,7 +22,7 @@@ #include "pack-revindex.h" #include "sha1-lookup.h" #include "bulk-checkin.h" + #include "repository.h" #include "streaming.h" #include "dir.h" #include "list.h" @@@ -29,10 -30,8 +30,11 @@@ #include "quote.h" #include "packfile.h" #include "fetch-object.h" + #include "object-store.h" +/* The maximum size for an object header. */ +#define MAX_HEADER_LEN 32 + const unsigned char null_sha1[GIT_MAX_RAWSZ]; const struct object_id null_oid; const struct object_id empty_tree_oid = { @@@ -42,32 -41,32 +44,32 @@@ const struct object_id empty_blob_oid EMPTY_BLOB_SHA1_BIN_LITERAL }; -static void git_hash_sha1_init(void *ctx) +static void git_hash_sha1_init(git_hash_ctx *ctx) { - git_SHA1_Init((git_SHA_CTX *)ctx); + git_SHA1_Init(&ctx->sha1); } -static void git_hash_sha1_update(void *ctx, const void *data, size_t len) +static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len) { - git_SHA1_Update((git_SHA_CTX *)ctx, data, len); + git_SHA1_Update(&ctx->sha1, data, len); } -static void git_hash_sha1_final(unsigned char *hash, void *ctx) +static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx) { - git_SHA1_Final(hash, (git_SHA_CTX *)ctx); + git_SHA1_Final(hash, &ctx->sha1); } -static void git_hash_unknown_init(void *ctx) +static void git_hash_unknown_init(git_hash_ctx *ctx) { die("trying to init unknown hash"); } -static void git_hash_unknown_update(void *ctx, const void *data, size_t len) +static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len) { die("trying to update unknown hash"); } -static void git_hash_unknown_final(unsigned char *hash, void *ctx) +static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx) { die("trying to finalize unknown hash"); } @@@ -78,6 -77,7 +80,6 @@@ const struct git_hash_algo hash_algos[G 0x00000000, 0, 0, - 0, git_hash_unknown_init, git_hash_unknown_update, git_hash_unknown_final, @@@ -88,6 -88,7 +90,6 @@@ "sha-1", /* "sha1", big-endian */ 0x73686131, - sizeof(git_SHA_CTX), GIT_SHA1_RAWSZ, GIT_SHA1_HEXSZ, git_hash_sha1_init, @@@ -322,9 -323,9 +324,9 @@@ static void fill_sha1_path(struct strbu } } - void sha1_file_name(struct strbuf *buf, const unsigned char *sha1) + void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1) { - strbuf_addstr(buf, get_object_directory()); + strbuf_addstr(buf, r->objects->objectdir); strbuf_addch(buf, '/'); fill_sha1_path(buf, sha1); } @@@ -343,13 -344,12 +345,12 @@@ static const char *alt_sha1_path(struc return buf->buf; } - struct alternate_object_database *alt_odb_list; - static struct alternate_object_database 
**alt_odb_tail; - /* * Return non-zero iff the path is usable as an alternate object database. */ - static int alt_odb_usable(struct strbuf *path, const char *normalized_objdir) + static int alt_odb_usable(struct raw_object_store *o, + struct strbuf *path, + const char *normalized_objdir) { struct alternate_object_database *alt; @@@ -365,7 -365,7 +366,7 @@@ * Prevent the common mistake of listing the same * thing twice, or object directory itself. */ - for (alt = alt_odb_list; alt; alt = alt->next) { + for (alt = o->alt_odb_list; alt; alt = alt->next) { if (!fspathcmp(path->buf, alt->path)) return 0; } @@@ -390,9 -390,11 +391,11 @@@ * SHA1, an extra slash for the first level indirection, and the * terminating NUL. */ - static void read_info_alternates(const char * relative_base, int depth); - static int link_alt_odb_entry(const char *entry, const char *relative_base, - int depth, const char *normalized_objdir) + static void read_info_alternates(struct repository *r, + const char *relative_base, + int depth); + static int link_alt_odb_entry(struct repository *r, const char *entry, + const char *relative_base, int depth, const char *normalized_objdir) { struct alternate_object_database *ent; struct strbuf pathbuf = STRBUF_INIT; @@@ -417,7 -419,7 +420,7 @@@ while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/') strbuf_setlen(&pathbuf, pathbuf.len - 1); - if (!alt_odb_usable(&pathbuf, normalized_objdir)) { + if (!alt_odb_usable(r->objects, &pathbuf, normalized_objdir)) { strbuf_release(&pathbuf); return -1; } @@@ -425,12 -427,12 +428,12 @@@ ent = alloc_alt_odb(pathbuf.buf); /* add the alternate entry */ - *alt_odb_tail = ent; - alt_odb_tail = &(ent->next); + *r->objects->alt_odb_tail = ent; + r->objects->alt_odb_tail = &(ent->next); ent->next = NULL; /* recursively add alternates */ - read_info_alternates(pathbuf.buf, depth + 1); + read_info_alternates(r, pathbuf.buf, depth + 1); strbuf_release(&pathbuf); return 0; @@@ -465,8 -467,8 +468,8 @@@ static const char *parse_alt_odb_entry( return end; } - static void link_alt_odb_entries(const char *alt, int sep, - const char *relative_base, int depth) + static void link_alt_odb_entries(struct repository *r, const char *alt, + int sep, const char *relative_base, int depth) { struct strbuf objdirbuf = STRBUF_INIT; struct strbuf entry = STRBUF_INIT; @@@ -480,7 -482,7 +483,7 @@@ return; } - strbuf_add_absolute_path(&objdirbuf, get_object_directory()); + strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir); if (strbuf_normalize_path(&objdirbuf) < 0) die("unable to normalize object directory: %s", objdirbuf.buf); @@@ -489,13 -491,16 +492,16 @@@ alt = parse_alt_odb_entry(alt, sep, &entry); if (!entry.len) continue; - link_alt_odb_entry(entry.buf, relative_base, depth, objdirbuf.buf); + link_alt_odb_entry(r, entry.buf, + relative_base, depth, objdirbuf.buf); } strbuf_release(&entry); strbuf_release(&objdirbuf); } - static void read_info_alternates(const char * relative_base, int depth) + static void read_info_alternates(struct repository *r, + const char *relative_base, + int depth) { char *path; struct strbuf buf = STRBUF_INIT; @@@ -507,7 -512,7 +513,7 @@@ return; } - link_alt_odb_entries(buf.buf, '\n', relative_base, depth); + link_alt_odb_entries(r, buf.buf, '\n', relative_base, depth); strbuf_release(&buf); free(path); } @@@ -560,8 -565,9 +566,9 @@@ void add_to_alternates_file(const char fprintf_or_die(out, "%s\n", reference); if (commit_lock_file(&lock)) die_errno("unable to move new alternates file into place"); - if (alt_odb_tail) - 
link_alt_odb_entries(reference, '\n', NULL, 0); + if (the_repository->objects->alt_odb_tail) + link_alt_odb_entries(the_repository, reference, + '\n', NULL, 0); } free(alts); } @@@ -572,9 -578,10 +579,10 @@@ void add_to_alternates_memory(const cha * Make sure alternates are initialized, or else our entry may be * overwritten when they are. */ - prepare_alt_odb(); + prepare_alt_odb(the_repository); - link_alt_odb_entries(reference, '\n', NULL, 0); + link_alt_odb_entries(the_repository, reference, + '\n', NULL, 0); } /* @@@ -657,8 -664,8 +665,8 @@@ int foreach_alt_odb(alt_odb_fn fn, voi struct alternate_object_database *ent; int r = 0; - prepare_alt_odb(); - for (ent = alt_odb_list; ent; ent = ent->next) { + prepare_alt_odb(the_repository); + for (ent = the_repository->objects->alt_odb_list; ent; ent = ent->next) { r = fn(ent, cb); if (r) break; @@@ -666,15 -673,15 +674,15 @@@ return r; } - void prepare_alt_odb(void) + void prepare_alt_odb(struct repository *r) { - if (alt_odb_tail) + if (r->objects->alt_odb_tail) return; - alt_odb_tail = &alt_odb_list; - link_alt_odb_entries(the_repository->alternate_db, PATH_SEP, NULL, 0); + r->objects->alt_odb_tail = &r->objects->alt_odb_list; + link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0); - read_info_alternates(get_object_directory(), 0); + read_info_alternates(r, r->objects->objectdir, 0); } /* Returns 1 if we have successfully freshened the file, 0 otherwise. */ @@@ -706,7 -713,7 +714,7 @@@ static int check_and_freshen_local(cons static struct strbuf buf = STRBUF_INIT; strbuf_reset(&buf); - sha1_file_name(&buf, sha1); + sha1_file_name(the_repository, &buf, sha1); return check_and_freshen_file(buf.buf, freshen); } @@@ -714,8 -721,8 +722,8 @@@ static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen) { struct alternate_object_database *alt; - prepare_alt_odb(); - for (alt = alt_odb_list; alt; alt = alt->next) { + prepare_alt_odb(the_repository); + for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) { const char *path = alt_sha1_path(alt, sha1); if (check_and_freshen_file(path, freshen)) return 1; @@@ -783,31 -790,31 +791,31 @@@ void *xmmap(void *start, size_t length * With "map" == NULL, try reading the object named with "sha1" using * the streaming interface and rehash it to do the same. */ -int check_sha1_signature(const unsigned char *sha1, void *map, - unsigned long size, const char *type) +int check_object_signature(const struct object_id *oid, void *map, + unsigned long size, const char *type) { - unsigned char real_sha1[20]; + struct object_id real_oid; enum object_type obj_type; struct git_istream *st; - git_SHA_CTX c; - char hdr[32]; + git_hash_ctx c; + char hdr[MAX_HEADER_LEN]; int hdrlen; if (map) { - hash_sha1_file(map, size, type, real_sha1); - return hashcmp(sha1, real_sha1) ? -1 : 0; + hash_object_file(map, size, type, &real_oid); + return oidcmp(oid, &real_oid) ? -1 : 0; } - st = open_istream(sha1, &obj_type, &size, NULL); + st = open_istream(oid, &obj_type, &size, NULL); if (!st) return -1; /* Generate the header */ - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1; + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1; /* Sha1.. 
*/ - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, hdrlen); for (;;) { char buf[1024 * 16]; ssize_t readlen = read_istream(st, buf, sizeof(buf)); @@@ -818,11 -825,11 +826,11 @@@ } if (!readlen) break; - git_SHA1_Update(&c, buf, readlen); + the_hash_algo->update_fn(&c, buf, readlen); } - git_SHA1_Final(real_sha1, &c); + the_hash_algo->final_fn(real_oid.hash, &c); close_istream(st); - return hashcmp(sha1, real_sha1) ? -1 : 0; + return oidcmp(oid, &real_oid) ? -1 : 0; } int git_open_cloexec(const char *name, int flags) @@@ -860,22 -867,22 +868,22 @@@ * Note that it may point to static storage and is only valid until another * call to sha1_file_name(), etc. */ - static int stat_sha1_file(const unsigned char *sha1, struct stat *st, - const char **path) + static int stat_sha1_file(struct repository *r, const unsigned char *sha1, + struct stat *st, const char **path) { struct alternate_object_database *alt; static struct strbuf buf = STRBUF_INIT; strbuf_reset(&buf); - sha1_file_name(&buf, sha1); + sha1_file_name(r, &buf, sha1); *path = buf.buf; if (!lstat(*path, st)) return 0; - prepare_alt_odb(); + prepare_alt_odb(r); errno = ENOENT; - for (alt = alt_odb_list; alt; alt = alt->next) { + for (alt = r->objects->alt_odb_list; alt; alt = alt->next) { *path = alt_sha1_path(alt, sha1); if (!lstat(*path, st)) return 0; @@@ -888,7 -895,8 +896,8 @@@ * Like stat_sha1_file(), but actually open the object and return the * descriptor. See the caveats on the "path" parameter above. */ - static int open_sha1_file(const unsigned char *sha1, const char **path) + static int open_sha1_file(struct repository *r, + const unsigned char *sha1, const char **path) { int fd; struct alternate_object_database *alt; @@@ -896,7 -904,7 +905,7 @@@ static struct strbuf buf = STRBUF_INIT; strbuf_reset(&buf); - sha1_file_name(&buf, sha1); + sha1_file_name(r, &buf, sha1); *path = buf.buf; fd = git_open(*path); @@@ -904,8 -912,8 +913,8 @@@ return fd; most_interesting_errno = errno; - prepare_alt_odb(); - for (alt = alt_odb_list; alt; alt = alt->next) { + prepare_alt_odb(r); + for (alt = r->objects->alt_odb_list; alt; alt = alt->next) { *path = alt_sha1_path(alt, sha1); fd = git_open(*path); if (fd >= 0) @@@ -921,9 -929,8 +930,8 @@@ * Map the loose object at "path" if it is not NULL, or the path found by * searching for a loose object named "sha1". 
*/ - static void *map_sha1_file_1(const char *path, - const unsigned char *sha1, - unsigned long *size) + static void *map_sha1_file_1(struct repository *r, const char *path, + const unsigned char *sha1, unsigned long *size) { void *map; int fd; @@@ -931,7 -938,7 +939,7 @@@ if (path) fd = git_open(path); else - fd = open_sha1_file(sha1, &path); + fd = open_sha1_file(r, sha1, &path); map = NULL; if (fd >= 0) { struct stat st; @@@ -950,9 -957,10 +958,10 @@@ return map; } - void *map_sha1_file(const unsigned char *sha1, unsigned long *size) + void *map_sha1_file(struct repository *r, + const unsigned char *sha1, unsigned long *size) { - return map_sha1_file_1(NULL, sha1, size); + return map_sha1_file_1(r, NULL, sha1, size); } static int unpack_sha1_short_header(git_zstream *stream, @@@ -1093,8 -1101,8 +1102,8 @@@ static int parse_sha1_header_extended(c } type = type_from_string_gently(type_buf, type_len, 1); - if (oi->typename) - strbuf_add(oi->typename, type_buf, type_len); + if (oi->type_name) + strbuf_add(oi->type_name, type_buf, type_len); /* * Set type to 0 if its an unknown object and * we're obtaining the type using '--allow-unknown-type' @@@ -1141,15 -1149,15 +1150,15 @@@ int parse_sha1_header(const char *hdr, return parse_sha1_header_extended(hdr, &oi, 0); } - static int sha1_loose_object_info(const unsigned char *sha1, - struct object_info *oi, - int flags) + static int sha1_loose_object_info(struct repository *r, + const unsigned char *sha1, + struct object_info *oi, int flags) { int status = 0; unsigned long mapsize; void *map; git_zstream stream; - char hdr[32]; + char hdr[MAX_HEADER_LEN]; struct strbuf hdrbuf = STRBUF_INIT; unsigned long size_scratch; @@@ -1164,17 -1172,17 +1173,17 @@@ * return value implicitly indicates whether the * object even exists. */ - if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) { + if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) { const char *path; struct stat st; - if (stat_sha1_file(sha1, &st, &path) < 0) + if (stat_sha1_file(r, sha1, &st, &path) < 0) return -1; if (oi->disk_sizep) *oi->disk_sizep = st.st_size; return 0; } - map = map_sha1_file(sha1, &mapsize); + map = map_sha1_file(r, sha1, &mapsize); if (!map) return -1; @@@ -1221,25 -1229,24 +1230,25 @@@ int fetch_if_missing = 1; -int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags) +int oid_object_info_extended(const struct object_id *oid, struct object_info *oi, unsigned flags) { static struct object_info blank_oi = OBJECT_INFO_INIT; struct pack_entry e; int rtype; - const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ? 
- lookup_replace_object(sha1) : - sha1; + const struct object_id *real = oid; int already_retried = 0; - if (is_null_sha1(real)) + if (flags & OBJECT_INFO_LOOKUP_REPLACE) + real = lookup_replace_object(oid); + + if (is_null_oid(real)) return -1; if (!oi) oi = &blank_oi; if (!(flags & OBJECT_INFO_SKIP_CACHED)) { - struct cached_object *co = find_cached_object(real); + struct cached_object *co = find_cached_object(real->hash); if (co) { if (oi->typep) *(oi->typep) = co->type; @@@ -1249,8 -1256,8 +1258,8 @@@ *(oi->disk_sizep) = 0; if (oi->delta_base_sha1) hashclr(oi->delta_base_sha1); - if (oi->typename) - strbuf_addstr(oi->typename, typename(co->type)); + if (oi->type_name) + strbuf_addstr(oi->type_name, type_name(co->type)); if (oi->contentp) *oi->contentp = xmemdupz(co->buf, co->size); oi->whence = OI_CACHED; @@@ -1259,22 -1266,17 +1268,22 @@@ } while (1) { - if (find_pack_entry(real, &e)) + if (find_pack_entry(real->hash, &e)) break; + if (flags & OBJECT_INFO_IGNORE_LOOSE) + return -1; + /* Most likely it's a loose object. */ - if (!sha1_loose_object_info(real->hash, oi, flags)) - if (!sha1_loose_object_info(the_repository, real, oi, flags)) ++ if (!sha1_loose_object_info(the_repository, real->hash, oi, flags)) return 0; /* Not a loose object; someone else may have just packed it. */ - reprepare_packed_git(); - if (find_pack_entry(real, &e)) - break; + if (!(flags & OBJECT_INFO_QUICK)) { + reprepare_packed_git(); + if (find_pack_entry(real->hash, &e)) + break; + } /* Check if it is a missing object */ if (fetch_if_missing && repository_format_partial_clone && @@@ -1283,7 -1285,7 +1292,7 @@@ * TODO Investigate haveing fetch_object() return * TODO error/success and stopping the music here. */ - fetch_object(repository_format_partial_clone, real); + fetch_object(repository_format_partial_clone, real->hash); already_retried = 1; continue; } @@@ -1299,8 -1301,8 +1308,8 @@@ return 0; rtype = packed_object_info(e.p, e.offset, oi); if (rtype < 0) { - mark_bad_packed_object(e.p, real); - return sha1_object_info_extended(real, oi, 0); + mark_bad_packed_object(e.p, real->hash); + return oid_object_info_extended(real, oi, 0); } else if (oi->whence == OI_PACKED) { oi->u.packed.offset = e.offset; oi->u.packed.pack = e.p; @@@ -1312,15 -1314,15 +1321,15 @@@ } /* returns enum object_type or negative */ -int sha1_object_info(const unsigned char *sha1, unsigned long *sizep) +int oid_object_info(const struct object_id *oid, unsigned long *sizep) { enum object_type type; struct object_info oi = OBJECT_INFO_INIT; oi.typep = &type; oi.sizep = sizep; - if (sha1_object_info_extended(sha1, &oi, - OBJECT_INFO_LOOKUP_REPLACE) < 0) + if (oid_object_info_extended(oid, &oi, + OBJECT_INFO_LOOKUP_REPLACE) < 0) return -1; return type; } @@@ -1328,27 -1330,24 +1337,27 @@@ static void *read_object(const unsigned char *sha1, enum object_type *type, unsigned long *size) { + struct object_id oid; struct object_info oi = OBJECT_INFO_INIT; void *content; oi.typep = type; oi.sizep = size; oi.contentp = &content; - if (sha1_object_info_extended(sha1, &oi, 0) < 0) + hashcpy(oid.hash, sha1); + + if (oid_object_info_extended(&oid, &oi, 0) < 0) return NULL; return content; } -int pretend_sha1_file(void *buf, unsigned long len, enum object_type type, - unsigned char *sha1) +int pretend_object_file(void *buf, unsigned long len, enum object_type type, + struct object_id *oid) { struct cached_object *co; - hash_sha1_file(buf, len, typename(type), sha1); - if (has_sha1_file(sha1) || find_cached_object(sha1)) + hash_object_file(buf, len, 
type_name(type), oid); + if (has_sha1_file(oid->hash) || find_cached_object(oid->hash)) return 0; ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc); co = &cached_objects[cached_object_nr++]; @@@ -1356,7 -1355,7 +1365,7 @@@ co->type = type; co->buf = xmalloc(len); memcpy(co->buf, buf, len); - hashcpy(co->sha1, sha1); + hashcpy(co->sha1, oid->hash); return 0; } @@@ -1365,65 -1364,65 +1374,65 @@@ * deal with them should arrange to call read_object() and give error * messages themselves. */ -void *read_sha1_file_extended(const unsigned char *sha1, - enum object_type *type, - unsigned long *size, - int lookup_replace) +void *read_object_file_extended(const struct object_id *oid, + enum object_type *type, + unsigned long *size, + int lookup_replace) { void *data; const struct packed_git *p; const char *path; struct stat st; - const unsigned char *repl = lookup_replace ? lookup_replace_object(sha1) - : sha1; + const struct object_id *repl = lookup_replace ? lookup_replace_object(oid) + : oid; errno = 0; - data = read_object(repl, type, size); + data = read_object(repl->hash, type, size); if (data) return data; if (errno && errno != ENOENT) - die_errno("failed to read object %s", sha1_to_hex(sha1)); + die_errno("failed to read object %s", oid_to_hex(oid)); /* die if we replaced an object with one that does not exist */ - if (repl != sha1) + if (repl != oid) die("replacement %s not found for %s", - sha1_to_hex(repl), sha1_to_hex(sha1)); + oid_to_hex(repl), oid_to_hex(oid)); - if (!stat_sha1_file(repl->hash, &st, &path)) - if (!stat_sha1_file(the_repository, repl, &st, &path)) ++ if (!stat_sha1_file(the_repository, repl->hash, &st, &path)) die("loose object %s (stored in %s) is corrupt", - sha1_to_hex(repl), path); + oid_to_hex(repl), path); - if ((p = has_packed_and_bad(repl)) != NULL) + if ((p = has_packed_and_bad(repl->hash)) != NULL) die("packed object %s (stored in %s) is corrupt", - sha1_to_hex(repl), p->pack_name); + oid_to_hex(repl), p->pack_name); return NULL; } -void *read_object_with_reference(const unsigned char *sha1, +void *read_object_with_reference(const struct object_id *oid, const char *required_type_name, unsigned long *size, - unsigned char *actual_sha1_return) + struct object_id *actual_oid_return) { enum object_type type, required_type; void *buffer; unsigned long isize; - unsigned char actual_sha1[20]; + struct object_id actual_oid; required_type = type_from_string(required_type_name); - hashcpy(actual_sha1, sha1); + oidcpy(&actual_oid, oid); while (1) { int ref_length = -1; const char *ref_type = NULL; - buffer = read_sha1_file(actual_sha1, &type, &isize); + buffer = read_object_file(&actual_oid, &type, &isize); if (!buffer) return NULL; if (type == required_type) { *size = isize; - if (actual_sha1_return) - hashcpy(actual_sha1_return, actual_sha1); + if (actual_oid_return) + oidcpy(actual_oid_return, &actual_oid); return buffer; } /* Handle references */ @@@ -1437,32 -1436,32 +1446,32 @@@ } ref_length = strlen(ref_type); - if (ref_length + 40 > isize || + if (ref_length + GIT_SHA1_HEXSZ > isize || memcmp(buffer, ref_type, ref_length) || - get_sha1_hex((char *) buffer + ref_length, actual_sha1)) { + get_oid_hex((char *) buffer + ref_length, &actual_oid)) { free(buffer); return NULL; } free(buffer); /* Now we have the ID of the referred-to object in - * actual_sha1. Check again. */ + * actual_oid. Check again. 
*/ } } -static void write_sha1_file_prepare(const void *buf, unsigned long len, - const char *type, unsigned char *sha1, - char *hdr, int *hdrlen) +static void write_object_file_prepare(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + char *hdr, int *hdrlen) { - git_SHA_CTX c; + git_hash_ctx c; /* Generate the header */ *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1; /* Sha1.. */ - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, *hdrlen); - git_SHA1_Update(&c, buf, len); - git_SHA1_Final(sha1, &c); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, *hdrlen); + the_hash_algo->update_fn(&c, buf, len); + the_hash_algo->final_fn(oid->hash, &c); } /* @@@ -1515,12 -1514,12 +1524,12 @@@ static int write_buffer(int fd, const v return 0; } -int hash_sha1_file(const void *buf, unsigned long len, const char *type, - unsigned char *sha1) +int hash_object_file(const void *buf, unsigned long len, const char *type, + struct object_id *oid) { - char hdr[32]; + char hdr[MAX_HEADER_LEN]; int hdrlen = sizeof(hdr); - write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen); + write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen); return 0; } @@@ -1578,20 -1577,19 +1587,20 @@@ static int create_tmpfile(struct strbu return fd; } -static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen, - const void *buf, unsigned long len, time_t mtime) +static int write_loose_object(const struct object_id *oid, char *hdr, + int hdrlen, const void *buf, unsigned long len, + time_t mtime) { int fd, ret; unsigned char compressed[4096]; git_zstream stream; - git_SHA_CTX c; - unsigned char parano_sha1[20]; + git_hash_ctx c; + struct object_id parano_oid; static struct strbuf tmp_file = STRBUF_INIT; static struct strbuf filename = STRBUF_INIT; strbuf_reset(&filename); - sha1_file_name(&filename, oid->hash); - sha1_file_name(the_repository, &filename, sha1); ++ sha1_file_name(the_repository, &filename, oid->hash); fd = create_tmpfile(&tmp_file, filename.buf); if (fd < 0) { @@@ -1605,14 -1603,14 +1614,14 @@@ git_deflate_init(&stream, zlib_compression_level); stream.next_out = compressed; stream.avail_out = sizeof(compressed); - git_SHA1_Init(&c); + the_hash_algo->init_fn(&c); /* First header.. */ stream.next_in = (unsigned char *)hdr; stream.avail_in = hdrlen; while (git_deflate(&stream, 0) == Z_OK) ; /* nothing */ - git_SHA1_Update(&c, hdr, hdrlen); + the_hash_algo->update_fn(&c, hdr, hdrlen); /* Then the data itself.. 
*/ stream.next_in = (void *)buf; @@@ -1620,7 -1618,7 +1629,7 @@@ do { unsigned char *in0 = stream.next_in; ret = git_deflate(&stream, Z_FINISH); - git_SHA1_Update(&c, in0, stream.next_in - in0); + the_hash_algo->update_fn(&c, in0, stream.next_in - in0); if (write_buffer(fd, compressed, stream.next_out - compressed) < 0) die("unable to write sha1 file"); stream.next_out = compressed; @@@ -1628,16 -1626,13 +1637,16 @@@ } while (ret == Z_OK); if (ret != Z_STREAM_END) - die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret); + die("unable to deflate new object %s (%d)", oid_to_hex(oid), + ret); ret = git_deflate_end_gently(&stream); if (ret != Z_OK) - die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret); - git_SHA1_Final(parano_sha1, &c); - if (hashcmp(sha1, parano_sha1) != 0) - die("confused by unstable object source data for %s", sha1_to_hex(sha1)); + die("deflateEnd on object %s failed (%d)", oid_to_hex(oid), + ret); + the_hash_algo->final_fn(parano_oid.hash, &c); + if (oidcmp(oid, ¶no_oid) != 0) + die("confused by unstable object source data for %s", + oid_to_hex(oid)); close_sha1_file(fd); @@@ -1670,60 -1665,58 +1679,60 @@@ static int freshen_packed_object(const return 1; } -int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1) +int write_object_file(const void *buf, unsigned long len, const char *type, + struct object_id *oid) { - char hdr[32]; + char hdr[MAX_HEADER_LEN]; int hdrlen = sizeof(hdr); /* Normally if we have it in the pack then we do not bother writing * it out into .git/objects/??/?{38} file. */ - write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen); - if (freshen_packed_object(sha1) || freshen_loose_object(sha1)) + write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen); + if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash)) return 0; - return write_loose_object(sha1, hdr, hdrlen, buf, len, 0); + return write_loose_object(oid, hdr, hdrlen, buf, len, 0); } -int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, - struct object_id *oid, unsigned flags) +int hash_object_file_literally(const void *buf, unsigned long len, + const char *type, struct object_id *oid, + unsigned flags) { char *header; int hdrlen, status = 0; /* type string, SP, %lu of the length plus NUL must fit this */ - hdrlen = strlen(type) + 32; + hdrlen = strlen(type) + MAX_HEADER_LEN; header = xmalloc(hdrlen); - write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen); + write_object_file_prepare(buf, len, type, oid, header, &hdrlen); if (!(flags & HASH_WRITE_OBJECT)) goto cleanup; if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash)) goto cleanup; - status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0); + status = write_loose_object(oid, header, hdrlen, buf, len, 0); cleanup: free(header); return status; } -int force_object_loose(const unsigned char *sha1, time_t mtime) +int force_object_loose(const struct object_id *oid, time_t mtime) { void *buf; unsigned long len; enum object_type type; - char hdr[32]; + char hdr[MAX_HEADER_LEN]; int hdrlen; int ret; - if (has_loose_object(sha1)) + if (has_loose_object(oid->hash)) return 0; - buf = read_object(sha1, &type, &len); + buf = read_object(oid->hash, &type, &len); if (!buf) - return error("cannot read sha1_file for %s", sha1_to_hex(sha1)); - hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1; - ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime); + return 
error("cannot read sha1_file for %s", oid_to_hex(oid)); + hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1; + ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime); free(buf); return ret; @@@ -1731,12 -1724,10 +1740,12 @@@ int has_sha1_file_with_flags(const unsigned char *sha1, int flags) { + struct object_id oid; if (!startup_info->have_repository) return 0; - return sha1_object_info_extended(sha1, NULL, - flags | OBJECT_INFO_SKIP_CACHED) >= 0; + hashcpy(oid.hash, sha1); + return oid_object_info_extended(&oid, NULL, + flags | OBJECT_INFO_SKIP_CACHED) >= 0; } int has_object_file(const struct object_id *oid) @@@ -1808,9 -1799,9 +1817,9 @@@ static int index_mem(struct object_id * } if (write_object) - ret = write_sha1_file(buf, size, typename(type), oid->hash); + ret = write_object_file(buf, size, type_name(type), oid); else - ret = hash_sha1_file(buf, size, typename(type), oid->hash); + ret = hash_object_file(buf, size, type_name(type), oid); if (re_allocated) free(buf); return ret; @@@ -1830,11 -1821,11 +1839,11 @@@ static int index_stream_convert_blob(st get_conv_flags(flags)); if (write_object) - ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), - oid->hash); + ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB), + oid); else - ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB), - oid->hash); + ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB), + oid); strbuf_release(&sbuf); return ret; } @@@ -1902,7 -1893,7 +1911,7 @@@ static int index_stream(struct object_i enum object_type type, const char *path, unsigned flags) { - return index_bulk_checkin(oid->hash, fd, size, type, path, flags); + return index_bulk_checkin(oid, fd, size, type, path, flags); } int index_fd(struct object_id *oid, int fd, struct stat *st, @@@ -1948,8 -1939,8 +1957,8 @@@ int index_path(struct object_id *oid, c if (strbuf_readlink(&sb, path, st->st_size)) return error_errno("readlink(\"%s\")", path); if (!(flags & HASH_WRITE_OBJECT)) - hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash); - else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash)) + hash_object_file(sb.buf, sb.len, blob_type, oid); + else if (write_object_file(sb.buf, sb.len, blob_type, oid)) rc = error("%s: failed to insert into database", path); strbuf_release(&sb); break; @@@ -1976,14 -1967,14 +1985,14 @@@ int read_pack_header(int fd, struct pac return 0; } -void assert_sha1_type(const unsigned char *sha1, enum object_type expect) +void assert_oid_type(const struct object_id *oid, enum object_type expect) { - enum object_type type = sha1_object_info(sha1, NULL); + enum object_type type = oid_object_info(oid, NULL); if (type < 0) - die("%s is not a valid object", sha1_to_hex(sha1)); + die("%s is not a valid object", oid_to_hex(oid)); if (type != expect) - die("%s is not a valid '%s' object", sha1_to_hex(sha1), - typename(expect)); + die("%s is not a valid '%s' object", oid_to_hex(oid), + type_name(expect)); } int for_each_file_in_obj_subdir(unsigned int subdir_nr, @@@ -2134,14 -2125,14 +2143,14 @@@ static int check_stream_sha1(git_zstrea const char *path, const unsigned char *expected_sha1) { - git_SHA_CTX c; + git_hash_ctx c; unsigned char real_sha1[GIT_MAX_RAWSZ]; unsigned char buf[4096]; unsigned long total_read; int status = Z_OK; - git_SHA1_Init(&c); - git_SHA1_Update(&c, hdr, stream->total_out); + the_hash_algo->init_fn(&c); + the_hash_algo->update_fn(&c, hdr, stream->total_out); /* * We already read some bytes into hdr, but the ones up to the NUL @@@ -2160,7 
-2151,7 +2169,7 @@@ if (size - total_read < stream->avail_out) stream->avail_out = size - total_read; status = git_inflate(stream, Z_FINISH); - git_SHA1_Update(&c, buf, stream->next_out - buf); + the_hash_algo->update_fn(&c, buf, stream->next_out - buf); total_read += stream->next_out - buf; } git_inflate_end(stream); @@@ -2175,7 -2166,7 +2184,7 @@@ return -1; } - git_SHA1_Final(real_sha1, &c); + the_hash_algo->final_fn(real_sha1, &c); if (hashcmp(expected_sha1, real_sha1)) { error("sha1 mismatch for %s (expected %s)", path, sha1_to_hex(expected_sha1)); @@@ -2186,7 -2177,7 +2195,7 @@@ } int read_loose_object(const char *path, - const unsigned char *expected_sha1, + const struct object_id *expected_oid, enum object_type *type, unsigned long *size, void **contents) @@@ -2195,11 -2186,11 +2204,11 @@@ void *map = NULL; unsigned long mapsize; git_zstream stream; - char hdr[32]; + char hdr[MAX_HEADER_LEN]; *contents = NULL; - map = map_sha1_file_1(path, NULL, &mapsize); + map = map_sha1_file_1(the_repository, path, NULL, &mapsize); if (!map) { error_errno("unable to mmap %s", path); goto out; @@@ -2218,19 -2209,19 +2227,19 @@@ } if (*type == OBJ_BLOB) { - if (check_stream_sha1(&stream, hdr, *size, path, expected_sha1) < 0) + if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0) goto out; } else { - *contents = unpack_sha1_rest(&stream, hdr, *size, expected_sha1); + *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash); if (!*contents) { error("unable to unpack contents of %s", path); git_inflate_end(&stream); goto out; } - if (check_sha1_signature(expected_sha1, *contents, - *size, typename(*type))) { + if (check_object_signature(expected_oid, *contents, + *size, type_name(*type))) { error("sha1 mismatch for %s (expected %s)", path, - sha1_to_hex(expected_sha1)); + oid_to_hex(expected_oid)); free(*contents); goto out; } diff --combined sha1_name.c index 0185c6081a,4325f74e0c..509a95f0cf --- a/sha1_name.c +++ b/sha1_name.c @@@ -10,6 -10,8 +10,8 @@@ #include "dir.h" #include "sha1-array.h" #include "packfile.h" + #include "object-store.h" + #include "repository.h" static int get_oid_oneline(const char *, struct object_id *, struct commit_list *); @@@ -104,7 -106,7 +106,7 @@@ static void find_short_object_filename( */ fakeent = alloc_alt_odb(get_object_directory()); } - fakeent->next = alt_odb_list; + fakeent->next = the_repository->objects->alt_odb_list; for (alt = fakeent; alt && !ds->ambiguous; alt = alt->next) { int pos; @@@ -150,14 -152,31 +152,14 @@@ static int match_sha(unsigned len, cons static void unique_in_pack(struct packed_git *p, struct disambiguate_state *ds) { - uint32_t num, last, i, first = 0; + uint32_t num, i, first = 0; const struct object_id *current = NULL; if (open_pack_index(p) || !p->num_objects) return; num = p->num_objects; - last = num; - while (first < last) { - uint32_t mid = first + (last - first) / 2; - const unsigned char *current; - int cmp; - - current = nth_packed_object_sha1(p, mid); - cmp = hashcmp(ds->bin_pfx.hash, current); - if (!cmp) { - first = mid; - break; - } - if (cmp > 0) { - first = mid+1; - continue; - } - last = mid; - } + bsearch_pack(&ds->bin_pfx, p, &first); /* * At this point, "first" is the location of the lowest object @@@ -178,7 -197,8 +180,8 @@@ static void find_short_packed_object(st struct packed_git *p; prepare_packed_git(); - for (p = packed_git; p && !ds->ambiguous; p = p->next) + for (p = get_packed_git(the_repository); p && !ds->ambiguous; + p = p->next) unique_in_pack(p, ds); } @@@ -221,7 -241,7 
+224,7 @@@ static int finish_object_disambiguation static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused) { - int kind = sha1_object_info(oid->hash, NULL); + int kind = oid_object_info(oid, NULL); return kind == OBJ_COMMIT; } @@@ -230,7 -250,7 +233,7 @@@ static int disambiguate_committish_only struct object *obj; int kind; - kind = sha1_object_info(oid->hash, NULL); + kind = oid_object_info(oid, NULL); if (kind == OBJ_COMMIT) return 1; if (kind != OBJ_TAG) @@@ -245,7 -265,7 +248,7 @@@ static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused) { - int kind = sha1_object_info(oid->hash, NULL); + int kind = oid_object_info(oid, NULL); return kind == OBJ_TREE; } @@@ -254,7 -274,7 +257,7 @@@ static int disambiguate_treeish_only(co struct object *obj; int kind; - kind = sha1_object_info(oid->hash, NULL); + kind = oid_object_info(oid, NULL); if (kind == OBJ_TREE || kind == OBJ_COMMIT) return 1; if (kind != OBJ_TAG) @@@ -269,7 -289,7 +272,7 @@@ static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused) { - int kind = sha1_object_info(oid->hash, NULL); + int kind = oid_object_info(oid, NULL); return kind == OBJ_BLOB; } @@@ -334,7 -354,7 +337,7 @@@ static int init_object_disambiguation(c ds->len = len; ds->hex_pfx[len] = '\0'; - prepare_alt_odb(); + prepare_alt_odb(the_repository); return 0; } @@@ -348,7 -368,7 +351,7 @@@ static int show_ambiguous_object(const if (ds->fn && !ds->fn(oid, ds->cb_data)) return 0; - type = sha1_object_info(oid->hash, NULL); + type = oid_object_info(oid, NULL); if (type == OBJ_COMMIT) { struct commit *commit = lookup_commit(oid); if (commit) { @@@ -363,8 -383,8 +366,8 @@@ } advise(" %s %s%s", - find_unique_abbrev(oid->hash, DEFAULT_ABBREV), - typename(type) ? typename(type) : "unknown type", + find_unique_abbrev(oid, DEFAULT_ABBREV), + type_name(type) ? type_name(type) : "unknown type", desc.buf); strbuf_release(&desc); @@@ -463,7 -483,7 +466,7 @@@ struct min_abbrev_data unsigned int init_len; unsigned int cur_len; char *hex; - const unsigned char *hash; + const struct object_id *oid; }; static inline char get_hex_char_from_oid(const struct object_id *oid, @@@ -495,34 -515,50 +498,34 @@@ static void find_abbrev_len_for_pack(st struct min_abbrev_data *mad) { int match = 0; - uint32_t num, last, first = 0; + uint32_t num, first = 0; struct object_id oid; + const struct object_id *mad_oid; if (open_pack_index(p) || !p->num_objects) return; num = p->num_objects; - last = num; - while (first < last) { - uint32_t mid = first + (last - first) / 2; - const unsigned char *current; - int cmp; - - current = nth_packed_object_sha1(p, mid); - cmp = hashcmp(mad->hash, current); - if (!cmp) { - match = 1; - first = mid; - break; - } - if (cmp > 0) { - first = mid + 1; - continue; - } - last = mid; - } + mad_oid = mad->oid; + match = bsearch_pack(mad_oid, p, &first); /* * first is now the position in the packfile where we would insert * mad->hash if it does not exist (or the position of mad->hash if - * it does exist). Hence, we consider a maximum of three objects + * it does exist). Hence, we consider a maximum of two objects * nearby for the abbreviation length. 
*/ mad->init_len = 0; if (!match) { - nth_packed_object_oid(&oid, p, first); - extend_abbrev_len(&oid, mad); + if (nth_packed_object_oid(&oid, p, first)) + extend_abbrev_len(&oid, mad); } else if (first < num - 1) { - nth_packed_object_oid(&oid, p, first + 1); - extend_abbrev_len(&oid, mad); + if (nth_packed_object_oid(&oid, p, first + 1)) + extend_abbrev_len(&oid, mad); } if (first > 0) { - nth_packed_object_oid(&oid, p, first - 1); - extend_abbrev_len(&oid, mad); + if (nth_packed_object_oid(&oid, p, first - 1)) + extend_abbrev_len(&oid, mad); } mad->init_len = mad->cur_len; } @@@ -532,11 -568,11 +535,11 @@@ static void find_abbrev_len_packed(stru struct packed_git *p; prepare_packed_git(); - for (p = packed_git; p; p = p->next) + for (p = get_packed_git(the_repository); p; p = p->next) find_abbrev_len_for_pack(p, mad); } -int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len) +int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len) { struct disambiguate_state ds; struct min_abbrev_data mad; @@@ -563,14 -599,14 +566,14 @@@ len = FALLBACK_DEFAULT_ABBREV; } - sha1_to_hex_r(hex, sha1); + oid_to_hex_r(hex, oid); if (len == GIT_SHA1_HEXSZ || !len) return GIT_SHA1_HEXSZ; mad.init_len = len; mad.cur_len = len; mad.hex = hex; - mad.hash = sha1; + mad.oid = oid; find_abbrev_len_packed(&mad); @@@ -588,13 -624,13 +591,13 @@@ return mad.cur_len; } -const char *find_unique_abbrev(const unsigned char *sha1, int len) +const char *find_unique_abbrev(const struct object_id *oid, int len) { static int bufno; static char hexbuffer[4][GIT_MAX_HEXSZ + 1]; char *hex = hexbuffer[bufno]; bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer); - find_unique_abbrev_r(hex, sha1, len); + find_unique_abbrev_r(hex, oid, len); return hex; } @@@ -868,8 -904,8 +871,8 @@@ struct object *peel_to_type(const char if (name) error("%.*s: expected %s type, but the object " "dereferences to %s type", - namelen, name, typename(expected_type), - typename(o->type)); + namelen, name, type_name(expected_type), + type_name(o->type)); return NULL; } } @@@ -1496,7 -1532,8 +1499,7 @@@ static void diagnose_invalid_oid_path(c if (is_missing_file_error(errno)) { char *fullname = xstrfmt("%s%s", prefix, filename); - if (!get_tree_entry(tree_oid->hash, fullname, - oid.hash, &mode)) { + if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) { die("Path '%s' exists, but not '%s'.\n" "Did you mean '%.*s:%s' aka '%.*s:./%s'?", fullname, @@@ -1688,8 -1725,8 +1691,8 @@@ static int get_oid_with_context_1(cons filename, oid->hash, &oc->symlink_path, &oc->mode); } else { - ret = get_tree_entry(tree_oid.hash, filename, - oid->hash, &oc->mode); + ret = get_tree_entry(&tree_oid, filename, oid, + &oc->mode); if (ret && only_to_die) { diagnose_invalid_oid_path(prefix, filename, diff --combined streaming.c index 46fabee3aa,22d27df55e..7d55ba64c7 --- a/streaming.c +++ b/streaming.c @@@ -3,6 -3,8 +3,8 @@@ */ #include "cache.h" #include "streaming.h" + #include "repository.h" + #include "object-store.h" #include "packfile.h" enum input_source { @@@ -14,7 -16,7 +16,7 @@@ typedef int (*open_istream_fn)(struct git_istream *, struct object_info *, - const unsigned char *, + const struct object_id *, enum object_type *); typedef int (*close_istream_fn)(struct git_istream *); typedef ssize_t (*read_istream_fn)(struct git_istream *, char *, size_t); @@@ -27,7 -29,7 +29,7 @@@ struct stream_vtbl #define open_method_decl(name) \ int open_istream_ ##name \ (struct git_istream *st, struct object_info *oi, \ - const unsigned char *sha1, \ + const 
struct object_id *oid, \ enum object_type *type) #define close_method_decl(name) \ @@@ -105,7 -107,7 +107,7 @@@ ssize_t read_istream(struct git_istrea return st->vtbl->read(st, buf, sz); } -static enum input_source istream_source(const unsigned char *sha1, +static enum input_source istream_source(const struct object_id *oid, enum object_type *type, struct object_info *oi) { @@@ -114,7 -116,7 +116,7 @@@ oi->typep = type; oi->sizep = &size; - status = sha1_object_info_extended(sha1, oi, 0); + status = oid_object_info_extended(oid, oi, 0); if (status < 0) return stream_error; @@@ -130,14 -132,14 +132,14 @@@ } } -struct git_istream *open_istream(const unsigned char *sha1, +struct git_istream *open_istream(const struct object_id *oid, enum object_type *type, unsigned long *size, struct stream_filter *filter) { struct git_istream *st; struct object_info oi = OBJECT_INFO_INIT; - const unsigned char *real = lookup_replace_object(sha1); + const struct object_id *real = lookup_replace_object(oid); enum input_source src = istream_source(real, type, &oi); if (src < 0) @@@ -335,7 -337,8 +337,8 @@@ static struct stream_vtbl loose_vtbl = static open_method_decl(loose) { - st->u.loose.mapped = map_sha1_file(oid->hash, &st->u.loose.mapsize); + st->u.loose.mapped = map_sha1_file(the_repository, - sha1, &st->u.loose.mapsize); ++ oid->hash, &st->u.loose.mapsize); if (!st->u.loose.mapped) return -1; if ((unpack_sha1_header(&st->z, @@@ -486,7 -489,7 +489,7 @@@ static struct stream_vtbl incore_vtbl static open_method_decl(incore) { - st->u.incore.buf = read_sha1_file_extended(sha1, type, &st->size, 0); + st->u.incore.buf = read_object_file_extended(oid, type, &st->size, 0); st->u.incore.read_ptr = 0; st->vtbl = &incore_vtbl; @@@ -507,7 -510,7 +510,7 @@@ int stream_blob_to_fd(int fd, const str ssize_t kept = 0; int result = -1; - st = open_istream(oid->hash, &type, &sz, filter); + st = open_istream(oid, &type, &sz, filter); if (!st) { if (filter) free_stream_filter(filter); diff --combined submodule.c index a05c544e8d,b03e5f5045..9a50168b23 --- a/submodule.c +++ b/submodule.c @@@ -21,6 -21,7 +21,7 @@@ #include "remote.h" #include "worktree.h" #include "parse-options.h" + #include "object-store.h" static int config_update_recurse_submodules = RECURSE_SUBMODULES_OFF; static struct string_list changed_submodule_names = STRING_LIST_INIT_DUP; @@@ -540,9 -541,9 +541,9 @@@ static void show_submodule_header(struc output_header: strbuf_addf(&sb, "Submodule %s ", path); - strbuf_add_unique_abbrev(&sb, one->hash, DEFAULT_ABBREV); + strbuf_add_unique_abbrev(&sb, one, DEFAULT_ABBREV); strbuf_addstr(&sb, (fast_backward || fast_forward) ? ".." 
: "..."); - strbuf_add_unique_abbrev(&sb, two->hash, DEFAULT_ABBREV); + strbuf_add_unique_abbrev(&sb, two, DEFAULT_ABBREV); if (message) strbuf_addf(&sb, " %s\n", message); else @@@ -590,7 -591,7 +591,7 @@@ void show_submodule_inline_diff(struct struct object_id *one, struct object_id *two, unsigned dirty_submodule) { - const struct object_id *old = the_hash_algo->empty_tree, *new = the_hash_algo->empty_tree; + const struct object_id *old_oid = the_hash_algo->empty_tree, *new_oid = the_hash_algo->empty_tree; struct commit *left = NULL, *right = NULL; struct commit_list *merge_bases = NULL; struct child_process cp = CHILD_PROCESS_INIT; @@@ -605,9 -606,9 +606,9 @@@ goto done; if (left) - old = one; + old_oid = one; if (right) - new = two; + new_oid = two; cp.git_cmd = 1; cp.dir = path; @@@ -630,7 -631,7 +631,7 @@@ argv_array_pushf(&cp.args, "--dst-prefix=%s%s/", o->b_prefix, path); } - argv_array_push(&cp.args, oid_to_hex(old)); + argv_array_push(&cp.args, oid_to_hex(old_oid)); /* * If the submodule has modified content, we will diff against the * work tree, under the assumption that the user has asked for the @@@ -638,7 -639,7 +639,7 @@@ * haven't yet been committed to the submodule yet. */ if (!(dirty_submodule & DIRTY_SUBMODULE_MODIFIED)) - argv_array_push(&cp.args, oid_to_hex(new)); + argv_array_push(&cp.args, oid_to_hex(new_oid)); prepare_submodule_repo_env(&cp.env_array); if (start_command(&cp)) @@@ -817,7 -818,7 +818,7 @@@ static int check_has_commit(const struc { struct has_commit_data *cb = data; - enum object_type type = sha1_object_info(oid->hash, NULL); + enum object_type type = oid_object_info(oid, NULL); switch (type) { case OBJ_COMMIT: @@@ -831,7 -832,7 +832,7 @@@ return 0; default: die(_("submodule entry '%s' (%s) is a %s, not a commit"), - cb->path, oid_to_hex(oid), typename(type)); + cb->path, oid_to_hex(oid), type_name(type)); } } @@@ -1578,8 -1579,8 +1579,8 @@@ static void submodule_reset_index(cons * pass NULL for old or new respectively. */ int submodule_move_head(const char *path, - const char *old, - const char *new, + const char *old_head, + const char *new_head, unsigned flags) { int ret = 0; @@@ -1600,7 -1601,7 +1601,7 @@@ else error_code_ptr = NULL; - if (old && !is_submodule_populated_gently(path, error_code_ptr)) + if (old_head && !is_submodule_populated_gently(path, error_code_ptr)) return 0; sub = submodule_from_path(&null_oid, path); @@@ -1608,14 -1609,14 +1609,14 @@@ if (!sub) die("BUG: could not get submodule information for '%s'", path); - if (old && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) { + if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) { /* Check if the submodule has a dirty index. */ if (submodule_has_dirty_index(sub)) return error(_("submodule '%s' has dirty index"), path); } if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) { - if (old) { + if (old_head) { if (!submodule_uses_gitfile(path)) absorb_git_dir_into_superproject("", path, ABSORB_GITDIR_RECURSE_SUBMODULES); @@@ -1629,7 -1630,7 +1630,7 @@@ submodule_reset_index(path); } - if (old && (flags & SUBMODULE_MOVE_HEAD_FORCE)) { + if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) { char *gitdir = xstrfmt("%s/modules/%s", get_git_common_dir(), sub->name); connect_work_tree_and_git_dir(path, gitdir); @@@ -1658,9 -1659,9 +1659,9 @@@ argv_array_push(&cp.args, "-m"); if (!(flags & SUBMODULE_MOVE_HEAD_FORCE)) - argv_array_push(&cp.args, old ? old : EMPTY_TREE_SHA1_HEX); + argv_array_push(&cp.args, old_head ? old_head : EMPTY_TREE_SHA1_HEX); - argv_array_push(&cp.args, new ? 
new : EMPTY_TREE_SHA1_HEX); + argv_array_push(&cp.args, new_head ? new_head : EMPTY_TREE_SHA1_HEX); if (run_command(&cp)) { ret = -1; @@@ -1668,7 -1669,7 +1669,7 @@@ } if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) { - if (new) { + if (new_head) { child_process_init(&cp); /* also set the HEAD accordingly */ cp.git_cmd = 1; @@@ -1677,7 -1678,7 +1678,7 @@@ prepare_submodule_repo_env(&cp.env_array); argv_array_pushl(&cp.args, "update-ref", "HEAD", - "--no-deref", new, NULL); + "--no-deref", new_head, NULL); if (run_command(&cp)) { ret = -1; diff --combined transport.c index b9dfa11bd2,3afc632472..94eccf29aa --- a/transport.c +++ b/transport.c @@@ -18,6 -18,7 +18,7 @@@ #include "sha1-array.h" #include "sigchain.h" #include "transport-internal.h" + #include "object-store.h" static void set_upstreams(struct transport *transport, struct ref *refs, int pretend) @@@ -367,7 -368,7 +368,7 @@@ static void print_ok_ref_status(struct char type; const char *msg; - strbuf_add_unique_abbrev(&quickref, ref->old_oid.hash, + strbuf_add_unique_abbrev(&quickref, &ref->old_oid, DEFAULT_ABBREV); if (ref->forced_update) { strbuf_addstr(&quickref, "..."); @@@ -378,7 -379,7 +379,7 @@@ type = ' '; msg = NULL; } - strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, + strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV); print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg, @@@ -461,7 -462,7 +462,7 @@@ static int print_one_push_status(struc static int measure_abbrev(const struct object_id *oid, int sofar) { char hex[GIT_MAX_HEXSZ + 1]; - int w = find_unique_abbrev_r(hex, oid->hash, DEFAULT_ABBREV); + int w = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV); return (w < sofar) ? sofar : w; }
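
A note on the new bsearch_pack() helper introduced above: the series replaces the open-coded binary searches in find_pack_entry_one(), unique_in_pack() and find_abbrev_len_for_pack() with a single entry point declared in packfile.h. Below is a minimal sketch of how a caller might use it; offset_for_oid() is a hypothetical name used only for illustration, and the sketch assumes the pack's index can be opened with open_pack_index(), as in the call sites shown in the diff.

#include "cache.h"
#include "packfile.h"

/*
 * Hypothetical helper (not part of this series): return the offset of
 * "oid" inside pack "p", or 0 if the object is not present.  This
 * mirrors the rewritten find_pack_entry_one() above.
 */
static off_t offset_for_oid(struct packed_git *p, const struct object_id *oid)
{
	uint32_t pos;

	if (open_pack_index(p))
		return 0;	/* no usable .idx file for this pack */

	/*
	 * bsearch_pack() returns non-zero on an exact match and stores the
	 * index position in "pos"; on a miss, "pos" is the position where
	 * the oid would be inserted, which is what find_abbrev_len_for_pack()
	 * relies on when probing the neighbouring entries.
	 */
	if (bsearch_pack(oid, p, &pos))
		return nth_packed_object_offset(p, pos);
	return 0;
}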
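
The series also converts sha1_file.c from direct git_SHA1_Init()/Update()/Final() calls to the pluggable git_hash_ctx / the_hash_algo interface. A minimal sketch of computing an object name through that abstraction follows, assuming the usual "<type> <size>\0" loose-object header; hash_loose_buffer() is a hypothetical name, the in-tree equivalents being hash_object_file() and write_object_file_prepare() shown above.

#include "cache.h"

/*
 * Hypothetical sketch: hash a buffer the way loose objects are named,
 * using the_hash_algo's function pointers instead of hard-coded SHA-1
 * calls.  The 32-byte header buffer matches MAX_HEADER_LEN in sha1_file.c.
 */
static void hash_loose_buffer(const void *buf, unsigned long len,
			      const char *type, struct object_id *oid)
{
	git_hash_ctx c;
	char hdr[32];
	int hdrlen;

	/* header is "<type> <decimal length>" plus the terminating NUL */
	hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type, len) + 1;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, hdrlen);
	the_hash_algo->update_fn(&c, buf, len);
	the_hash_algo->final_fn(oid->hash, &c);
}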