From: Junio C Hamano <gitster@pobox.com>
Date: Fri, 28 Nov 2008 03:23:51 +0000 (-0800)
Subject: Merge branch 'maint'
X-Git-Tag: v1.6.1-rc1~15
X-Git-Url: https://git.lorimer.id.au/gitweb.git/diff_plain/455d0f5c23a600ff8c94642ca5123f1bd2f1ec0d?ds=inline;hp=-c

Merge branch 'maint'

* maint:
  sha1_file.c: resolve confusion EACCES vs EPERM
  sha1_file: avoid bogus "file exists" error message
  git checkout: don't warn about unborn branch if -f is already passed
  bash: offer refs instead of filenames for 'git revert'
  bash: remove dashed command leftovers
  git-p4: fix keyword-expansion regex
  fast-export: use an unsorted string list for extra_refs
  Add new testcase to show fast-export does not always exports all tags
---

455d0f5c23a600ff8c94642ca5123f1bd2f1ec0d
diff --combined builtin-checkout.c
index 464fd25707,c107fd643a..7f3bd7bb1c
--- a/builtin-checkout.c
+++ b/builtin-checkout.c
@@@ -13,9 -13,6 +13,9 @@@
 #include "diff.h"
 #include "revision.h"
 #include "remote.h"
+#include "blob.h"
+#include "xdiff-interface.h"
+#include "ll-merge.h"

 static const char * const checkout_usage[] = {
 	"git checkout [options] <branch>",
@@@ -23,18 -20,6 +23,18 @@@ NULL,
 };

+struct checkout_opts {
+	int quiet;
+	int merge;
+	int force;
+	int writeout_stage;
+	int writeout_error;
+
+	const char *new_branch;
+	int new_branch_log;
+	enum branch_track track;
+};
+
 static int post_checkout_hook(struct commit *old, struct commit *new,
 			      int changed)
 {
@@@ -99,121 -84,8 +99,121 @@@ static int skip_same_name(struct cache_
 	return pos;
 }

+static int check_stage(int stage, struct cache_entry *ce, int pos)
+{
+	while (pos < active_nr &&
+	       !strcmp(active_cache[pos]->name, ce->name)) {
+		if (ce_stage(active_cache[pos]) == stage)
+			return 0;
+		pos++;
+	}
+	return error("path '%s' does not have %s version",
+		     ce->name,
+		     (stage == 2) ? "our" : "their");
+}
+
+static int check_all_stages(struct cache_entry *ce, int pos)
+{
+	if (ce_stage(ce) != 1 ||
+	    active_nr <= pos + 2 ||
+	    strcmp(active_cache[pos+1]->name, ce->name) ||
+	    ce_stage(active_cache[pos+1]) != 2 ||
+	    strcmp(active_cache[pos+2]->name, ce->name) ||
+	    ce_stage(active_cache[pos+2]) != 3)
+		return error("path '%s' does not have all three versions",
+			     ce->name);
+	return 0;
+}
+
+static int checkout_stage(int stage, struct cache_entry *ce, int pos,
+			  struct checkout *state)
+{
+	while (pos < active_nr &&
+	       !strcmp(active_cache[pos]->name, ce->name)) {
+		if (ce_stage(active_cache[pos]) == stage)
+			return checkout_entry(active_cache[pos], state, NULL);
+		pos++;
+	}
+	return error("path '%s' does not have %s version",
+		     ce->name,
+		     (stage == 2) ? "our" : "their");
+}
+
+/* NEEDSWORK: share with merge-recursive */
+static void fill_mm(const unsigned char *sha1, mmfile_t *mm)
+{
+	unsigned long size;
+	enum object_type type;
+
+	if (!hashcmp(sha1, null_sha1)) {
+		mm->ptr = xstrdup("");
+		mm->size = 0;
+		return;
+	}
+
+	mm->ptr = read_sha1_file(sha1, &type, &size);
+	if (!mm->ptr || type != OBJ_BLOB)
+		die("unable to read blob object %s", sha1_to_hex(sha1));
+	mm->size = size;
+}
+
+static int checkout_merged(int pos, struct checkout *state)
+{
+	struct cache_entry *ce = active_cache[pos];
+	const char *path = ce->name;
+	mmfile_t ancestor, ours, theirs;
+	int status;
+	unsigned char sha1[20];
+	mmbuffer_t result_buf;
+
+	if (ce_stage(ce) != 1 ||
+	    active_nr <= pos + 2 ||
+	    strcmp(active_cache[pos+1]->name, path) ||
+	    ce_stage(active_cache[pos+1]) != 2 ||
+	    strcmp(active_cache[pos+2]->name, path) ||
+	    ce_stage(active_cache[pos+2]) != 3)
+		return error("path '%s' does not have all 3 versions", path);
+
+	fill_mm(active_cache[pos]->sha1, &ancestor);
+	fill_mm(active_cache[pos+1]->sha1, &ours);
+	fill_mm(active_cache[pos+2]->sha1, &theirs);
+
+	status = ll_merge(&result_buf, path, &ancestor,
+			  &ours, "ours", &theirs, "theirs", 1);
+	free(ancestor.ptr);
+	free(ours.ptr);
+	free(theirs.ptr);
+	if (status < 0 || !result_buf.ptr) {
+		free(result_buf.ptr);
+		return error("path '%s': cannot merge", path);
+	}
+
+	/*
+	 * NEEDSWORK:
+	 * There is absolutely no reason to write this as a blob object
+	 * and create a phoney cache entry just to leak.  This hack is
+	 * primarily to get to the write_entry() machinery that massages
+	 * the contents to work-tree format and writes out which only
+	 * allows it for a cache entry.  The code in write_entry() needs
+	 * to be refactored to allow us to feed a <buffer, size, mode>
+	 * instead of a cache entry.  Such a refactoring would help
+	 * merge_recursive as well (it also writes the merge result to the
+	 * object database even when it may contain conflicts).
+	 */
+	if (write_sha1_file(result_buf.ptr, result_buf.size,
+			    blob_type, sha1))
+		die("Unable to add merge result for '%s'", path);
+	ce = make_cache_entry(create_ce_mode(active_cache[pos+1]->ce_mode),
+			      sha1,
+			      path, 2, 0);
+	if (!ce)
+		die("make_cache_entry failed for path '%s'", path);
+	status = checkout_entry(ce, state, NULL);
+	return status;
+}

-static int checkout_paths(struct tree *source_tree, const char **pathspec)
+static int checkout_paths(struct tree *source_tree, const char **pathspec,
+			  struct checkout_opts *opts)
 {
 	int pos;
 	struct checkout state;
@@@ -222,8 -94,7 +222,8 @@@
 	int flag;
 	struct commit *head;
 	int errs = 0;
-
+	int stage = opts->writeout_stage;
+	int merge = opts->merge;
 	int newfd;
 	struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
@@@ -251,16 -122,8 +251,16 @@@
 		if (pathspec_match(pathspec, NULL, ce->name, 0)) {
 			if (!ce_stage(ce))
 				continue;
-			errs = 1;
-			error("path '%s' is unmerged", ce->name);
+			if (opts->force) {
+				warning("path '%s' is unmerged", ce->name);
+			} else if (stage) {
+				errs |= check_stage(stage, ce, pos);
+			} else if (opts->merge) {
+				errs |= check_all_stages(ce, pos);
+			} else {
+				errs = 1;
+				error("path '%s' is unmerged", ce->name);
+			}
 			pos = skip_same_name(ce, pos) - 1;
 		}
 	}
@@@ -278,10 -141,6 +278,10 @@@
 			errs |= checkout_entry(ce, &state, NULL);
 			continue;
 		}
+		if (stage)
+			errs |= checkout_stage(stage, ce, pos, &state);
+		else if (merge)
+			errs |= checkout_merged(pos, &state);
 		pos = skip_same_name(ce, pos) - 1;
 	}
 }
@@@ -310,7 -169,8 +310,7 @@@ static void show_local_changes(struct o
 static void describe_detached_head(char *msg, struct commit *commit)
 {
-	struct strbuf sb;
-	strbuf_init(&sb, 0);
+	struct strbuf sb = STRBUF_INIT;
 	parse_commit(commit);
 	pretty_print_commit(CMIT_FMT_ONELINE, commit, &sb, 0, NULL, NULL, 0, 0);
 	fprintf(stderr, "%s %s... %s\n", msg,
@@@ -318,6 -178,17 +318,6 @@@
 	strbuf_release(&sb);
 }

-struct checkout_opts {
-	int quiet;
-	int merge;
-	int force;
-	int writeout_error;
-
-	char *new_branch;
-	int new_branch_log;
-	enum branch_track track;
-};
-
 static int reset_tree(struct tree *tree, struct checkout_opts *o, int worktree)
 {
 	struct unpack_trees_options opts;
@@@ -359,7 -230,8 +359,7 @@@ struct branch_info
 static void setup_branch_path(struct branch_info *branch)
 {
-	struct strbuf buf;
-	strbuf_init(&buf, 0);
+	struct strbuf buf = STRBUF_INIT;
 	strbuf_addstr(&buf, "refs/heads/");
 	strbuf_addstr(&buf, branch->name);
 	branch->path = strbuf_detach(&buf, NULL);
@@@ -420,7 -292,6 +420,7 @@@ static int merge_working_tree(struct ch
 	 */
 	struct tree *result;
 	struct tree *work;
+	struct merge_options o;
 	if (!opts->merge)
 		return 1;
 	parse_commit(old->commit);
@@@ -439,17 -310,13 +439,17 @@@
 	 */
 	add_files_to_cache(NULL, NULL, 0);
-	work = write_tree_from_memory();
+	init_merge_options(&o);
+	o.verbosity = 0;
+	work = write_tree_from_memory(&o);

 	ret = reset_tree(new->commit->tree, opts, 1);
 	if (ret)
 		return ret;
-	merge_trees(new->commit->tree, work, old->commit->tree,
-		    new->name, "local", &result);
+	o.branch1 = new->name;
+	o.branch2 = "local";
+	merge_trees(&o, new->commit->tree, work,
+		    old->commit->tree, &result);
 	ret = reset_tree(new->commit->tree, opts, 0);
 	if (ret)
 		return ret;
@@@ -481,7 -348,7 +481,7 @@@ static void update_refs_for_switch(stru
 					struct branch_info *old,
 					struct branch_info *new)
 {
-	struct strbuf msg;
+	struct strbuf msg = STRBUF_INIT;
 	const char *old_desc;
 	if (opts->new_branch) {
 		create_branch(old->name, opts->new_branch, new->name, 0,
@@@ -490,6 -357,7 +490,6 @@@
 		setup_branch_path(new);
 	}

-	strbuf_init(&msg, 0);
 	old_desc = old->name;
 	if (!old_desc && old->commit)
 		old_desc = sha1_to_hex(old->commit->object.sha1);
@@@ -553,7 -421,7 +553,7 @@@ static int switch_branches(struct check
 	if (!opts->quiet && !old.path && old.commit && new->commit != old.commit)
 		describe_detached_head("Previous HEAD position was", old.commit);

-	if (!old.commit) {
+	if (!old.commit && !opts->force) {
 		if (!opts->quiet) {
 			fprintf(stderr, "warning: You appear to be on a branch yet to be born.\n");
 			fprintf(stderr, "warning: Forcing checkout of %s.\n", new->name);
@@@ -571,11 -439,6 +571,11 @@@
 	return ret || opts->writeout_error;
 }

+static int git_checkout_config(const char *var, const char *value, void *cb)
+{
+	return git_xmerge_config(var, value, cb);
+}
+
 int cmd_checkout(int argc, const char **argv, const char *prefix)
 {
 	struct checkout_opts opts;
@@@ -583,21 -446,14 +583,21 @@@
 	const char *arg;
 	struct branch_info new;
 	struct tree *source_tree = NULL;
+	char *conflict_style = NULL;
 	struct option options[] = {
 		OPT__QUIET(&opts.quiet),
 		OPT_STRING('b', NULL, &opts.new_branch, "new branch", "branch"),
 		OPT_BOOLEAN('l', NULL, &opts.new_branch_log, "log for new branch"),
 		OPT_SET_INT('t', "track",  &opts.track, "track",
 			BRANCH_TRACK_EXPLICIT),
+		OPT_SET_INT('2', "ours", &opts.writeout_stage, "stage",
+			    2),
+		OPT_SET_INT('3', "theirs", &opts.writeout_stage, "stage",
+			    3),
 		OPT_BOOLEAN('f', NULL, &opts.force, "force"),
-		OPT_BOOLEAN('m', NULL, &opts.merge, "merge"),
+		OPT_BOOLEAN('m', "merge", &opts.merge, "merge"),
+		OPT_STRING(0, "conflict", &conflict_style, "style",
+			   "conflict style (merge or diff3)"),
 		OPT_END(),
 	};
 	int has_dash_dash;
@@@ -605,34 -461,15 +605,34 @@@
 	memset(&opts, 0, sizeof(opts));
 	memset(&new, 0, sizeof(new));

-	git_config(git_default_config, NULL);
+	git_config(git_checkout_config, NULL);

-	opts.track = git_branch_track;
+	opts.track = BRANCH_TRACK_UNSPECIFIED;

 	argc = parse_options(argc, argv, options, checkout_usage,
 			     PARSE_OPT_KEEP_DASHDASH);

-	if (!opts.new_branch && (opts.track != git_branch_track))
-		die("git checkout: --track and --no-track require -b");
+	/* --track without -b should DWIM */
+	if (0 < opts.track && !opts.new_branch) {
+		const char *argv0 = argv[0];
+		if (!argc || !strcmp(argv0, "--"))
+			die ("--track needs a branch name");
+		if (!prefixcmp(argv0, "refs/"))
+			argv0 += 5;
+		if (!prefixcmp(argv0, "remotes/"))
+			argv0 += 8;
+		argv0 = strchr(argv0, '/');
+		if (!argv0 || !argv0[1])
+			die ("Missing branch name; try -b");
+		opts.new_branch = argv0 + 1;
+	}
+
+	if (opts.track == BRANCH_TRACK_UNSPECIFIED)
+		opts.track = git_branch_track;
+	if (conflict_style) {
+		opts.merge = 1; /* implied */
+		git_xmerge_config("merge.conflictstyle", conflict_style, NULL);
+	}

 	if (opts.force && opts.merge)
 		die("git checkout: -f and -m are incompatible");
@@@ -716,22 -553,20 +716,22 @@@ no_reference
 			die("invalid path specification");

 		/* Checkout paths */
-		if (opts.new_branch || opts.force || opts.merge) {
+		if (opts.new_branch) {
 			if (argc == 1) {
-				die("git checkout: updating paths is incompatible with switching branches/forcing\nDid you intend to checkout '%s' which can not be resolved as commit?", argv[0]);
+				die("git checkout: updating paths is incompatible with switching branches.\nDid you intend to checkout '%s' which can not be resolved as commit?", argv[0]);
 			} else {
-				die("git checkout: updating paths is incompatible with switching branches/forcing");
+				die("git checkout: updating paths is incompatible with switching branches.");
 			}
 		}

-		return checkout_paths(source_tree, pathspec);
+		if (1 < !!opts.writeout_stage + !!opts.force + !!opts.merge)
+			die("git checkout: --ours/--theirs, --force and --merge are incompatible when\nchecking out of the index.");
+
+		return checkout_paths(source_tree, pathspec, &opts);
 	}

 	if (opts.new_branch) {
-		struct strbuf buf;
-		strbuf_init(&buf, 0);
+		struct strbuf buf = STRBUF_INIT;
 		strbuf_addstr(&buf, "refs/heads/");
 		strbuf_addstr(&buf, opts.new_branch);
 		if (!get_sha1(buf.buf, rev))
@@@ -744,8 -579,6 +744,8 @@@
 	if (new.name && !new.commit) {
 		die("Cannot switch branch to a non-commit.");
 	}
+	if (opts.writeout_stage)
+		die("--ours/--theirs is incompatible with switching branches.");

 	return switch_branches(&opts, &new);
 }
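With the changes above, "git checkout" can pick content for an unmerged path straight from the index: --ours writes out the stage #2 version, --theirs the stage #3 version, and -m (or --conflict=<style>, which implies -m) re-runs the low-level merge to recreate the conflicted file. A quick illustration of how the options map onto index stages; the path and the abbreviated object names are made up:

    $ git ls-files --unmerged -- hello.c
    100644 263414f... 1	hello.c
    100644 06fa6a2... 2	hello.c
    100644 cc44c73... 3	hello.c
    $ git checkout --theirs -- hello.c            # write the stage #3 blob
    $ git checkout --conflict=diff3 -- hello.c    # recreate the merge, diff3 style

check_stage(), check_all_stages() and checkout_merged() all rely on the sorted index keeping the stage #1, #2 and #3 entries of an unmerged path next to each other, in stage order, which is why they only ever scan forward from pos.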
"$dir/HEAD" ]; then echo HEAD; fi - for i in $(git --git-dir="$dir" \ - for-each-ref --format='%(refname)' \ - refs/tags refs/heads refs/remotes); do - case "$i" in - refs/tags/*) echo "${i#refs/tags/}" ;; - refs/heads/*) echo "${i#refs/heads/}" ;; - refs/remotes/*) echo "${i#refs/remotes/}" ;; - *) echo "$i" ;; - esac - done + case "$cur" in + refs|refs/*) + format="refname" + refs="${cur%/*}" + ;; + *) + if [ -e "$dir/HEAD" ]; then echo HEAD; fi + format="refname:short" + refs="refs/tags refs/heads refs/remotes" + ;; + esac + git --git-dir="$dir" for-each-ref --format="%($format)" \ + $refs return fi for i in $(git ls-remote "$dir" 2>/dev/null); do @@@ -647,21 -650,12 +647,12 @@@ _git_branch ( _git_bundle () { - local mycword="$COMP_CWORD" - case "${COMP_WORDS[0]}" in - git) - local cmd="${COMP_WORDS[2]}" - mycword="$((mycword-1))" - ;; - git-bundle*) - local cmd="${COMP_WORDS[1]}" - ;; - esac - case "$mycword" in - 1) + local cmd="${COMP_WORDS[2]}" + case "$COMP_CWORD" in + 2) __gitcomp "create list-heads verify unbundle" ;; - 2) + 3) # looking for a file ;; *) @@@ -809,12 -803,7 +800,7 @@@ _git_fetch ( __gitcomp "$(__git_refs)" "$pfx" "${cur#*:}" ;; *) - local remote - case "${COMP_WORDS[0]}" in - git-fetch) remote="${COMP_WORDS[1]}" ;; - git) remote="${COMP_WORDS[2]}" ;; - esac - __gitcomp "$(__git_refs2 "$remote")" + __gitcomp "$(__git_refs2 "${COMP_WORDS[2]}")" ;; esac fi @@@ -892,7 -881,6 +878,7 @@@ _git_help ( attributes cli core-tutorial cvs-migration diffcore gitk glossary hooks ignore modules repository-layout tutorial tutorial-2 + workflows " } @@@ -1058,12 -1046,7 +1044,7 @@@ _git_pull ( if [ "$COMP_CWORD" = 2 ]; then __gitcomp "$(__git_remotes)" else - local remote - case "${COMP_WORDS[0]}" in - git-pull) remote="${COMP_WORDS[1]}" ;; - git) remote="${COMP_WORDS[2]}" ;; - esac - __gitcomp "$(__git_refs "$remote")" + __gitcomp "$(__git_refs "${COMP_WORDS[2]}")" fi } @@@ -1076,19 -1059,13 +1057,13 @@@ _git_push ( else case "$cur" in *:*) - local remote - case "${COMP_WORDS[0]}" in - git-push) remote="${COMP_WORDS[1]}" ;; - git) remote="${COMP_WORDS[2]}" ;; - esac - local pfx="" case "$COMP_WORDBREAKS" in *:*) : great ;; *) pfx="${cur%%:*}:" ;; esac - __gitcomp "$(__git_refs "$remote")" "$pfx" "${cur#*:}" + __gitcomp "$(__git_refs "${COMP_WORDS[2]}")" "$pfx" "${cur#*:}" ;; +*) __gitcomp "$(__git_refs)" + "${cur#+}" @@@ -1135,8 -1112,7 +1110,8 @@@ _git_send_email ( --no-suppress-from --no-thread --quiet --signed-off-by-cc --smtp-pass --smtp-server --smtp-server-port --smtp-ssl --smtp-user --subject - --suppress-cc --suppress-from --thread --to" + --suppress-cc --suppress-from --thread --to + --validate --no-validate" return ;; esac @@@ -1368,7 -1344,7 +1343,7 @@@ _git_revert ( return ;; esac - COMPREPLY=() + __gitcomp "$(__git_refs)" } _git_rm () @@@ -1476,7 -1452,7 +1451,7 @@@ _git_submodule ( { __git_has_doubledash && return - local subcommands="add status init update" + local subcommands="add status init update summary foreach sync" if [ -z "$(__git_find_subcommand "$subcommands")" ]; then local cur="${COMP_WORDS[COMP_CWORD]}" case "$cur" in @@@ -1590,7 -1566,7 +1565,7 @@@ _git_tag ( -m|-F) COMPREPLY=() ;; - -*|tag|git-tag) + -*|tag) if [ $f = 1 ]; then __gitcomp "$(__git_tags)" else diff --combined contrib/fast-import/git-p4 index 9f0a5f92c1,2b122d3f51..b44fbfc9b3 --- a/contrib/fast-import/git-p4 +++ b/contrib/fast-import/git-p4 @@@ -316,11 -316,8 +316,11 @@@ def gitBranchExists(branch) stderr=subprocess.PIPE, stdout=subprocess.PIPE); return proc.wait() == 0; +_gitConfig = {} 
diff --combined contrib/fast-import/git-p4
index 9f0a5f92c1,2b122d3f51..b44fbfc9b3
--- a/contrib/fast-import/git-p4
+++ b/contrib/fast-import/git-p4
@@@ -316,11 -316,8 +316,11 @@@ def gitBranchExists(branch)
                             stderr=subprocess.PIPE, stdout=subprocess.PIPE);
     return proc.wait() == 0;

+_gitConfig = {}
 def gitConfig(key):
-    return read_pipe("git config %s" % key, ignore_error=True).strip()
+    if not _gitConfig.has_key(key):
+        _gitConfig[key] = read_pipe("git config %s" % key, ignore_error=True).strip()
+    return _gitConfig[key]

 def p4BranchesInGit(branchesAreInRemotes = True):
     branches = {}
@@@ -711,7 -708,6 +711,7 @@@ class P4Submit(Command)
             newdiff = newdiff.replace("\n", "\r\n")
         tmpFile.write(submitTemplate + separatorLine + diff + newdiff)
         tmpFile.close()
+        mtime = os.stat(fileName).st_mtime
         defaultEditor = "vi"
         if platform.system() == "Windows":
             defaultEditor = "notepad"
@@@ -720,29 -716,15 +720,29 @@@
         else:
             editor = os.environ.get("EDITOR", defaultEditor);
         system(editor + " " + fileName)
-        tmpFile = open(fileName, "rb")
-        message = tmpFile.read()
-        tmpFile.close()
-        os.remove(fileName)
-        submitTemplate = message[:message.index(separatorLine)]
-        if self.isWindows:
-            submitTemplate = submitTemplate.replace("\r\n", "\n")
-        p4_write_pipe("submit -i", submitTemplate)
+        response = "y"
+        if os.stat(fileName).st_mtime <= mtime:
+            response = "x"
+            while response != "y" and response != "n":
+                response = raw_input("Submit template unchanged. Submit anyway? [y]es, [n]o (skip this patch) ")
+
+        if response == "y":
+            tmpFile = open(fileName, "rb")
+            message = tmpFile.read()
+            tmpFile.close()
+            submitTemplate = message[:message.index(separatorLine)]
+            if self.isWindows:
+                submitTemplate = submitTemplate.replace("\r\n", "\n")
+            p4_write_pipe("submit -i", submitTemplate)
+        else:
+            for f in editedFiles:
+                p4_system("revert \"%s\"" % f);
+            for f in filesToAdd:
+                p4_system("revert \"%s\"" % f);
+                system("rm %s" %f)
+
+        os.remove(fileName)
     else:
         fileName = "submit.txt"
         file = open(fileName, "w+")
@@@ -949,7 -931,7 +949,7 @@@ class P4Sync(Command)
             if includeFile:
                 filesForCommit.append(f)
-                if f['action'] != 'delete':
+                if f['action'] not in ('delete', 'purge'):
                     filesToRead.append(f)

         filedata = []
@@@ -968,11 -950,11 +968,11 @@@
         while j < len(filedata):
             stat = filedata[j]
             j += 1
-            text = [];
+            text = ''
             while j < len(filedata) and filedata[j]['code'] in ('text', 'unicode', 'binary'):
-                text.append(filedata[j]['data'])
+                text += filedata[j]['data']
+                del filedata[j]['data']
                 j += 1
-            text = ''.join(text)

             if not stat.has_key('depotFile'):
                 sys.stderr.write("p4 print fails with: %s\n" % repr(stat))
@@@ -981,7 -963,7 +981,7 @@@
             if stat['type'] in ('text+ko', 'unicode+ko', 'binary+ko'):
                 text = re.sub(r'(?i)\$(Id|Header):[^$]*\$',r'$\1$', text)
             elif stat['type'] in ('text+k', 'ktext', 'kxtext', 'unicode+k', 'binary+k'):
-                text = re.sub(r'\$(Id|Header|Author|Date|DateTime|Change|File|Revision):[^$]*\$',r'$\1$', text)
+                text = re.sub(r'\$(Id|Header|Author|Date|DateTime|Change|File|Revision):[^$\n]*\$',r'$\1$', text)

             contents[stat['depotFile']] = text
@@@ -1041,7 -1023,7 +1041,7 @@@
                 continue

             relPath = self.stripRepoPath(file['path'], branchPrefixes)
-            if file["action"] == "delete":
+            if file["action"] in ("delete", "purge"):
                 self.gitStream.write("D %s\n" % relPath)
             else:
                 data = file['data']
@@@ -1080,7 -1062,7 +1080,7 @@@
         cleanedFiles = {}
         for info in files:
-            if info["action"] == "delete":
+            if info["action"] in ("delete", "purge"):
                 continue
             cleanedFiles[info["depotFile"]] = info["rev"]
@@@ -1403,7 -1385,7 +1403,7 @@@
                 if change > newestRevision:
                     newestRevision = change

-                if info["action"] == "delete":
+                if info["action"] in ("delete", "purge"):
                     # don't increase the file cnt, otherwise details["depotFile123"] will have gaps!
                     #fileCnt = fileCnt + 1
                     continue
@@@ -1751,12 -1733,8 +1751,12 @@@ class P4Clone(P4Sync)
         if not P4Sync.run(self, depotPaths):
             return False
         if self.branch != "master":
-            if gitBranchExists("refs/remotes/p4/master"):
-                system("git branch master refs/remotes/p4/master")
+            if self.importIntoRemotes:
+                masterbranch = "refs/remotes/p4/master"
+            else:
+                masterbranch = "refs/heads/p4/master"
+            if gitBranchExists(masterbranch):
+                system("git branch master %s" % masterbranch)
             system("git checkout -f")
         else:
             print "Could not detect main branch. No checkout/master branch created."
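The keyword-expansion fix above is the addition of \n to the negated character class: with [^$]*, an opening "$Id: ..." that lacks a closing $ on the same line lets the match run across line boundaries and clobber unrelated text up to the next $ in the file. A minimal before/after check, written for the Python 2 that git-p4 targets (the sample text is made up):

    $ python -c 'import re; t = "$Id: 1234\nsome unrelated text $"; print re.sub(r"\$(Id|Header):[^$\n]*\$", r"$\1$", t)'
    $Id: 1234
    some unrelated text $

With the old [^$]* class the same substitution collapses both lines into a single "$Id$". Independently, the p4 "purge" action is now treated like "delete" everywhere file actions are inspected, and gitConfig() results are cached in the _gitConfig dictionary to avoid repeated forks.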
diff --combined sha1_file.c
index 36dad7261b,4e05429aba..6c0e251e02
--- a/sha1_file.c
+++ b/sha1_file.c
@@@ -99,11 -99,7 +99,11 @@@ int safe_create_leading_directories(cha
 		pos = strchr(pos, '/');
 		if (!pos)
 			break;
-		*pos = 0;
+		while (*++pos == '/')
+			;
+		if (!*pos)
+			break;
+		*--pos = '\0';
 		if (!stat(path, &st)) {
 			/* path exists */
 			if (!S_ISDIR(st.st_mode)) {
@@@ -254,6 -250,7 +254,6 @@@ static void read_info_alternates(const
  */
 static int link_alt_odb_entry(const char * entry, int len, const char * relative_base, int depth)
 {
-	struct stat st;
 	const char *objdir = get_object_directory();
 	struct alternate_object_database *ent;
 	struct alternate_object_database *alt;
@@@ -284,7 -281,7 +284,7 @@@
 	ent->base[pfxlen] = ent->base[entlen-1] = 0;

 	/* Detect cases where alternate disappeared */
-	if (stat(ent->base, &st) || !S_ISDIR(st.st_mode)) {
+	if (!is_directory(ent->base)) {
 		error("object directory %s does not exist; "
 		      "check .git/objects/info/alternates.",
 		      ent->base);
@@@ -397,16 -394,6 +397,16 @@@ void add_to_alternates_file(const char
 		link_alt_odb_entries(alt, alt + strlen(alt), '\n', NULL, 0);
 }

+void foreach_alt_odb(alt_odb_fn fn, void *cb)
+{
+	struct alternate_object_database *ent;
+
+	prepare_alt_odb();
+	for (ent = alt_odb_list; ent; ent = ent->next)
+		if (fn(ent, cb))
+			return;
+}
+
 void prepare_alt_odb(void)
 {
 	const char *alt;
@@@ -423,30 -410,23 +423,30 @@@
 	read_info_alternates(get_object_directory(), 0);
 }

-static int has_loose_object(const unsigned char *sha1)
+static int has_loose_object_local(const unsigned char *sha1)
 {
 	char *name = sha1_file_name(sha1);
-	struct alternate_object_database *alt;
+	return !access(name, F_OK);
+}

-	if (!access(name, F_OK))
-		return 1;
+int has_loose_object_nonlocal(const unsigned char *sha1)
+{
+	struct alternate_object_database *alt;
 	prepare_alt_odb();
 	for (alt = alt_odb_list; alt; alt = alt->next) {
-		name = alt->name;
-		fill_sha1_path(name, sha1);
+		fill_sha1_path(alt->name, sha1);
 		if (!access(alt->base, F_OK))
 			return 1;
 	}
 	return 0;
 }

+static int has_loose_object(const unsigned char *sha1)
+{
+	return has_loose_object_local(sha1) ||
+	       has_loose_object_nonlocal(sha1);
+}
+
 static unsigned int pack_used_ctr;
 static unsigned int pack_mmap_calls;
 static unsigned int peak_pack_open_windows;
@@@ -848,11 -828,6 +848,11 @@@ struct packed_git *add_packed_git(cons
 		return NULL;
 	}
 	memcpy(p->pack_name, path, path_len);
+
+	strcpy(p->pack_name + path_len, ".keep");
+	if (!access(p->pack_name, F_OK))
+		p->pack_keep = 1;
+
 	strcpy(p->pack_name + path_len, ".pack");
 	if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
 		free(p);
@@@ -1122,8 -1097,7 +1122,8 @@@ static int legacy_loose_object(unsigne
 	return 0;
 }

-unsigned long unpack_object_header_gently(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep)
+unsigned long unpack_object_header_buffer(const unsigned char *buf,
+		unsigned long len, enum object_type *type, unsigned long *sizep)
 {
 	unsigned shift;
 	unsigned char c;
@@@ -1135,10 -1109,10 +1135,10 @@@
 	size = c & 15;
 	shift = 4;
 	while (c & 0x80) {
-		if (len <= used)
-			return 0;
-		if (sizeof(long) * 8 <= shift)
+		if (len <= used || sizeof(long) * 8 <= shift) {
+			error("bad object header");
 			return 0;
+		}
 		c = buf[used++];
 		size += (c & 0x7f) << shift;
 		shift += 7;
@@@ -1177,7 -1151,7 +1177,7 @@@ static int unpack_sha1_header(z_stream
 	 * really worth it and we don't write it any longer.  But we
 	 * can still read it.
 	 */
-	used = unpack_object_header_gently(map, mapsize, &type, &size);
+	used = unpack_object_header_buffer(map, mapsize, &type, &size);
 	if (!used || !valid_loose_object_type[type])
 		return -1;
 	map += used;
@@@ -1326,10 -1300,8 +1326,10 @@@ unsigned long get_size_from_delta(struc
 	} while ((st == Z_OK || st == Z_BUF_ERROR) && stream.total_out < sizeof(delta_head));
 	inflateEnd(&stream);
-	if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head))
-		die("delta data unpack-initial failed");
+	if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head)) {
+		error("delta data unpack-initial failed");
+		return 0;
+	}

 	/* Examine the initial part of the delta to figure out
 	 * the result size.
@@@ -1370,7 -1342,7 +1370,7 @@@ static off_t get_delta_base(struct pack
 			base_offset = (base_offset << 7) + (c & 127);
 		}
 		base_offset = delta_obj_offset - base_offset;
-		if (base_offset >= delta_obj_offset)
+		if (base_offset <= 0 || base_offset >= delta_obj_offset)
 			return 0;  /* out of bound */
 		*curpos += used;
 	} else if (type == OBJ_REF_DELTA) {
@@@ -1396,32 -1368,15 +1396,32 @@@ static int packed_delta_info(struct pac
 	off_t base_offset;

 	base_offset = get_delta_base(p, w_curs, &curpos, type, obj_offset);
+	if (!base_offset)
+		return OBJ_BAD;
 	type = packed_object_info(p, base_offset, NULL);
+	if (type <= OBJ_NONE) {
+		struct revindex_entry *revidx;
+		const unsigned char *base_sha1;
+		revidx = find_pack_revindex(p, base_offset);
+		if (!revidx)
+			return OBJ_BAD;
+		base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+		mark_bad_packed_object(p, base_sha1);
+		type = sha1_object_info(base_sha1, NULL);
+		if (type <= OBJ_NONE)
+			return OBJ_BAD;
+	}

 	/* We choose to only get the type of the base object and
 	 * ignore potentially corrupt pack file that expects the delta
 	 * based on a base with a wrong size.  This saves tons of
 	 * inflate() calls.
 	 */
-	if (sizep)
+	if (sizep) {
 		*sizep = get_size_from_delta(p, w_curs, curpos);
+		if (*sizep == 0)
+			type = OBJ_BAD;
+	}

 	return type;
 }
@@@ -1443,11 -1398,10 +1443,11 @@@ static int unpack_object_header(struct
 	 * insane, so we know won't exceed what we have been given.
 	 */
 	base = use_pack(p, w_curs, *curpos, &left);
-	used = unpack_object_header_gently(base, left, &type, sizep);
-	if (!used)
-		die("object offset outside of pack file");
-	*curpos += used;
+	used = unpack_object_header_buffer(base, left, &type, sizep);
+	if (!used) {
+		type = OBJ_BAD;
+	} else
+		*curpos += used;

 	return type;
 }
@@@ -1531,9 -1485,8 +1531,9 @@@ static int packed_object_info(struct pa
 		*sizep = size;
 		break;
 	default:
-		die("pack %s contains unknown object type %d",
-		    p->pack_name, type);
+		error("unknown object type %i at offset %"PRIuMAX" in %s",
+		      type, (uintmax_t)obj_offset, p->pack_name);
+		type = OBJ_BAD;
 	}
 	unuse_pack(&w_curs);
 	return type;
@@@ -1605,9 -1558,11 +1605,9 @@@ static void *cache_or_unpack_entry(stru
 	struct delta_base_cache_entry *ent = delta_base_cache + hash;

 	ret = ent->data;
-	if (ret && ent->p == p && ent->base_offset == base_offset)
-		goto found_cache_entry;
-	return unpack_entry(p, base_offset, type, base_size);
+	if (!ret || ent->p != p || ent->base_offset != base_offset)
+		return unpack_entry(p, base_offset, type, base_size);

-found_cache_entry:
 	if (!keep_cache) {
 		ent->data = NULL;
 		ent->lru.next->prev = ent->lru.prev;
@@@ -1697,12 -1652,9 +1697,12 @@@ static void *unpack_delta_entry(struct
 		 * This is costly but should happen only in the presence
 		 * of a corrupted pack, and is better than failing outright.
 		 */
-		struct revindex_entry *revidx = find_pack_revindex(p, base_offset);
-		const unsigned char *base_sha1 =
-			nth_packed_object_sha1(p, revidx->nr);
+		struct revindex_entry *revidx;
+		const unsigned char *base_sha1;
+		revidx = find_pack_revindex(p, base_offset);
+		if (!revidx)
+			return NULL;
+		base_sha1 = nth_packed_object_sha1(p, revidx->nr);
 		error("failed to read delta base object %s"
 		      " at offset %"PRIuMAX" from %s",
 		      sha1_to_hex(base_sha1), (uintmax_t)base_offset,
@@@ -1731,8 -1683,6 +1731,8 @@@
 	return result;
 }

+int do_check_packed_object_crc;
+
 void *unpack_entry(struct packed_git *p, off_t obj_offset,
 		   enum object_type *type, unsigned long *sizep)
 {
@@@ -1740,20 -1690,6 +1740,20 @@@
 	off_t curpos = obj_offset;
 	void *data;

+	if (do_check_packed_object_crc && p->index_version > 1) {
+		struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
+		unsigned long len = revidx[1].offset - obj_offset;
+		if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
+			const unsigned char *sha1 =
+				nth_packed_object_sha1(p, revidx->nr);
+			error("bad packed object CRC for %s",
+			      sha1_to_hex(sha1));
+			mark_bad_packed_object(p, sha1);
+			unuse_pack(&w_curs);
+			return NULL;
+		}
+	}
+
 	*type = unpack_object_header(p, &w_curs, &curpos, sizep);
 	switch (*type) {
 	case OBJ_OFS_DELTA:
@@@ -2007,14 -1943,7 +2007,14 @@@ int sha1_object_info(const unsigned cha
 		if (!find_pack_entry(sha1, &e, NULL))
 			return status;
 	}
-	return packed_object_info(e.p, e.offset, sizep);
+
+	status = packed_object_info(e.p, e.offset, sizep);
+	if (status < 0) {
+		mark_bad_packed_object(e.p, sha1);
+		status = sha1_object_info(sha1, sizep);
+	}
+
+	return status;
 }

 static void *read_packed_sha1(const unsigned char *sha1,
@@@ -2056,7 -1985,9 +2056,7 @@@ static struct cached_object
 static int cached_object_nr, cached_object_alloc;

 static struct cached_object empty_tree = {
-	/* empty tree sha1: 4b825dc642cb6eb9a060e54bf8d69288fbee4904 */
-	"\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60"
-	"\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04",
+	EMPTY_TREE_SHA1_BIN,
 	OBJ_TREE,
 	"",
 	0
@@@ -2188,16 -2119,16 +2188,16 @@@ static void write_sha1_file_prepare(con
                                     const char *type, unsigned char *sha1,
                                     char *hdr, int *hdrlen)
 {
-	SHA_CTX c;
+	git_SHA_CTX c;

 	/* Generate the header */
 	*hdrlen = sprintf(hdr, "%s %lu", type, len)+1;

 	/* Sha1.. */
-	SHA1_Init(&c);
-	SHA1_Update(&c, hdr, *hdrlen);
-	SHA1_Update(&c, buf, len);
-	SHA1_Final(sha1, &c);
+	git_SHA1_Init(&c);
+	git_SHA1_Update(&c, hdr, *hdrlen);
+	git_SHA1_Update(&c, buf, len);
+	git_SHA1_Final(sha1, &c);
 }
@@@ -2315,7 -2246,7 +2315,7 @@@ static int write_loose_object(const uns
 	filename = sha1_file_name(sha1);
 	fd = create_tmpfile(tmpfile, sizeof(tmpfile), filename);
 	if (fd < 0) {
-		if (errno == EPERM)
+		if (errno == EACCES)
 			return error("insufficient permission for adding an object to repository database %s\n", get_object_directory());
 		else
 			return error("unable to create temporary sha1 filename %s: %s\n", tmpfile, strerror(errno));
@@@ -2436,21 -2367,51 +2436,21 @@@ int has_sha1_file(const unsigned char *
 	return has_loose_object(sha1);
 }

-int index_pipe(unsigned char *sha1, int fd, const char *type, int write_object)
-{
-	struct strbuf buf;
-	int ret;
-
-	strbuf_init(&buf, 0);
-	if (strbuf_read(&buf, fd, 4096) < 0) {
-		strbuf_release(&buf);
-		return -1;
-	}
-
-	if (!type)
-		type = blob_type;
-	if (write_object)
-		ret = write_sha1_file(buf.buf, buf.len, type, sha1);
-	else
-		ret = hash_sha1_file(buf.buf, buf.len, type, sha1);
-	strbuf_release(&buf);
-
-	return ret;
-}
-
-int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
-	     enum object_type type, const char *path)
+static int index_mem(unsigned char *sha1, void *buf, size_t size,
+		     int write_object, enum object_type type, const char *path)
 {
-	size_t size = xsize_t(st->st_size);
-	void *buf = NULL;
 	int ret, re_allocated = 0;

-	if (size)
-		buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
-	close(fd);
-
 	if (!type)
 		type = OBJ_BLOB;

 	/*
 	 * Convert blobs to git internal format
 	 */
-	if ((type == OBJ_BLOB) && S_ISREG(st->st_mode)) {
-		struct strbuf nbuf;
-		strbuf_init(&nbuf, 0);
+	if ((type == OBJ_BLOB) && path) {
+		struct strbuf nbuf = STRBUF_INIT;
 		if (convert_to_git(path, buf, size, &nbuf,
 				   write_object ? safe_crlf : 0)) {
-			munmap(buf, size);
 			buf = strbuf_detach(&nbuf, &size);
 			re_allocated = 1;
 		}
@@@ -2460,32 -2421,12 +2460,32 @@@
 		ret = write_sha1_file(buf, size, typename(type), sha1);
 	else
 		ret = hash_sha1_file(buf, size, typename(type), sha1);
-	if (re_allocated) {
+	if (re_allocated)
 		free(buf);
-		return ret;
-	}
-	if (size)
+	return ret;
+}
+
+int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
+	     enum object_type type, const char *path)
+{
+	int ret;
+	size_t size = xsize_t(st->st_size);
+
+	if (!S_ISREG(st->st_mode)) {
+		struct strbuf sbuf = STRBUF_INIT;
+		if (strbuf_read(&sbuf, fd, 4096) >= 0)
+			ret = index_mem(sha1, sbuf.buf, sbuf.len, write_object,
+					type, path);
+		else
+			ret = -1;
+		strbuf_release(&sbuf);
+	} else if (size) {
+		void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+		ret = index_mem(sha1, buf, size, write_object, type, path);
 		munmap(buf, size);
+	} else
+		ret = index_mem(sha1, NULL, size, write_object, type, path);
+
+	close(fd);
 	return ret;
 }
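Two of the shortlog items live in this file. The EACCES vs EPERM confusion: when a loose object cannot be created because the object store is not writable, open(2) fails with EACCES, so that is the errno the friendly "insufficient permission" message must key on. The reworked slash handling at the top of safe_create_leading_directories(), which skips duplicated and trailing slashes instead of blindly truncating at each '/', is the "bogus file exists error message" fix. The permission case can be reproduced in a throwaway repository, as a non-root user, with an object that does not already exist:

    $ chmod -R a-w .git/objects
    $ echo content | git hash-object -w --stdin
    error: insufficient permission for adding an object to repository database .git/objects
    $ chmod -R u+w .git/objects

The remaining sha1_file.c changes turn hard die() calls in the pack-reading paths into error() returns (OBJ_BAD), marking the offending pack bad so that sha1_object_info() and unpack_entry() can fall back to another copy of the object instead of aborting.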
diff --combined t/t9301-fast-export.sh
index 6ddd7c19fd,638c858dc7..2057435462
--- a/t/t9301-fast-export.sh
+++ b/t/t9301-fast-export.sh
@@@ -3,7 -3,7 +3,7 @@@
 #
 # Copyright (c) 2007 Johannes E. Schindelin
 #
-test_description='git-fast-export'
+test_description='git fast-export'
 . ./test-lib.sh

 test_expect_success 'setup' '
@@@ -67,7 -67,7 +67,7 @@@ test_expect_success 'iso-8859-1'
 	git config i18n.commitencoding ISO-8859-1 &&
 	# use author and committer name in ISO-8859-1 to match it.
-	. ../t3901-8859-1.txt &&
+	. "$TEST_DIRECTORY"/t3901-8859-1.txt &&
 	test_tick &&
 	echo rosten >file &&
 	git commit -s -m den file &&
@@@ -231,4 -231,12 +231,12 @@@ test_expect_success 'fast-export -C -C
 '

+test_expect_success 'fast-export | fast-import when master is tagged' '
+
+	git tag -m msg last &&
+	git fast-export -C -C --signed-tags=strip --all > output &&
+	test $(grep -c "^tag " output) = 3
+
+'
+
 test_done
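The new test at the end pins down the fast-export fix from the shortlog: with extra_refs kept as an unsorted string list, a tag created on the current branch tip is no longer dropped from the export stream. The same check can be run by hand; the count of 3 assumes, as in t9301 at this point, that two tags already exist before 'last' is added:

    $ git tag -m msg last
    $ git fast-export --signed-tags=strip --all | grep -c '^tag '
    3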