Merge branch 'sb/packfiles-in-repository' into next
author     Junio C Hamano <gitster@pobox.com>
           Fri, 30 Mar 2018 19:42:08 +0000 (12:42 -0700)
committer  Junio C Hamano <gitster@pobox.com>
           Fri, 30 Mar 2018 19:42:08 +0000 (12:42 -0700)
Refactoring of the internal global data structure continues.

* sb/packfiles-in-repository:
packfile: keep prepare_packed_git() private
packfile: allow find_pack_entry to handle arbitrary repositories
packfile: add repository argument to find_pack_entry
packfile: allow reprepare_packed_git to handle arbitrary repositories
packfile: allow prepare_packed_git to handle arbitrary repositories
packfile: allow prepare_packed_git_one to handle arbitrary repositories
packfile: add repository argument to reprepare_packed_git
packfile: add repository argument to prepare_packed_git
packfile: add repository argument to prepare_packed_git_one
packfile: allow install_packed_git to handle arbitrary repositories
packfile: allow rearrange_packed_git to handle arbitrary repositories
packfile: allow prepare_packed_git_mru to handle arbitrary repositories
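All of the topics above apply one mechanical pattern: packfile state that used
to live in file-scope globals becomes per-repository state, and each function
gains an explicit "struct repository *" argument (existing callers pass
the_repository).  The sketch below shows only that shape; the struct layout
and signatures are simplified stand-ins, not the actual declarations from
packfile.h.

------------------------------------------------
/*
 * Illustrative sketch only; names and signatures are simplified and
 * do not match Git's packfile.h.
 */
struct packed_git;                      /* one .pack/.idx pair */

struct object_store {                   /* hypothetical per-repo state */
	struct packed_git *packs;
	int packs_prepared;
};

struct repository {
	struct object_store objects;
};

/* formerly a public function working on globals; now private and
 * operating on whichever repository it is handed */
static void prepare_packed_git(struct repository *r)
{
	if (r->objects.packs_prepared)
		return;
	/* ... scan this repository's objects/pack directory ... */
	r->objects.packs_prepared = 1;
}

/* callers that used to rely on the implicit global repository now say
 * which one they mean, e.g. find_pack_entry(the_repository, sha1) */
int find_pack_entry(struct repository *r, const unsigned char *sha1)
{
	prepare_packed_git(r);          /* lazily set up r's pack list */
	(void)sha1;                     /* ... search r->objects.packs ... */
	return 0;
}
------------------------------------------------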

547 files changed:
.gitignore
.travis.yml
Documentation/CodingGuidelines
Documentation/Makefile
Documentation/RelNotes/2.16.2.txt [new file with mode: 0644]
Documentation/RelNotes/2.16.3.txt [new file with mode: 0644]
Documentation/RelNotes/2.17.0.txt
Documentation/config.txt
Documentation/diff-options.txt
Documentation/fetch-options.txt
Documentation/git-add.txt
Documentation/git-am.txt
Documentation/git-branch.txt
Documentation/git-config.txt
Documentation/git-daemon.txt
Documentation/git-fetch.txt
Documentation/git-filter-branch.txt
Documentation/git-gc.txt
Documentation/git-index-pack.txt
Documentation/git-interpret-trailers.txt
Documentation/git-rebase.txt
Documentation/git-remote.txt
Documentation/git-send-email.txt
Documentation/git-shortlog.txt
Documentation/git-stash.txt
Documentation/git-status.txt
Documentation/git-tag.txt
Documentation/git-update-index.txt
Documentation/git-worktree.txt
Documentation/git.txt
Documentation/gitattributes.txt
Documentation/gitignore.txt
Documentation/gitremote-helpers.txt
Documentation/gitrepository-layout.txt
Documentation/gitsubmodules.txt
Documentation/howto/recover-corrupted-object-harder.txt
Documentation/merge-options.txt
Documentation/merge-strategies.txt
Documentation/pretty-formats.txt
Documentation/technical/api-object-access.txt
Documentation/technical/api-submodule-config.txt
Documentation/technical/hash-function-transition.txt
Documentation/technical/http-protocol.txt
Documentation/technical/protocol-v2.txt [new file with mode: 0644]
GIT-VERSION-GEN
INSTALL
Makefile
apply.c
archive-tar.c
archive-zip.c
archive.c
archive.h
bisect.c
blame.c
builtin.h
builtin/add.c
builtin/am.c
builtin/archive.c
builtin/blame.c
builtin/branch.c
builtin/cat-file.c
builtin/check-ignore.c
builtin/checkout-index.c
builtin/checkout.c
builtin/clean.c
builtin/clone.c
builtin/commit-tree.c
builtin/commit.c
builtin/config.c
builtin/describe.c
builtin/diff-tree.c
builtin/difftool.c
builtin/fast-export.c
builtin/fetch-pack.c
builtin/fetch.c
builtin/fmt-merge-msg.c
builtin/fsck.c
builtin/gc.c
builtin/grep.c
builtin/hash-object.c
builtin/help.c
builtin/index-pack.c
builtin/init-db.c
builtin/log.c
builtin/ls-files.c
builtin/ls-remote.c
builtin/ls-tree.c
builtin/merge-tree.c
builtin/merge.c
builtin/mktag.c
builtin/mktree.c
builtin/mv.c
builtin/name-rev.c
builtin/notes.c
builtin/pack-objects.c
builtin/pack-redundant.c
builtin/prune.c
builtin/pull.c
builtin/push.c
builtin/rebase--helper.c
builtin/receive-pack.c
builtin/reflog.c
builtin/remote.c
builtin/replace.c
builtin/reset.c
builtin/rev-list.c
builtin/rev-parse.c
builtin/rm.c
builtin/send-pack.c
builtin/serve.c [new file with mode: 0644]
builtin/shortlog.c
builtin/show-branch.c
builtin/show-ref.c
builtin/submodule--helper.c
builtin/tag.c
builtin/unpack-file.c
builtin/unpack-objects.c
builtin/update-index.c
builtin/update-server-info.c
builtin/upload-pack.c [new file with mode: 0644]
builtin/verify-commit.c
builtin/worktree.c
builtin/write-tree.c
bulk-checkin.c
bulk-checkin.h
bundle.c
cache-tree.c
cache-tree.h
cache.h
ci/lib-travisci.sh
ci/run-build-and-tests.sh [new file with mode: 0755]
ci/run-build.sh [deleted file]
ci/run-tests.sh [deleted file]
color.c
color.h
combine-diff.c
commit.c
commit.h
compat/mingw.c
compat/mingw.h
config.c
config.mak.uname
configure.ac
connect.c
connect.h
contrib/coccinelle/strbuf.cocci
contrib/completion/git-completion.bash
contrib/diff-highlight/DiffHighlight.pm
contrib/diff-highlight/t/t9400-diff-highlight.sh
contrib/emacs/.gitignore [deleted file]
contrib/emacs/Makefile [deleted file]
contrib/emacs/README
contrib/emacs/git-blame.el [deleted file]
contrib/emacs/git.el [deleted file]
contrib/examples/README
contrib/examples/builtin-fetch--tool.c [deleted file]
contrib/examples/git-am.sh [deleted file]
contrib/examples/git-checkout.sh [deleted file]
contrib/examples/git-clean.sh [deleted file]
contrib/examples/git-clone.sh [deleted file]
contrib/examples/git-commit.sh [deleted file]
contrib/examples/git-difftool.perl [deleted file]
contrib/examples/git-fetch.sh [deleted file]
contrib/examples/git-gc.sh [deleted file]
contrib/examples/git-log.sh [deleted file]
contrib/examples/git-ls-remote.sh [deleted file]
contrib/examples/git-merge-ours.sh [deleted file]
contrib/examples/git-merge.sh [deleted file]
contrib/examples/git-notes.sh [deleted file]
contrib/examples/git-pull.sh [deleted file]
contrib/examples/git-remote.perl [deleted file]
contrib/examples/git-repack.sh [deleted file]
contrib/examples/git-rerere.perl [deleted file]
contrib/examples/git-reset.sh [deleted file]
contrib/examples/git-resolve.sh [deleted file]
contrib/examples/git-revert.sh [deleted file]
contrib/examples/git-svnimport.perl [deleted file]
contrib/examples/git-svnimport.txt [deleted file]
contrib/examples/git-tag.sh [deleted file]
contrib/examples/git-verify-tag.sh [deleted file]
contrib/examples/git-whatchanged.sh [deleted file]
contrib/hooks/pre-auto-gc-battery
contrib/subtree/git-subtree.sh
contrib/subtree/git-subtree.txt
convert.c
convert.h
credential.c
csum-file.c
csum-file.h
daemon.c
diff-lib.c
diff.c
diff.h
diffcore-delta.c
diffcore-rename.c
dir.c
dir.h
entry.c
environment.c
fast-import.c
fetch-pack.c
fetch-pack.h
fsck.c
fsmonitor.c
fsmonitor.h
git-add--interactive.perl
git-compat-util.h
git-cvsimport.perl
git-filter-branch.sh
git-rebase--am.sh
git-rebase--interactive.sh
git-rebase--merge.sh
git-rebase.sh
git-send-email.perl
git-sh-i18n.sh
git-stash.sh
git.c
gitweb/INSTALL
gitweb/gitweb.perl
grep.c
hash.h
http-backend.c
http-push.c
http-walker.c
http.c
http.h
imap-send.c
line-log.c
list-objects-filter.c
log-tree.c
ls-refs.c [new file with mode: 0644]
ls-refs.h [new file with mode: 0644]
mailmap.c
match-trees.c
mem-pool.c [new file with mode: 0644]
mem-pool.h [new file with mode: 0644]
merge-blobs.c
merge-recursive.c
merge-recursive.h
merge.c
name-hash.c
notes-cache.c
notes-merge.c
notes-utils.c
notes-utils.h
notes.c
notes.h
object.c
object.h
pack-bitmap-write.c
pack-check.c
pack-write.c
pack.h
packfile.c
packfile.h
parse-options.c
parse-options.h
perl/FromCPAN/.gitattributes [new file with mode: 0644]
perl/FromCPAN/Error.pm [new file with mode: 0644]
perl/FromCPAN/Mail/Address.pm [new file with mode: 0644]
perl/Git.pm
perl/Git/Error.pm [deleted file]
perl/Git/FromCPAN/Error.pm [deleted file]
perl/Git/FromCPAN/Mail/Address.pm [deleted file]
perl/Git/LoadCPAN.pm [new file with mode: 0644]
perl/Git/LoadCPAN/Error.pm [new file with mode: 0644]
perl/Git/LoadCPAN/Mail/Address.pm [new file with mode: 0644]
perl/Git/Mail/Address.pm [deleted file]
perl/Git/SVN.pm
pkt-line.c
pkt-line.h
preload-index.c
pretty.c
protocol.c
protocol.h
quote.c
reachable.c
read-cache.c
ref-filter.c
refs.c
refs.h
refs/packed-backend.c
remote-curl.c
remote-testsvn.c
remote.c
remote.h
replace_object.c
rerere.c
resolve-undo.c
resolve-undo.h
revision.c
revision.h
run-command.c
send-pack.c
sequencer.c
serve.c [new file with mode: 0644]
serve.h [new file with mode: 0644]
setup.c
sha1-lookup.c
sha1-lookup.h
sha1_file.c
sha1_name.c
sha1dc_git.h
split-index.c
split-index.h
strbuf.c
strbuf.h
streaming.c
streaming.h
submodule-config.c
submodule.c
submodule.h
t/README
t/helper/test-chmtime.c
t/helper/test-config.c
t/helper/test-ctype.c
t/helper/test-date.c
t/helper/test-delta.c
t/helper/test-drop-caches.c
t/helper/test-dump-cache-tree.c
t/helper/test-dump-split-index.c
t/helper/test-dump-untracked-cache.c
t/helper/test-example-decorate.c
t/helper/test-genrandom.c
t/helper/test-hashmap.c
t/helper/test-index-version.c
t/helper/test-lazy-init-name-hash.c
t/helper/test-match-trees.c
t/helper/test-mergesort.c
t/helper/test-mktemp.c
t/helper/test-online-cpus.c
t/helper/test-path-utils.c
t/helper/test-pkt-line.c [new file with mode: 0644]
t/helper/test-prio-queue.c
t/helper/test-read-cache.c
t/helper/test-ref-store.c
t/helper/test-regex.c
t/helper/test-revision-walking.c
t/helper/test-run-command.c
t/helper/test-scrap-cache-tree.c
t/helper/test-sha1-array.c
t/helper/test-sha1.c
t/helper/test-sha1.sh
t/helper/test-sigchain.c
t/helper/test-strcmp-offset.c
t/helper/test-string-list.c
t/helper/test-submodule-config.c
t/helper/test-subprocess.c
t/helper/test-tool.c [new file with mode: 0644]
t/helper/test-tool.h [new file with mode: 0644]
t/helper/test-urlmatch-normalization.c
t/helper/test-wildmatch.c
t/helper/test-write-cache.c
t/lib-git-p4.sh
t/lib-git-svn.sh
t/lib-pack.sh
t/lib-terminal.sh
t/perf/aggregate.perl
t/perf/p0002-read-cache.sh
t/perf/p0004-lazy-init-name-hash.sh
t/perf/p0007-write-cache.sh
t/perf/p0071-sort.sh
t/perf/p7519-fsmonitor.sh
t/perf/run
t/t0002-gitfile.sh
t/t0005-signals.sh
t/t0006-date.sh
t/t0008-ignores.sh
t/t0009-prio-queue.sh
t/t0011-hashmap.sh
t/t0013-sha1dc.sh
t/t0021-conversion.sh
t/t0040-parse-options.sh
t/t0041-usage.sh [new file with mode: 0755]
t/t0050-filesystem.sh
t/t0060-path-utils.sh
t/t0061-run-command.sh
t/t0062-revision-walking.sh
t/t0063-string-list.sh
t/t0064-sha1-array.sh
t/t0065-strcmp-offset.sh
t/t0070-fundamental.sh
t/t0090-cache-tree.sh
t/t0110-urlmatch-normalization.sh
t/t0205-gettext-poison.sh
t/t1006-cat-file.sh
t/t1011-read-tree-sparse-checkout.sh
t/t1050-large.sh
t/t1300-repo-config.sh
t/t1304-default-acl.sh
t/t1305-config-include.sh
t/t1308-config-set.sh
t/t1309-early-config.sh
t/t1405-main-ref-store.sh
t/t1406-submodule-ref-store.sh
t/t1407-worktree-ref-store.sh
t/t1410-reflog.sh
t/t1411-reflog-show.sh
t/t1501-work-tree.sh
t/t1506-rev-parse-diagnosis.sh
t/t1507-rev-parse-upstream.sh
t/t1510-repo-setup.sh
t/t1600-index.sh
t/t1700-split-index.sh
t/t2020-checkout-detach.sh
t/t2022-checkout-paths.sh
t/t2025-worktree-add.sh
t/t2026-worktree-prune.sh
t/t2028-worktree-move.sh
t/t2101-update-index-reupdate.sh
t/t2104-update-index-skip-worktree.sh
t/t2107-update-index-basic.sh
t/t3008-ls-files-lazy-init-name-hash.sh
t/t3030-merge-recursive.sh
t/t3070-wildmatch.sh
t/t3200-branch.sh
t/t3306-notes-prune.sh
t/t3400-rebase.sh
t/t3404-rebase-interactive.sh
t/t3405-rebase-malformed.sh
t/t3408-rebase-multi-line.sh
t/t3418-rebase-continue.sh
t/t3501-revert-cherry-pick.sh
t/t3510-cherry-pick-sequence.sh
t/t3600-rm.sh
t/t3700-add.sh
t/t3701-add-interactive.sh
t/t3905-stash-include-untracked.sh
t/t4001-diff-rename.sh
t/t4011-diff-symlink.sh
t/t4013-diff-various.sh
t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial [new file with mode: 0644]
t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial [new file with mode: 0644]
t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode [new file with mode: 0644]
t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode [new file with mode: 0644]
t/t4018-diff-funcname.sh
t/t4018/golang-complex-function [new file with mode: 0644]
t/t4018/golang-func [new file with mode: 0644]
t/t4018/golang-interface [new file with mode: 0644]
t/t4018/golang-long-func [new file with mode: 0644]
t/t4018/golang-struct [new file with mode: 0644]
t/t4035-diff-quiet.sh
t/t4052-stat-output.sh
t/t4135-apply-weird-filenames.sh
t/t4150-am.sh
t/t4151-am-abort.sh
t/t4200-rerere.sh
t/t4201-shortlog.sh
t/t5000-tar-tree.sh
t/t5300-pack-object.sh
t/t5301-sliding-window.sh
t/t5302-pack-index.sh
t/t5303-pack-corruption-resilience.sh
t/t5304-prune.sh
t/t5310-pack-bitmaps.sh
t/t5313-pack-bounds-checks.sh
t/t5314-pack-cycle-detection.sh
t/t5316-pack-delta-depth.sh
t/t5400-send-pack.sh
t/t5500-fetch-pack.sh
t/t5510-fetch.sh
t/t5516-fetch-push.sh
t/t5526-fetch-submodules.sh
t/t5536-fetch-conflicts.sh
t/t5541-http-push-smart.sh
t/t5545-push-options.sh
t/t5546-receive-limits.sh
t/t5547-push-quarantine.sh
t/t5570-git-daemon.sh
t/t5608-clone-2gb.sh
t/t5616-partial-clone.sh
t/t5701-git-serve.sh [new file with mode: 0755]
t/t5702-protocol-v2.sh [new file with mode: 0755]
t/t5812-proto-disable-http.sh
t/t6015-rev-list-show-all-parents.sh [deleted file]
t/t6022-merge-rename.sh
t/t6040-tracking-info.sh
t/t6043-merge-rename-directories.sh [new file with mode: 0755]
t/t6120-describe.sh
t/t6200-fmt-merge-msg.sh
t/t6300-for-each-ref.sh
t/t6500-gc.sh
t/t6501-freshen-objects.sh
t/t7001-mv.sh
t/t7003-filter-branch.sh
t/t7004-tag.sh
t/t7006-pager.sh
t/t7063-status-untracked-cache.sh
t/t7064-wtstatus-pv2.sh
t/t7400-submodule-basic.sh
t/t7409-submodule-detached-work-tree.sh
t/t7411-submodule-config.sh
t/t7508-status.sh
t/t7519-status-fsmonitor.sh
t/t7600-merge.sh
t/t7607-merge-overwrite.sh
t/t7701-repack-unpack-unreachable.sh
t/t7812-grep-icase-non-ascii.sh
t/t9001-send-email.sh
t/t9004-example.sh
t/t9100-git-svn-basic.sh
t/t9104-git-svn-follow-parent.sh
t/t9108-git-svn-glob.sh
t/t9109-git-svn-multi-glob.sh
t/t9110-git-svn-use-svm-props.sh
t/t9111-git-svn-use-svnsync-props.sh
t/t9114-git-svn-dcommit-merge.sh
t/t9130-git-svn-authors-file.sh
t/t9138-git-svn-authors-prog.sh
t/t9153-git-svn-rewrite-uuid.sh
t/t9168-git-svn-partially-globbed-names.sh
t/t9300-fast-import.sh
t/t9350-fast-export.sh
t/t9400-git-cvsserver-server.sh
t/t9402-git-cvsserver-refs.sh
t/t9802-git-p4-filetype.sh
t/t9803-git-p4-shell-metachars.sh
t/t9813-git-p4-preserve-users.sh
t/t9820-git-p4-editor-handling.sh
t/t9902-completion.sh
t/t9903-bash-prompt.sh
t/test-lib-functions.sh
t/test-lib.sh
tag.c
tempfile.c
tempfile.h
trailer.c
transport-helper.c
transport-internal.h
transport.c
transport.h
tree-walk.c
tree-walk.h
tree.c
tree.h
unpack-trees.c
unpack-trees.h
upload-pack.c
upload-pack.h [new file with mode: 0644]
userdiff.c
walker.c
worktree.c
worktree.h
wrapper.c
wt-status.c
wt-status.h
xdiff-interface.c
index 833ef3b0b783b8180d0dad1ce336713bddf09b26..2d0450c262e1c8ec9cb433b7eb78e168274060f5 100644 (file)
 /git-rm
 /git-send-email
 /git-send-pack
+/git-serve
 /git-sh-i18n
 /git-sh-i18n--envsubst
 /git-sh-setup
index 4684b3f4f30f78d5f1d5d0f7eb5bfea9b81861ba..5f5ee4f3bd86f3f7d6cfce4e99453f68f0673eab 100644 (file)
@@ -33,7 +33,6 @@ matrix:
       compiler:
       addons:
       before_install:
-      before_script:
       script:
         - >
           test "$TRAVIS_REPO_SLUG" != "git/git" ||
@@ -46,7 +45,6 @@ matrix:
       services:
         - docker
       before_install:
-      before_script:
       script: ci/run-linux32-docker.sh
     - env: jobname=StaticAnalysis
       os: linux
@@ -56,7 +54,6 @@ matrix:
           packages:
           - coccinelle
       before_install:
-      before_script:
       script: ci/run-static-analysis.sh
       after_failure:
     - env: jobname=Documentation
@@ -68,13 +65,11 @@ matrix:
           - asciidoc
           - xmlto
       before_install:
-      before_script:
       script: ci/test-documentation.sh
       after_failure:
 
 before_install: ci/install-dependencies.sh
-before_script: ci/run-build.sh
-script: ci/run-tests.sh
+script: ci/run-build-and-tests.sh
 after_failure: ci/print-test-failures.sh
 
 notifications:
index c4cb5ff0d477938b8fd49749c3589c5afbb04221..48aa4edfbdd180e1c6d874b6bb61ea5fc8e32ef5 100644 (file)
@@ -386,6 +386,11 @@ For C programs:
  - Use Git's gettext wrappers to make the user interface
    translatable. See "Marking strings for translation" in po/README.
 
+ - Variables and functions local to a given source file should be marked
+   with "static". Variables that are visible to other source files
+   must be declared with "extern" in header files. However, function
+   declarations should not use "extern", as that is already the default.
+
 For Perl programs:
 
  - Most of the C guidelines above apply.
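To make the new C guideline above concrete, here is a small made-up example:
everything used only inside one source file is marked "static", a variable
shared across files is declared "extern" in the header, and the function
declaration in the header carries no "extern".

------------------------------------------------
/*
 * Hypothetical example; the names are invented for illustration.
 *
 * In the header (say, example.h) one would declare:
 *     extern int example_verbosity;     // variable: "extern" required
 *     int example_count_calls(int x);   // function: no "extern" needed
 */

int example_verbosity;          /* defined here, usable from other files */

static int calls_so_far;        /* file-local variable: mark it static */

static int bump(int x)          /* file-local helper: static as well */
{
	return x + 1;
}

int example_count_calls(int x)  /* the one function meant for other files */
{
	calls_so_far++;
	return example_verbosity ? bump(x) : x;
}
------------------------------------------------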
index 6232143cb95d105b0c33b81d16c6b7b628fadb97..fa9e5c0acd762d3623a5b436349b8d87e41aeb32 100644 (file)
@@ -78,6 +78,7 @@ TECH_DOCS += technical/pack-heuristics
 TECH_DOCS += technical/pack-protocol
 TECH_DOCS += technical/protocol-capabilities
 TECH_DOCS += technical/protocol-common
+TECH_DOCS += technical/protocol-v2
 TECH_DOCS += technical/racy-git
 TECH_DOCS += technical/send-pack-pipeline
 TECH_DOCS += technical/shallow
diff --git a/Documentation/RelNotes/2.16.2.txt b/Documentation/RelNotes/2.16.2.txt
new file mode 100644 (file)
index 0000000..a216466
--- /dev/null
@@ -0,0 +1,30 @@
+Git v2.16.2 Release Notes
+=========================
+
+Fixes since v2.16.1
+-------------------
+
+ * An old regression in "git describe --all $annotated_tag^0" has been
+   fixed.
+
+ * "git svn dcommit" did not take into account the fact that a
+   svn+ssh:// URL with a username@ (typically used for pushing) refers
+   to the same SVN repository without the username@ and failed when
+   svn.pushmergeinfo option is set.
+
+ * "git merge -Xours/-Xtheirs" learned to use our/their version when
+   resolving conflicting updates to a symbolic link.
+
+ * "git clone $there $here" is allowed even when here directory exists
+   as long as it is an empty directory, but the command incorrectly
+   removed it upon a failure of the operation.
+
+ * "git stash -- <pathspec>" incorrectly blew away untracked files in
+   the directory that matched the pathspec, which has been corrected.
+
+ * "git add -p" was taught to ignore local changes to submodules as
+   they do not interfere with the partial addition of regular changes
+   anyway.
+
+
+Also contains various documentation updates and code clean-ups.
diff --git a/Documentation/RelNotes/2.16.3.txt b/Documentation/RelNotes/2.16.3.txt
new file mode 100644 (file)
index 0000000..64a0bcb
--- /dev/null
@@ -0,0 +1,49 @@
+Git v2.16.3 Release Notes
+=========================
+
+Fixes since v2.16.2
+-------------------
+
+ * "git status" after moving a path in the working tree (hence making
+   it appear "removed") and then adding with the -N option (hence
+   making that appear "added") detected it as a rename, but did not
+   report the  old and new pathnames correctly.
+
+ * "git commit --fixup" did not allow "-m<message>" option to be used
+   at the same time; allow it to annotate resulting commit with more
+   text.
+
+ * When resetting the working tree files recursively, the working tree
+   of submodules are now also reset to match.
+
+ * Fix for commented-out code to adjust it to a rather old API change
+   around object ID.
+
+ * When there are too many changed paths, "git diff" showed a warning
+   message but in the middle of a line.
+
+ * The http tracing code, often used to debug connection issues,
+   learned to redact potentially sensitive information from its output
+   so that it can be more safely sharable.
+
+ * Crash fix for a corner case where an error codepath tried to unlock
+   what it did not acquire lock on.
+
+ * The split-index mode had a few corner case bugs fixed.
+
+ * Assorted fixes to "git daemon".
+
+ * Completion of "git merge -s<strategy>" (in contrib/) did not work
+   well in non-C locale.
+
+ * Workaround for segfault with more recent versions of SVN.
+
+ * Recently introduced leaks in fsck have been plugged.
+
+ * Travis CI integration now builds the executable in the 'script'
+   phase to follow the established practice, rather than during the
+   'before_script' phase.  This allows the CI to categorize the
+   failures better ('failed' is the project's fault, 'errored' is the
+   build environment's).
+
+Also contains various documentation updates and code clean-ups.
index e1e509b5b0301e22591a1462f6dfc233d4d43c18..d6db0e19cf17ba10700f7b314ecab3c4c51e675e 100644 (file)
@@ -8,20 +8,92 @@ UI, Workflows & Features
 
  * "diff" family of commands learned "--find-object=<object-id>" option
    to limit the findings to changes that involve the named object.
-   (merge 4d8c51aa19 sb/diff-blobfind-pickaxe later to maint).
+
+ * "git format-patch" learned to give 72-cols to diffstat, which is
+   consistent with other line length limits the subcommand uses for
+   its output meant for e-mails.
+
+ * The log from "git daemon" can be redirected with a new option; one
+   relevant use case is to send the log to standard error (instead of
+   syslog) when running it from inetd.
+
+ * "git rebase" learned to take "--allow-empty-message" option.
+
+ * "git am" has learned the "--quit" option, in addition to the
+   existing "--abort" option; having the pair mirrors a few other
+   commands like "rebase" and "cherry-pick".
+
+ * "git worktree add" learned to run the post-checkout hook, just like
+   "git clone" runs it upon the initial checkout.
+
+ * "git tag" learned an explicit "--edit" option that allows the
+   message given via "-m" and "-F" to be further edited.
+
+ * "git fetch --prune-tags" may be used as a handy short-hand for
+   getting rid of stale tags that are locally held.
+
+ * The new "--show-current-patch" option gives an end-user facing way
+   to get the diff being applied when "git rebase" (and "git am")
+   stops with a conflict.
+
+ * "git add -p" used to offer "/" (look for a matching hunk) as a
+   choice, even when there was only one hunk, which has been corrected.
+   Also the single-key help is now given only for keys that are
+   enabled (e.g. help for '/' won't be shown when there is only one
+   hunk).
+
+ * Since Git 1.7.9, "git merge" defaulted to --no-ff (i.e. even when
+   the side branch being merged is a descendant of the current commit,
+   create a merge commit instead of fast-forwarding) when merging a
+   tag object.  This was an appropriate default for integrators who
+   pull signed tags from their downstream contributors, but caused
+   unnecessary merges when used by downstream contributors who
+   habitually "catch up" their topic branches with tagged releases
+   from the upstream.  Update "git merge" to default to --no-ff only
+   when merging a tag object that does *not* sit at its usual place in
+   refs/tags/ hierarchy, and allow fast-forwarding otherwise, to
+   mitigate the problem.
+
+ * "git status" can spend a lot of cycles to compute the relation
+   between the current branch and its upstream, which can now be
+   disabled with "--no-ahead-behind" option.
+
+ * "git diff" and friends learned funcname patterns for Go language
+   source files.
+
+ * "git send-email" learned "--reply-to=<address>" option.
+
+ * Funcname pattern used for C# now recognizes "async" keyword.
+
+ * In a way similar to how "git tag" learned to honor the pager
+   setting only in the list mode, "git config" learned to ignore the
+   pager setting when it is used for setting values (i.e. when the
+   purpose of the operation is not to "show").
 
 
 Performance, Internal Implementation, Development Support etc.
 
  * More perf tests for threaded grep
-   (merge 7b31b55db1 ab/perf-grep-threads later to maint).
 
  * "perf" test output can be sent to codespeed server.
-   (merge 19cf57a92e cc/codespeed later to maint).
 
  * The build procedure for perl/ part has been greatly simplified by
    weaning ourselves off of MakeMaker.
 
+ * Perl 5.8 or greater has been required since Git 1.7.4 released in
+   2010, but we continued to assume some core modules may not exist and
+   used a conditional "eval { require <<module>> }"; we no longer do
+   this.  Some platforms (Fedora/RedHat/CentOS, for example) ship Perl
+   without all core modules by default (e.g. Digest::MD5, File::Temp,
+   File::Spec, Net::Domain, Net::SMTP).  Users on such platforms may
+   need to install these additional modules.
+
+ * As a convenience, we install copies of Perl modules we require which
+   are not part of the core Perl distribution (e.g. Error and
+   Mail::Address).  Users and packagers whose operating system provides
+   these modules can set NO_PERL_CPAN_FALLBACKS to avoid installing the
+   bundled modules.
+
  * In preparation for implementing narrow/partial clone, the machinery
    for checking object connectivity used by gc and fsck has been
    taught that a missing object is OK when it is referenced by a
@@ -44,13 +116,10 @@ Performance, Internal Implementation, Development Support etc.
 
  * The tracing machinery learned to report tweaking of environment
    variables as well.
-   (merge 090a09272a nd/trace-with-env later to maint).
 
  * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str)
-   (merge cd9a4b6d93 rs/strbuf-cocci-workaround later to maint).
 
  * Prevent "clang-format" from breaking line after function return type.
-   (merge a3715d43e8 po/clang-format-functype-weight later to maint).
 
  * The sequencer infrastructure is shared across "git cherry-pick",
    "git rebase -i", etc., and has always spawned "git commit" when it
@@ -59,6 +128,40 @@ Performance, Internal Implementation, Development Support etc.
    gives performance boost for a few tens of percents in some sample
    scenarios.
 
+ * Push the submodule version of collision-detecting SHA-1 hash
+   implementation a bit harder on builders.
+
+ * Avoid mmapping small files while using packed refs (especially ones
+   with zero size, which would cause later munmap() to fail).
+
+ * Conversion from uchar[20] to struct object_id continues.
+
+ * More tests for wildmatch functions.
+
+ * The code to binary search starting from a fan-out table (which is
+   how the packfile is indexed with object names) has been refactored
+   into a reusable helper.
+
+ * We now avoid using identifiers that clash with C++ keywords.  Even
+   though it is not a goal to compile Git with C++ compilers, changes
+   like this help use of code analysis tools that target C++ on our
+   codebase.
+
+ * The executable is now built in the 'script' phase in the Travis CI
+   integration, to follow the established practice, rather than during
+   the 'before_script' phase.  This allows the CI to categorize the
+   failures better ('failed' is the project's fault, 'errored' is the
+   build environment's).
+   (merge 3c93b82920 sg/travis-build-during-script-phase later to maint).
+
+ * Writing out the index file when the only thing that changed in it
+   is the untracked cache information is often wasteful, and this has
+   been optimized out.
+
+ * Various pieces of Perl code we have have been cleaned up.
+
+ * Internal API clean-up to allow write_locked_index() to optionally
+   skip writing the in-core index when it is not modified.
+
 
 Also contains various documentation updates and code clean-ups.
 
@@ -68,44 +171,35 @@ Fixes since v2.16
 
  * An old regression in "git describe --all $annotated_tag^0" has been
    fixed.
-   (merge 1bba00130a dk/describe-all-output-fix later to maint).
 
  * "git status" after moving a path in the working tree (hence making
    it appear "removed") and then adding with the -N option (hence
    making that appear "added") detected it as a rename, but did not
    report the  old and new pathnames correctly.
-   (merge 176ea74793 nd/ita-wt-renames-in-status later to maint).
 
  * "git svn dcommit" did not take into account the fact that a
    svn+ssh:// URL with a username@ (typically used for pushing) refers
    to the same SVN repository without the username@ and failed when
    svn.pushmergeinfo option is set.
-   (merge 8aaed892fd jm/svn-pushmergeinfo-fix later to maint).
 
  * API clean-up around revision traversal.
-   (merge 6fcec2f9ae rs/lose-leak-pending later to maint).
 
  * "git merge -Xours/-Xtheirs" learned to use our/their version when
    resolving conflicting updates to a symbolic link.
-   (merge fd48b46474 jc/merge-symlink-ours-theirs later to maint).
 
  * "git clone $there $here" is allowed even when here directory exists
    as long as it is an empty directory, but the command incorrectly
    removed it upon a failure of the operation.
-   (merge d45420c1c8 jk/abort-clone-with-existing-dest later to maint).
 
  * "git commit --fixup" did not allow "-m<message>" option to be used
    at the same time; allow it to annotate resulting commit with more
    text.
-   (merge 30884c9afc ab/commit-m-with-fixup later to maint).
 
  * When resetting the working tree files recursively, the working tree
    of submodules are now also reset to match.
-   (merge 7dcc1f4df8 sb/submodule-update-reset-fix later to maint).
 
  * "git stash -- <pathspec>" incorrectly blew away untracked files in
    the directory that matched the pathspec, which has been corrected.
-   (merge bba067d2fa tg/stash-with-pathspec-fix later to maint).
 
  * Instead of maintaining home-grown email address parsing code, ship
    a copy of reasonably recent Mail::Address to be used as a fallback
@@ -115,7 +209,6 @@ Fixes since v2.16
  * "git add -p" was taught to ignore local changes to submodules as
    they do not interfere with the partial addition of regular changes
    anyway.
-   (merge 12434efc1d nd/add-i-ignore-submodules later to maint).
 
  * Avoid showing a warning message in the middle of a line of "git
    diff" output.
@@ -143,11 +236,124 @@ Fixes since v2.16
  * Workaround for segfault with more recent versions of SVN.
    (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint).
 
+ * Plug recently introduced leaks in fsck.
+   (merge ba3a08ca0e jt/fsck-code-cleanup later to maint).
+
+ * "git pull --rebase" did not pass verbosity setting down when
+   recursing into a submodule.
+   (merge a56771a668 sb/pull-rebase-submodule later to maint).
+
+ * The way "git reset --hard" reports the commit the updated HEAD
+   points at is made consistent with the way the commit title is
+   generated by other parts of the system.  This matters when the
+   title is spread across multiple physical lines.
+   (merge 1cf823fb68 tg/reset-hard-show-head-with-pretty later to maint).
+
+ * Test fixes.
+   (merge 63b1a175ee sg/test-i18ngrep later to maint).
+
+ * Some bugs around "untracked cache" feature have been fixed.  This
+   will notice corrupt data in the untracked cache left by old and
+   buggy code and issue a warning---the index can be fixed by clearing
+   the untracked cache from it.
+   (merge 0cacebf099 nd/fix-untracked-cache-invalidation later to maint).
+   (merge 7bf0be7501 ab/untracked-cache-invalidation-docs later to maint).
+
+ * "git blame HEAD COPYING" in a bare repository failed to run, while
+   "git blame HEAD -- COPYING" run just fine.  This has been corrected.
+
+ * "git add" files in the same directory, but spelling the directory
+   path in different cases on case insensitive filesystem, corrupted
+   the name hash data structure and led to unexpected results.  This
+   has been corrected.
+   (merge c95525e90d bp/name-hash-dirname-fix later to maint).
+
+ * "git rebase -p" mangled log messages of a merge commit, which is
+   now fixed.
+   (merge ed5144d7eb js/fix-merge-arg-quoting-in-rebase-p later to maint).
+
+ * Some low-level protocol codepaths could crash when they get an
+   unexpected flush packet, which is now fixed.
+   (merge bb1356dc64 js/packet-read-line-check-null later to maint).
+
+ * "git check-ignore" with multiple paths got confused when one is a
+   file and the other is a directory, which has been fixed.
+   (merge d60771e930 rs/check-ignore-multi later to maint).
+
+ * "git describe $garbage" stopped giving any errors when the garbage
+   happens to be a string with 40 hexadecimal letters.
+   (merge a8e7a2bf0f sb/describe-blob later to maint).
+
+ * Code to unquote single-quoted string (used in the parser for
+   configuration files, etc.) did not diagnose bogus input correctly
+   and produced bogus results instead.
+   (merge ddbbf8eb25 jk/sq-dequote-on-bogus-input later to maint).
+
+ * Many places in "git apply" knew that "/dev/null" that signals
+   "there is no such file on this side of the diff" can be followed by
+   whitespace and garbage when parsing a patch, except for one, which
+   made an otherwise valid patch (e.g. ones from subversion) rejected.
+   (merge e454ad4bec tk/apply-dev-null-verify-name-fix later to maint).
+
+ * We no longer create any *.spec file, so "make clean" should not
+   remove it.
+   (merge 4321bdcabb tz/do-not-clean-spec-file later to maint).
+
+ * "git push" over http transport did not unquote the push-options
+   correctly.
+   (merge 90dce21eb0 jk/push-options-via-transport-fix later to maint).
+
+ * "git send-email" learned to complain when the batch-size option is
+   not defined when the relogin-delay option is, since these two are
+   mutually required.
+   (merge 9caa70697b xz/send-email-batch-size later to maint).
+
+ * Y2k20 fix ;-) for our perl scripts.
+   (merge a40e06ee33 bw/perl-timegm-timelocal-fix later to maint).
+
+ * Threaded "git grep" has been optimized to avoid allocation in code
+   section that is covered under a mutex.
+   (merge 38ef24dccf rv/grep-cleanup later to maint).
+
+ * "git subtree" script (in contrib/) scripted around "git log", whose
+   output got affected by end-user configuration like log.showsignature
+   (merge 8841b5222c sg/subtree-signed-commits later to maint).
+
+ * While finding unique object name abbreviation, the code may
+   accidentally have read beyond the end of the array of object names
+   in a pack.
+   (merge 21abed500c ds/find-unique-abbrev-optim later to maint).
+
+ * Micro optimization in revision traversal code.
+   (merge ebbed3ba04 ds/mark-parents-uninteresting-optim later to maint).
+
+ * "git commit" used to run "gc --auto" near the end, which was lost
+   when the command was reimplemented in C by mistake.
+   (merge 095c741edd ab/gc-auto-in-commit later to maint).
+
+ * Allow running a couple of tests with "sh -x".
+   (merge c20bf94abc sg/cvs-tests-with-x later to maint).
+
+ * The codepath to replace an existing entry in the index had a bug in
+   updating the name hash structure, which has been fixed.
+   (merge 0e267b7a24 bp/refresh-cache-ent-rehash-fix later to maint).
+
+ * The transfer.fsckobjects configuration tells "git fetch" to
+   validate the data and connected-ness of objects in the received
+   pack; the code to perform this check has been taught about the
+   narrow clone's convention that missing objects that are reachable
+   from objects in a pack that came from a promisor remote are OK.
+
+ * There was an unused file-scope static variable left in http.c when
+   building for versions of libCURL that are older than 7.19.4, which
+   has been fixed.
+   (merge b8fd6008ec rj/http-code-cleanup later to maint).
+
+ * Shell script portability fix.
+   (merge 206a6ae013 ml/filter-branch-portability-fix later to maint).
+
  * Other minor doc, test and build updates and code cleanups.
    (merge e2a5a028c7 bw/oidmap-autoinit later to maint).
-   (merge f0a6068a9f ys/bisect-object-id-missing-conversion-fix later to maint).
-   (merge 30221a3389 as/read-tree-prefix-doc-fix later to maint).
-   (merge 9bd2ce5432 ab/doc-cat-file-e-still-shows-errors later to maint).
    (merge ec3b4b06f8 cl/t9001-cleanup later to maint).
    (merge e1b3f3dd38 ks/submodule-doc-updates later to maint).
    (merge fbac558a9b rs/describe-unique-abbrev later to maint).
@@ -159,3 +365,34 @@ Fixes since v2.16
    (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint).
    (merge ef5b3a6c5e nd/shared-index-fix later to maint).
    (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint).
+   (merge b780e4407d jc/worktree-add-short-help later to maint).
+   (merge ae239fc8e5 rs/cocci-strbuf-addf-to-addstr later to maint).
+   (merge 2e22a85e5c nd/ignore-glob-doc-update later to maint).
+   (merge 3738031581 jk/gettext-poison later to maint).
+   (merge 54360a1956 rj/sparse-updates later to maint).
+   (merge 12e31a6b12 sg/doc-test-must-fail-args later to maint).
+   (merge 760f1ad101 bc/doc-interpret-trailers-grammofix later to maint).
+   (merge 4ccf461f56 bp/fsmonitor later to maint).
+   (merge a6119f82b1 jk/test-hashmap-updates later to maint).
+   (merge 5aea9fe6cc rd/typofix later to maint).
+   (merge e4e5da2796 sb/status-doc-fix later to maint).
+   (merge 7976e901c8 gs/test-unset-xdg-cache-home later to maint).
+   (merge d023df1ee6 tg/worktree-create-tracking later to maint).
+   (merge 4cbe92fd41 sm/mv-dry-run-update later to maint).
+   (merge 75e5e9c3f7 sb/color-h-cleanup later to maint).
+   (merge 2708ef4af6 sg/t6300-modernize later to maint).
+   (merge d88e92d4e0 bw/doc-submodule-recurse-config-with-clone later to maint).
+   (merge f74bbc8dd2 jk/cached-commit-buffer later to maint).
+   (merge 1316416903 ms/non-ascii-ticks later to maint).
+   (merge 878056005e rs/strbuf-read-file-or-whine later to maint).
+   (merge 79f0ba1547 jk/strbuf-read-file-close-error later to maint).
+   (merge edfb8ba068 ot/ref-filter-cleanup later to maint).
+   (merge 11395a3b4b jc/test-must-be-empty later to maint).
+   (merge 768b9d6db7 mk/doc-pretty-fill later to maint).
+   (merge 2caa7b8d27 ab/man-sec-list later to maint).
+   (merge 40c17eb184 ks/t3200-typofix later to maint).
+   (merge bd9958c358 dp/merge-strategy-doc-fix later to maint).
+   (merge 9ee0540a40 js/ming-strftime later to maint).
+   (merge 1775e990f7 tz/complete-tag-delete-tagname later to maint).
+   (merge 00a4b03501 rj/warning-uninitialized-fix later to maint).
+   (merge b635ed97a0 jk/attributes-path-doc later to maint).
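One item in the 2.17.0 notes above mentions that the code to binary search
starting from a fan-out table (how a pack .idx indexes object names) was
refactored into a reusable helper.  The sketch below illustrates the general
technique only, with invented names rather than the helper that was actually
added: the 256-entry fan-out narrows the search to entries whose first byte
matches the key, and an ordinary binary search finishes the lookup.

------------------------------------------------
#include <stdint.h>
#include <string.h>

/*
 * Sketch only -- not Git's actual helper.  fanout[i] holds the number
 * of entries whose first byte is <= i, so entries starting with byte b
 * occupy the index range [fanout[b-1], fanout[b]).
 */
int fanout_bsearch(const unsigned char key[20],
		   const uint32_t fanout[256],
		   const unsigned char (*names)[20],
		   uint32_t *pos)
{
	uint32_t lo = key[0] ? fanout[key[0] - 1] : 0;
	uint32_t hi = fanout[key[0]];

	while (lo < hi) {
		uint32_t mi = lo + (hi - lo) / 2;
		int cmp = memcmp(key, names[mi], 20);

		if (!cmp) {
			*pos = mi;
			return 1;       /* exact match at *pos */
		}
		if (cmp < 0)
			hi = mi;
		else
			lo = mi + 1;
	}
	*pos = lo;                      /* where the key would be inserted */
	return 0;
}
------------------------------------------------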
index f57e9cf10ca485ae0147ee1d01639c94ce97fb56..2659153cb377554bc5cf6fc4199233b2bab498a2 100644 (file)
@@ -1398,7 +1398,16 @@ fetch.unpackLimit::
 
 fetch.prune::
        If true, fetch will automatically behave as if the `--prune`
-       option was given on the command line.  See also `remote.<name>.prune`.
+       option was given on the command line.  See also `remote.<name>.prune`
+       and the PRUNING section of linkgit:git-fetch[1].
+
+fetch.pruneTags::
+       If true, fetch will automatically behave as if the
+       `refs/tags/*:refs/tags/*` refspec was provided when pruning,
+       if not set already. This allows for setting both this option
+       and `fetch.prune` to maintain a 1=1 mapping to upstream
+       refs. See also `remote.<name>.pruneTags` and the PRUNING
+       section of linkgit:git-fetch[1].
 
 fetch.output::
        Control how ref update status is printed. Valid values are
@@ -1948,6 +1957,7 @@ http.sslVersion::
        - tlsv1.0
        - tlsv1.1
        - tlsv1.2
+       - tlsv1.3
 
 +
 Can be overridden by the `GIT_SSL_VERSION` environment variable.
@@ -2945,6 +2955,15 @@ remote.<name>.prune::
        remote (as if the `--prune` option was given on the command line).
        Overrides `fetch.prune` settings, if any.
 
+remote.<name>.pruneTags::
+       When set to true, fetching from this remote by default will also
+       remove any local tags that no longer exist on the remote if pruning
+       is activated in general via `remote.<name>.prune`, `fetch.prune` or
+       `--prune`. Overrides `fetch.pruneTags` settings, if any.
++
+See also `remote.<name>.prune` and the PRUNING section of
+linkgit:git-fetch[1].
+
 remotes.<group>::
        The list of remotes which are fetched by "git remote update
        <group>".  See linkgit:git-remote[1].
@@ -3210,7 +3229,8 @@ submodule.active::
 
 submodule.recurse::
        Specifies if commands recurse into submodules by default. This
-       applies to all commands that have a `--recurse-submodules` option.
+       applies to all commands that have a `--recurse-submodules` option,
+       except `clone`.
        Defaults to false.
 
 submodule.fetchJobs::
@@ -3345,7 +3365,7 @@ uploadpack.packObjectsHook::
        stdout.
 
 uploadpack.allowFilter::
-       If this option is set, `upload-pack` will advertise partial
+       If this option is set, `upload-pack` will support partial
        clone and partial fetch object filtering.
 +
 Note that this configuration variable is ignored if it is seen in the
index c330c01ff096c9f2e66cbbea2557b6429b90e81a..e3a44f03cdcee92098287bfccc9801fde042ef2b 100644 (file)
@@ -128,6 +128,14 @@ have to use `--diff-algorithm=default` option.
 These parameters can also be set individually with `--stat-width=<width>`,
 `--stat-name-width=<name-width>` and `--stat-count=<count>`.
 
+--compact-summary::
+       Output a condensed summary of extended header information such
+       as file creations or deletions ("new" or "gone", optionally "+l"
+       if it's a symlink) and mode changes ("+x" or "-x" for adding
+       or removing the executable bit, respectively) in the diffstat. The
+       information is put between the filename part and the graph
+       part. Implies `--stat`.
+
 --numstat::
        Similar to `--stat`, but shows number of added and
        deleted lines in decimal notation and pathname without
index fb6bebbc618c3f71ab400ff4267264a0167a887a..8631e365f437fd85058bed3dbd0cebde15756ccc 100644 (file)
@@ -73,7 +73,22 @@ ifndef::git-pull[]
        are fetched due to an explicit refspec (either on the command
        line or in the remote configuration, for example if the remote
        was cloned with the --mirror option), then they are also
-       subject to pruning.
+       subject to pruning. Supplying `--prune-tags` is a shorthand for
+       providing the tag refspec.
++
+See the PRUNING section below for more details.
+
+-P::
+--prune-tags::
+       Before fetching, remove any local tags that no longer exist on
+       the remote if `--prune` is enabled. This option should be used
+       more carefully, unlike `--prune` it will remove any local
+       more carefully: unlike `--prune`, it will remove any local
+       references (local tags) that have been created. This option is
+       a shorthand for providing the explicit tag refspec along with
+       `--prune`; see the discussion about that in its documentation.
+See the PRUNING section below for more details.
+
 endif::git-pull[]
 
 ifndef::git-pull[]
index d50fa339dcc523158896fda8cdccd1a2784dfdd7..f3c81dfb11b5188e6944d2781fcb1f14442fea85 100644 (file)
@@ -332,10 +332,20 @@ patch::
        J - leave this hunk undecided, see next hunk
        k - leave this hunk undecided, see previous undecided hunk
        K - leave this hunk undecided, see previous hunk
+       l - select hunk lines to use
        s - split the current hunk into smaller hunks
        e - manually edit the current hunk
        ? - print help
 +
+If you press "l" then the hunk will be reprinted with each insertion or
+deletion labelled with a number and you will be prompted to enter which
+lines you wish to select. Individual line numbers should be separated by
+a space or comma (these can be omitted if there are fewer than ten
+labelled lines), to specify a range of lines use a dash between them. If
+the upper bound of a range of lines is omitted it defaults to the last
+line. To invert the selection prefix it with "-" so "-3-5,8" will select
+everything except lines 3, 4, 5 and 8.
++
 After deciding the fate for all hunks, if there is any hunk
 that was chosen, the index is updated with the selected hunks.
 +
index 12879e4029a7710e2d78bae476f7ad7d9b0fe830..6f6c34b0f4bc9ba18ec890dff1a6fe10af2fd68f 100644 (file)
@@ -16,7 +16,7 @@ SYNOPSIS
         [--exclude=<path>] [--include=<path>] [--reject] [-q | --quiet]
         [--[no-]scissors] [-S[<keyid>]] [--patch-format=<format>]
         [(<mbox> | <Maildir>)...]
-'git am' (--continue | --skip | --abort)
+'git am' (--continue | --skip | --abort | --quit | --show-current-patch)
 
 DESCRIPTION
 -----------
@@ -167,6 +167,14 @@ default.   You can use `--no-utf8` to override this.
 --abort::
        Restore the original branch and abort the patching operation.
 
+--quit::
+       Abort the patching operation but keep HEAD and the index
+       untouched.
+
+--show-current-patch::
+       Show the patch being applied when "git am" is stopped because
+       of conflicts.
+
 DISCUSSION
 ----------
 
index b3084c99c1cabdccc690e4cec5071b22774bf6c8..b959df1cbf6622ebe206ac792555b158de8c1b00 100644 (file)
@@ -91,7 +91,6 @@ OPTIONS
 -D::
        Shortcut for `--delete --force`.
 
--l::
 --create-reflog::
        Create the branch's reflog.  This activates recording of
        all changes made to the branch ref, enabling use of date
@@ -101,6 +100,8 @@ OPTIONS
        The negated form `--no-create-reflog` only overrides an earlier
        `--create-reflog`, but currently does not negate the setting of
        `core.logAllRefUpdates`.
++
+The `-l` option is a deprecated synonym for `--create-reflog`.
 
 -f::
 --force::
index 14da5fc157ee0ea7467e7b9bd9d33d465c1f7a3f..e09ed5d7d5147d93039c479efc8ab450bf5ca8b4 100644 (file)
@@ -233,6 +233,12 @@ See also <<FILES>>.
        using `--file`, `--global`, etc) and `on` when searching all
        config files.
 
+CONFIGURATION
+-------------
+`pager.config` is only respected when listing configuration, i.e., when
+using `--list` or any of the `--get-*` which may return multiple results.
+The default is to use a pager.
+
 [[FILES]]
 FILES
 -----
index 3c91db7bed038f7ba28a4e7554cc6e63c5d91958..56d54a489875652e754f7cd16ee5a77c2f5e5202 100644 (file)
@@ -20,6 +20,7 @@ SYNOPSIS
             [--inetd |
              [--listen=<host_or_ipaddr>] [--port=<n>]
              [--user=<user> [--group=<group>]]]
+            [--log-destination=(stderr|syslog|none)]
             [<directory>...]
 
 DESCRIPTION
@@ -80,7 +81,8 @@ OPTIONS
        do not have the 'git-daemon-export-ok' file.
 
 --inetd::
-       Have the server run as an inetd service. Implies --syslog.
+       Have the server run as an inetd service. Implies --syslog (may be
+       overridden with `--log-destination=`).
        Incompatible with --detach, --port, --listen, --user and --group
        options.
 
@@ -110,8 +112,28 @@ OPTIONS
        zero for no limit.
 
 --syslog::
-       Log to syslog instead of stderr. Note that this option does not imply
-       --verbose, thus by default only error conditions will be logged.
+       Short for `--log-destination=syslog`.
+
+--log-destination=<destination>::
+       Send log messages to the specified destination.
+       Note that this option does not imply --verbose,
+       thus by default only error conditions will be logged.
+       The <destination> must be one of:
++
+--
+stderr::
+       Write to standard error.
+       Note that if `--detach` is specified,
+       the process disconnects from the real standard error,
+       making this destination effectively equivalent to `none`.
+syslog::
+       Write to syslog, using the `git-daemon` identifier.
+none::
+       Disable all logging.
+--
++
+The default destination is `syslog` if `--inetd` or `--detach` is specified,
+otherwise `stderr`.
 
 --user-path::
 --user-path=<path>::
index b153aefa68c8dcaa5f3600d67c5ff0010ee899af..e3199355978880b21668bc1aff0e42f610c02bd6 100644 (file)
@@ -99,6 +99,93 @@ The latter use of the `remote.<repository>.fetch` values can be
 overridden by giving the `--refmap=<refspec>` parameter(s) on the
 command line.
 
+PRUNING
+-------
+
+Git has a default disposition of keeping data unless it's explicitly
+thrown away; this extends to holding onto local references to branches
+on remotes that have themselves deleted those branches.
+
+If left to accumulate, these stale references might make performance
+worse on big and busy repos that have a lot of branch churn, and
+e.g. make the output of commands like `git branch -a --contains
+<commit>` needlessly verbose, as well as impacting anything else
+that'll work with the complete set of known references.
+
+These remote-tracking references can be deleted as a one-off with
+either of:
+
+------------------------------------------------
+# While fetching
+$ git fetch --prune <name>
+
+# Only prune, don't fetch
+$ git remote prune <name>
+------------------------------------------------
+
+To prune references as part of your normal workflow without needing to
+remember to run that, set `fetch.prune` globally, or
+`remote.<name>.prune` per-remote in the config. See
+linkgit:git-config[1].
+
+Here's where things get tricky and more specific. The pruning feature
+doesn't actually care about branches; instead, it will prune local <->
+remote-references as a function of the refspec of the remote (see
+`<refspec>` and <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> above).
+
+Therefore if the refspec for the remote includes
+e.g. `refs/tags/*:refs/tags/*`, or you manually run e.g. `git fetch
+--prune <name> "refs/tags/*:refs/tags/*"` it won't be stale remote
+tracking branches that are deleted, but any local tag that doesn't
+exist on the remote.
+
+This might not be what you expect, i.e. you want to prune remote
+`<name>`, but also explicitly fetch tags from it, so when you fetch
+from it you delete all your local tags, most of which may not have
+come from the `<name>` remote in the first place.
+
+So be careful when using this with a refspec like
+`refs/tags/*:refs/tags/*`, or any other refspec which might map
+references from multiple remotes to the same local namespace.
+
+Since keeping up-to-date with both branches and tags on the remote is
+a common use-case the `--prune-tags` option can be supplied along with
+`--prune` to prune local tags that don't exist on the remote, and
+force-update those tags that differ. Tag pruning can also be enabled
+with `fetch.pruneTags` or `remote.<name>.pruneTags` in the config. See
+linkgit:git-config[1].
+
+The `--prune-tags` option is equivalent to having
+`refs/tags/*:refs/tags/*` declared in the refspecs of the remote. This
+can lead to some seemingly strange interactions:
+
+------------------------------------------------
+# These both fetch tags
+$ git fetch --no-tags origin 'refs/tags/*:refs/tags/*'
+$ git fetch --no-tags --prune-tags origin
+------------------------------------------------
+
+The reason it doesn't error out when provided without `--prune` or its
+config versions is for flexibility of the configured versions, and to
+maintain a 1=1 mapping between what the command line flags do, and
+what the configuration versions do.
+
+It's reasonable to e.g. configure `fetch.pruneTags=true` in
+`~/.gitconfig` to have tags pruned whenever `git fetch --prune` is
+run, without making every invocation of `git fetch` without `--prune`
+an error.
+
+Pruning tags with `--prune-tags` also works when fetching a URL
+instead of a named remote. These will all prune tags not found on
+origin:
+
+------------------------------------------------
+$ git fetch origin --prune --prune-tags
+$ git fetch origin --prune 'refs/tags/*:refs/tags/*'
+$ git fetch <url of origin> --prune --prune-tags
+$ git fetch <url of origin> --prune 'refs/tags/*:refs/tags/*'
+------------------------------------------------
+
 OUTPUT
 ------
 
index 3a52e4dce39eeaf6eba896ccbf9e0505cebb3ec9..b634043183b453a89b3f56e0544503b06ccdbdec 100644 (file)
@@ -222,6 +222,14 @@ this purpose, they are instead rewritten to point at the nearest ancestor that
 was not excluded.
 
 
+EXIT STATUS
+-----------
+
+On success, the exit status is `0`.  If the filter can't find any commits to
+rewrite, the exit status is `2`.  On any other error, the exit status may be
+any other non-zero value.
+
+
 Examples
 --------
 
index 571b5a7e3c9dbc11aafc194b6e08dbbed5b2f7d3..3126e0dd002eca7ac420932bb9d1ace63752e8dc 100644 (file)
@@ -15,8 +15,9 @@ DESCRIPTION
 -----------
 Runs a number of housekeeping tasks within the current repository,
 such as compressing file revisions (to reduce disk space and increase
-performance) and removing unreachable objects which may have been
-created from prior invocations of 'git add'.
+performance), removing unreachable objects which may have been
+created from prior invocations of 'git add', packing refs, pruning
+reflog, rerere metadata or stale working trees.
 
 Users are encouraged to run this task on a regular basis within
 each repository to maintain good disk space utilization and good
@@ -45,20 +46,25 @@ OPTIONS
        With this option, 'git gc' checks whether any housekeeping is
        required; if not, it exits without performing any work.
        Some git commands run `git gc --auto` after performing
-       operations that could create many loose objects.
+       operations that could create many loose objects. Housekeeping
+       is required if there are too many loose objects or too many
+       packs in the repository.
 +
-Housekeeping is required if there are too many loose objects or
-too many packs in the repository. If the number of loose objects
-exceeds the value of the `gc.auto` configuration variable, then
-all loose objects are combined into a single pack using
-`git repack -d -l`.  Setting the value of `gc.auto` to 0
-disables automatic packing of loose objects.
+If the number of loose objects exceeds the value of the `gc.auto`
+configuration variable, then all loose objects are combined into a
+single pack using `git repack -d -l`.  Setting the value of `gc.auto`
+to 0 disables automatic packing of loose objects.
 +
 If the number of packs exceeds the value of `gc.autoPackLimit`,
 then existing packs (except those marked with a `.keep` file)
 are consolidated into a single pack by using the `-A` option of
 'git repack'. Setting `gc.autoPackLimit` to 0 disables
 automatic consolidation of packs.
++
+If housekeeping is required due to many loose objects or packs, all
+other housekeeping tasks (e.g. rerere, working trees, reflog...) will
+be performed as well.
+
 
 --prune=<date>::
        Prune loose objects older than date (default is 2 weeks ago,
@@ -133,6 +139,10 @@ The optional configuration variable `gc.pruneExpire` controls how old
 the unreferenced loose objects have to be before they are pruned.  The
 default is "2 weeks ago".
 
+Optional configuration variable `gc.worktreePruneExpire` controls how
+old a stale working tree should be before `git worktree prune` deletes
+it. Default is "3 months ago".
+
 
 Notes
 -----
index 1b4b65d6657b003079e0407d1da8d8c239933267..138edb47b6a17ab925ef3206c1aec6336ff380fe 100644 (file)
@@ -77,6 +77,9 @@ OPTIONS
 --check-self-contained-and-connected::
        Die if the pack contains broken links. For internal use only.
 
+--fsck-objects::
+       Die if the pack contains broken objects. For internal use only.
+
 --threads=<n>::
        Specifies the number of threads to spawn when resolving
        deltas. This requires that index-pack be compiled with
index 9dd19a1dd9f126fbf54f1dc628b30fbe05445cf3..ff446f15f79f270a8979d1a398450a50ba4a4e95 100644 (file)
@@ -51,7 +51,7 @@ with only spaces at the end of the commit message part, one blank line
 will be added before the new trailer.
 
 Existing trailers are extracted from the input message by looking for
-a group of one or more lines that (i) are all trailers, or (ii) contains at
+a group of one or more lines that (i) is all trailers, or (ii) contains at
 least one Git-generated or user-configured trailer and consists of at
 least 25% trailers.
 The group must be preceded by one or more empty (or whitespace-only) lines.
index 8a861c1e0d69eda71fa2eec791e67c136b63b177..3277ca143273e01f5f4973ed351c8a5cb4b8e0fa 100644 (file)
@@ -12,7 +12,7 @@ SYNOPSIS
        [<upstream> [<branch>]]
 'git rebase' [-i | --interactive] [options] [--exec <cmd>] [--onto <newbase>]
        --root [<branch>]
-'git rebase' --continue | --skip | --abort | --quit | --edit-todo
+'git rebase' --continue | --skip | --abort | --quit | --edit-todo | --show-current-patch
 
 DESCRIPTION
 -----------
@@ -244,12 +244,22 @@ leave out at most one of A and B, in which case it defaults to HEAD.
        Keep the commits that do not change anything from its
        parents in the result.
 
+--allow-empty-message::
+       By default, rebasing commits with an empty message will fail.
+       This option overrides that behavior, allowing commits with empty
+       messages to be rebased.
+
 --skip::
        Restart the rebasing process by skipping the current patch.
 
 --edit-todo::
        Edit the todo list during an interactive rebase.
 
+--show-current-patch::
+       Show the current patch in an interactive rebase or when rebase
+       is stopped because of conflicts. This is the equivalent of
+       `git show REBASE_HEAD`.
+
 -m::
 --merge::
        Use merging strategies to rebase.  When the recursive (default) merge
index 577b969c1bda2bfd95fd28e3ffc4bbbc96c5d16c..4feddc0293bd7eb9827ce45f11519335a484f013 100644 (file)
@@ -172,10 +172,14 @@ With `-n` option, the remote heads are not queried first with
 
 'prune'::
 
-Deletes all stale remote-tracking branches under <name>.
-These stale branches have already been removed from the remote repository
-referenced by <name>, but are still locally available in
-"remotes/<name>".
+Deletes stale references associated with <name>. By default, stale
+remote-tracking branches under <name> are deleted, but depending on
+global configuration and the configuration of the remote we might even
+prune local tags that haven't been pushed there. Equivalent to `git
+fetch --prune <name>`, except that no new references will be fetched.
++
+See the PRUNING section of linkgit:git-fetch[1] for what it will prune,
+depending on various configuration settings.
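++
+For example, assuming a remote named `origin`:
++
+------------
+$ git remote prune origin
+------------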
 +
 With `--dry-run` option, report what branches will be pruned, but do not
 actually prune them.
@@ -189,7 +193,7 @@ remotes.default is not defined, all remotes which do not have the
 configuration parameter remote.<name>.skipDefaultUpdate set to true will
 be updated.  (See linkgit:git-config[1]).
 +
-With `--prune` option, prune all the remotes that are updated.
+With `--prune` option, run pruning against all the remotes that are updated.
 
 
 DISCUSSION
index 8060ea35c5f932c1e4328f000a501ff399418f27..71ef97ba9b22aad996049bd0c1500a835a433350 100644 (file)
@@ -84,6 +84,11 @@ See the CONFIGURATION section for `sendemail.multiEdit`.
        the value of GIT_AUTHOR_IDENT, or GIT_COMMITTER_IDENT if that is not
        set, as returned by "git var -l".
 
+--reply-to=<address>::
+       Specify the address to which replies from recipients should go.
+       Use this if replies should go to an address other than the one
+       specified with the `--from` parameter.
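++
+For example (addresses and the patch file name are hypothetical):
++
+------------
+$ git send-email --reply-to=list@example.org --to=maintainer@example.org \
+       0001-some-fix.patch
+------------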
+
 --in-reply-to=<identifier>::
        Make the first mail (or all the mails with `--no-thread`) appear as a
        reply to the given Message-Id, which avoids breaking threads to
index ee6c5476c1d2bf3b2a708e6152ebaba5882cc4f7..5e35ea18acd790469735400644358af9843b6c99 100644 (file)
@@ -8,8 +8,8 @@ git-shortlog - Summarize 'git log' output
 SYNOPSIS
 --------
 [verse]
-git log --pretty=short | 'git shortlog' [<options>]
 'git shortlog' [<options>] [<revision range>] [[\--] <path>...]
+git log --pretty=short | 'git shortlog' [<options>]
 
 DESCRIPTION
 -----------
index 056dfb86612d0c5be0d01f16ac6b126fac80b92c..7ef8c4791177b2b54c61573c044659fbbacea9b8 100644 (file)
@@ -14,7 +14,7 @@ SYNOPSIS
 'git stash' ( pop | apply ) [--index] [-q|--quiet] [<stash>]
 'git stash' branch <branchname> [<stash>]
 'git stash' [push [-p|--patch] [-k|--[no-]keep-index] [-q|--quiet]
-            [-u|--include-untracked] [-a|--all] [-m|--message <message>]]
+            [-u|--include-untracked] [-a|--all] [-m|--message <message>]
             [--] [<pathspec>...]]
 'git stash' clear
 'git stash' create [<message>]
index 72bfb87f6658b209e9b45956e73e492e957971b7..6c230c0c7200412b988d233352e3411a9fb813a8 100644 (file)
@@ -130,6 +130,11 @@ ignored, then the directory is not shown, but all contents are shown.
        without options are equivalent to 'always' and 'never'
        respectively.
 
+--ahead-behind::
+--no-ahead-behind::
+       Display or do not display detailed ahead/behind counts for the
+       branch relative to its upstream branch.  Defaults to true.
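++
+For example, to omit the detailed counts for a single invocation:
++
+------------
+$ git status --no-ahead-behind
+------------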
+
 <pathspec>...::
        See the 'pathspec' entry in linkgit:gitglossary[7].
 
@@ -184,10 +189,10 @@ in which case `XY` are `!!`.
 
     X          Y     Meaning
     -------------------------------------------------
-              [MD]   not updated
+            [AMD]   not updated
     M        [ MD]   updated in index
     A        [ MD]   added to index
-    D         [ M]   deleted from index
+    D                deleted from index
     R        [ MD]   renamed in index
     C        [ MD]   copied in index
     [MARC]           index and work tree matches
index 956fc019f984bca1754a72dc0d7308b39a28445d..1d17101bac39d64cc3d4aff89cd588955dd00735 100644 (file)
@@ -9,7 +9,7 @@ git-tag - Create, list, delete or verify a tag object signed with GPG
 SYNOPSIS
 --------
 [verse]
-'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>]
+'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e]
        <tagname> [<commit> | <object>]
 'git tag' -d <tagname>...
 'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
@@ -167,6 +167,12 @@ This option is only applicable when listing tags without annotation lines.
        Implies `-a` if none of `-a`, `-s`, or `-u <keyid>`
        is given.
 
+-e::
+--edit::
+       The message taken from a file with `-F` or from the command line
+       with `-m` is usually used as the tag message unmodified.
+       This option lets you further edit the message taken from these sources.
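++
+For example (the tag name and message file are hypothetical):
++
+------------
+$ git tag -a -F release-notes.txt --edit v1.0.0
+------------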
+
 --cleanup=<mode>::
        This option sets how the tag message is cleaned up.
        The  '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'.  The
index bdb0342593229e32425bf73d17ad6fcdd0dd3732..3897a59ee94bc424c2c66cb5d05bc0193fb6eaf2 100644 (file)
@@ -464,6 +464,32 @@ command reads the index; while when `--[no-|force-]untracked-cache`
 are used, the untracked cache is immediately added to or removed from
 the index.
 
+Before 2.17, the untracked cache had a bug where replacing a directory
+with a symlink to another directory could cause it to incorrectly show
+files tracked by git as untracked. See the "status: add a failing test
+showing a core.untrackedCache bug" commit to git.git. A workaround
+(which might also help with other undiscovered bugs in the future)
+is:
+
+----------------
+$ git -c core.untrackedCache=false status
+----------------
+
+This bug has also been shown to affect non-symlink cases of replacing
+a directory with a file when it comes to the internal structures of
+the untracked cache, but no case has been reported where this resulted in
+wrong "git status" output.
+
+There are also cases where existing indexes written by git versions
+before 2.17 will reference directories that don't exist anymore,
+potentially causing many "could not open directory" warnings to be
+printed on "git status". These are new warnings for existing issues
+that were previously silently discarded.
+
+As with the bug described above, the solution is to do a one-off "git
+status" run with `core.untrackedCache=false` to flush out the leftover
+bad data.
+
 File System Monitor
 -------------------
 
@@ -484,8 +510,8 @@ the `core.fsmonitor` configuration variable (see
 linkgit:git-config[1]) than using the `--fsmonitor` option to
 `git update-index` in each repository, especially if you want to do so
 across all repositories you use, because you can set the configuration
-variable to `true` (or `false`) in your `$HOME/.gitconfig` just once
-and have it affect all repositories you touch.
+variable in your `$HOME/.gitconfig` just once and have it affect all
+repositories you touch.
 
 When the `core.fsmonitor` configuration variable is changed, the
 file system monitor is added to or removed from the index the next time
index 41585f535dba760590f2d67ecdc96cc8dea3a57f..e7eb24ab8528e39aa4e0a75f6feaa46d9cfb39fe 100644 (file)
@@ -12,7 +12,9 @@ SYNOPSIS
 'git worktree add' [-f] [--detach] [--checkout] [--lock] [-b <new-branch>] <path> [<commit-ish>]
 'git worktree list' [--porcelain]
 'git worktree lock' [--reason <string>] <worktree>
+'git worktree move' <worktree> <new-path>
 'git worktree prune' [-n] [-v] [--expire <expire>]
+'git worktree remove' [--force] <worktree>
 'git worktree unlock' <worktree>
 
 DESCRIPTION
@@ -34,10 +36,6 @@ The working tree's administrative files in the repository (see
 `git worktree prune` in the main or any linked working tree to
 clean up any stale administrative files.
 
-If you move a linked working tree, you need to manually update the
-administrative files so that they do not get pruned automatically. See
-section "DETAILS" for more information.
-
 If a linked working tree is stored on a portable device or network share
 which is not always mounted, you can prevent its administrative files from
 being pruned by issuing the `git worktree lock` command, optionally
@@ -52,10 +50,11 @@ is linked to the current repository, sharing everything except working
 directory specific files such as HEAD, index, etc. `-` may also be
 specified as `<commit-ish>`; it is synonymous with `@{-1}`.
 +
-If <commit-ish> is a branch name (call it `<branch>` and is not found,
+If <commit-ish> is a branch name (call it `<branch>`) and is not found,
 and neither `-b` nor `-B` nor `--detach` are used, but there does
 exist a tracking branch in exactly one remote (call it `<remote>`)
-with a matching name, treat as equivalent to
+with a matching name, treat as equivalent to:
++
 ------------
 $ git worktree add --track -b <branch> <path> <remote>/<branch>
 ------------
@@ -79,10 +78,22 @@ files from being pruned automatically. This also prevents it from
 being moved or deleted. Optionally, specify a reason for the lock
 with `--reason`.
 
+move::
+
+Move a working tree to a new location. Note that the main working tree
+or linked working trees containing submodules cannot be moved.
+
 prune::
 
 Prune working tree information in $GIT_DIR/worktrees.
 
+remove::
+
+Remove a working tree. Only clean working trees (no untracked files
+and no modification in tracked files) can be removed. Unclean working
+trees or ones with submodules can be removed with `--force`. The main
+working tree cannot be removed.
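++
+For example (paths are hypothetical), to relocate a linked working tree
+and later remove it once it is no longer needed:
++
+------------
+$ git worktree move ../hotfix ../hotfix-v2
+$ git worktree remove ../hotfix-v2
+------------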
+
 unlock::
 
 Unlock a working tree, allowing it to be pruned, moved or deleted.
@@ -92,9 +103,10 @@ OPTIONS
 
 -f::
 --force::
-       By default, `add` refuses to create a new working tree when `<commit-ish>` is a branch name and
-       is already checked out by another working tree. This option overrides
-       that safeguard.
+       By default, `add` refuses to create a new working tree when
+       `<commit-ish>` is a branch name and is already checked out by
+       another working tree, and `remove` refuses to remove an unclean
+       working tree. This option overrides these safeguards.
 
 -b <new-branch>::
 -B <new-branch>::
@@ -196,7 +208,7 @@ thumb is do not make any assumption about whether a path belongs to
 $GIT_DIR or $GIT_COMMON_DIR when you need to directly access something
 inside $GIT_DIR. Use `git rev-parse --git-path` to get the final path.
 
-If you move a linked working tree, you need to update the 'gitdir' file
+If you manually move a linked working tree, you need to update the 'gitdir' file
 in the entry's directory. For example, if a linked working tree is moved
 to `/newpath/test-next` and its `.git` file points to
 `/path/main/.git/worktrees/test-next`, then update
@@ -276,13 +288,6 @@ Multiple checkout in general is still experimental, and the support
 for submodules is incomplete. It is NOT recommended to make multiple
 checkouts of a superproject.
 
-git-worktree could provide more automation for tasks currently
-performed manually, such as:
-
-- `remove` to remove a linked working tree and its administrative files (and
-  warn if the working tree is dirty)
-- `mv` to move or rename a working tree and update its administrative files
-
 GIT
 ---
 Part of the linkgit:git[1] suite
index 8163b5796b192ee5f5c0cb13fc85bdeff8202e64..4767860e72f46d4e4df883f2fdbb4a46eb8e8eda 100644 (file)
@@ -849,6 +849,9 @@ Report bugs to the Git mailing list <git@vger.kernel.org> where the
 development and maintenance is primarily done.  You do not have to be
 subscribed to the list to send a message there.
 
+Issues which are security relevant should be disclosed privately to
+the Git Security mailing list <git-security@googlegroups.com>.
+
 SEE ALSO
 --------
 linkgit:gittutorial[7], linkgit:gittutorial-2[7],
index c21f5ca109b7ab7fede09e5b698e631daf585f16..1094fe2b5b0cc97030dc6694364f473999a9f4d3 100644 (file)
@@ -56,9 +56,16 @@ Unspecified::
 
 When more than one pattern matches the path, a later line
 overrides an earlier line.  This overriding is done per
-attribute.  The rules how the pattern matches paths are the
-same as in `.gitignore` files; see linkgit:gitignore[5].
-Unlike `.gitignore`, negative patterns are forbidden.
+attribute.
+
+The rules by which the pattern matches paths are the same as in
+`.gitignore` files (see linkgit:gitignore[5]), with a few exceptions:
+
+  - negative patterns are forbidden
+
+  - patterns that match a directory do not recursively match paths
+    inside that directory (so using the trailing-slash `path/` syntax is
+    pointless in an attributes file; use `path/**` instead)
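+
+For example (a hypothetical `.gitattributes` fragment), to mark every
+file under an `assets/` directory as binary you would write:
+
+------------
+assets/**      binary
+------------
+
+whereas `assets/ binary` would not have the intended effect, for the
+reason given above.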
 
 When deciding what attributes are assigned to a path, Git
 consults `$GIT_DIR/info/attributes` file (which has the highest
@@ -714,6 +721,8 @@ patterns are available:
 
 - `fountain` suitable for Fountain documents.
 
+- `golang` suitable for source code in the Go language.
+
 - `html` suitable for HTML/XHTML documents.
 
 - `java` suitable for source code in the Java language.
index 63260f0056491308cb35677917530a987c21c8a2..ff5d7f9ed6f089646807f93d59e35a10763e46c2 100644 (file)
@@ -102,12 +102,11 @@ PATTERN FORMAT
    (relative to the toplevel of the work tree if not from a
    `.gitignore` file).
 
- - Otherwise, Git treats the pattern as a shell glob suitable
-   for consumption by fnmatch(3) with the FNM_PATHNAME flag:
-   wildcards in the pattern will not match a / in the pathname.
-   For example, "Documentation/{asterisk}.html" matches
-   "Documentation/git.html" but not "Documentation/ppc/ppc.html"
-   or "tools/perf/Documentation/perf.html".
+ - Otherwise, Git treats the pattern as a shell glob: "`*`" matches
+   anything except "`/`", "`?`" matches any one character except "`/`"
+   and "`[]`" matches one character in a selected range. See
+   fnmatch(3) and the FNM_PATHNAME flag for a more detailed
+   description.
 
  - A leading slash matches the beginning of the pathname.
    For example, "/{asterisk}.c" matches "cat-file.c" but not
index 4b8c93ec59de3db02b9914aed4955d486be5f875..9d1459aac6d0b12ad1a87ff25a158dca0f2bf470 100644 (file)
@@ -102,6 +102,14 @@ Capabilities for Pushing
 +
 Supported commands: 'connect'.
 
+'stateless-connect'::
+       Experimental; for internal use only.
+       Can attempt to connect to a remote server for communication
+       using git's wire-protocol version 2.  See the documentation
+       for the stateless-connect command for more information.
++
+Supported commands: 'stateless-connect'.
+
 'push'::
        Can discover remote refs and push local commits and the
        history leading up to them to new or existing remote refs.
@@ -136,6 +144,14 @@ Capabilities for Fetching
 +
 Supported commands: 'connect'.
 
+'stateless-connect'::
+       Experimental; for internal use only.
+       Can attempt to connect to a remote server for communication
+       using git's wire-protocol version 2.  See the documentation
+       for the stateless-connect command for more information.
++
+Supported commands: 'stateless-connect'.
+
 'fetch'::
        Can discover remote refs and transfer objects reachable from
        them to the local object store.
@@ -375,6 +391,22 @@ Supported if the helper has the "export" capability.
 +
 Supported if the helper has the "connect" capability.
 
+'stateless-connect' <service>::
+       Experimental; for internal use only.
+       Connects to the given remote service for communication using
+       git's wire-protocol version 2.  Valid replies to this command
+       are empty line (connection established), 'fallback' (no smart
+       transport support, fall back to dumb transports) and just
+       exiting with error message printed (can't connect, don't bother
+       trying to fall back).  After line feed terminating the positive
+       (empty) response, the output of the service starts.  Messages
+       (both request and response) must consist of zero or more
+       PKT-LINEs, terminating in a flush packet. The client must not
+       expect the server to store any state in between request-response
+       pairs.  After the connection ends, the remote helper exits.
++
+Supported if the helper has the "stateless-connect" capability.
+
 If a fatal error occurs, the program writes the error message to
 stderr and exits. The caller should expect that a suitable error
 message has been printed if the child closes the connection without
index c60bcad44aa581b2449a7a638b6487c1c73e8c23..e85148f05eb79a968ad84bac6ce7a88289270c49 100644 (file)
@@ -275,11 +275,6 @@ worktrees/<id>/locked::
        or manually by `git worktree prune`. The file may contain a string
        explaining why the repository is locked.
 
-worktrees/<id>/link::
-       If this file exists, it is a hard link to the linked .git
-       file. It is used to detect if the linked repository is
-       manually removed.
-
 SEE ALSO
 --------
 linkgit:git-init[1],
index 4d6c17782f1d4c2f65cddefd899b9a93bace6142..3b9faabdbb9cbc45893dff67fd40b0ed7552f287 100644 (file)
@@ -24,7 +24,7 @@ On the filesystem, a submodule usually (but not always - see FORMS below)
 consists of (i) a Git directory located under the `$GIT_DIR/modules/`
 directory of its superproject, (ii) a working directory inside the
 superproject's working directory, and a `.git` file at the root of
-the submodules working directory pointing to (i).
+the submodule's working directory pointing to (i).
 
 Assuming the submodule has a Git directory at `$GIT_DIR/modules/foo/`
 and a working directory at `path/to/bar/`, the superproject tracks the
@@ -33,7 +33,7 @@ in its `.gitmodules` file (see linkgit:gitmodules[5]) of the form
 `submodule.foo.path = path/to/bar`.
 
 The `gitlink` entry contains the object name of the commit that the
-superproject expects the submodules working directory to be at.
+superproject expects the submodule's working directory to be at.
 
 The section `submodule.foo.*` in the `.gitmodules` file gives additional
 hints to Git's porcelain layer. For example, the `submodule.foo.url`
@@ -136,27 +136,27 @@ using older versions of Git.
 +
 It is possible to construct these old form repositories manually.
 +
-When deinitialized or deleted (see below), the submodules Git
+When deinitialized or deleted (see below), the submodule's Git
 directory is automatically moved to `$GIT_DIR/modules/<name>/`
 of the superproject.
 
  * Deinitialized submodule: A `gitlink`, and a `.gitmodules` entry,
-but no submodule working directory. The submodules Git directory
+but no submodule working directory. The submodule's Git directory
 may be there as after deinitializing the Git directory is kept around.
 The directory which is supposed to be the working directory is empty instead.
 +
 A submodule can be deinitialized by running `git submodule deinit`.
 Besides emptying the working directory, this command only modifies
-the superproject’s `$GIT_DIR/config` file, so the superproject’s history
+the superproject's `$GIT_DIR/config` file, so the superproject's history
 is not affected. This can be undone using `git submodule init`.
 
  * Deleted submodule: A submodule can be deleted by running
 `git rm <submodule path> && git commit`. This can be undone
 using `git revert`.
 +
-The deletion removes the superprojects tracking data, which are
+The deletion removes the superproject's tracking data, which are
 both the `gitlink` entry and the section in the `.gitmodules` file.
-The submodules working directory is removed from the file
+The submodule's working directory is removed from the file
 system, but the Git directory is kept around as it to make it
 possible to checkout past commits without requiring fetching
 from another repository.
index 9c4cd0915fe3f3e14879184f3a474e559130bdef..8994e2559eac0c5746ca898b0bad1946a7b83298 100644 (file)
@@ -80,7 +80,7 @@ valid pack like:
     # now add our object data
     cat object >>tmp.pack
     # and then append the pack trailer
-    /path/to/git.git/test-sha1 -b <tmp.pack >trailer
+    /path/to/git.git/t/helper/test-tool sha1 -b <tmp.pack >trailer
     cat trailer >>tmp.pack
 ------------
 
index 3888c3ff85e2dc5b137e4e3ed50e39327760a02a..63a3fc09548abe8d34faab98f183e1817b21b878 100644 (file)
@@ -35,7 +35,8 @@ set to `no` at the beginning of them.
 --no-ff::
        Create a merge commit even when the merge resolves as a
        fast-forward.  This is the default behaviour when merging an
-       annotated (and possibly signed) tag.
+       annotated (and possibly signed) tag that is not stored in
+       its natural place in the 'refs/tags/' hierarchy.
 
 --ff-only::
        Refuse to merge and exit with a non-zero status unless the
index fd5d748d1b508c9cef1063227b5c478952d3bc7e..4a58aad4b83b9e365b57a8afa71f1ab939c47aee 100644 (file)
@@ -40,7 +40,7 @@ the other tree did, declaring 'our' history contains all that happened in it.
 
 theirs;;
        This is the opposite of 'ours'; note that, unlike 'ours', there is
-       no 'theirs' merge stragegy to confuse this merge option with.
+       no 'theirs' merge strategy to confuse this merge option with.
 
 patience;;
        With this option, 'merge-recursive' spends a little extra time
index e664c088a5e6a4bf1d09425ff37e36fe11d9f2f3..6109ef09aa2eeba564023cced75728ffaa1c4f96 100644 (file)
@@ -202,7 +202,7 @@ endif::git-rev-list[]
 - '%>>(<N>)', '%>>|(<N>)': similar to '%>(<N>)', '%>|(<N>)'
   respectively, except that if the next placeholder takes more spaces
   than given and there are spaces on its left, use those spaces
-- '%><(<N>)', '%><|(<N>)': similar to '% <(<N>)', '%<|(<N>)'
+- '%><(<N>)', '%><|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
   respectively, but padding both sides (i.e. the text is centered)
 - %(trailers[:options]): display the trailers of the body as interpreted
   by linkgit:git-interpret-trailers[1]. The `trailers` string may be
index 03bb0e950dd1616b00f950f83263835c57bfa70a..a1162e5bcd19ba509fff39bceca49e1b33e2add2 100644 (file)
@@ -7,7 +7,7 @@ Talk about <sha1_file.c> and <object.h> family, things like
 * read_object_with_reference()
 * has_sha1_file()
 * write_sha1_file()
-* pretend_sha1_file()
+* pretend_object_file()
 * lookup_{object,commit,tag,blob,tree}
 * parse_{object,commit,tag,blob,tree}
 * Use of object flags
index 3dce003fda008e61c7a91c85554f9a1cafe6106f..ee907c4a82a9127c0abc67b8a5dd215a3b0535d4 100644 (file)
@@ -4,7 +4,7 @@ submodule config cache API
 The submodule config cache API allows to read submodule
 configurations/information from specified revisions. Internally
 information is lazily read into a cache that is used to avoid
-unnecessary parsing of the same .gitmodule files. Lookups can be done by
+unnecessary parsing of the same .gitmodules files. Lookups can be done by
 submodule path or name.
 
 Usage
index 417ba491d0f3fa06b60a47623a5f756919adcbd6..4ab6cd1012abae711acf02e6a76ee93eae86a1ad 100644 (file)
@@ -28,11 +28,30 @@ advantages:
   address stored content.
 
 Over time some flaws in SHA-1 have been discovered by security
-researchers. https://shattered.io demonstrated a practical SHA-1 hash
-collision. As a result, SHA-1 cannot be considered cryptographically
-secure any more. This impacts the communication of hash values because
-we cannot trust that a given hash value represents the known good
-version of content that the speaker intended.
+researchers. On 23 February 2017 the SHAttered attack
+(https://shattered.io) demonstrated a practical SHA-1 hash collision.
+
+Git v2.13.0 and later subsequently moved to a hardened SHA-1
+implementation by default, which isn't vulnerable to the SHAttered
+attack.
+
+Thus Git has in effect already migrated to a new hash that isn't SHA-1
+and doesn't share its vulnerabilities; its new hash function just
+happens to produce exactly the same output for all known inputs,
+except two PDFs published by the SHAttered researchers, and the new
+implementation (written by those researchers) claims to detect future
+cryptanalytic collision attacks.
+
+Regardless, it's considered prudent to move past any variant of SHA-1
+to a new hash. There's no guarantee that further attacks on SHA-1 won't
+be published in the future, and those attacks may not have viable
+mitigations.
+
+If SHA-1 and its variants were to be truly broken, Git's hash function
+could not be considered cryptographically secure any more. This would
+impact the communication of hash values because we could not trust
+that a given hash value represented the known good version of content
+that the speaker intended.
 
 SHA-1 still possesses the other properties such as fast object lookup
 and safe error checking, but other hash functions are equally suitable
@@ -116,10 +135,15 @@ Documentation/technical/repository-version.txt) with extensions
                objectFormat = newhash
                compatObjectFormat = sha1
 
-Specifying a repository format extension ensures that versions of Git
-not aware of NewHash do not try to operate on these repositories,
-instead producing an error message:
+The combination of setting `core.repositoryFormatVersion=1` and
+populating `extensions.*` ensures that all versions of Git later than
+`v0.99.9l` will die instead of trying to operate on the NewHash
+repository, producing an error message:
 
+       # Between v0.99.9l and v2.7.0
+       $ git status
+       fatal: Expected git repo version <= 0, found 1
+       # After v2.7.0
        $ git status
        fatal: unknown repository extensions found:
                objectformat
index a0e45f2889e6e52db71f88d7368bbb2efc02616a..64f49d0bbb7b0c28832562e59fc8b89e872c4f4b 100644 (file)
@@ -214,10 +214,12 @@ smart server reply:
    S: Cache-Control: no-cache
    S:
    S: 001e# service=git-upload-pack\n
+   S: 0000
    S: 004895dcfa3633004da0049d3d0fa03f80589cbcaf31 refs/heads/maint\0multi_ack\n
    S: 0042d049f6c27a2244e12041955e262a404c7faba355 refs/heads/master\n
    S: 003c2cb58b79488a98d2721cea644875a8dd0026b115 refs/tags/v1.0\n
    S: 003fa3c2e2402b99163d1d59756e5f207ae21cccba4c refs/tags/v1.0^{}\n
+   S: 0000
 
 The client may send Extra Parameters (see
 Documentation/technical/pack-protocol.txt) as a colon-separated string
@@ -277,6 +279,7 @@ The returned response contains "version 1" if "version=1" was sent as an
 Extra Parameter.
 
   smart_reply     =  PKT-LINE("# service=$servicename" LF)
+                    "0000"
                     *1("version 1")
                     ref_list
                     "0000"
diff --git a/Documentation/technical/protocol-v2.txt b/Documentation/technical/protocol-v2.txt
new file mode 100644 (file)
index 0000000..136179d
--- /dev/null
@@ -0,0 +1,395 @@
+ Git Wire Protocol, Version 2
+==============================
+
+This document presents a specification for a version 2 of Git's wire
+protocol.  Protocol v2 will improve upon v1 in the following ways:
+
+  * Instead of multiple service names, multiple commands will be
+    supported by a single service
+  * Easily extendable as capabilities are moved into their own section
+    of the protocol, no longer being hidden behind a NUL byte and
+    limited by the size of a pkt-line
+  * Separate out other information hidden behind NUL bytes (e.g. agent
+    string as a capability and symrefs can be requested using 'ls-refs')
+  * Reference advertisement will be omitted unless explicitly requested
+  * ls-refs command to explicitly request some refs
+  * Designed with http and stateless-rpc in mind.  With clear flush
+    semantics the http remote helper can simply act as a proxy
+
+In protocol v2, communication is command oriented.  When first contacting a
+server, a list of capabilities will be advertised.  Some of these capabilities
+will be commands which a client can request be executed.  Once a command
+has completed, a client can reuse the connection and request that other
+commands be executed.
+
+ Packet-Line Framing
+---------------------
+
+All communication is done using packet-line framing, just as in v1.  See
+`Documentation/technical/pack-protocol.txt` and
+`Documentation/technical/protocol-common.txt` for more information.
+
+In protocol v2 these special packets will have the following semantics:
+
+  * '0000' Flush Packet (flush-pkt) - indicates the end of a message
+  * '0001' Delimiter Packet (delim-pkt) - separates sections of a message
+
+ Initial Client Request
+------------------------
+
+In general a client can request to speak protocol v2 by sending
+`version=2` through the respective side-channel for the transport being
+used which inevitably sets `GIT_PROTOCOL`.  More information can be
+found in `pack-protocol.txt` and `http-protocol.txt`.  In all cases the
+response from the server is the capability advertisement.
+
+ Git Transport
+~~~~~~~~~~~~~~~
+
+When using the git:// transport, you can request to use protocol v2 by
+sending "version=2" as an extra parameter:
+
+   003egit-upload-pack /project.git\0host=myserver.com\0\0version=2\0
+
+ SSH and File Transport
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using either the ssh:// or file:// transport, the GIT_PROTOCOL
+environment variable must be set explicitly to include "version=2".
+
+ HTTP Transport
+~~~~~~~~~~~~~~~~
+
+When using the http:// or https:// transport, a client makes a "smart"
+info/refs request as described in `http-protocol.txt` and requests that
+v2 be used by supplying "version=2" in the `Git-Protocol` header.
+
+   C: Git-Protocol: version=2
+   C:
+   C: GET $GIT_URL/info/refs?service=git-upload-pack HTTP/1.0
+
+A v2 server would reply:
+
+   S: 200 OK
+   S: <Some headers>
+   S: ...
+   S:
+   S: 000eversion 2\n
+   S: <capability-advertisement>
+
+Subsequent requests are then made directly to the service
+`$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack).
+
+ Capability Advertisement
+--------------------------
+
+A server which decides to communicate (based on a request from a client)
+using protocol version 2 notifies the client by sending a version string
+in its initial response followed by an advertisement of its capabilities.
+Each capability is a key with an optional value.  Clients must ignore all
+unknown keys.  Semantics of unknown values are left to the definition of
+each key.  Some capabilities will describe commands which can be requested
+to be executed by the client.
+
+    capability-advertisement = protocol-version
+                              capability-list
+                              flush-pkt
+
+    protocol-version = PKT-LINE("version 2" LF)
+    capability-list = *capability
+    capability = PKT-LINE(key[=value] LF)
+
+    key = 1*(ALPHA | DIGIT | "-_")
+    value = 1*(ALPHA | DIGIT | " -_.,?\/{}[]()<>!@#$%^&*+=:;")
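+
+As an illustration, a server implementing only the commands described
+in this document might send the following advertisement (the agent
+value and the advertised fetch feature are only examples):
+
+   000eversion 2
+   0015agent=git/2.17.0
+   000cls-refs
+   0012fetch=shallow
+   0000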
+
+ Command Request
+-----------------
+
+After receiving the capability advertisement, a client can then issue a
+request to select the command it wants with any particular capabilities
+or arguments.  There is then an optional section where the client can
+provide any command specific parameters or queries.  Only a single
+command can be requested at a time.
+
+    request = empty-request | command-request
+    empty-request = flush-pkt
+    command-request = command
+                     capability-list
+                     [command-args]
+                     flush-pkt
+    command = PKT-LINE("command=" key LF)
+    command-args = delim-pkt
+                  *command-specific-arg
+
+    command-specific-args are packet line framed arguments defined by
+    each individual command.
+
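+As an illustration, a request to run the `ls-refs` command (described
+below) with a single argument might look like (the agent value is only
+an example):
+
+   0014command=ls-refs
+   0015agent=git/2.17.0
+   0001
+   001bref-prefix refs/heads/
+   0000
+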
+The server will then check to ensure that the client's request is
+comprised of a valid command as well as valid capabilities which were
+advertised.  If the request is valid the server will then execute the
+command.  A server MUST wait till it has received the client's entire
+request before issuing a response.  The format of the response is
+determined by the command being executed, but in all cases a flush-pkt
+indicates the end of the response.
+
+When a command has finished, and the client has received the entire
+response from the server, a client can either request that another
+command be executed or can terminate the connection.  A client may
+optionally send an empty request consisting of just a flush-pkt to
+indicate that no more requests will be made.
+
+ Capabilities
+--------------
+
+There are two different types of capabilities: normal capabilities,
+which can be used to convey information or alter the behavior of a
+request, and commands, which are the core actions that a client wants to
+perform (fetch, push, etc).
+
+Protocol version 2 is stateless by default.  This means that all commands
+must only last a single round and be stateless from the perspective of the
+server side, unless the client has requested a capability indicating that
+state should be maintained by the server.  Clients MUST NOT require state
+management on the server side in order to function correctly.  This
+permits simple round-robin load-balancing on the server side, without
+needing to worry about state management.
+
+ agent
+~~~~~~~
+
+The server can advertise the `agent` capability with a value `X` (in the
+form `agent=X`) to notify the client that the server is running version
+`X`.  The client may optionally send its own agent string by including
+the `agent` capability with a value `Y` (in the form `agent=Y`) in its
+request to the server (but it MUST NOT do so if the server did not
+advertise the agent capability). The `X` and `Y` strings may contain any
+printable ASCII characters except space (i.e., the byte range 32 < x <
+127), and are typically of the form "package/version" (e.g.,
+"git/1.8.3.1"). The agent strings are purely informative for statistics
+and debugging purposes, and MUST NOT be used to programmatically assume
+the presence or absence of particular features.
+
+ ls-refs
+~~~~~~~~~
+
+`ls-refs` is the command used to request a reference advertisement in v2.
+Unlike the current reference advertisement, ls-refs takes in arguments
+which can be used to limit the refs sent from the server.
+
+Additional features not supported in the base command will be advertised
+as the value of the command in the capability advertisement in the form
+of a space separated list of features: "<command>=<feature 1> <feature 2>"
+
+ls-refs takes in the following arguments:
+
+    symrefs
+       In addition to the object pointed to by it, show the underlying
+       ref pointed to by it when showing a symbolic ref.
+    peel
+       Show peeled tags.
+    ref-prefix <prefix>
+       When specified, only references having a prefix matching one of
+       the provided prefixes are displayed.
+
+The output of ls-refs is as follows:
+
+    output = *ref
+            flush-pkt
+    ref = PKT-LINE(obj-id SP refname *(SP ref-attribute) LF)
+    ref-attribute = (symref | peeled)
+    symref = "symref-target:" symref-target
+    peeled = "peeled:" obj-id
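+
+As an illustration (the object id is made up), the reply to an ls-refs
+request sent with the `symrefs` argument might look like:
+
+   00523b8e5c3f0a1d9e7b6c4f2a8d1e0b9c7a5d3f1e2b HEAD symref-target:refs/heads/master
+   003f3b8e5c3f0a1d9e7b6c4f2a8d1e0b9c7a5d3f1e2b refs/heads/master
+   0000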
+
+ fetch
+~~~~~~~
+
+`fetch` is the command used to fetch a packfile in v2.  It can be looked
+at as a modified version of the v1 fetch where the ref-advertisement is
+stripped out (since the `ls-refs` command fills that role) and the
+message format is tweaked to eliminate redundancies and permit easy
+addition of future extensions.
+
+Additional features not supported in the base command will be advertised
+as the value of the command in the capability advertisement in the form
+of a space separated list of features: "<command>=<feature 1> <feature 2>"
+
+A `fetch` request can take the following arguments:
+
+    want <oid>
+       Indicates to the server an object which the client wants to
+       retrieve.  Wants can be anything and are not limited to
+       advertised objects.
+
+    have <oid>
+       Indicates to the server an object which the client has locally.
+       This allows the server to make a packfile which only contains
+       the objects that the client needs. Multiple 'have' lines can be
+       supplied.
+
+    done
+       Indicates to the server that negotiation should terminate (or
+       not even begin if performing a clone) and that the server should
+       use the information supplied in the request to construct the
+       packfile.
+
+    thin-pack
+       Request that a thin pack be sent, which is a pack with deltas
+       which reference base objects not contained within the pack (but
+       are known to exist at the receiving end). This can reduce the
+       network traffic significantly, but it requires the receiving end
+       to know how to "thicken" these packs by adding the missing bases
+       to the pack.
+
+    no-progress
+       Request that progress information that would normally be sent on
+       side-band channel 2, during the packfile transfer, should not be
+       sent.  However, the side-band channel 3 is still used for error
+       responses.
+
+    include-tag
+       Request that annotated tags should be sent if the objects they
+       point to are being sent.
+
+    ofs-delta
+       Indicate that the client understands PACKv2 with delta referring
+       to its base by position in pack rather than by an oid.  That is,
+       they can read OBJ_OFS_DELTA (aka type 6) in a packfile.
+
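+As an illustration (the object id is made up), a simple clone-style
+fetch request might look like:
+
+   0012command=fetch
+   0001
+   000ethin-pack
+   000eofs-delta
+   0032want 3b8e5c3f0a1d9e7b6c4f2a8d1e0b9c7a5d3f1e2b
+   0009done
+   0000
+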
+If the 'shallow' feature is advertised the following arguments can be
+included in the clients request as well as the potential addition of the
+'shallow-info' section in the server's response as explained below.
+
+    shallow <oid>
+       A client must notify the server of all commits for which it only
+       has shallow copies (meaning that it doesn't have the parents of
+       a commit) by supplying a 'shallow <oid>' line for each such
+       object so that the server is aware of the limitations of the
+       client's history.  This is so that the server is aware that the
+       client may not have all objects reachable from such commits.
+
+    deepen <depth>
+       Requests that the fetch/clone should be shallow having a commit
+       depth of <depth> relative to the remote side.
+
+    deepen-relative
+       Requests that the semantics of the "deepen" command be changed
+       to indicate that the depth requested is relative to the client's
+       current shallow boundary, instead of relative to the requested
+       commits.
+
+    deepen-since <timestamp>
+       Requests that the shallow clone/fetch should be cut at a
+       specific time, instead of depth.  Internally it's equivalent to
+       doing "git rev-list --max-age=<timestamp>". Cannot be used with
+       "deepen".
+
+    deepen-not <rev>
+       Requests that the shallow clone/fetch should be cut at a
+       specific revision specified by '<rev>', instead of a depth.
+       Internally it's equivalent to doing "git rev-list --not <rev>".
+       Cannot be used with "deepen", but can be used with
+       "deepen-since".
+
+The response of `fetch` is broken into a number of sections separated by
+delimiter packets (0001), with each section beginning with its section
+header.
+
+    output = *section
+    section = (acknowledgments | shallow-info | packfile)
+             (flush-pkt | delim-pkt)
+
+    acknowledgments = PKT-LINE("acknowledgments" LF)
+                     (nak | *ack)
+                     (ready)
+    ready = PKT-LINE("ready" LF)
+    nak = PKT-LINE("NAK" LF)
+    ack = PKT-LINE("ACK" SP obj-id LF)
+
+    shallow-info = PKT-LINE("shallow-info" LF)
+                  *PKT-LINE((shallow | unshallow) LF)
+    shallow = "shallow" SP obj-id
+    unshallow = "unshallow" SP obj-id
+
+    packfile = PKT-LINE("packfile" LF)
+              *PKT-LINE(%x01-03 *%x00-ff)
+
+    acknowledgments section
+       * If the client determines that it is finished with negotiations
+         by sending a "done" line, the acknowledgments section MUST be
+         omitted from the server's response.
+
+       * Always begins with the section header "acknowledgments"
+
+       * The server will respond with "NAK" if none of the object ids sent
+         as have lines were common.
+
+       * The server will respond with "ACK obj-id" for all of the
+         object ids sent as have lines which are common.
+
+       * A response cannot have both "ACK" lines as well as a "NAK"
+         line.
+
+       * The server will respond with a "ready" line indicating that
+         the server has found an acceptable common base and is ready to
+         make and send a packfile (which will be found in the packfile
+         section of the same response)
+
+       * If the server has found a suitable cut point and has decided
+         to send a "ready" line, then the server can decide to (as an
+         optimization) omit any "ACK" lines it would have sent during
+         its response.  This is because the server will have already
+         determined the objects it plans to send to the client and no
+         further negotiation is needed.
+
+    shallow-info section
+       * If the client has requested a shallow fetch/clone, a shallow
+         client requests a fetch, or the server is shallow, then the
+         server's response may include a shallow-info section.  The
+         shallow-info section will be included if (due to one of the
+         above conditions) the server needs to inform the client of any
+         shallow boundaries or adjustments to the client's already
+         existing shallow boundaries.
+
+       * Always begins with the section header "shallow-info"
+
+       * If a positive depth is requested, the server will compute the
+         set of commits which are no deeper than the desired depth.
+
+       * The server sends a "shallow obj-id" line for each commit whose
+         parents will not be sent in the following packfile.
+
+       * The server sends an "unshallow obj-id" line for each commit
+         which the client has indicated is shallow, but is no longer
+         shallow as a result of the fetch (due to its parents being
+         sent in the following packfile).
+
+       * The server MUST NOT send any "unshallow" lines for anything
+         which the client has not indicated was shallow as a part of
+         its request.
+
+       * This section is only included if a packfile section is also
+         included in the response.
+
+    packfile section
+       * This section is only included if the client has sent 'want'
+         lines in its request and either requested that no more
+         negotiation be done by sending 'done' or if the server has
+         decided it has found a sufficient cut point to produce a
+         packfile.
+
+       * Always begins with the section header "packfile"
+
+       * The transmission of the packfile begins immediately after the
+         section header
+
+       * The data transfer of the packfile is always multiplexed, using
+         the same semantics of the 'side-band-64k' capability from
+         protocol version 1.  This means that each packet, during the
+         packfile data stream, is made up of a leading 4-byte pkt-line
+         length (typical of the pkt-line format), followed by a 1-byte
+         stream code, followed by the actual data.
+
+         The stream code can be one of:
+               1 - pack data
+               2 - progress messages
+               3 - fatal error message just before stream aborts
index 0c86f1c768d96d73528921f387b38bdf4cd10ada..02ab86f2a76dc24bb36b4d3ca5974c2588706168 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v2.16.GIT
+DEF_VER=v2.17.0-rc2
 
 LF='
 '
diff --git a/INSTALL b/INSTALL
index 808e07b6599476afc8a0b3a385e73df8b7f9c63d..c39006e8e7e5c5be2114b79d50135dc08e3d1aaa 100644 (file)
--- a/INSTALL
+++ b/INSTALL
@@ -88,9 +88,9 @@ Issues of note:
        export GIT_EXEC_PATH PATH GITPERLLIB
 
  - By default (unless NO_PERL is provided) Git will ship various perl
-   scripts & libraries it needs. However, for simplicity it doesn't
-   use the ExtUtils::MakeMaker toolchain to decide where to place the
-   perl libraries. Depending on the system this can result in the perl
+   scripts. However, for simplicity it doesn't use the
+   ExtUtils::MakeMaker toolchain to decide where to place the perl
+   libraries. Depending on the system this can result in the perl
    libraries not being where you'd like them if they're expected to be
    used by things other than Git itself.
 
@@ -102,6 +102,11 @@ Issues of note:
    Will result in e.g. perllibdir=/usr/share/perl/5.26.1 on Debian,
    perllibdir=/usr/share/perl5 (which we'd use by default) on CentOS.
 
+ - Unless NO_PERL is provided, Git will ship various perl libraries it
+   needs. Distributors of Git will usually want to set
+   NO_PERL_CPAN_FALLBACKS if NO_PERL is not provided, so that their own
+   packaged copies of the CPAN modules Git needs are used instead.
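+   For example, a distributor might (hypothetically) build with:
+
+       make prefix=/usr NO_PERL_CPAN_FALLBACKS=YesPlease install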
+
  - Git is reasonably self-sufficient, but does depend on a few external
    programs and libraries.  Git can be used without most of them by adding
    the approriate "NO_<LIBRARY>=YesPlease" to the make command line or
@@ -121,7 +126,8 @@ Issues of note:
          Redhat/Fedora are reported to ship Perl binary package with some
          core modules stripped away (see http://lwn.net/Articles/477234/),
          so you might need to install additional packages other than Perl
-         itself, e.g. Time::HiRes.
+         itself, e.g. Digest::MD5, File::Spec, File::Temp, Net::Domain,
+         Net::SMTP, and Time::HiRes.
 
        - git-imap-send needs the OpenSSL library to talk IMAP over SSL if
          you are using libcurl older than 7.34.0.  Otherwise you can use
index 5bcd83ddf32707f8b98f650fd08d8eac5edf14af..ff7effb2f99efd83581039af9313cd3872e1c8ea 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -29,10 +29,10 @@ all::
 # Perl-compatible regular expressions instead of standard or extended
 # POSIX regular expressions.
 #
-# Currently USE_LIBPCRE is a synonym for USE_LIBPCRE1, define
-# USE_LIBPCRE2 instead if you'd like to use version 2 of the PCRE
-# library. The USE_LIBPCRE flag will likely be changed to mean v2 by
-# default in future releases.
+# USE_LIBPCRE is a synonym for USE_LIBPCRE2, define USE_LIBPCRE1
+# instead if you'd like to use the legacy version 1 of the PCRE
+# library. Support for version 1 will likely be removed in some future
+# release of Git, as upstream has all but abandoned it.
 #
 # When using USE_LIBPCRE1, define NO_LIBPCRE1_JIT if the PCRE v1
 # library is compiled without --enable-jit. We will auto-detect
@@ -296,6 +296,12 @@ all::
 #
 # Define NO_PERL if you do not want Perl scripts or libraries at all.
 #
+# Define NO_PERL_CPAN_FALLBACKS if you do not want to install bundled
+# copies of CPAN modules that serve as a fallback in case the modules
+# are not available on the system. This option is intended for
+# distributions that want to use their packaged versions of Perl
+# modules, instead of the fallbacks shipped with Git.
+#
 # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python
 # but /usr/bin/python2.7 on some platforms).
 #
@@ -329,6 +335,13 @@ all::
 # when hardlinking a file to another name and unlinking the original file right
 # away (some NTFS drivers seem to zero the contents in that scenario).
 #
+# Define INSTALL_SYMLINKS if you prefer to have everything that can be
+# symlinked between bin/ and libexec/ to use relative symlinks between
+# the two. This option overrides NO_CROSS_DIRECTORY_HARDLINKS and
+# NO_INSTALL_HARDLINKS, which will also use symlinking by indirection
+# within the same directory in some cases; INSTALL_SYMLINKS will
+# always symlink to the final target directly.
+#
 # Define NO_CROSS_DIRECTORY_HARDLINKS if you plan to distribute the installed
 # programs as a tar, where bin/ and libexec/ might be on different file systems.
 #
@@ -468,8 +481,7 @@ ARFLAGS = rcs
 # This can help installing the suite in a relocatable way.
 
 prefix = $(HOME)
-bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(prefix)/bin
 mandir = $(prefix)/share/man
 infodir = $(prefix)/share/info
 gitexecdir = libexec/git-core
@@ -486,8 +498,10 @@ lib = lib
 # DESTDIR =
 pathsep = :
 
+bindir_relative = $(patsubst $(prefix)/%,%,$(bindir))
 mandir_relative = $(patsubst $(prefix)/%,%,$(mandir))
 infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
+gitexecdir_relative = $(patsubst $(prefix)/%,%,$(gitexecdir))
 htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
 
 export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
@@ -540,6 +554,7 @@ SCRIPT_PERL =
 SCRIPT_PYTHON =
 SCRIPT_SH =
 SCRIPT_LIB =
+TEST_BUILTINS_OBJS =
 TEST_PROGRAMS_NEED_X =
 
 # Having this variable in your environment would break pipelines because
@@ -637,7 +652,6 @@ PROGRAM_OBJS += imap-send.o
 PROGRAM_OBJS += sh-i18n--envsubst.o
 PROGRAM_OBJS += shell.o
 PROGRAM_OBJS += show-index.o
-PROGRAM_OBJS += upload-pack.o
 PROGRAM_OBJS += remote-testsvn.o
 
 # Binary suffix, set to .exe for Windows builds
@@ -645,47 +659,50 @@ X =
 
 PROGRAMS += $(patsubst %.o,git-%$X,$(PROGRAM_OBJS))
 
-TEST_PROGRAMS_NEED_X += test-chmtime
-TEST_PROGRAMS_NEED_X += test-ctype
-TEST_PROGRAMS_NEED_X += test-config
-TEST_PROGRAMS_NEED_X += test-date
-TEST_PROGRAMS_NEED_X += test-delta
-TEST_PROGRAMS_NEED_X += test-drop-caches
-TEST_PROGRAMS_NEED_X += test-dump-cache-tree
+TEST_BUILTINS_OBJS += test-chmtime.o
+TEST_BUILTINS_OBJS += test-config.o
+TEST_BUILTINS_OBJS += test-ctype.o
+TEST_BUILTINS_OBJS += test-date.o
+TEST_BUILTINS_OBJS += test-delta.o
+TEST_BUILTINS_OBJS += test-drop-caches.o
+TEST_BUILTINS_OBJS += test-dump-cache-tree.o
+TEST_BUILTINS_OBJS += test-dump-split-index.o
+TEST_BUILTINS_OBJS += test-example-decorate.o
+TEST_BUILTINS_OBJS += test-genrandom.o
+TEST_BUILTINS_OBJS += test-hashmap.o
+TEST_BUILTINS_OBJS += test-index-version.o
+TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
+TEST_BUILTINS_OBJS += test-match-trees.o
+TEST_BUILTINS_OBJS += test-mergesort.o
+TEST_BUILTINS_OBJS += test-mktemp.o
+TEST_BUILTINS_OBJS += test-online-cpus.o
+TEST_BUILTINS_OBJS += test-path-utils.o
+TEST_BUILTINS_OBJS += test-prio-queue.o
+TEST_BUILTINS_OBJS += test-read-cache.o
+TEST_BUILTINS_OBJS += test-ref-store.o
+TEST_BUILTINS_OBJS += test-regex.o
+TEST_BUILTINS_OBJS += test-revision-walking.o
+TEST_BUILTINS_OBJS += test-run-command.o
+TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
+TEST_BUILTINS_OBJS += test-sha1-array.o
+TEST_BUILTINS_OBJS += test-sha1.o
+TEST_BUILTINS_OBJS += test-sigchain.o
+TEST_BUILTINS_OBJS += test-strcmp-offset.o
+TEST_BUILTINS_OBJS += test-string-list.o
+TEST_BUILTINS_OBJS += test-submodule-config.o
+TEST_BUILTINS_OBJS += test-subprocess.o
+TEST_BUILTINS_OBJS += test-urlmatch-normalization.o
+TEST_BUILTINS_OBJS += test-wildmatch.o
+TEST_BUILTINS_OBJS += test-write-cache.o
+
 TEST_PROGRAMS_NEED_X += test-dump-fsmonitor
-TEST_PROGRAMS_NEED_X += test-dump-split-index
 TEST_PROGRAMS_NEED_X += test-dump-untracked-cache
-TEST_PROGRAMS_NEED_X += test-example-decorate
 TEST_PROGRAMS_NEED_X += test-fake-ssh
-TEST_PROGRAMS_NEED_X += test-genrandom
-TEST_PROGRAMS_NEED_X += test-hashmap
-TEST_PROGRAMS_NEED_X += test-index-version
-TEST_PROGRAMS_NEED_X += test-lazy-init-name-hash
 TEST_PROGRAMS_NEED_X += test-line-buffer
-TEST_PROGRAMS_NEED_X += test-match-trees
-TEST_PROGRAMS_NEED_X += test-mergesort
-TEST_PROGRAMS_NEED_X += test-mktemp
-TEST_PROGRAMS_NEED_X += test-online-cpus
 TEST_PROGRAMS_NEED_X += test-parse-options
-TEST_PROGRAMS_NEED_X += test-path-utils
-TEST_PROGRAMS_NEED_X += test-prio-queue
-TEST_PROGRAMS_NEED_X += test-read-cache
-TEST_PROGRAMS_NEED_X += test-write-cache
-TEST_PROGRAMS_NEED_X += test-ref-store
-TEST_PROGRAMS_NEED_X += test-regex
-TEST_PROGRAMS_NEED_X += test-revision-walking
-TEST_PROGRAMS_NEED_X += test-run-command
-TEST_PROGRAMS_NEED_X += test-scrap-cache-tree
-TEST_PROGRAMS_NEED_X += test-sha1
-TEST_PROGRAMS_NEED_X += test-sha1-array
-TEST_PROGRAMS_NEED_X += test-sigchain
-TEST_PROGRAMS_NEED_X += test-strcmp-offset
-TEST_PROGRAMS_NEED_X += test-string-list
-TEST_PROGRAMS_NEED_X += test-submodule-config
-TEST_PROGRAMS_NEED_X += test-subprocess
+TEST_PROGRAMS_NEED_X += test-pkt-line
 TEST_PROGRAMS_NEED_X += test-svn-fe
-TEST_PROGRAMS_NEED_X += test-urlmatch-normalization
-TEST_PROGRAMS_NEED_X += test-wildmatch
+TEST_PROGRAMS_NEED_X += test-tool
 
 TEST_PROGRAMS = $(patsubst %,t/helper/%$X,$(TEST_PROGRAMS_NEED_X))
 
@@ -824,8 +841,10 @@ LIB_OBJS += list-objects-filter-options.o
 LIB_OBJS += ll-merge.o
 LIB_OBJS += lockfile.o
 LIB_OBJS += log-tree.o
+LIB_OBJS += ls-refs.o
 LIB_OBJS += mailinfo.o
 LIB_OBJS += mailmap.o
+LIB_OBJS += mem-pool.o
 LIB_OBJS += match-trees.o
 LIB_OBJS += merge.o
 LIB_OBJS += merge-blobs.o
@@ -879,6 +898,7 @@ LIB_OBJS += revision.o
 LIB_OBJS += run-command.o
 LIB_OBJS += send-pack.o
 LIB_OBJS += sequencer.o
+LIB_OBJS += serve.o
 LIB_OBJS += server-info.o
 LIB_OBJS += setup.o
 LIB_OBJS += sha1-array.o
@@ -907,6 +927,7 @@ LIB_OBJS += tree-diff.o
 LIB_OBJS += tree.o
 LIB_OBJS += tree-walk.o
 LIB_OBJS += unpack-trees.o
+LIB_OBJS += upload-pack.o
 LIB_OBJS += url.o
 LIB_OBJS += urlmatch.o
 LIB_OBJS += usage.o
@@ -1011,6 +1032,7 @@ BUILTIN_OBJS += builtin/rev-parse.o
 BUILTIN_OBJS += builtin/revert.o
 BUILTIN_OBJS += builtin/rm.o
 BUILTIN_OBJS += builtin/send-pack.o
+BUILTIN_OBJS += builtin/serve.o
 BUILTIN_OBJS += builtin/shortlog.o
 BUILTIN_OBJS += builtin/show-branch.o
 BUILTIN_OBJS += builtin/show-ref.o
@@ -1024,6 +1046,7 @@ BUILTIN_OBJS += builtin/update-index.o
 BUILTIN_OBJS += builtin/update-ref.o
 BUILTIN_OBJS += builtin/update-server-info.o
 BUILTIN_OBJS += builtin/upload-archive.o
+BUILTIN_OBJS += builtin/upload-pack.o
 BUILTIN_OBJS += builtin/var.o
 BUILTIN_OBJS += builtin/verify-commit.o
 BUILTIN_OBJS += builtin/verify-pack.o
@@ -1164,13 +1187,18 @@ ifdef NO_LIBGEN_H
        COMPAT_OBJS += compat/basename.o
 endif
 
-USE_LIBPCRE1 ?= $(USE_LIBPCRE)
+USE_LIBPCRE2 ?= $(USE_LIBPCRE)
 
-ifneq (,$(USE_LIBPCRE1))
-       ifdef USE_LIBPCRE2
-$(error Only set USE_LIBPCRE1 (or its alias USE_LIBPCRE) or USE_LIBPCRE2, not both!)
+ifneq (,$(USE_LIBPCRE2))
+       ifdef USE_LIBPCRE1
+$(error Only set USE_LIBPCRE2 (or its alias USE_LIBPCRE) or USE_LIBPCRE1, not both!)
        endif
 
+       BASIC_CFLAGS += -DUSE_LIBPCRE2
+       EXTLIBS += -lpcre2-8
+endif
+
+ifdef USE_LIBPCRE1
        BASIC_CFLAGS += -DUSE_LIBPCRE1
        EXTLIBS += -lpcre
 
@@ -1179,11 +1207,6 @@ ifdef NO_LIBPCRE1_JIT
 endif
 endif
 
-ifdef USE_LIBPCRE2
-       BASIC_CFLAGS += -DUSE_LIBPCRE2
-       EXTLIBS += -lpcre2-8
-endif
-
 ifdef LIBPCREDIR
        BASIC_CFLAGS += -I$(LIBPCREDIR)/include
        EXTLIBS += -L$(LIBPCREDIR)/$(lib) $(CC_LD_DYNPATH)$(LIBPCREDIR)/$(lib)
@@ -1513,7 +1536,9 @@ else
        LIB_OBJS += sha1dc_git.o
 ifdef DC_SHA1_EXTERNAL
        ifdef DC_SHA1_SUBMODULE
+               ifneq ($(DC_SHA1_SUBMODULE),auto)
 $(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both)
+               endif
        endif
        BASIC_CFLAGS += -DDC_SHA1_EXTERNAL
        EXTLIBS += -lsha1detectcoll
@@ -1733,6 +1758,7 @@ infodir_relative_SQ = $(subst ','\'',$(infodir_relative))
 perllibdir_SQ = $(subst ','\'',$(perllibdir))
 localedir_SQ = $(subst ','\'',$(localedir))
 gitexecdir_SQ = $(subst ','\'',$(gitexecdir))
+gitexecdir_relative_SQ = $(subst ','\'',$(gitexecdir_relative))
 template_dir_SQ = $(subst ','\'',$(template_dir))
 htmldir_relative_SQ = $(subst ','\'',$(htmldir_relative))
 prefix_SQ = $(subst ','\'',$(prefix))
@@ -2075,7 +2101,7 @@ VCSSVN_OBJS += vcs-svn/fast_export.o
 VCSSVN_OBJS += vcs-svn/svndiff.o
 VCSSVN_OBJS += vcs-svn/svndump.o
 
-TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS))
+TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS)) $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
 OBJECTS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \
        $(XDIFF_OBJS) \
        $(VCSSVN_OBJS) \
@@ -2160,6 +2186,8 @@ gettext.sp gettext.s gettext.o: EXTRA_CPPFLAGS = \
 http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SPARSE_FLAGS += \
        -DCURL_DISABLE_TYPECHECK
 
+pack-revindex.sp: SPARSE_FLAGS += -Wno-memcpy-max-count
+
 ifdef NO_EXPAT
 http-walker.sp http-walker.s http-walker.o: EXTRA_CPPFLAGS = -DNO_EXPAT
 endif
@@ -2214,13 +2242,15 @@ $(VCSSVN_LIB): $(VCSSVN_OBJS)
 
 export DEFAULT_EDITOR DEFAULT_PAGER
 
-.PHONY: doc man html info pdf
-doc:
+.PHONY: doc man man-perl html info pdf
+doc: man-perl
        $(MAKE) -C Documentation all
 
-man:
+man: man-perl
        $(MAKE) -C Documentation man
 
+man-perl: perl/build/man/man3/Git.3pm
+
 html:
        $(MAKE) -C Documentation html
 
@@ -2298,14 +2328,22 @@ po/build/locale/%/LC_MESSAGES/git.mo: po/%.po
 
 LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm)
 LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL))
+LIB_CPAN := $(wildcard perl/FromCPAN/*.pm perl/FromCPAN/*/*.pm)
+LIB_CPAN_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_CPAN))
 
 ifndef NO_PERL
 all:: $(LIB_PERL_GEN)
+ifndef NO_PERL_CPAN_FALLBACKS
+all:: $(LIB_CPAN_GEN)
+endif
+NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS))
 endif
 
 perl/build/lib/%.pm: perl/%.pm
        $(QUIET_GEN)mkdir -p $(dir $@) && \
-       sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' < $< > $@
+       sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' \
+           -e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \
+       < $< > $@
 
 perl/build/man/man3/Git.3pm: perl/Git.pm
        $(QUIET_GEN)mkdir -p $(dir $@) && \
@@ -2474,10 +2512,12 @@ t/helper/test-svn-fe$X: $(VCSSVN_LIB)
 
 .PRECIOUS: $(TEST_OBJS)
 
+t/helper/test-tool$X: $(patsubst %,t/helper/%,$(TEST_BUILTINS_OBJS))
+
 t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS)
        $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(filter %.a,$^) $(LIBS)
 
-check-sha1:: t/helper/test-sha1$X
+check-sha1:: t/helper/test-tool$X
        t/helper/test-sha1.sh
 
 SP_OBJ = $(patsubst %.o,%.sp,$(C_OBJ))
@@ -2586,39 +2626,48 @@ endif
 
        bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \
        execdir=$$(cd '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' && pwd) && \
+       destdir_from_execdir_SQ=$$(echo '$(gitexecdir_relative_SQ)' | sed -e 's|[^/][^/]*|..|g') && \
        { test "$$bindir/" = "$$execdir/" || \
          for p in git$X $(filter $(install_bindir_programs),$(ALL_PROGRAMS)); do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \
-               ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$bindir/$$p" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/$$p" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \
+                 ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$bindir/$$p" "$$execdir/$$p" || exit; } \
          done; \
        } && \
        for p in $(filter $(install_bindir_programs),$(BUILT_INS)); do \
                $(RM) "$$bindir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \
-               ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \
-               cp "$$bindir/git$X" "$$bindir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "git$X" "$$bindir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \
+                 ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \
+                 cp "$$bindir/git$X" "$$bindir/$$p" || exit; } \
        done && \
        for p in $(BUILT_INS); do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \
-               ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$execdir/git$X" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/git$X" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \
+                 ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$execdir/git$X" "$$execdir/$$p" || exit; } \
        done && \
        remote_curl_aliases="$(REMOTE_CURL_ALIASES)" && \
        for p in $$remote_curl_aliases; do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
-               ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "git-remote-http$X" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
+                 ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \
        done && \
        ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X"
 
-.PHONY: install-gitweb install-doc install-man install-html install-info install-pdf
+.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf
 .PHONY: quick-install-doc quick-install-man quick-install-html
 install-gitweb:
        $(MAKE) -C gitweb install
@@ -2629,7 +2678,7 @@ install-doc: install-man-perl
 install-man: install-man-perl
        $(MAKE) -C Documentation install-man
 
-install-man-perl: perl/build/man/man3/Git.3pm
+install-man-perl: man-perl
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3'
        (cd perl/build/man/man3 && $(TAR) cf - .) | \
        (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -)
@@ -2668,6 +2717,21 @@ dist: git-archive$(X) configure
                $(GIT_TARNAME)/configure \
                $(GIT_TARNAME)/version \
                $(GIT_TARNAME)/git-gui/version
+ifdef DC_SHA1_SUBMODULE
+       @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib
+       @cp sha1collisiondetection/LICENSE.txt \
+               $(GIT_TARNAME)/sha1collisiondetection/
+       @cp sha1collisiondetection/LICENSE.txt \
+               $(GIT_TARNAME)/sha1collisiondetection/
+       @cp sha1collisiondetection/lib/sha1.[ch] \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/
+       @cp sha1collisiondetection/lib/ubc_check.[ch] \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/
+       $(TAR) rf $(GIT_TARNAME).tar \
+               $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch]
+endif
        @$(RM) -r $(GIT_TARNAME)
        gzip -f -9 $(GIT_TARNAME).tar
 
@@ -2717,7 +2781,7 @@ clean: profile-clean coverage-clean
        $(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
        $(RM) -r bin-wrappers $(dep_dirs)
        $(RM) -r po/build/
-       $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
+       $(RM) *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
        $(RM) -r $(GIT_TARNAME) .doc-tmp-dir
        $(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz
        $(RM) $(htmldocs).tar.gz $(manpages).tar.gz
diff --git a/apply.c b/apply.c
index f8b67bfee2c39cc3fbebb7a3c72dfb9cb4fcb56f..7e5792c996f430952b1b768f8267de851156ce83 100644 (file)
--- a/apply.c
+++ b/apply.c
@@ -950,7 +950,7 @@ static int gitdiff_verify_name(struct apply_state *state,
                }
                free(another);
        } else {
-               if (!starts_with(line, "/dev/null\n"))
+               if (!is_dev_null(line))
                        return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr);
        }
 
@@ -2301,7 +2301,7 @@ static void update_pre_post_images(struct image *preimage,
                                   size_t len, size_t postlen)
 {
        int i, ctx, reduced;
-       char *new, *old, *fixed;
+       char *new_buf, *old_buf, *fixed;
        struct image fixed_preimage;
 
        /*
@@ -2327,25 +2327,25 @@ static void update_pre_post_images(struct image *preimage,
         * We trust the caller to tell us if the update can be done
         * in place (postlen==0) or not.
         */
-       old = postimage->buf;
+       old_buf = postimage->buf;
        if (postlen)
-               new = postimage->buf = xmalloc(postlen);
+               new_buf = postimage->buf = xmalloc(postlen);
        else
-               new = old;
+               new_buf = old_buf;
        fixed = preimage->buf;
 
        for (i = reduced = ctx = 0; i < postimage->nr; i++) {
                size_t l_len = postimage->line[i].len;
                if (!(postimage->line[i].flag & LINE_COMMON)) {
                        /* an added line -- no counterparts in preimage */
-                       memmove(new, old, l_len);
-                       old += l_len;
-                       new += l_len;
+                       memmove(new_buf, old_buf, l_len);
+                       old_buf += l_len;
+                       new_buf += l_len;
                        continue;
                }
 
                /* a common context -- skip it in the original postimage */
-               old += l_len;
+               old_buf += l_len;
 
                /* and find the corresponding one in the fixed preimage */
                while (ctx < preimage->nr &&
@@ -2365,29 +2365,29 @@ static void update_pre_post_images(struct image *preimage,
 
                /* and copy it in, while fixing the line length */
                l_len = preimage->line[ctx].len;
-               memcpy(new, fixed, l_len);
-               new += l_len;
+               memcpy(new_buf, fixed, l_len);
+               new_buf += l_len;
                fixed += l_len;
                postimage->line[i].len = l_len;
                ctx++;
        }
 
        if (postlen
-           ? postlen < new - postimage->buf
-           : postimage->len < new - postimage->buf)
+           ? postlen < new_buf - postimage->buf
+           : postimage->len < new_buf - postimage->buf)
                die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d",
-                   (int)postlen, (int) postimage->len, (int)(new - postimage->buf));
+                   (int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf));
 
        /* Fix the length of the whole thing */
-       postimage->len = new - postimage->buf;
+       postimage->len = new_buf - postimage->buf;
        postimage->nr -= reduced;
 }
 
 static int line_by_line_fuzzy_match(struct image *img,
                                    struct image *preimage,
                                    struct image *postimage,
-                                   unsigned long try,
-                                   int try_lno,
+                                   unsigned long current,
+                                   int current_lno,
                                    int preimage_limit)
 {
        int i;
@@ -2404,9 +2404,9 @@ static int line_by_line_fuzzy_match(struct image *img,
 
        for (i = 0; i < preimage_limit; i++) {
                size_t prelen = preimage->line[i].len;
-               size_t imglen = img->line[try_lno+i].len;
+               size_t imglen = img->line[current_lno+i].len;
 
-               if (!fuzzy_matchlines(img->buf + try + imgoff, imglen,
+               if (!fuzzy_matchlines(img->buf + current + imgoff, imglen,
                                      preimage->buf + preoff, prelen))
                        return 0;
                if (preimage->line[i].flag & LINE_COMMON)
@@ -2443,7 +2443,7 @@ static int line_by_line_fuzzy_match(struct image *img,
         */
        extra_chars = preimage_end - preimage_eof;
        strbuf_init(&fixed, imgoff + extra_chars);
-       strbuf_add(&fixed, img->buf + try, imgoff);
+       strbuf_add(&fixed, img->buf + current, imgoff);
        strbuf_add(&fixed, preimage_eof, extra_chars);
        fixed_buf = strbuf_detach(&fixed, &fixed_len);
        update_pre_post_images(preimage, postimage,
@@ -2455,8 +2455,8 @@ static int match_fragment(struct apply_state *state,
                          struct image *img,
                          struct image *preimage,
                          struct image *postimage,
-                         unsigned long try,
-                         int try_lno,
+                         unsigned long current,
+                         int current_lno,
                          unsigned ws_rule,
                          int match_beginning, int match_end)
 {
@@ -2466,12 +2466,12 @@ static int match_fragment(struct apply_state *state,
        size_t fixed_len, postlen;
        int preimage_limit;
 
-       if (preimage->nr + try_lno <= img->nr) {
+       if (preimage->nr + current_lno <= img->nr) {
                /*
                 * The hunk falls within the boundaries of img.
                 */
                preimage_limit = preimage->nr;
-               if (match_end && (preimage->nr + try_lno != img->nr))
+               if (match_end && (preimage->nr + current_lno != img->nr))
                        return 0;
        } else if (state->ws_error_action == correct_ws_error &&
                   (ws_rule & WS_BLANK_AT_EOF)) {
@@ -2482,7 +2482,7 @@ static int match_fragment(struct apply_state *state,
                 * match with img, and the remainder of the preimage
                 * must be blank.
                 */
-               preimage_limit = img->nr - try_lno;
+               preimage_limit = img->nr - current_lno;
        } else {
                /*
                 * The hunk extends beyond the end of the img and
@@ -2492,27 +2492,27 @@ static int match_fragment(struct apply_state *state,
                return 0;
        }
 
-       if (match_beginning && try_lno)
+       if (match_beginning && current_lno)
                return 0;
 
        /* Quick hash check */
        for (i = 0; i < preimage_limit; i++)
-               if ((img->line[try_lno + i].flag & LINE_PATCHED) ||
-                   (preimage->line[i].hash != img->line[try_lno + i].hash))
+               if ((img->line[current_lno + i].flag & LINE_PATCHED) ||
+                   (preimage->line[i].hash != img->line[current_lno + i].hash))
                        return 0;
 
        if (preimage_limit == preimage->nr) {
                /*
                 * Do we have an exact match?  If we were told to match
-                * at the end, size must be exactly at try+fragsize,
-                * otherwise try+fragsize must be still within the preimage,
+                * at the end, size must be exactly at current+fragsize,
+                * otherwise current+fragsize must be still within the preimage,
                 * and either case, the old piece should match the preimage
                 * exactly.
                 */
                if ((match_end
-                    ? (try + preimage->len == img->len)
-                    : (try + preimage->len <= img->len)) &&
-                   !memcmp(img->buf + try, preimage->buf, preimage->len))
+                    ? (current + preimage->len == img->len)
+                    : (current + preimage->len <= img->len)) &&
+                   !memcmp(img->buf + current, preimage->buf, preimage->len))
                        return 1;
        } else {
                /*
@@ -2543,7 +2543,7 @@ static int match_fragment(struct apply_state *state,
         */
        if (state->ws_ignore_action == ignore_ws_change)
                return line_by_line_fuzzy_match(img, preimage, postimage,
-                                               try, try_lno, preimage_limit);
+                                               current, current_lno, preimage_limit);
 
        if (state->ws_error_action != correct_ws_error)
                return 0;
@@ -2577,10 +2577,10 @@ static int match_fragment(struct apply_state *state,
         */
        strbuf_init(&fixed, preimage->len + 1);
        orig = preimage->buf;
-       target = img->buf + try;
+       target = img->buf + current;
        for (i = 0; i < preimage_limit; i++) {
                size_t oldlen = preimage->line[i].len;
-               size_t tgtlen = img->line[try_lno + i].len;
+               size_t tgtlen = img->line[current_lno + i].len;
                size_t fixstart = fixed.len;
                struct strbuf tgtfix;
                int match;
@@ -2666,8 +2666,8 @@ static int find_pos(struct apply_state *state,
                    int match_beginning, int match_end)
 {
        int i;
-       unsigned long backwards, forwards, try;
-       int backwards_lno, forwards_lno, try_lno;
+       unsigned long backwards, forwards, current;
+       int backwards_lno, forwards_lno, current_lno;
 
        /*
         * If match_beginning or match_end is specified, there is no
@@ -2687,25 +2687,25 @@ static int find_pos(struct apply_state *state,
        if ((size_t) line > img->nr)
                line = img->nr;
 
-       try = 0;
+       current = 0;
        for (i = 0; i < line; i++)
-               try += img->line[i].len;
+               current += img->line[i].len;
 
        /*
         * There's probably some smart way to do this, but I'll leave
         * that to the smart and beautiful people. I'm simple and stupid.
         */
-       backwards = try;
+       backwards = current;
        backwards_lno = line;
-       forwards = try;
+       forwards = current;
        forwards_lno = line;
-       try_lno = line;
+       current_lno = line;
 
        for (i = 0; ; i++) {
                if (match_fragment(state, img, preimage, postimage,
-                                  try, try_lno, ws_rule,
+                                  current, current_lno, ws_rule,
                                   match_beginning, match_end))
-                       return try_lno;
+                       return current_lno;
 
        again:
                if (backwards_lno == 0 && forwards_lno == img->nr)
@@ -2718,8 +2718,8 @@ static int find_pos(struct apply_state *state,
                        }
                        backwards_lno--;
                        backwards -= img->line[backwards_lno].len;
-                       try = backwards;
-                       try_lno = backwards_lno;
+                       current = backwards;
+                       current_lno = backwards_lno;
                } else {
                        if (forwards_lno == img->nr) {
                                i++;
@@ -2727,8 +2727,8 @@ static int find_pos(struct apply_state *state,
                        }
                        forwards += img->line[forwards_lno].len;
                        forwards_lno++;
-                       try = forwards;
-                       try_lno = forwards_lno;
+                       current = forwards;
+                       current_lno = forwards_lno;
                }
 
        }
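
The renames in the hunks above (old/new to old_buf/new_buf and try/try_lno to current/current_lno) replace identifiers that are reserved keywords in C++; presumably the point is to keep the file compilable with a C++ compiler as well as a C compiler. A minimal illustration of the clash, in plain standard C with no git code (the function name is made up for the example):

    #include <stddef.h>

    /* Legal C, but every identifier below is a keyword in C++, so a C++
     * compiler rejects it; that is why such variables get renamed. */
    static size_t keyword_clash_demo(void)
    {
            size_t try = 3;         /* C++: 'try' introduces a try-block */
            char *new = NULL;       /* C++: 'new' is an operator */
            char *this = new;       /* C++: 'this' is reserved */
            return this ? try : 0;
    }
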
@@ -3154,7 +3154,7 @@ static int apply_binary(struct apply_state *state,
                 * See if the old one matches what the patch
                 * applies to.
                 */
-               hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+               hash_object_file(img->buf, img->len, blob_type, &oid);
                if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix))
                        return error(_("the patch applies to '%s' (%s), "
                                       "which does not match the "
@@ -3180,7 +3180,7 @@ static int apply_binary(struct apply_state *state,
                unsigned long size;
                char *result;
 
-               result = read_sha1_file(oid.hash, &type, &size);
+               result = read_object_file(&oid, &type, &size);
                if (!result)
                        return error(_("the necessary postimage %s for "
                                       "'%s' cannot be read"),
@@ -3199,7 +3199,7 @@ static int apply_binary(struct apply_state *state,
                                     name);
 
                /* verify that the result matches */
-               hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+               hash_object_file(img->buf, img->len, blob_type, &oid);
                if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix))
                        return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
                                name, patch->new_sha1_prefix, oid_to_hex(&oid));
@@ -3242,7 +3242,7 @@ static int read_blob_object(struct strbuf *buf, const struct object_id *oid, uns
                unsigned long sz;
                char *result;
 
-               result = read_sha1_file(oid->hash, &type, &sz);
+               result = read_object_file(oid, &type, &sz);
                if (!result)
                        return -1;
                /* XXX read_sha1_file NUL-terminates */
@@ -3554,7 +3554,7 @@ static int try_threeway(struct apply_state *state,
 
        /* Preimage the patch was prepared for */
        if (patch->is_new)
-               write_sha1_file("", 0, blob_type, pre_oid.hash);
+               write_object_file("", 0, blob_type, &pre_oid);
        else if (get_oid(patch->old_sha1_prefix, &pre_oid) ||
                 read_blob_object(&buf, &pre_oid, patch->old_mode))
                return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
@@ -3570,7 +3570,7 @@ static int try_threeway(struct apply_state *state,
                return -1;
        }
        /* post_oid is theirs */
-       write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash);
+       write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
        clear_image(&tmp_image);
 
        /* our_oid is ours */
@@ -3583,7 +3583,7 @@ static int try_threeway(struct apply_state *state,
                        return error(_("cannot read the current contents of '%s'"),
                                     patch->old_name);
        }
-       write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash);
+       write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
        clear_image(&tmp_image);
 
        /* in-core three-way merge between post and our using pre as base */
@@ -4163,30 +4163,30 @@ static void show_mode_change(struct patch *p, int show_name)
 static void show_rename_copy(struct patch *p)
 {
        const char *renamecopy = p->is_rename ? "rename" : "copy";
-       const char *old, *new;
+       const char *old_name, *new_name;
 
        /* Find common prefix */
-       old = p->old_name;
-       new = p->new_name;
+       old_name = p->old_name;
+       new_name = p->new_name;
        while (1) {
                const char *slash_old, *slash_new;
-               slash_old = strchr(old, '/');
-               slash_new = strchr(new, '/');
+               slash_old = strchr(old_name, '/');
+               slash_new = strchr(new_name, '/');
                if (!slash_old ||
                    !slash_new ||
-                   slash_old - old != slash_new - new ||
-                   memcmp(old, new, slash_new - new))
+                   slash_old - old_name != slash_new - new_name ||
+                   memcmp(old_name, new_name, slash_new - new_name))
                        break;
-               old = slash_old + 1;
-               new = slash_new + 1;
+               old_name = slash_old + 1;
+               new_name = slash_new + 1;
        }
-       /* p->old_name thru old is the common prefix, and old and new
+       /* p->old_name thru old_name is the common prefix, and old_name and new_name
         * through the end of names are renames
         */
-       if (old != p->old_name)
+       if (old_name != p->old_name)
                printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy,
-                      (int)(old - p->old_name), p->old_name,
-                      old, new, p->score);
+                      (int)(old_name - p->old_name), p->old_name,
+                      old_name, new_name, p->score);
        else
                printf(" %s %s => %s (%d%%)\n", renamecopy,
                       p->old_name, p->new_name, p->score);
@@ -4291,7 +4291,7 @@ static int add_index_file(struct apply_state *state,
                        }
                        fill_stat_cache_info(ce, &st);
                }
-               if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) {
+               if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
                        free(ce);
                        return error(_("unable to create backing store "
                                       "for newly created file %s"), path);
@@ -4943,8 +4943,9 @@ int apply_parse_options(int argc, const char **argv,
                        N_("make sure the patch is applicable to the current index")),
                OPT_BOOL(0, "cached", &state->cached,
                        N_("apply a patch without touching the working tree")),
-               OPT_BOOL(0, "unsafe-paths", &state->unsafe_paths,
-                       N_("accept a patch that touches outside the working area")),
+               OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths,
+                          N_("accept a patch that touches outside the working area"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "apply", force_apply,
                        N_("also apply the patch (use with --stat/--summary/--check)")),
                OPT_BOOL('3', "3way", &state->threeway,
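
Throughout this file the object-access calls now take a struct object_id instead of a bare SHA-1 byte array (hash_object_file, read_object_file, write_object_file). A rough sketch of the new calling pattern, assuming git's internal declarations as they appear in this patch; the helper name roundtrip_blob is invented for the example and the includes are assumed:

    #include "cache.h"
    #include "blob.h"

    /* Read a blob by object id and verify it hashes back to the same id,
     * mirroring the read_object_file()/hash_object_file() calls above. */
    static int roundtrip_blob(const struct object_id *oid)
    {
            enum object_type type;
            unsigned long size;
            struct object_id check;
            void *buf = read_object_file(oid, &type, &size);

            if (!buf)
                    return error("cannot read %s", oid_to_hex(oid));
            hash_object_file(buf, size, blob_type, &check);
            free(buf);
            return oidcmp(oid, &check) ? -1 : 0;
    }
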
diff --git a/archive-tar.c b/archive-tar.c
index c6ed96ee74ec10f5c9ffb6f520193326d4704b6b..3563bcb9f263f7782a77c679bf9be32af6bd13df 100644 (file)
--- a/archive-tar.c
+++ b/archive-tar.c
@@ -111,7 +111,7 @@ static void write_trailer(void)
  * queues up writes, so that all our write(2) calls write exactly one
  * full block; pads writes to RECORDSIZE
  */
-static int stream_blocked(const unsigned char *sha1)
+static int stream_blocked(const struct object_id *oid)
 {
        struct git_istream *st;
        enum object_type type;
@@ -119,9 +119,9 @@ static int stream_blocked(const unsigned char *sha1)
        char buf[BLOCKSIZE];
        ssize_t readlen;
 
-       st = open_istream(sha1, &type, &sz, NULL);
+       st = open_istream(oid, &type, &sz, NULL);
        if (!st)
-               return error("cannot stream blob %s", sha1_to_hex(sha1));
+               return error("cannot stream blob %s", oid_to_hex(oid));
        for (;;) {
                readlen = read_istream(st, buf, sizeof(buf));
                if (readlen <= 0)
@@ -218,7 +218,7 @@ static void prepare_header(struct archiver_args *args,
 }
 
 static void write_extended_header(struct archiver_args *args,
-                                 const unsigned char *sha1,
+                                 const struct object_id *oid,
                                  const void *buffer, unsigned long size)
 {
        struct ustar_header header;
@@ -226,14 +226,14 @@ static void write_extended_header(struct archiver_args *args,
        memset(&header, 0, sizeof(header));
        *header.typeflag = TYPEFLAG_EXT_HEADER;
        mode = 0100666;
-       xsnprintf(header.name, sizeof(header.name), "%s.paxheader", sha1_to_hex(sha1));
+       xsnprintf(header.name, sizeof(header.name), "%s.paxheader", oid_to_hex(oid));
        prepare_header(args, &header, mode, size);
        write_blocked(&header, sizeof(header));
        write_blocked(buffer, size);
 }
 
 static int write_tar_entry(struct archiver_args *args,
-                          const unsigned char *sha1,
+                          const struct object_id *oid,
                           const char *path, size_t pathlen,
                           unsigned int mode)
 {
@@ -257,7 +257,7 @@ static int write_tar_entry(struct archiver_args *args,
                mode = (mode | ((mode & 0100) ? 0777 : 0666)) & ~tar_umask;
        } else {
                return error("unsupported file mode: 0%o (SHA1: %s)",
-                            mode, sha1_to_hex(sha1));
+                            mode, oid_to_hex(oid));
        }
        if (pathlen > sizeof(header.name)) {
                size_t plen = get_path_prefix(path, pathlen,
@@ -268,7 +268,7 @@ static int write_tar_entry(struct archiver_args *args,
                        memcpy(header.name, path + plen + 1, rest);
                } else {
                        xsnprintf(header.name, sizeof(header.name), "%s.data",
-                                 sha1_to_hex(sha1));
+                                 oid_to_hex(oid));
                        strbuf_append_ext_header(&ext_header, "path",
                                                 path, pathlen);
                }
@@ -276,14 +276,14 @@ static int write_tar_entry(struct archiver_args *args,
                memcpy(header.name, path, pathlen);
 
        if (S_ISREG(mode) && !args->convert &&
-           sha1_object_info(sha1, &size) == OBJ_BLOB &&
+           oid_object_info(oid, &size) == OBJ_BLOB &&
            size > big_file_threshold)
                buffer = NULL;
        else if (S_ISLNK(mode) || S_ISREG(mode)) {
                enum object_type type;
-               buffer = sha1_file_to_archive(args, path, sha1, old_mode, &type, &size);
+               buffer = object_file_to_archive(args, path, oid, old_mode, &type, &size);
                if (!buffer)
-                       return error("cannot read %s", sha1_to_hex(sha1));
+                       return error("cannot read %s", oid_to_hex(oid));
        } else {
                buffer = NULL;
                size = 0;
@@ -292,7 +292,7 @@ static int write_tar_entry(struct archiver_args *args,
        if (S_ISLNK(mode)) {
                if (size > sizeof(header.linkname)) {
                        xsnprintf(header.linkname, sizeof(header.linkname),
-                                 "see %s.paxheader", sha1_to_hex(sha1));
+                                 "see %s.paxheader", oid_to_hex(oid));
                        strbuf_append_ext_header(&ext_header, "linkpath",
                                                 buffer, size);
                } else
@@ -308,7 +308,7 @@ static int write_tar_entry(struct archiver_args *args,
        prepare_header(args, &header, mode, size_in_header);
 
        if (ext_header.len > 0) {
-               write_extended_header(args, sha1, ext_header.buf,
+               write_extended_header(args, oid, ext_header.buf,
                                      ext_header.len);
        }
        strbuf_release(&ext_header);
@@ -317,7 +317,7 @@ static int write_tar_entry(struct archiver_args *args,
                if (buffer)
                        write_blocked(buffer, size);
                else
-                       err = stream_blocked(sha1);
+                       err = stream_blocked(oid);
        }
        free(buffer);
        return err;
diff --git a/archive-zip.c b/archive-zip.c
index e8913e5a26c6e97216c4b79ad96b5e3ddf906c45..6b20bce4d1cd78563037c8658dd8bd8f7690c47b 100644 (file)
--- a/archive-zip.c
+++ b/archive-zip.c
@@ -276,7 +276,7 @@ static int entry_is_binary(const char *path, const void *buffer, size_t size)
 #define STREAM_BUFFER_SIZE (1024 * 16)
 
 static int write_zip_entry(struct archiver_args *args,
-                          const unsigned char *sha1,
+                          const struct object_id *oid,
                           const char *path, size_t pathlen,
                           unsigned int mode)
 {
@@ -314,7 +314,7 @@ static int write_zip_entry(struct archiver_args *args,
 
        if (pathlen > 0xffff) {
                return error("path too long (%d chars, SHA1: %s): %s",
-                               (int)pathlen, sha1_to_hex(sha1), path);
+                               (int)pathlen, oid_to_hex(oid), path);
        }
 
        if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
@@ -325,7 +325,7 @@ static int write_zip_entry(struct archiver_args *args,
                compressed_size = 0;
                buffer = NULL;
        } else if (S_ISREG(mode) || S_ISLNK(mode)) {
-               enum object_type type = sha1_object_info(sha1, &size);
+               enum object_type type = oid_object_info(oid, &size);
 
                method = 0;
                attr2 = S_ISLNK(mode) ? ((mode | 0777) << 16) :
@@ -337,18 +337,18 @@ static int write_zip_entry(struct archiver_args *args,
 
                if (S_ISREG(mode) && type == OBJ_BLOB && !args->convert &&
                    size > big_file_threshold) {
-                       stream = open_istream(sha1, &type, &size, NULL);
+                       stream = open_istream(oid, &type, &size, NULL);
                        if (!stream)
                                return error("cannot stream blob %s",
-                                            sha1_to_hex(sha1));
+                                            oid_to_hex(oid));
                        flags |= ZIP_STREAM;
                        out = buffer = NULL;
                } else {
-                       buffer = sha1_file_to_archive(args, path, sha1, mode,
-                                                     &type, &size);
+                       buffer = object_file_to_archive(args, path, oid, mode,
+                                                       &type, &size);
                        if (!buffer)
                                return error("cannot read %s",
-                                            sha1_to_hex(sha1));
+                                            oid_to_hex(oid));
                        crc = crc32(crc, buffer, size);
                        is_binary = entry_is_binary(path_without_prefix,
                                                    buffer, size);
@@ -357,7 +357,7 @@ static int write_zip_entry(struct archiver_args *args,
                compressed_size = (method == 0) ? size : 0;
        } else {
                return error("unsupported file mode: 0%o (SHA1: %s)", mode,
-                               sha1_to_hex(sha1));
+                               oid_to_hex(oid));
        }
 
        if (creator_version > max_creator_version)
diff --git a/archive.c b/archive.c
index 0b7b62af0c3ecee10a26e9bd2d274690604ffcad..93ab175b0b4055bcfbd9334c7ccb36475c33e549 100644 (file)
--- a/archive.c
+++ b/archive.c
@@ -63,16 +63,16 @@ static void format_subst(const struct commit *commit,
        free(to_free);
 }
 
-void *sha1_file_to_archive(const struct archiver_args *args,
-                          const char *path, const unsigned char *sha1,
-                          unsigned int mode, enum object_type *type,
-                          unsigned long *sizep)
+void *object_file_to_archive(const struct archiver_args *args,
+                            const char *path, const struct object_id *oid,
+                            unsigned int mode, enum object_type *type,
+                            unsigned long *sizep)
 {
        void *buffer;
        const struct commit *commit = args->convert ? args->commit : NULL;
 
        path += args->baselen;
-       buffer = read_sha1_file(sha1, type, sizep);
+       buffer = read_object_file(oid, type, sizep);
        if (buffer && S_ISREG(mode)) {
                struct strbuf buf = STRBUF_INIT;
                size_t size = 0;
@@ -121,7 +121,7 @@ static int check_attr_export_subst(const struct attr_check *check)
        return check && ATTR_TRUE(check->items[1].value);
 }
 
-static int write_archive_entry(const unsigned char *sha1, const char *base,
+static int write_archive_entry(const struct object_id *oid, const char *base,
                int baselen, const char *filename, unsigned mode, int stage,
                void *context)
 {
@@ -153,7 +153,7 @@ static int write_archive_entry(const unsigned char *sha1, const char *base,
        if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
                if (args->verbose)
                        fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-               err = write_entry(args, sha1, path.buf, path.len, mode);
+               err = write_entry(args, oid, path.buf, path.len, mode);
                if (err)
                        return err;
                return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0);
@@ -161,7 +161,7 @@ static int write_archive_entry(const unsigned char *sha1, const char *base,
 
        if (args->verbose)
                fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-       return write_entry(args, sha1, path.buf, path.len, mode);
+       return write_entry(args, oid, path.buf, path.len, mode);
 }
 
 static void queue_directory(const unsigned char *sha1,
@@ -191,14 +191,14 @@ static int write_directory(struct archiver_context *c)
        d->path[d->len - 1] = '\0'; /* no trailing slash */
        ret =
                write_directory(c) ||
-               write_archive_entry(d->oid.hash, d->path, d->baselen,
+               write_archive_entry(&d->oid, d->path, d->baselen,
                                    d->path + d->baselen, d->mode,
                                    d->stage, c) != READ_TREE_RECURSIVE;
        free(d);
        return ret ? -1 : 0;
 }
 
-static int queue_or_write_archive_entry(const unsigned char *sha1,
+static int queue_or_write_archive_entry(const struct object_id *oid,
                struct strbuf *base, const char *filename,
                unsigned mode, int stage, void *context)
 {
@@ -224,14 +224,14 @@ static int queue_or_write_archive_entry(const unsigned char *sha1,
 
                if (check_attr_export_ignore(check))
                        return 0;
-               queue_directory(sha1, base, filename,
+               queue_directory(oid->hash, base, filename,
                                mode, stage, c);
                return READ_TREE_RECURSIVE;
        }
 
        if (write_directory(c))
                return -1;
-       return write_archive_entry(sha1, base->buf, base->len, filename, mode,
+       return write_archive_entry(oid, base->buf, base->len, filename, mode,
                                   stage, context);
 }
 
@@ -250,7 +250,7 @@ int write_archive_entries(struct archiver_args *args,
                        len--;
                if (args->verbose)
                        fprintf(stderr, "%.*s\n", (int)len, args->base);
-               err = write_entry(args, args->tree->object.oid.hash, args->base,
+               err = write_entry(args, &args->tree->object.oid, args->base,
                                  len, 040777);
                if (err)
                        return err;
@@ -303,7 +303,7 @@ static const struct archiver *lookup_archiver(const char *name)
        return NULL;
 }
 
-static int reject_entry(const unsigned char *sha1, struct strbuf *base,
+static int reject_entry(const struct object_id *oid, struct strbuf *base,
                        const char *filename, unsigned mode,
                        int stage, void *context)
 {
@@ -397,8 +397,8 @@ static void parse_treeish_arg(const char **argv,
                unsigned int mode;
                int err;
 
-               err = get_tree_entry(tree->object.oid.hash, prefix,
-                                    tree_oid.hash, &mode);
+               err = get_tree_entry(&tree->object.oid, prefix, &tree_oid,
+                                    &mode);
                if (err || !S_ISDIR(mode))
                        die("current working directory is untracked");
 
diff --git a/archive.h b/archive.h
index 62d1d82c1af0fa3bf77b32d63e9b4866f3428898..1f9954f7cdc5a1ee8036321e439a65bdfb90e59f 100644 (file)
--- a/archive.h
+++ b/archive.h
@@ -31,7 +31,7 @@ extern void init_tar_archiver(void);
 extern void init_zip_archiver(void);
 
 typedef int (*write_archive_entry_fn_t)(struct archiver_args *args,
-                                       const unsigned char *sha1,
+                                       const struct object_id *oid,
                                        const char *path, size_t pathlen,
                                        unsigned int mode);
 
@@ -39,9 +39,9 @@ extern int write_archive_entries(struct archiver_args *args, write_archive_entry
 extern int write_archive(int argc, const char **argv, const char *prefix, const char *name_hint, int remote);
 
 const char *archive_format_from_filename(const char *filename);
-extern void *sha1_file_to_archive(const struct archiver_args *args,
-                                 const char *path, const unsigned char *sha1,
-                                 unsigned int mode, enum object_type *type,
-                                 unsigned long *sizep);
+extern void *object_file_to_archive(const struct archiver_args *args,
+                                   const char *path, const struct object_id *oid,
+                                   unsigned int mode, enum object_type *type,
+                                   unsigned long *sizep);
 
 #endif /* ARCHIVE_H */
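
With this change an archiver backend's entry callback receives the object id as a struct object_id pointer. Below is a skeletal callback conforming to the write_archive_entry_fn_t typedef above; it is an illustration only (the real backends are archive-tar.c and archive-zip.c), the function name is made up, and the includes are assumed:

    #include "cache.h"
    #include "archive.h"

    /* Print each entry instead of archiving it; shows the shape of a
     * callback that matches the object_id-based typedef above. */
    static int write_entry_stub(struct archiver_args *args,
                                const struct object_id *oid,
                                const char *path, size_t pathlen,
                                unsigned int mode)
    {
            if (S_ISDIR(mode) || S_ISGITLINK(mode))
                    return 0;       /* directories carry no blob to write */
            fprintf(stderr, "%.*s %s\n", (int)pathlen, path, oid_to_hex(oid));
            return 0;
    }
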
diff --git a/bisect.c b/bisect.c
index f6d05bd66f42bd9874a08f5585ae99337d22dad5..a579b50884f8e6f8ce8390308d39b2664d050583 100644 (file)
--- a/bisect.c
+++ b/bisect.c
@@ -132,7 +132,8 @@ static void show_list(const char *debug, int counted, int nr,
                unsigned flags = commit->object.flags;
                enum object_type type;
                unsigned long size;
-               char *buf = read_sha1_file(commit->object.oid.hash, &type, &size);
+               char *buf = read_object_file(&commit->object.oid, &type,
+                                            &size);
                const char *subject_start;
                int subject_len;
 
@@ -144,10 +145,10 @@ static void show_list(const char *debug, int counted, int nr,
                        fprintf(stderr, "%3d", weight(p));
                else
                        fprintf(stderr, "---");
-               fprintf(stderr, " %.*s", 8, sha1_to_hex(commit->object.oid.hash));
+               fprintf(stderr, " %.*s", 8, oid_to_hex(&commit->object.oid));
                for (pp = commit->parents; pp; pp = pp->next)
                        fprintf(stderr, " %.*s", 8,
-                               sha1_to_hex(pp->item->object.oid.hash));
+                               oid_to_hex(&pp->item->object.oid));
 
                subject_len = find_commit_subject(buf, &subject_start);
                if (subject_len)
diff --git a/blame.c b/blame.c
index 2893f3c1030aab91a42ff9e0daf8a54ba8c3ef3c..78c9808bd1a04a4c641b0f5f853540ea7618a522 100644 (file)
--- a/blame.c
+++ b/blame.c
@@ -80,8 +80,8 @@ static void verify_working_tree_path(struct commit *work_tree, const char *path)
                struct object_id blob_oid;
                unsigned mode;
 
-               if (!get_tree_entry(commit_oid->hash, path, blob_oid.hash, &mode) &&
-                   sha1_object_info(blob_oid.hash, NULL) == OBJ_BLOB)
+               if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
+                   oid_object_info(&blob_oid, NULL) == OBJ_BLOB)
                        return;
        }
 
@@ -232,7 +232,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
        convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
        origin->file.ptr = buf.buf;
        origin->file.size = buf.len;
-       pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash);
+       pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
 
        /*
         * Read the current index, replace the path entry with
@@ -297,8 +297,8 @@ static void fill_origin_blob(struct diff_options *opt,
                    textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size))
                        ;
                else
-                       file->ptr = read_sha1_file(o->blob_oid.hash, &type,
-                                                  &file_size);
+                       file->ptr = read_object_file(&o->blob_oid, &type,
+                                                    &file_size);
                file->size = file_size;
 
                if (!file->ptr)
@@ -502,11 +502,9 @@ static int fill_blob_sha1_and_mode(struct blame_origin *origin)
 {
        if (!is_null_oid(&origin->blob_oid))
                return 0;
-       if (get_tree_entry(origin->commit->object.oid.hash,
-                          origin->path,
-                          origin->blob_oid.hash, &origin->mode))
+       if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
                goto error_out;
-       if (sha1_object_info(origin->blob_oid.hash, NULL) != OBJ_BLOB)
+       if (oid_object_info(&origin->blob_oid, NULL) != OBJ_BLOB)
                goto error_out;
        return 0;
  error_out:
@@ -998,28 +996,29 @@ unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
 }
 
 /*
- * best_so_far[] and this[] are both a split of an existing blame_entry
- * that passes blame to the parent.  Maintain best_so_far the best split
- * so far, by comparing this and best_so_far and copying this into
+ * best_so_far[] and potential[] are both a split of an existing blame_entry
+ * that passes blame to the parent.  Maintain best_so_far the best split so
+ * far, by comparing potential and best_so_far and copying potential into
  * best_so_far as needed.
  */
 static void copy_split_if_better(struct blame_scoreboard *sb,
                                 struct blame_entry *best_so_far,
-                                struct blame_entry *this)
+                                struct blame_entry *potential)
 {
        int i;
 
-       if (!this[1].suspect)
+       if (!potential[1].suspect)
                return;
        if (best_so_far[1].suspect) {
-               if (blame_entry_score(sb, &this[1]) < blame_entry_score(sb, &best_so_far[1]))
+               if (blame_entry_score(sb, &potential[1]) <
+                   blame_entry_score(sb, &best_so_far[1]))
                        return;
        }
 
        for (i = 0; i < 3; i++)
-               blame_origin_incref(this[i].suspect);
+               blame_origin_incref(potential[i].suspect);
        decref_split(best_so_far);
-       memcpy(best_so_far, this, sizeof(struct blame_entry [3]));
+       memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
 }
 
 /*
@@ -1046,12 +1045,12 @@ static void handle_split(struct blame_scoreboard *sb,
        if (ent->num_lines <= tlno)
                return;
        if (tlno < same) {
-               struct blame_entry this[3];
+               struct blame_entry potential[3];
                tlno += ent->s_lno;
                same += ent->s_lno;
-               split_overlap(this, ent, tlno, plno, same, parent);
-               copy_split_if_better(sb, split, this);
-               decref_split(this);
+               split_overlap(potential, ent, tlno, plno, same, parent);
+               copy_split_if_better(sb, split, potential);
+               decref_split(potential);
        }
 }
 
@@ -1273,7 +1272,7 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
                        struct diff_filepair *p = diff_queued_diff.queue[i];
                        struct blame_origin *norigin;
                        mmfile_t file_p;
-                       struct blame_entry this[3];
+                       struct blame_entry potential[3];
 
                        if (!DIFF_FILE_VALID(p->one))
                                continue; /* does not exist in parent */
@@ -1292,10 +1291,10 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
 
                        for (j = 0; j < num_ents; j++) {
                                find_copy_in_blob(sb, blame_list[j].ent,
-                                                 norigin, this, &file_p);
+                                                 norigin, potential, &file_p);
                                copy_split_if_better(sb, blame_list[j].split,
-                                                    this);
-                               decref_split(this);
+                                                    potential);
+                               decref_split(potential);
                        }
                        blame_origin_decref(norigin);
                }
@@ -1830,8 +1829,8 @@ void setup_scoreboard(struct blame_scoreboard *sb, const char *path, struct blam
                                    &sb->final_buf_size))
                        ;
                else
-                       sb->final_buf = read_sha1_file(o->blob_oid.hash, &type,
-                                                      &sb->final_buf_size);
+                       sb->final_buf = read_object_file(&o->blob_oid, &type,
+                                                        &sb->final_buf_size);
 
                if (!sb->final_buf)
                        die(_("cannot read blob %s for path %s"),
diff --git a/builtin.h b/builtin.h
index 42378f3aa471eb79594d96736ad2410b54d6c4dd..3f3fdfc281626d98c2ef0b9edba019ebd0deb3e5 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -215,6 +215,7 @@ extern int cmd_rev_parse(int argc, const char **argv, const char *prefix);
 extern int cmd_revert(int argc, const char **argv, const char *prefix);
 extern int cmd_rm(int argc, const char **argv, const char *prefix);
 extern int cmd_send_pack(int argc, const char **argv, const char *prefix);
+extern int cmd_serve(int argc, const char **argv, const char *prefix);
 extern int cmd_shortlog(int argc, const char **argv, const char *prefix);
 extern int cmd_show(int argc, const char **argv, const char *prefix);
 extern int cmd_show_branch(int argc, const char **argv, const char *prefix);
@@ -231,6 +232,7 @@ extern int cmd_update_ref(int argc, const char **argv, const char *prefix);
 extern int cmd_update_server_info(int argc, const char **argv, const char *prefix);
 extern int cmd_upload_archive(int argc, const char **argv, const char *prefix);
 extern int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix);
+extern int cmd_upload_pack(int argc, const char **argv, const char *prefix);
 extern int cmd_var(int argc, const char **argv, const char *prefix);
 extern int cmd_verify_commit(int argc, const char **argv, const char *prefix);
 extern int cmd_verify_tag(int argc, const char **argv, const char *prefix);
diff --git a/builtin/add.c b/builtin/add.c
index bf01d89e28eaef0f8113d42c6b9d4142b8e912e4..9ef7fb02d56aac94d104b50aed7d7dfda09cfc98 100644 (file)
--- a/builtin/add.c
+++ b/builtin/add.c
@@ -294,7 +294,7 @@ static struct option builtin_add_options[] = {
        OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")),
        OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")),
        OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")),
-       OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files")),
+       OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0),
        OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")),
        OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")),
        OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")),
@@ -534,10 +534,9 @@ int cmd_add(int argc, const char **argv, const char *prefix)
        unplug_bulk_checkin();
 
 finish:
-       if (active_cache_changed) {
-               if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
-                       die(_("Unable to write new index file"));
-       }
+       if (write_locked_index(&the_index, &lock_file,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
+               die(_("Unable to write new index file"));
 
        UNLEAK(pathspec);
        UNLEAK(dir);
diff --git a/builtin/am.c b/builtin/am.c
index 47beddbe24040d81dea28a61d9e7f7bd0e791653..9c82603f70ff9605aa25c78023355b94860a7ce0 100644 (file)
--- a/builtin/am.c
+++ b/builtin/am.c
@@ -1011,6 +1011,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format,
 
        if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
                die_errno(_("failed to create directory '%s'"), state->dir);
+       delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
 
        if (split_mail(state, patch_format, paths, keep_cr) < 0) {
                am_destroy(state);
@@ -1110,6 +1111,7 @@ static void am_next(struct am_state *state)
 
        oidclr(&state->orig_commit);
        unlink(am_path(state, "original-commit"));
+       delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
 
        if (!get_oid("HEAD", &head))
                write_state_text(state, "abort-safety", oid_to_hex(&head));
@@ -1441,6 +1443,8 @@ static int parse_mail_rebase(struct am_state *state, const char *mail)
 
        oidcpy(&state->orig_commit, &commit_oid);
        write_state_text(state, "original-commit", oid_to_hex(&commit_oid));
+       update_ref("am", "REBASE_HEAD", &commit_oid,
+                  NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
 
        return 0;
 }
@@ -1546,7 +1550,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
        discard_cache();
        read_cache_from(index_path);
 
-       if (write_index_as_tree(orig_tree.hash, &the_index, index_path, 0, NULL))
+       if (write_index_as_tree(&orig_tree, &the_index, index_path, 0, NULL))
                return error(_("Repository lacks necessary blobs to fall back on 3-way merge."));
 
        say(state, stdout, _("Using index info to reconstruct a base tree..."));
@@ -1571,7 +1575,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
                return error(_("Did you hand edit your patch?\n"
                                "It does not apply to blobs recorded in its index."));
 
-       if (write_index_as_tree(their_tree.hash, &the_index, index_path, 0, NULL))
+       if (write_index_as_tree(&their_tree, &the_index, index_path, 0, NULL))
                return error("could not write tree");
 
        say(state, stdout, _("Falling back to patching base and 3-way merge..."));
@@ -1622,7 +1626,7 @@ static void do_commit(const struct am_state *state)
        if (run_hook_le(NULL, "pre-applypatch", NULL))
                exit(1);
 
-       if (write_cache_as_tree(tree.hash, 0, NULL))
+       if (write_cache_as_tree(&tree, 0, NULL))
                die(_("git write-tree failed to write a tree"));
 
        if (!get_oid_commit("HEAD", &parent)) {
@@ -1641,8 +1645,8 @@ static void do_commit(const struct am_state *state)
                setenv("GIT_COMMITTER_DATE",
                        state->ignore_date ? "" : state->author_date, 1);
 
-       if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash,
-                               author, state->sign_commit))
+       if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit,
+                       author, state->sign_commit))
                die(_("failed to write commit object"));
 
        reflog_msg = getenv("GIT_REFLOG_ACTION");
@@ -1831,8 +1835,7 @@ static void am_run(struct am_state *state, int resume)
                        git_config_get_bool("advice.amworkdir", &advice_amworkdir);
 
                        if (advice_amworkdir)
-                               printf_ln(_("The copy of the patch that failed is found in: %s"),
-                                               am_path(state, "patch"));
+                               printf_ln(_("Use 'git am --show-current-patch' to see the failed patch"));
 
                        die_user_resolve(state);
                }
@@ -2001,7 +2004,7 @@ static int clean_index(const struct object_id *head, const struct object_id *rem
        if (fast_forward_to(head_tree, head_tree, 1))
                return -1;
 
-       if (write_cache_as_tree(index.hash, 0, NULL))
+       if (write_cache_as_tree(&index, 0, NULL))
                return -1;
 
        index_tree = parse_tree_indirect(&index);
@@ -2121,6 +2124,34 @@ static void am_abort(struct am_state *state)
        am_destroy(state);
 }
 
+static int show_patch(struct am_state *state)
+{
+       struct strbuf sb = STRBUF_INIT;
+       const char *patch_path;
+       int len;
+
+       if (!is_null_oid(&state->orig_commit)) {
+               const char *av[4] = { "show", NULL, "--", NULL };
+               char *new_oid_str;
+               int ret;
+
+               av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit));
+               ret = run_command_v_opt(av, RUN_GIT_CMD);
+               free(new_oid_str);
+               return ret;
+       }
+
+       patch_path = am_path(state, msgnum(state));
+       len = strbuf_read_file(&sb, patch_path, 0);
+       if (len < 0)
+               die_errno(_("failed to read '%s'"), patch_path);
+
+       setup_pager();
+       write_in_full(1, sb.buf, sb.len);
+       strbuf_release(&sb);
+       return 0;
+}
+
 /**
  * parse_options() callback that validates and sets opt->value to the
  * PATCH_FORMAT_* enum value corresponding to `arg`.
@@ -2149,7 +2180,9 @@ enum resume_mode {
        RESUME_APPLY,
        RESUME_RESOLVED,
        RESUME_SKIP,
-       RESUME_ABORT
+       RESUME_ABORT,
+       RESUME_QUIT,
+       RESUME_SHOW_PATCH
 };
 
 static int git_am_config(const char *k, const char *v, void *cb)
@@ -2171,6 +2204,7 @@ int cmd_am(int argc, const char **argv, const char *prefix)
        int patch_format = PATCH_FORMAT_UNKNOWN;
        enum resume_mode resume = RESUME_FALSE;
        int in_progress;
+       int ret = 0;
 
        const char * const usage[] = {
                N_("git am [<options>] [(<mbox> | <Maildir>)...]"),
@@ -2249,6 +2283,12 @@ int cmd_am(int argc, const char **argv, const char *prefix)
                OPT_CMDMODE(0, "abort", &resume,
                        N_("restore the original branch and abort the patching operation."),
                        RESUME_ABORT),
+               OPT_CMDMODE(0, "quit", &resume,
+                       N_("abort the patching operation but keep HEAD where it is."),
+                       RESUME_QUIT),
+               OPT_CMDMODE(0, "show-current-patch", &resume,
+                       N_("show the patch being applied."),
+                       RESUME_SHOW_PATCH),
                OPT_BOOL(0, "committer-date-is-author-date",
                        &state.committer_date_is_author_date,
                        N_("lie about committer date")),
@@ -2317,7 +2357,7 @@ int cmd_am(int argc, const char **argv, const char *prefix)
                 * stray directories.
                 */
                if (file_exists(state.dir) && !state.rebasing) {
-                       if (resume == RESUME_ABORT) {
+                       if (resume == RESUME_ABORT || resume == RESUME_QUIT) {
                                am_destroy(&state);
                                am_state_release(&state);
                                return 0;
@@ -2359,11 +2399,18 @@ int cmd_am(int argc, const char **argv, const char *prefix)
        case RESUME_ABORT:
                am_abort(&state);
                break;
+       case RESUME_QUIT:
+               am_rerere_clear();
+               am_destroy(&state);
+               break;
+       case RESUME_SHOW_PATCH:
+               ret = show_patch(&state);
+               break;
        default:
                die("BUG: invalid resume value");
        }
 
        am_state_release(&state);
 
-       return 0;
+       return ret;
 }
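
The new --show-current-patch mode above re-invokes a git builtin through run_command_v_opt(). Here is a minimal sketch of that same pattern, assuming it is compiled inside the git source tree; the wrapper name is made up for illustration and is not part of the patch:

    #include "cache.h"
    #include "run-command.h"

    /*
     * Sketch only: spawn "git show <oid>" the way the new show_patch()
     * helper does when am has recorded the original commit.
     */
    static int show_object_with_git_show(const struct object_id *oid)
    {
    	const char *av[4] = { "show", NULL, "--", NULL };
    	char *hex = xstrdup(oid_to_hex(oid));
    	int ret;

    	av[1] = hex;
    	ret = run_command_v_opt(av, RUN_GIT_CMD); /* run "git show <hex> --" */
    	free(hex);
    	return ret;
    }

cmd_commit() further down uses the same helper to run "gc --auto" after a successful commit.
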
index f863465a0fa137f3446071a0ec1653adad536fdc..73971d0dd20e7233d6cb1e6c16986ede25526939 100644 (file)
@@ -55,7 +55,7 @@ static int run_remote_archiver(int argc, const char **argv,
 
        buf = packet_read_line(fd[0], NULL);
        if (!buf)
-               die(_("git archive: expected ACK/NAK, got EOF"));
+               die(_("git archive: expected ACK/NAK, got a flush packet"));
        if (strcmp(buf, "ACK")) {
                if (starts_with(buf, "NACK "))
                        die(_("git archive: NACK %s"), buf + 5);
index 005f55aaa257fc34f81517650223a3c195c03178..db38c0b307c5719ab3bd5e6b8597ec8810c0de3d 100644 (file)
@@ -499,7 +499,7 @@ static int read_ancestry(const char *graft_file)
 
 static int update_auto_abbrev(int auto_abbrev, struct blame_origin *suspect)
 {
-       const char *uniq = find_unique_abbrev(suspect->commit->object.oid.hash,
+       const char *uniq = find_unique_abbrev(&suspect->commit->object.oid,
                                              auto_abbrev);
        int len = strlen(uniq);
        if (auto_abbrev < len)
@@ -649,6 +649,15 @@ static int blame_move_callback(const struct option *option, const char *arg, int
        return 0;
 }
 
+static int is_a_rev(const char *name)
+{
+       struct object_id oid;
+
+       if (get_oid(name, &oid))
+               return 0;
+       return OBJ_NONE < oid_object_info(&oid, NULL);
+}
+
 int cmd_blame(int argc, const char **argv, const char *prefix)
 {
        struct rev_info revs;
@@ -720,6 +729,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        for (;;) {
                switch (parse_options_step(&ctx, options, blame_opt_usage)) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_DONE:
                        if (ctx.argv[0])
@@ -845,16 +855,15 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        } else {
                if (argc < 2)
                        usage_with_options(blame_opt_usage, options);
-               path = add_prefix(prefix, argv[argc - 1]);
-               if (argc == 3 && !file_exists(path)) { /* (2b) */
+               if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */
                        path = add_prefix(prefix, argv[1]);
                        argv[1] = argv[2];
+               } else {        /* (2a) */
+                       if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree())
+                               die("missing <path> to blame");
+                       path = add_prefix(prefix, argv[argc - 1]);
                }
                argv[argc - 1] = "--";
-
-               setup_work_tree();
-               if (!file_exists(path))
-                       die_errno("cannot stat path '%s'", path);
        }
 
        revs.disable_stdin = 1;
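
blame's argument disambiguation now asks the object database instead of the filesystem: is_a_rev() treats an argument as a revision only if get_oid() resolves it and oid_object_info() reports a real object type. A small sketch of that probe, assuming the git tree; the helper name is illustrative:

    #include "cache.h"

    /*
     * Sketch: classify a command-line argument the way blame's new
     * is_a_rev() does, resolving it with get_oid() and asking
     * oid_object_info() for its type without reading the object data.
     */
    static const char *classify_arg(const char *name)
    {
    	struct object_id oid;
    	int type;

    	if (get_oid(name, &oid))
    		return "not an object name";   /* probably a path */
    	type = oid_object_info(&oid, NULL);    /* NULL: type only, no size */
    	return type <= OBJ_NONE ? "unknown object" : type_name(type);
    }
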
index 8dcc2ed058be6e653f885e48c9aa5f465a5f9749..0ac5e831f06694c48a2853cbe35ad2e33340ba16 100644 (file)
@@ -273,7 +273,7 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
                               bname.buf,
                               (flags & REF_ISBROKEN) ? "broken"
                               : (flags & REF_ISSYMREF) ? target
-                              : find_unique_abbrev(oid.hash, DEFAULT_ABBREV));
+                              : find_unique_abbrev(&oid, DEFAULT_ABBREV));
                }
                delete_branch_config(bname.buf);
 
@@ -570,6 +570,15 @@ static int edit_branch_description(const char *branch_name)
        return 0;
 }
 
+static int deprecated_reflog_option_cb(const struct option *opt,
+                                      const char *arg, int unset)
+{
+       warning("the '-l' alias for '--create-reflog' is deprecated;");
+       warning("it will be removed in a future version of Git");
+       *(int *)opt->value = !unset;
+       return 0;
+}
+
 int cmd_branch(int argc, const char **argv, const char *prefix)
 {
        int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
@@ -612,10 +621,16 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
                OPT_BIT('c', "copy", &copy, N_("copy a branch and its reflog"), 1),
                OPT_BIT('C', NULL, &copy, N_("copy a branch, even if target exists"), 2),
                OPT_BOOL(0, "list", &list, N_("list branch names")),
-               OPT_BOOL('l', "create-reflog", &reflog, N_("create the branch's reflog")),
+               OPT_BOOL(0, "create-reflog", &reflog, N_("create the branch's reflog")),
+               {
+                       OPTION_CALLBACK, 'l', NULL, &reflog, NULL,
+                       N_("deprecated synonym for --create-reflog"),
+                       PARSE_OPT_NOARG | PARSE_OPT_HIDDEN,
+                       deprecated_reflog_option_cb
+               },
                OPT_BOOL(0, "edit-description", &edit_description,
                         N_("edit the description for the branch")),
-               OPT__FORCE(&force, N_("force creation, move/rename, deletion")),
+               OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE),
                OPT_MERGED(&filter, N_("print only branches that are merged")),
                OPT_NO_MERGED(&filter, N_("print only branches that are not merged")),
                OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")),
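
The '-l' change above is the standard deprecation pattern: keep the short letter as a hidden OPTION_CALLBACK that warns and then behaves like the long option it used to alias. A sketch with made-up option names, assuming the git tree:

    #include "cache.h"
    #include "parse-options.h"

    static int example_flag;

    /* Sketch: warn, then act like the boolean option the alias stands for. */
    static int deprecated_alias_cb(const struct option *opt,
    			       const char *arg, int unset)
    {
    	warning("'-x' is deprecated; use '--example' instead");
    	*(int *)opt->value = !unset;
    	return 0;
    }

    static struct option example_options[] = {
    	OPT_BOOL(0, "example", &example_flag, N_("the real option")),
    	{
    		OPTION_CALLBACK, 'x', NULL, &example_flag, NULL,
    		N_("deprecated synonym for --example"),
    		PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, /* no argument, hidden from -h */
    		deprecated_alias_cb
    	},
    	OPT_END()
    	/* in a cmd_*(): argc = parse_options(argc, argv, prefix,
    	 *                                    example_options, usage_text, 0); */
    };

PARSE_OPT_HIDDEN keeps the alias out of the help output, while PARSE_OPT_NOCOMPLETE (used for --force above) keeps risky options out of shell completion.
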
index cf9ea5c79682d2e345fd6f5db9c386e98300fccd..2c46d257cd9a09a8f8ff05820b53224394385c7d 100644 (file)
@@ -32,7 +32,7 @@ static int filter_object(const char *path, unsigned mode,
 {
        enum object_type type;
 
-       *buf = read_sha1_file(oid->hash, &type, size);
+       *buf = read_object_file(oid, &type, size);
        if (!*buf)
                return error(_("cannot read object %s '%s'"),
                             oid_to_hex(oid), path);
@@ -76,8 +76,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        buf = NULL;
        switch (opt) {
        case 't':
-               oi.typename = &sb;
-               if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
+               oi.type_name = &sb;
+               if (oid_object_info_extended(&oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                if (sb.len) {
                        printf("%s\n", sb.buf);
@@ -88,7 +88,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
 
        case 's':
                oi.sizep = &size;
-               if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
+               if (oid_object_info_extended(&oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                printf("%lu\n", size);
                return 0;
@@ -116,7 +116,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                /* else fallthrough */
 
        case 'p':
-               type = sha1_object_info(oid.hash, NULL);
+               type = oid_object_info(&oid, NULL);
                if (type < 0)
                        die("Not a valid object name %s", obj_name);
 
@@ -130,7 +130,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
 
                if (type == OBJ_BLOB)
                        return stream_blob_to_fd(1, &oid, NULL, 0);
-               buf = read_sha1_file(oid.hash, &type, &size);
+               buf = read_object_file(&oid, &type, &size);
                if (!buf)
                        die("Cannot read object %s", obj_name);
 
@@ -140,8 +140,9 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        case 0:
                if (type_from_string(exp_type) == OBJ_BLOB) {
                        struct object_id blob_oid;
-                       if (sha1_object_info(oid.hash, NULL) == OBJ_TAG) {
-                               char *buffer = read_sha1_file(oid.hash, &type, &size);
+                       if (oid_object_info(&oid, NULL) == OBJ_TAG) {
+                               char *buffer = read_object_file(&oid, &type,
+                                                               &size);
                                const char *target;
                                if (!skip_prefix(buffer, "object ", &target) ||
                                    get_oid_hex(target, &blob_oid))
@@ -150,7 +151,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                        } else
                                oidcpy(&blob_oid, &oid);
 
-                       if (sha1_object_info(blob_oid.hash, NULL) == OBJ_BLOB)
+                       if (oid_object_info(&blob_oid, NULL) == OBJ_BLOB)
                                return stream_blob_to_fd(1, &blob_oid, NULL, 0);
                        /*
                         * we attempted to dereference a tag to a blob
@@ -159,7 +160,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                         * fall-back to the usual case.
                         */
                }
-               buf = read_object_with_reference(oid.hash, exp_type, &size, NULL);
+               buf = read_object_with_reference(&oid, exp_type, &size, NULL);
                break;
 
        default:
@@ -229,7 +230,7 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len,
                if (data->mark_query)
                        data->info.typep = &data->type;
                else
-                       strbuf_addstr(sb, typename(data->type));
+                       strbuf_addstr(sb, type_name(data->type));
        } else if (is_atom("objectsize", atom, len)) {
                if (data->mark_query)
                        data->info.sizep = &data->size;
@@ -304,8 +305,9 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                                enum object_type type;
                                if (!textconv_object(data->rest, 0100644, oid,
                                                     1, &contents, &size))
-                                       contents = read_sha1_file(oid->hash, &type,
-                                                                 &size);
+                                       contents = read_object_file(oid,
+                                                                   &type,
+                                                                   &size);
                                if (!contents)
                                        die("could not convert '%s' %s",
                                            oid_to_hex(oid), data->rest);
@@ -321,7 +323,7 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                unsigned long size;
                void *contents;
 
-               contents = read_sha1_file(oid->hash, &type, &size);
+               contents = read_object_file(oid, &type, &size);
                if (!contents)
                        die("object %s disappeared", oid_to_hex(oid));
                if (type != data->type)
@@ -340,8 +342,8 @@ static void batch_object_write(const char *obj_name, struct batch_options *opt,
        struct strbuf buf = STRBUF_INIT;
 
        if (!data->skip_object_info &&
-           sha1_object_info_extended(data->oid.hash, &data->info,
-                                     OBJECT_INFO_LOOKUP_REPLACE) < 0) {
+           oid_object_info_extended(&data->oid, &data->info,
+                                    OBJECT_INFO_LOOKUP_REPLACE) < 0) {
                printf("%s missing\n",
                       obj_name ? obj_name : oid_to_hex(&data->oid));
                fflush(stdout);
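
cat-file shows the mechanical half of the refactor: read_sha1_file() and sha1_object_info*() become read_object_file() and oid_object_info*() taking struct object_id pointers, and typename() becomes type_name(). A minimal sketch of reading one object with the new spellings, assuming the git tree; dump_object is an illustrative name:

    #include "cache.h"

    /* Sketch: print type and size, then stream the raw payload to stdout. */
    static int dump_object(const struct object_id *oid)
    {
    	enum object_type type;
    	unsigned long size;
    	void *buf = read_object_file(oid, &type, &size);

    	if (!buf)
    		return error("cannot read object %s", oid_to_hex(oid));
    	printf("%s %lu\n", type_name(type), size);
    	write_in_full(1, buf, size);
    	free(buf);
    	return 0;
    }
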
index 3e280b9c7aa9c93c8e7572a4fe3f7ef3ac92b3af..ec9a959e08d0e3dfa0d2b08529cacbe8d2e04c6e 100644 (file)
@@ -72,7 +72,7 @@ static int check_ignore(struct dir_struct *dir,
 {
        const char *full_path;
        char *seen;
-       int num_ignored = 0, dtype = DT_UNKNOWN, i;
+       int num_ignored = 0, i;
        struct exclude *exclude;
        struct pathspec pathspec;
 
@@ -104,6 +104,7 @@ static int check_ignore(struct dir_struct *dir,
                full_path = pathspec.items[i].match;
                exclude = NULL;
                if (!seen[i]) {
+                       int dtype = DT_UNKNOWN;
                        exclude = last_exclude_matching(dir, &the_index,
                                                        full_path, &dtype);
                }
index b0e78b819db3138f1c5ac4fe4b5d850ab306b470..a730f6a1aa47a60d4c131ee47076411a129d8080 100644 (file)
@@ -157,7 +157,7 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
        struct option builtin_checkout_index_options[] = {
                OPT_BOOL('a', "all", &all,
                        N_("check out all files in the index")),
-               OPT__FORCE(&force, N_("force overwrite of existing files")),
+               OPT__FORCE(&force, N_("force overwrite of existing files"), 0),
                OPT__QUIET(&quiet,
                        N_("no warning for existing files and files not in index")),
                OPT_BOOL('n', "no-create", &not_new,
index c54c78df547c8c66377f023730958f7a95d3aea1..b49b5820718335ba6a70b70ef339ece7157281cc 100644 (file)
@@ -54,19 +54,19 @@ struct checkout_opts {
        struct tree *source_tree;
 };
 
-static int post_checkout_hook(struct commit *old, struct commit *new,
+static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit,
                              int changed)
 {
        return run_hook_le(NULL, "post-checkout",
-                          oid_to_hex(old ? &old->object.oid : &null_oid),
-                          oid_to_hex(new ? &new->object.oid : &null_oid),
+                          oid_to_hex(old_commit ? &old_commit->object.oid : &null_oid),
+                          oid_to_hex(new_commit ? &new_commit->object.oid : &null_oid),
                           changed ? "1" : "0", NULL);
-       /* "new" can be NULL when checking out from the index before
+       /* "new_commit" can be NULL when checking out from the index before
           a commit exists. */
 
 }
 
-static int update_some(const unsigned char *sha1, struct strbuf *base,
+static int update_some(const struct object_id *oid, struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
        int len;
@@ -78,7 +78,7 @@ static int update_some(const unsigned char *sha1, struct strbuf *base,
 
        len = base->len + strlen(pathname);
        ce = xcalloc(1, cache_entry_size(len));
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        memcpy(ce->name, base->buf, base->len);
        memcpy(ce->name + base->len, pathname, len - base->len);
        ce->ce_flags = create_ce_flags(0) | CE_UPDATE;
@@ -227,8 +227,7 @@ static int checkout_merged(int pos, const struct checkout *state)
         * (it also writes the merge result to the object database even
         * when it may contain conflicts).
         */
-       if (write_sha1_file(result_buf.ptr, result_buf.size,
-                           blob_type, oid.hash))
+       if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
                die(_("Unable to add merge result for '%s'"), path);
        free(result_buf.ptr);
        ce = make_cache_entry(mode, oid.hash, path, 2, 0);
@@ -406,10 +405,10 @@ static void describe_detached_head(const char *msg, struct commit *commit)
                pp_commit_easy(CMIT_FMT_ONELINE, commit, &sb);
        if (print_sha1_ellipsis()) {
                fprintf(stderr, "%s %s... %s\n", msg,
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV), sb.buf);
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
        } else {
                fprintf(stderr, "%s %s %s\n", msg,
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV), sb.buf);
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
        }
        strbuf_release(&sb);
 }
@@ -472,8 +471,8 @@ static void setup_branch_path(struct branch_info *branch)
 }
 
 static int merge_working_tree(const struct checkout_opts *opts,
-                             struct branch_info *old,
-                             struct branch_info *new,
+                             struct branch_info *old_branch_info,
+                             struct branch_info *new_branch_info,
                              int *writeout_error)
 {
        int ret;
@@ -485,7 +484,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
 
        resolve_undo_clear();
        if (opts->force) {
-               ret = reset_tree(new->commit->tree, opts, 1, writeout_error);
+               ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error);
                if (ret)
                        return ret;
        } else {
@@ -511,7 +510,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
                topts.initial_checkout = is_cache_unborn();
                topts.update = 1;
                topts.merge = 1;
-               topts.gently = opts->merge && old->commit;
+               topts.gently = opts->merge && old_branch_info->commit;
                topts.verbose_update = opts->show_progress;
                topts.fn = twoway_merge;
                if (opts->overwrite_ignore) {
@@ -519,11 +518,11 @@ static int merge_working_tree(const struct checkout_opts *opts,
                        topts.dir->flags |= DIR_SHOW_IGNORED;
                        setup_standard_excludes(topts.dir);
                }
-               tree = parse_tree_indirect(old->commit ?
-                                          &old->commit->object.oid :
+               tree = parse_tree_indirect(old_branch_info->commit ?
+                                          &old_branch_info->commit->object.oid :
                                           the_hash_algo->empty_tree);
                init_tree_desc(&trees[0], tree->buffer, tree->size);
-               tree = parse_tree_indirect(&new->commit->object.oid);
+               tree = parse_tree_indirect(&new_branch_info->commit->object.oid);
                init_tree_desc(&trees[1], tree->buffer, tree->size);
 
                ret = unpack_trees(2, trees, &topts);
@@ -540,10 +539,10 @@ static int merge_working_tree(const struct checkout_opts *opts,
                                return 1;
 
                        /*
-                        * Without old->commit, the below is the same as
+                        * Without old_branch_info->commit, the below is the same as
                         * the two-tree unpack we already tried and failed.
                         */
-                       if (!old->commit)
+                       if (!old_branch_info->commit)
                                return 1;
 
                        /* Do more real merge */
@@ -571,18 +570,18 @@ static int merge_working_tree(const struct checkout_opts *opts,
                        o.verbosity = 0;
                        work = write_tree_from_memory(&o);
 
-                       ret = reset_tree(new->commit->tree, opts, 1,
+                       ret = reset_tree(new_branch_info->commit->tree, opts, 1,
                                         writeout_error);
                        if (ret)
                                return ret;
-                       o.ancestor = old->name;
-                       o.branch1 = new->name;
+                       o.ancestor = old_branch_info->name;
+                       o.branch1 = new_branch_info->name;
                        o.branch2 = "local";
-                       ret = merge_trees(&o, new->commit->tree, work,
-                               old->commit->tree, &result);
+                       ret = merge_trees(&o, new_branch_info->commit->tree, work,
+                               old_branch_info->commit->tree, &result);
                        if (ret < 0)
                                exit(128);
-                       ret = reset_tree(new->commit->tree, opts, 0,
+                       ret = reset_tree(new_branch_info->commit->tree, opts, 0,
                                         writeout_error);
                        strbuf_release(&o.obuf);
                        if (ret)
@@ -600,25 +599,25 @@ static int merge_working_tree(const struct checkout_opts *opts,
                die(_("unable to write new index file"));
 
        if (!opts->force && !opts->quiet)
-               show_local_changes(&new->commit->object, &opts->diff_options);
+               show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
 
        return 0;
 }
 
-static void report_tracking(struct branch_info *new)
+static void report_tracking(struct branch_info *new_branch_info)
 {
        struct strbuf sb = STRBUF_INIT;
-       struct branch *branch = branch_get(new->name);
+       struct branch *branch = branch_get(new_branch_info->name);
 
-       if (!format_tracking_info(branch, &sb))
+       if (!format_tracking_info(branch, &sb, AHEAD_BEHIND_FULL))
                return;
        fputs(sb.buf, stdout);
        strbuf_release(&sb);
 }
 
 static void update_refs_for_switch(const struct checkout_opts *opts,
-                                  struct branch_info *old,
-                                  struct branch_info *new)
+                                  struct branch_info *old_branch_info,
+                                  struct branch_info *new_branch_info)
 {
        struct strbuf msg = STRBUF_INIT;
        const char *old_desc, *reflog_msg;
@@ -645,69 +644,69 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
                        free(refname);
                }
                else
-                       create_branch(opts->new_branch, new->name,
+                       create_branch(opts->new_branch, new_branch_info->name,
                                      opts->new_branch_force ? 1 : 0,
                                      opts->new_branch_force ? 1 : 0,
                                      opts->new_branch_log,
                                      opts->quiet,
                                      opts->track);
-               new->name = opts->new_branch;
-               setup_branch_path(new);
+               new_branch_info->name = opts->new_branch;
+               setup_branch_path(new_branch_info);
        }
 
-       old_desc = old->name;
-       if (!old_desc && old->commit)
-               old_desc = oid_to_hex(&old->commit->object.oid);
+       old_desc = old_branch_info->name;
+       if (!old_desc && old_branch_info->commit)
+               old_desc = oid_to_hex(&old_branch_info->commit->object.oid);
 
        reflog_msg = getenv("GIT_REFLOG_ACTION");
        if (!reflog_msg)
                strbuf_addf(&msg, "checkout: moving from %s to %s",
-                       old_desc ? old_desc : "(invalid)", new->name);
+                       old_desc ? old_desc : "(invalid)", new_branch_info->name);
        else
                strbuf_insert(&msg, 0, reflog_msg, strlen(reflog_msg));
 
-       if (!strcmp(new->name, "HEAD") && !new->path && !opts->force_detach) {
+       if (!strcmp(new_branch_info->name, "HEAD") && !new_branch_info->path && !opts->force_detach) {
                /* Nothing to do. */
-       } else if (opts->force_detach || !new->path) {  /* No longer on any branch. */
-               update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL,
+       } else if (opts->force_detach || !new_branch_info->path) {      /* No longer on any branch. */
+               update_ref(msg.buf, "HEAD", &new_branch_info->commit->object.oid, NULL,
                           REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
                if (!opts->quiet) {
-                       if (old->path &&
+                       if (old_branch_info->path &&
                            advice_detached_head && !opts->force_detach)
-                               detach_advice(new->name);
-                       describe_detached_head(_("HEAD is now at"), new->commit);
+                               detach_advice(new_branch_info->name);
+                       describe_detached_head(_("HEAD is now at"), new_branch_info->commit);
                }
-       } else if (new->path) { /* Switch branches. */
-               if (create_symref("HEAD", new->path, msg.buf) < 0)
+       } else if (new_branch_info->path) {     /* Switch branches. */
+               if (create_symref("HEAD", new_branch_info->path, msg.buf) < 0)
                        die(_("unable to update HEAD"));
                if (!opts->quiet) {
-                       if (old->path && !strcmp(new->path, old->path)) {
+                       if (old_branch_info->path && !strcmp(new_branch_info->path, old_branch_info->path)) {
                                if (opts->new_branch_force)
                                        fprintf(stderr, _("Reset branch '%s'\n"),
-                                               new->name);
+                                               new_branch_info->name);
                                else
                                        fprintf(stderr, _("Already on '%s'\n"),
-                                               new->name);
+                                               new_branch_info->name);
                        } else if (opts->new_branch) {
                                if (opts->branch_exists)
-                                       fprintf(stderr, _("Switched to and reset branch '%s'\n"), new->name);
+                                       fprintf(stderr, _("Switched to and reset branch '%s'\n"), new_branch_info->name);
                                else
-                                       fprintf(stderr, _("Switched to a new branch '%s'\n"), new->name);
+                                       fprintf(stderr, _("Switched to a new branch '%s'\n"), new_branch_info->name);
                        } else {
                                fprintf(stderr, _("Switched to branch '%s'\n"),
-                                       new->name);
+                                       new_branch_info->name);
                        }
                }
-               if (old->path && old->name) {
-                       if (!ref_exists(old->path) && reflog_exists(old->path))
-                               delete_reflog(old->path);
+               if (old_branch_info->path && old_branch_info->name) {
+                       if (!ref_exists(old_branch_info->path) && reflog_exists(old_branch_info->path))
+                               delete_reflog(old_branch_info->path);
                }
        }
        remove_branch_state();
        strbuf_release(&msg);
        if (!opts->quiet &&
-           (new->path || (!opts->force_detach && !strcmp(new->name, "HEAD"))))
-               report_tracking(new);
+           (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+               report_tracking(new_branch_info);
 }
 
 static int add_pending_uninteresting_ref(const char *refname,
@@ -721,7 +720,7 @@ static int add_pending_uninteresting_ref(const char *refname,
 static void describe_one_orphan(struct strbuf *sb, struct commit *commit)
 {
        strbuf_addstr(sb, "  ");
-       strbuf_add_unique_abbrev(sb, commit->object.oid.hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(sb, &commit->object.oid, DEFAULT_ABBREV);
        strbuf_addch(sb, ' ');
        if (!parse_commit(commit))
                pp_commit_easy(CMIT_FMT_ONELINE, commit, sb);
@@ -779,7 +778,7 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs)
                        " git branch <new-branch-name> %s\n\n",
                        /* Give ngettext() the count */
                        lost),
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
 }
 
 /*
@@ -787,10 +786,10 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs)
  * HEAD.  If it is not reachable from any ref, this is the last chance
  * for the user to do so without resorting to reflog.
  */
-static void orphaned_commit_warning(struct commit *old, struct commit *new)
+static void orphaned_commit_warning(struct commit *old_commit, struct commit *new_commit)
 {
        struct rev_info revs;
-       struct object *object = &old->object;
+       struct object *object = &old_commit->object;
 
        init_revisions(&revs, NULL);
        setup_revisions(0, NULL, &revs, NULL);
@@ -799,57 +798,57 @@ static void orphaned_commit_warning(struct commit *old, struct commit *new)
        add_pending_object(&revs, object, oid_to_hex(&object->oid));
 
        for_each_ref(add_pending_uninteresting_ref, &revs);
-       add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+       add_pending_oid(&revs, "HEAD", &new_commit->object.oid, UNINTERESTING);
 
        if (prepare_revision_walk(&revs))
                die(_("internal error in revision walk"));
-       if (!(old->object.flags & UNINTERESTING))
-               suggest_reattach(old, &revs);
+       if (!(old_commit->object.flags & UNINTERESTING))
+               suggest_reattach(old_commit, &revs);
        else
-               describe_detached_head(_("Previous HEAD position was"), old);
+               describe_detached_head(_("Previous HEAD position was"), old_commit);
 
        /* Clean up objects used, as they will be reused. */
        clear_commit_marks_all(ALL_REV_FLAGS);
 }
 
 static int switch_branches(const struct checkout_opts *opts,
-                          struct branch_info *new)
+                          struct branch_info *new_branch_info)
 {
        int ret = 0;
-       struct branch_info old;
+       struct branch_info old_branch_info;
        void *path_to_free;
        struct object_id rev;
        int flag, writeout_error = 0;
-       memset(&old, 0, sizeof(old));
-       old.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
-       if (old.path)
-               old.commit = lookup_commit_reference_gently(&rev, 1);
+       memset(&old_branch_info, 0, sizeof(old_branch_info));
+       old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
+       if (old_branch_info.path)
+               old_branch_info.commit = lookup_commit_reference_gently(&rev, 1);
        if (!(flag & REF_ISSYMREF))
-               old.path = NULL;
+               old_branch_info.path = NULL;
 
-       if (old.path)
-               skip_prefix(old.path, "refs/heads/", &old.name);
+       if (old_branch_info.path)
+               skip_prefix(old_branch_info.path, "refs/heads/", &old_branch_info.name);
 
-       if (!new->name) {
-               new->name = "HEAD";
-               new->commit = old.commit;
-               if (!new->commit)
+       if (!new_branch_info->name) {
+               new_branch_info->name = "HEAD";
+               new_branch_info->commit = old_branch_info.commit;
+               if (!new_branch_info->commit)
                        die(_("You are on a branch yet to be born"));
-               parse_commit_or_die(new->commit);
+               parse_commit_or_die(new_branch_info->commit);
        }
 
-       ret = merge_working_tree(opts, &old, new, &writeout_error);
+       ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
        if (ret) {
                free(path_to_free);
                return ret;
        }
 
-       if (!opts->quiet && !old.path && old.commit && new->commit != old.commit)
-               orphaned_commit_warning(old.commit, new->commit);
+       if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
+               orphaned_commit_warning(old_branch_info.commit, new_branch_info->commit);
 
-       update_refs_for_switch(opts, &old, new);
+       update_refs_for_switch(opts, &old_branch_info, new_branch_info);
 
-       ret = post_checkout_hook(old.commit, new->commit, 1);
+       ret = post_checkout_hook(old_branch_info.commit, new_branch_info->commit, 1);
        free(path_to_free);
        return ret || writeout_error;
 }
@@ -870,7 +869,7 @@ static int git_checkout_config(const char *var, const char *value, void *cb)
 
 static int parse_branchname_arg(int argc, const char **argv,
                                int dwim_new_local_branch_ok,
-                               struct branch_info *new,
+                               struct branch_info *new_branch_info,
                                struct checkout_opts *opts,
                                struct object_id *rev)
 {
@@ -988,22 +987,22 @@ static int parse_branchname_arg(int argc, const char **argv,
        argv++;
        argc--;
 
-       new->name = arg;
-       setup_branch_path(new);
+       new_branch_info->name = arg;
+       setup_branch_path(new_branch_info);
 
-       if (!check_refname_format(new->path, 0) &&
-           !read_ref(new->path, &branch_rev))
+       if (!check_refname_format(new_branch_info->path, 0) &&
+           !read_ref(new_branch_info->path, &branch_rev))
                oidcpy(rev, &branch_rev);
        else
-               new->path = NULL; /* not an existing branch */
+               new_branch_info->path = NULL; /* not an existing branch */
 
-       new->commit = lookup_commit_reference_gently(rev, 1);
-       if (!new->commit) {
+       new_branch_info->commit = lookup_commit_reference_gently(rev, 1);
+       if (!new_branch_info->commit) {
                /* not a commit */
                *source_tree = parse_tree_indirect(rev);
        } else {
-               parse_commit_or_die(new->commit);
-               *source_tree = new->commit->tree;
+               parse_commit_or_die(new_branch_info->commit);
+               *source_tree = new_branch_info->commit->tree;
        }
 
        if (!*source_tree)                   /* case (1): want a tree */
@@ -1043,7 +1042,7 @@ static int switch_unborn_to_new_branch(const struct checkout_opts *opts)
 }
 
 static int checkout_branch(struct checkout_opts *opts,
-                          struct branch_info *new)
+                          struct branch_info *new_branch_info)
 {
        if (opts->pathspec.nr)
                die(_("paths cannot be used with switching branches"));
@@ -1072,21 +1071,21 @@ static int checkout_branch(struct checkout_opts *opts,
        } else if (opts->track == BRANCH_TRACK_UNSPECIFIED)
                opts->track = git_branch_track;
 
-       if (new->name && !new->commit)
+       if (new_branch_info->name && !new_branch_info->commit)
                die(_("Cannot switch branch to a non-commit '%s'"),
-                   new->name);
+                   new_branch_info->name);
 
-       if (new->path && !opts->force_detach && !opts->new_branch &&
+       if (new_branch_info->path && !opts->force_detach && !opts->new_branch &&
            !opts->ignore_other_worktrees) {
                int flag;
                char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
                if (head_ref &&
-                   (!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
-                       die_if_checked_out(new->path, 1);
+                   (!(flag & REF_ISSYMREF) || strcmp(head_ref, new_branch_info->path)))
+                       die_if_checked_out(new_branch_info->path, 1);
                free(head_ref);
        }
 
-       if (!new->commit && opts->new_branch) {
+       if (!new_branch_info->commit && opts->new_branch) {
                struct object_id rev;
                int flag;
 
@@ -1094,13 +1093,13 @@ static int checkout_branch(struct checkout_opts *opts,
                    (flag & REF_ISSYMREF) && is_null_oid(&rev))
                        return switch_unborn_to_new_branch(opts);
        }
-       return switch_branches(opts, new);
+       return switch_branches(opts, new_branch_info);
 }
 
 int cmd_checkout(int argc, const char **argv, const char *prefix)
 {
        struct checkout_opts opts;
-       struct branch_info new;
+       struct branch_info new_branch_info;
        char *conflict_style = NULL;
        int dwim_new_local_branch = 1;
        struct option options[] = {
@@ -1118,9 +1117,12 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
                            2),
                OPT_SET_INT('3', "theirs", &opts.writeout_stage, N_("checkout their version for unmerged files"),
                            3),
-               OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)")),
+               OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('m', "merge", &opts.merge, N_("perform a 3-way merge with the new branch")),
-               OPT_BOOL(0, "overwrite-ignore", &opts.overwrite_ignore, N_("update ignored files (default)")),
+               OPT_BOOL_F(0, "overwrite-ignore", &opts.overwrite_ignore,
+                          N_("update ignored files (default)"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_STRING(0, "conflict", &conflict_style, N_("style"),
                           N_("conflict style (merge or diff3)")),
                OPT_BOOL('p', "patch", &opts.patch_mode, N_("select hunks interactively")),
@@ -1138,7 +1140,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
        };
 
        memset(&opts, 0, sizeof(opts));
-       memset(&new, 0, sizeof(new));
+       memset(&new_branch_info, 0, sizeof(new_branch_info));
        opts.overwrite_ignore = 1;
        opts.prefix = prefix;
        opts.show_progress = -1;
@@ -1210,7 +1212,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
                        opts.track == BRANCH_TRACK_UNSPECIFIED &&
                        !opts.new_branch;
                int n = parse_branchname_arg(argc, argv, dwim_ok,
-                                            &new, &opts, &rev);
+                                            &new_branch_info, &opts, &rev);
                argv += n;
                argc -= n;
        }
@@ -1253,7 +1255,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
 
        UNLEAK(opts);
        if (opts.patch_mode || opts.pathspec.nr)
-               return checkout_paths(&opts, new.name);
+               return checkout_paths(&opts, new_branch_info.name);
        else
-               return checkout_branch(&opts, &new);
+               return checkout_branch(&opts, &new_branch_info);
 }
index 189e20628c07774089c5c925380b6b69af16fc7f..fad533a0a7382f10ecf48a738c955734ad5c0d96 100644 (file)
@@ -909,7 +909,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
        struct option options[] = {
                OPT__QUIET(&quiet, N_("do not print names of files removed")),
                OPT__DRY_RUN(&dry_run, N_("dry run")),
-               OPT__FORCE(&force, N_("force")),
+               OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('i', "interactive", &interactive, N_("interactive cleaning")),
                OPT_BOOL('d', NULL, &remove_directories,
                                N_("remove whole directories")),
index 7df5932b855e874d45b970f6150d65ed25b06f5f..84f1473d19dc5a521e58c0bc1a7363808888dff1 100644 (file)
@@ -1135,7 +1135,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        if (transport->smart_options && !deepen && !filter_options.choice)
                transport->smart_options->check_self_contained_and_connected = 1;
 
-       refs = transport_get_remote_refs(transport);
+       refs = transport_get_remote_refs(transport, NULL);
 
        if (refs) {
                mapped_refs = wanted_peer_refs(refs, refspec);
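
transport_get_remote_refs() now takes a second argument: an optional list of ref prefixes that a protocol v2 capable server may use to trim its advertisement. clone passes NULL to keep the full listing; fetch (further down) builds the list from its refspecs. A sketch of the two call shapes, assuming the git tree; the wrapper function and the "refs/heads/" prefix are illustrative:

    #include "cache.h"
    #include "transport.h"
    #include "argv-array.h"

    /* Sketch: "transport" is assumed to be an already connected transport. */
    static const struct ref *list_refs(struct transport *transport, int limit)
    {
    	if (!limit) {
    		/* NULL prefix list: ask for the full advertisement (clone). */
    		return transport_get_remote_refs(transport, NULL);
    	} else {
    		/* Prefix list: a v2 server can filter the refs it sends (fetch). */
    		struct argv_array prefixes = ARGV_ARRAY_INIT;
    		const struct ref *refs;

    		argv_array_push(&prefixes, "refs/heads/");
    		refs = transport_get_remote_refs(transport, &prefixes);
    		argv_array_clear(&prefixes);   /* refs stay owned by the transport */
    		return refs;
    	}
    }
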
index 2177251e247cc144d6ac8619fa5940262430d50a..ecf42191da10cd2e87360f001d5493e792b9682e 100644 (file)
@@ -58,7 +58,7 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
                                usage(commit_tree_usage);
                        if (get_oid_commit(argv[i], &oid))
                                die("Not a valid object name %s", argv[i]);
-                       assert_sha1_type(oid.hash, OBJ_COMMIT);
+                       assert_oid_type(&oid, OBJ_COMMIT);
                        new_parent(lookup_commit(&oid), &parents);
                        continue;
                }
@@ -117,8 +117,8 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
                        die_errno("git commit-tree: failed to read");
        }
 
-       if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents,
-                       commit_oid.hash, NULL, sign_commit)) {
+       if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
+                       NULL, sign_commit)) {
                strbuf_release(&buffer);
                return 1;
        }
index 5dd766af2842dddb80d30cd73b8be8ccb4956eac..37fcb55ab0a03a5fdabaca1913bc700201fd8e10 100644 (file)
@@ -389,13 +389,9 @@ static const char *prepare_index(int argc, const char **argv, const char *prefix
                if (active_cache_changed
                    || !cache_tree_fully_valid(active_cache_tree))
                        update_main_cache_tree(WRITE_TREE_SILENT);
-               if (active_cache_changed) {
-                       if (write_locked_index(&the_index, &index_lock,
-                                              COMMIT_LOCK))
-                               die(_("unable to write new_index file"));
-               } else {
-                       rollback_lock_file(&index_lock);
-               }
+               if (write_locked_index(&the_index, &index_lock,
+                                      COMMIT_LOCK | SKIP_IF_UNCHANGED))
+                       die(_("unable to write new_index file"));
                commit_style = COMMIT_AS_IS;
                ret = get_index_file();
                goto out;
@@ -1061,6 +1057,9 @@ static void finalize_deferred_config(struct wt_status *s)
                s->show_branch = status_deferred_config.show_branch;
        if (s->show_branch < 0)
                s->show_branch = 0;
+
+       if (s->ahead_behind_flags == AHEAD_BEHIND_UNSPECIFIED)
+               s->ahead_behind_flags = AHEAD_BEHIND_FULL;
 }
 
 static int parse_and_validate_options(int argc, const char *argv[],
@@ -1277,6 +1276,8 @@ int cmd_status(int argc, const char **argv, const char *prefix)
                         N_("show branch information")),
                OPT_BOOL(0, "show-stash", &s.show_stash,
                         N_("show stash information")),
+               OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+                        N_("compute full ahead/behind values")),
                { OPTION_CALLBACK, 0, "porcelain", &status_format,
                  N_("version"), N_("machine-readable output"),
                  PARSE_OPT_OPTARG, opt_parse_porcelain },
@@ -1402,6 +1403,7 @@ int run_commit_hook(int editor_is_used, const char *index_file, const char *name
 
 int cmd_commit(int argc, const char **argv, const char *prefix)
 {
+       const char *argv_gc_auto[] = {"gc", "--auto", NULL};
        static struct wt_status s;
        static struct option builtin_commit_options[] = {
                OPT__QUIET(&quiet, N_("suppress summary after successful commit")),
@@ -1437,6 +1439,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                OPT_SET_INT(0, "short", &status_format, N_("show status concisely"),
                            STATUS_FORMAT_SHORT),
                OPT_BOOL(0, "branch", &s.show_branch, N_("show branch information")),
+               OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+                        N_("compute full ahead/behind values")),
                OPT_SET_INT(0, "porcelain", &status_format,
                            N_("machine-readable output"), STATUS_FORMAT_PORCELAIN),
                OPT_SET_INT(0, "long", &status_format,
@@ -1579,8 +1583,9 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                append_merge_tag_headers(parents, &tail);
        }
 
-       if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash,
-                        parents, oid.hash, author_ident.buf, sign_commit, extra)) {
+       if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
+                                parents, &oid, author_ident.buf, sign_commit,
+                                extra)) {
                rollback_index_files();
                die(_("failed to write commit object"));
        }
@@ -1606,6 +1611,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                     "not exceeded, and then \"git reset HEAD\" to recover."));
 
        rerere(0);
+       run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
        run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
        if (amend && !no_post_rewrite) {
                commit_post_rewrite(current_head, &oid);
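
Besides running "gc --auto" after a successful commit and gaining --ahead-behind, cmd_commit() drops its open-coded "did the index change?" branch in favour of the SKIP_IF_UNCHANGED flag. A sketch of that idiom, assuming the git tree:

    #include "cache.h"
    #include "lockfile.h"

    /*
     * Sketch: with SKIP_IF_UNCHANGED the lock is silently rolled back when
     * nothing in the in-core index changed, so the caller no longer needs
     * to test active_cache_changed itself.
     */
    static void write_index_or_die(struct lock_file *lock)
    {
    	if (write_locked_index(&the_index, lock,
    			       COMMIT_LOCK | SKIP_IF_UNCHANGED))
    		die(_("unable to write new index file"));
    }
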
index ab5f95476e6c726798fd84b4a6300577bf0fafba..01169dd628b24a7b5502550a6342ab73cb8154c5 100644 (file)
@@ -48,6 +48,13 @@ static int show_origin;
 #define ACTION_GET_COLORBOOL (1<<14)
 #define ACTION_GET_URLMATCH (1<<15)
 
+/*
+ * The actions "ACTION_LIST | ACTION_GET_*" which may produce more than
+ * one line of output and which should therefore be paged.
+ */
+#define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \
+                       ACTION_GET_REGEXP | ACTION_GET_URLMATCH)
+
 #define TYPE_BOOL (1<<0)
 #define TYPE_INT (1<<1)
 #define TYPE_BOOL_OR_INT (1<<2)
@@ -594,6 +601,9 @@ int cmd_config(int argc, const char **argv, const char *prefix)
                usage_with_options(builtin_config_usage, builtin_config_options);
        }
 
+       if (actions & PAGING_ACTIONS)
+               setup_auto_pager("config", 1);
+
        if (actions == ACTION_LIST) {
                check_argc(argc, 0, 0);
                if (config_with_options(show_all_config, NULL,
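
git config now pages the actions that can emit more than one line by grouping them into the PAGING_ACTIONS mask and calling setup_auto_pager(). A reduced sketch of that test, with placeholder bit values; the real mask also covers --get-regexp and --get-urlmatch:

    #include "cache.h"

    #define SKETCH_ACTION_LIST    (1 << 0)  /* placeholder bit values */
    #define SKETCH_ACTION_GET_ALL (1 << 1)
    #define SKETCH_PAGING_ACTIONS (SKETCH_ACTION_LIST | SKETCH_ACTION_GET_ALL)

    static void maybe_page_config_output(int actions)
    {
    	if (actions & SKETCH_PAGING_ACTIONS)
    		/* honours pager.config; "1" makes paging the default */
    		setup_auto_pager("config", 1);
    }
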
index c4289847063ac02ed061f890ffd71cd329d67109..de840f96a4ba0135667261cfd74c674672f4c6b4 100644 (file)
@@ -285,7 +285,7 @@ static void append_name(struct commit_name *n, struct strbuf *dst)
 
 static void append_suffix(int depth, const struct object_id *oid, struct strbuf *dst)
 {
-       strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid->hash, abbrev));
+       strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid, abbrev));
 }
 
 static void describe_commit(struct object_id *oid, struct strbuf *dst)
@@ -383,7 +383,7 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
        if (!match_cnt) {
                struct object_id *cmit_oid = &cmit->object.oid;
                if (always) {
-                       strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev);
+                       strbuf_add_unique_abbrev(dst, cmit_oid, abbrev);
                        if (suffix)
                                strbuf_addstr(dst, suffix);
                        return;
@@ -502,7 +502,7 @@ static void describe(const char *arg, int last_one)
 
        if (cmit)
                describe_commit(&oid, &sb);
-       else if (lookup_blob(&oid))
+       else if (oid_object_info(&oid, NULL) == OBJ_BLOB)
                describe_blob(oid, &sb);
        else
                die(_("%s is neither a commit nor blob"), arg);
index b775a756470ddc365907fae53dcc949bcaae7ca5..473615117e0adbb301e6ddc0ab000accc513a9ee 100644 (file)
@@ -76,7 +76,7 @@ static int diff_tree_stdin(char *line)
        if (obj->type == OBJ_TREE)
                return stdin_diff_trees((struct tree *)obj, p);
        error("Object %s is a %s, not a commit or tree",
-             oid_to_hex(&oid), typename(obj->type));
+             oid_to_hex(&oid), type_name(obj->type));
        return -1;
 }
 
index bcc79d1888f2217bcb380ffb1e7178c100a41e8e..ee8dce019e1ca04111de185b5f47c12076b32a0c 100644 (file)
@@ -306,7 +306,7 @@ static char *get_symlink(const struct object_id *oid, const char *path)
        } else {
                enum object_type type;
                unsigned long size;
-               data = read_sha1_file(oid->hash, &type, &size);
+               data = read_object_file(oid, &type, &size);
                if (!data)
                        die(_("could not read object %s for symlink %s"),
                                oid_to_hex(oid), path);
index 796d0cd66c7750a20613dea77cd75b1b8e7665a4..a15898d64177b380ea021e3bc63fb91446bc02b3 100644 (file)
@@ -237,10 +237,10 @@ static void export_blob(const struct object_id *oid)
                object = (struct object *)lookup_blob(oid);
                eaten = 0;
        } else {
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf)
                        die ("Could not read blob %s", oid_to_hex(oid));
-               if (check_sha1_signature(oid->hash, buf, size, typename(type)) < 0)
+               if (check_object_signature(oid, buf, size, type_name(type)) < 0)
                        die("sha1 mismatch in blob %s", oid_to_hex(oid));
                object = parse_object_buffer(oid, type, size, buf, &eaten);
        }
@@ -682,7 +682,7 @@ static void handle_tag(const char *name, struct tag *tag)
                return;
        }
 
-       buf = read_sha1_file(tag->object.oid.hash, &type, &size);
+       buf = read_object_file(&tag->object.oid, &type, &size);
        if (!buf)
                die ("Could not read tag %s", oid_to_hex(&tag->object.oid));
        message = memmem(buf, size, "\n\n", 2);
@@ -757,7 +757,7 @@ static void handle_tag(const char *name, struct tag *tag)
                        if (tagged->type != OBJ_COMMIT) {
                                die ("Tag %s tags unexported %s!",
                                     oid_to_hex(&tag->object.oid),
-                                    typename(tagged->type));
+                                    type_name(tagged->type));
                        }
                        p = (struct commit *)tagged;
                        for (;;) {
@@ -839,7 +839,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
                if (!commit) {
                        warning("%s: Unexpected object of type %s, skipping.",
                                e->name,
-                               typename(e->item->type));
+                               type_name(e->item->type));
                        continue;
                }
 
@@ -851,7 +851,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
                        continue;
                default: /* OBJ_TAG (nested tags) is already handled */
                        warning("Tag points to object of unexpected type %s, skipping.",
-                               typename(commit->object.type));
+                               type_name(commit->object.type));
                        continue;
                }
 
@@ -947,7 +947,7 @@ static void import_marks(char *input_file)
                if (last_idnum < mark)
                        last_idnum = mark;
 
-               type = sha1_object_info(oid.hash, NULL);
+               type = oid_object_info(&oid, NULL);
                if (type < 0)
                        die("object not found: %s", oid_to_hex(&oid));
 
index a7bc1366ab375765c41014640743ef9d77c84c42..1a1bc63566b44bc83c8429463104615d1b2117ff 100644 (file)
@@ -4,6 +4,7 @@
 #include "remote.h"
 #include "connect.h"
 #include "sha1-array.h"
+#include "protocol.h"
 
 static const char fetch_pack_usage[] =
 "git fetch-pack [--all] [--stdin] [--quiet | -q] [--keep | -k] [--thin] "
@@ -52,6 +53,7 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
        struct fetch_pack_args args;
        struct oid_array shallow = OID_ARRAY_INIT;
        struct string_list deepen_not = STRING_LIST_INIT_DUP;
+       struct packet_reader reader;
 
        fetch_if_missing = 0;
 
@@ -211,10 +213,24 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
                if (!conn)
                        return args.diag_url ? 0 : 1;
        }
-       get_remote_heads(fd[0], NULL, 0, &ref, 0, NULL, &shallow);
+
+       packet_reader_init(&reader, fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       switch (discover_version(&reader)) {
+       case protocol_v2:
+               die("support for protocol v2 not implemented yet");
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &ref, 0, NULL, &shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
 
        ref = fetch_pack(&args, fd, conn, ref, dest, sought, nr_sought,
-                        &shallow, pack_lockfile_ptr);
+                        &shallow, pack_lockfile_ptr, protocol_v0);
        if (pack_lockfile) {
                printf("lock %s\n", pack_lockfile);
                fflush(stdout);
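
fetch-pack now wraps its input in a packet_reader and dispatches on discover_version(): v0 and v1 still read the classic ref advertisement via get_remote_heads(), while protocol v2 is explicitly not implemented yet. A sketch of that dispatch, assuming the git tree; the wrapper function is illustrative and mirrors the hunk above:

    #include "cache.h"
    #include "pkt-line.h"
    #include "protocol.h"
    #include "connect.h"
    #include "remote.h"
    #include "sha1-array.h"

    /* Sketch: "fd" is the read end of an already established connection. */
    static struct ref *read_advertised_refs(int fd, struct oid_array *shallow)
    {
    	struct packet_reader reader;
    	struct ref *ref = NULL;

    	packet_reader_init(&reader, fd, NULL, 0,
    			   PACKET_READ_CHOMP_NEWLINE |
    			   PACKET_READ_GENTLE_ON_EOF);

    	switch (discover_version(&reader)) {
    	case protocol_v2:
    		die("support for protocol v2 not implemented yet");
    	case protocol_v1:
    	case protocol_v0:
    		/* v0/v1 start with a full ref advertisement */
    		get_remote_heads(&reader, &ref, 0, NULL, shallow);
    		break;
    	case protocol_unknown_version:
    		BUG("unknown protocol version");
    	}
    	return ref;
    }
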
index a39e9d7b1579fd6adb8d421ac8984bc0592b5eda..7ee83ac0f8170d9e68c374a21caebfe01cf64533 100644 (file)
@@ -39,6 +39,10 @@ static int fetch_prune_config = -1; /* unspecified */
 static int prune = -1; /* unspecified */
 #define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
 
+static int fetch_prune_tags_config = -1; /* unspecified */
+static int prune_tags = -1; /* unspecified */
+#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
+
 static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
 static int progress = -1;
 static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
@@ -66,6 +70,11 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
                return 0;
        }
 
+       if (!strcmp(k, "fetch.prunetags")) {
+               fetch_prune_tags_config = git_config_bool(k, v);
+               return 0;
+       }
+
        if (!strcmp(k, "submodule.recurse")) {
                int r = git_config_bool(k, v) ?
                        RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
@@ -117,7 +126,7 @@ static struct option builtin_fetch_options[] = {
                 N_("append to .git/FETCH_HEAD instead of overwriting")),
        OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
                   N_("path to upload pack on remote end")),
-       OPT__FORCE(&force, N_("force overwrite of local branch")),
+       OPT__FORCE(&force, N_("force overwrite of local branch"), 0),
        OPT_BOOL('m', "multiple", &multiple,
                 N_("fetch from multiple remotes")),
        OPT_SET_INT('t', "tags", &tags,
@@ -128,6 +137,8 @@ static struct option builtin_fetch_options[] = {
                    N_("number of submodules fetched in parallel")),
        OPT_BOOL('p', "prune", &prune,
                 N_("prune remote-tracking branches no longer on remote")),
+       OPT_BOOL('P', "prune-tags", &prune_tags,
+                N_("prune local tags no longer on remote and clobber changed tags")),
        { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"),
                    N_("control recursive fetching of submodules"),
                    PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules },
@@ -253,7 +264,7 @@ static void find_non_local_tags(struct transport *transport,
        struct string_list_item *item = NULL;
 
        for_each_ref(add_existing, &existing_refs);
-       for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
+       for (ref = transport_get_remote_refs(transport, NULL); ref; ref = ref->next) {
                if (!starts_with(ref->name, "refs/tags/"))
                        continue;
 
@@ -335,11 +346,28 @@ static struct ref *get_ref_map(struct transport *transport,
        struct ref *rm;
        struct ref *ref_map = NULL;
        struct ref **tail = &ref_map;
+       struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
 
        /* opportunistically-updated references: */
        struct ref *orefs = NULL, **oref_tail = &orefs;
 
-       const struct ref *remote_refs = transport_get_remote_refs(transport);
+       const struct ref *remote_refs;
+
+       for (i = 0; i < refspec_count; i++) {
+               if (!refspecs[i].exact_sha1) {
+                       const char *glob = strchr(refspecs[i].src, '*');
+                       if (glob)
+                               argv_array_pushf(&ref_prefixes, "%.*s",
+                                                (int)(glob - refspecs[i].src),
+                                                refspecs[i].src);
+                       else
+                               expand_ref_prefix(&ref_prefixes, refspecs[i].src);
+               }
+       }
+
+       remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
+
+       argv_array_clear(&ref_prefixes);
 
        if (refspec_count) {
                struct refspec *fetch_refspec;
@@ -626,7 +654,7 @@ static int update_local_ref(struct ref *ref,
        struct branch *current_branch = branch_get(NULL);
        const char *pretty_ref = prettify_refname(ref->name);
 
-       type = sha1_object_info(ref->new_oid.hash, NULL);
+       type = oid_object_info(&ref->new_oid, NULL);
        if (type < 0)
                die(_("object %s not found"), oid_to_hex(&ref->new_oid));
 
@@ -697,9 +725,9 @@ static int update_local_ref(struct ref *ref,
        if (in_merge_bases(current, updated)) {
                struct strbuf quickref = STRBUF_INIT;
                int r;
-               strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
                strbuf_addstr(&quickref, "..");
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
                if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
                    (recurse_submodules != RECURSE_SUBMODULES_ON))
                        check_for_new_submodule_commits(&ref->new_oid);
@@ -712,9 +740,9 @@ static int update_local_ref(struct ref *ref,
        } else if (force || ref->force) {
                struct strbuf quickref = STRBUF_INIT;
                int r;
-               strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
                strbuf_addstr(&quickref, "...");
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
                if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
                    (recurse_submodules != RECURSE_SUBMODULES_ON))
                        check_for_new_submodule_commits(&ref->new_oid);
@@ -1220,6 +1248,8 @@ static void add_options_to_argv(struct argv_array *argv)
                argv_array_push(argv, "--dry-run");
        if (prune != -1)
                argv_array_push(argv, prune ? "--prune" : "--no-prune");
+       if (prune_tags != -1)
+               argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags");
        if (update_head_ok)
                argv_array_push(argv, "--update-head-ok");
        if (force)
@@ -1323,12 +1353,15 @@ static inline void fetch_one_setup_partial(struct remote *remote)
        return;
 }
 
-static int fetch_one(struct remote *remote, int argc, const char **argv)
+static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok)
 {
        static const char **refs = NULL;
        struct refspec *refspec;
        int ref_nr = 0;
+       int j = 0;
        int exit_code;
+       int maybe_prune_tags;
+       int remote_via_config = remote_is_configured(remote, 0);
 
        if (!remote)
                die(_("No remote repository specified.  Please, specify either a URL or a\n"
@@ -1338,18 +1371,39 @@ static int fetch_one(struct remote *remote, int argc, const char **argv)
 
        if (prune < 0) {
                /* no command line request */
-               if (0 <= gtransport->remote->prune)
-                       prune = gtransport->remote->prune;
+               if (0 <= remote->prune)
+                       prune = remote->prune;
                else if (0 <= fetch_prune_config)
                        prune = fetch_prune_config;
                else
                        prune = PRUNE_BY_DEFAULT;
        }
 
+       if (prune_tags < 0) {
+               /* no command line request */
+               if (0 <= remote->prune_tags)
+                       prune_tags = remote->prune_tags;
+               else if (0 <= fetch_prune_tags_config)
+                       prune_tags = fetch_prune_tags_config;
+               else
+                       prune_tags = PRUNE_TAGS_BY_DEFAULT;
+       }
+
+       maybe_prune_tags = prune_tags_ok && prune_tags;
+       if (maybe_prune_tags && remote_via_config)
+               add_prune_tags_to_fetch_refspec(remote);
+
+       if (argc > 0 || (maybe_prune_tags && !remote_via_config)) {
+               size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1);
+               refs = xcalloc(nr_alloc, sizeof(const char *));
+               if (maybe_prune_tags) {
+                       refs[j++] = xstrdup("refs/tags/*:refs/tags/*");
+                       ref_nr++;
+               }
+       }
+
        if (argc > 0) {
-               int j = 0;
                int i;
-               refs = xcalloc(st_add(argc, 1), sizeof(const char *));
                for (i = 0; i < argc; i++) {
                        if (!strcmp(argv[i], "tag")) {
                                i++;
@@ -1359,9 +1413,8 @@ static int fetch_one(struct remote *remote, int argc, const char **argv)
                                                    argv[i], argv[i]);
                        } else
                                refs[j++] = argv[i];
+                       ref_nr++;
                }
-               refs[j] = NULL;
-               ref_nr = j;
        }
 
        sigchain_push_common(unlock_pack_on_signal);
@@ -1380,6 +1433,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
        struct string_list list = STRING_LIST_INIT_DUP;
        struct remote *remote = NULL;
        int result = 0;
+       int prune_tags_ok = 1;
        struct argv_array argv_gc_auto = ARGV_ARRAY_INIT;
 
        packet_trace_identity("fetch");
@@ -1446,6 +1500,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
                } else {
                        /* Zero or one remotes */
                        remote = remote_get(argv[0]);
+                       prune_tags_ok = (argc == 1);
                        argc--;
                        argv++;
                }
@@ -1454,7 +1509,7 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
        if (remote) {
                if (filter_options.choice || repository_format_partial_clone)
                        fetch_one_setup_partial(remote);
-               result = fetch_one(remote, argc, argv);
+               result = fetch_one(remote, argc, argv, prune_tags_ok);
        } else {
                if (filter_options.choice)
                        die(_("--filter can only be used with the remote configured in core.partialClone"));
index 8e8a15ea4ad6de2bb73f63d39a0895a263918774..bd680be6874da29cf776c84468a60888beea53fb 100644 (file)
@@ -485,10 +485,10 @@ static void fmt_merge_msg_sigs(struct strbuf *out)
        struct strbuf tagbuf = STRBUF_INIT;
 
        for (i = 0; i < origins.nr; i++) {
-               unsigned char *sha1 = origins.items[i].util;
+               struct object_id *oid = origins.items[i].util;
                enum object_type type;
                unsigned long size, len;
-               char *buf = read_sha1_file(sha1, &type, &size);
+               char *buf = read_object_file(oid, &type, &size);
                struct strbuf sig = STRBUF_INIT;
 
                if (!buf || type != OBJ_TAG)
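
read_object_file() is the struct object_id counterpart of the removed read_sha1_file(); only the first argument changes, while the type/size out-parameters and the caller-owned buffer stay the same. A hedged, self-contained sketch of the usual calling pattern (oid is assumed to name an existing object):

	enum object_type type;
	unsigned long size;
	char *buf = read_object_file(&oid, &type, &size);

	if (!buf)
		die("could not read object %s", oid_to_hex(&oid));
	/* ... inspect type and size, use the size bytes in buf ... */
	free(buf);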
index f9632353d9448a63a61c4c6f458afe075bd22aea..087360a6757c607c4b38cce5fafad3bbb0822fc0 100644 (file)
@@ -67,12 +67,12 @@ static const char *printable_type(struct object *obj)
        const char *ret;
 
        if (obj->type == OBJ_NONE) {
-               enum object_type type = sha1_object_info(obj->oid.hash, NULL);
+               enum object_type type = oid_object_info(&obj->oid, NULL);
                if (type > 0)
                        object_as_type(obj, type, 0);
        }
 
-       ret = typename(obj->type);
+       ret = type_name(obj->type);
        if (!ret)
                ret = "unknown";
 
@@ -139,7 +139,7 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt
                printf("broken link from %7s %s\n",
                           printable_type(parent), describe_object(parent));
                printf("broken link from %7s %s\n",
-                          (type == OBJ_ANY ? "unknown" : typename(type)), "unknown");
+                          (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
                errors_found |= ERROR_REACHABLE;
                return 1;
        }
@@ -182,7 +182,13 @@ static void mark_object_reachable(struct object *obj)
 
 static int traverse_one_object(struct object *obj)
 {
-       return fsck_walk(obj, obj, &fsck_walk_options);
+       int result = fsck_walk(obj, obj, &fsck_walk_options);
+
+       if (obj->type == OBJ_TREE) {
+               struct tree *tree = (struct tree *)obj;
+               free_tree_buffer(tree);
+       }
+       return result;
 }
 
 static int traverse_reachable(void)
@@ -509,7 +515,7 @@ static struct object *parse_loose_object(const struct object_id *oid,
        unsigned long size;
        int eaten;
 
-       if (read_loose_object(path, oid->hash, &type, &size, &contents) < 0)
+       if (read_loose_object(path, oid, &type, &size, &contents) < 0)
                return NULL;
 
        if (!contents && type != OBJ_BLOB)
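
Two renames recur throughout this topic: sha1_object_info() becomes oid_object_info(), taking a struct object_id, and typename() becomes type_name(), a name that among other things avoids the C++ keyword. A short sketch of the combined idiom as used in the hunks above (oid is assumed to be a valid object name):

	unsigned long size;
	enum object_type type = oid_object_info(&oid, &size);

	if (type < 0)
		die("object %s not found", oid_to_hex(&oid));
	printf("%s %s %lu\n", oid_to_hex(&oid), type_name(type), size);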
index 0a667972ab518d529909f473f6b1c85149bb4dbf..3e67124eaaed256f440eea2a08101e87678eee0e 100644 (file)
@@ -361,8 +361,11 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                        N_("prune unreferenced objects"),
                        PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
                OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
-               OPT_BOOL(0, "auto", &auto_gc, N_("enable auto-gc mode")),
-               OPT_BOOL(0, "force", &force, N_("force running gc even if there may be another gc running")),
+               OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
+                          PARSE_OPT_NOCOMPLETE),
+               OPT_BOOL_F(0, "force", &force,
+                          N_("force running gc even if there may be another gc running"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
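
OPT_BOOL_F() is the flag-accepting variant of OPT_BOOL(); PARSE_OPT_NOCOMPLETE marks an option that the shell completion machinery should not offer, which is why potentially destructive or internal switches such as --force and --auto gain it here. A sketch of how another command might declare such an option (the variable and description are illustrative):

	static int force;

	static struct option options[] = {
		OPT_BOOL_F(0, "force", &force,
			   N_("proceed even when it looks unsafe"),
			   PARSE_OPT_NOCOMPLETE),
		OPT_END()
	};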
 
index 1e9cdbdf78ddbd803ba9fa65210fb79ff1e27583..5f32d2ce84f27bca725c12aa51d2d91a0c38be8b 100644 (file)
@@ -93,8 +93,7 @@ static pthread_cond_t cond_result;
 
 static int skip_first_line;
 
-static void add_work(struct grep_opt *opt, enum grep_source_type type,
-                    const char *name, const char *path, const void *id)
+static void add_work(struct grep_opt *opt, const struct grep_source *gs)
 {
        grep_lock();
 
@@ -102,7 +101,7 @@ static void add_work(struct grep_opt *opt, enum grep_source_type type,
                pthread_cond_wait(&cond_write, &grep_mutex);
        }
 
-       grep_source_init(&todo[todo_end].source, type, name, path, id);
+       todo[todo_end].source = *gs;
        if (opt->binary != GREP_BINARY_TEXT)
                grep_source_load_driver(&todo[todo_end].source);
        todo[todo_end].done = 0;
@@ -308,7 +307,7 @@ static void *lock_and_read_oid_file(const struct object_id *oid, enum object_typ
        void *data;
 
        grep_read_lock();
-       data = read_sha1_file(oid->hash, type, size);
+       data = read_object_file(oid, type, size);
        grep_read_unlock();
        return data;
 }
@@ -318,6 +317,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
                     const char *path)
 {
        struct strbuf pathbuf = STRBUF_INIT;
+       struct grep_source gs;
 
        if (opt->relative && opt->prefix_length) {
                quote_path_relative(filename + tree_name_len, opt->prefix, &pathbuf);
@@ -326,19 +326,22 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
                strbuf_addstr(&pathbuf, filename);
        }
 
+       grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
+       strbuf_release(&pathbuf);
+
 #ifndef NO_PTHREADS
        if (num_threads) {
-               add_work(opt, GREP_SOURCE_OID, pathbuf.buf, path, oid);
-               strbuf_release(&pathbuf);
+               /*
+                * add_work() copies gs and thus assumes ownership of
+                * its fields, so do not call grep_source_clear()
+                */
+               add_work(opt, &gs);
                return 0;
        } else
 #endif
        {
-               struct grep_source gs;
                int hit;
 
-               grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
-               strbuf_release(&pathbuf);
                hit = grep_source(opt, &gs);
 
                grep_source_clear(&gs);
@@ -349,25 +352,29 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
 static int grep_file(struct grep_opt *opt, const char *filename)
 {
        struct strbuf buf = STRBUF_INIT;
+       struct grep_source gs;
 
        if (opt->relative && opt->prefix_length)
                quote_path_relative(filename, opt->prefix, &buf);
        else
                strbuf_addstr(&buf, filename);
 
+       grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
+       strbuf_release(&buf);
+
 #ifndef NO_PTHREADS
        if (num_threads) {
-               add_work(opt, GREP_SOURCE_FILE, buf.buf, filename, filename);
-               strbuf_release(&buf);
+               /*
+                * add_work() copies gs and thus assumes ownership of
+                * its fields, so do not call grep_source_clear()
+                */
+               add_work(opt, &gs);
                return 0;
        } else
 #endif
        {
-               struct grep_source gs;
                int hit;
 
-               grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
-               strbuf_release(&buf);
                hit = grep_source(opt, &gs);
 
                grep_source_clear(&gs);
@@ -446,7 +453,7 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject,
                object = parse_object_or_die(oid, oid_to_hex(oid));
 
                grep_read_lock();
-               data = read_object_with_reference(object->oid.hash, tree_type,
+               data = read_object_with_reference(&object->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -608,7 +615,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                int hit, len;
 
                grep_read_lock();
-               data = read_object_with_reference(obj->oid.hash, tree_type,
+               data = read_object_with_reference(&obj->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -628,7 +635,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                free(data);
                return hit;
        }
-       die(_("unable to grep from object of type %s"), typename(obj->type));
+       die(_("unable to grep from object of type %s"), type_name(obj->type));
 }
 
 static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
@@ -833,8 +840,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                OPT_BOOL('L', "files-without-match",
                        &opt.unmatch_name_only,
                        N_("show only the names of files without match")),
-               OPT_BOOL('z', "null", &opt.null_following_name,
-                       N_("print NUL after filenames")),
+               OPT_BOOL_F('z', "null", &opt.null_following_name,
+                          N_("print NUL after filenames"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('c', "count", &opt.count,
                        N_("show the number of matches instead of matching lines")),
                OPT__COLOR(&opt.color, N_("highlight matches")),
@@ -885,9 +893,11 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                OPT_GROUP(""),
                { OPTION_STRING, 'O', "open-files-in-pager", &show_in_pager,
                        N_("pager"), N_("show matching files in the pager"),
-                       PARSE_OPT_OPTARG, NULL, (intptr_t)default_pager },
-               OPT_BOOL(0, "ext-grep", &external_grep_allowed__ignored,
-                        N_("allow calling of grep(1) (ignored by this build)")),
+                       PARSE_OPT_OPTARG | PARSE_OPT_NOCOMPLETE,
+                       NULL, (intptr_t)default_pager },
+               OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored,
+                          N_("allow calling of grep(1) (ignored by this build)"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
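
The grep changes above hoist grep_source_init() out of the threaded and non-threaded branches: add_work() now receives a fully initialized grep_source and copies it, taking over ownership of its fields, so only the single-threaded path calls grep_source_clear(). A condensed sketch of the resulting convention (the helper name and parameters are illustrative):

	static int run_source(struct grep_opt *opt, const char *name,
			      const char *path, int threaded)
	{
		struct grep_source gs;

		grep_source_init(&gs, GREP_SOURCE_FILE, name, path, path);
		if (threaded) {
			/* add_work() copies gs; do not clear it here */
			add_work(opt, &gs);
			return 0;
		} else {
			int hit = grep_source(opt, &gs);

			grep_source_clear(&gs);
			return hit;
		}
	}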
 
index c532ff9320c751d1db5475add51f2c3c6a8c7146..526da5c1856ed1c387975a767f4d01382d1ea1a9 100644 (file)
@@ -24,7 +24,8 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig
        if (strbuf_read(&buf, fd, 4096) < 0)
                ret = -1;
        else
-               ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags);
+               ret = hash_object_file_literally(buf.buf, buf.len, type, oid,
+                                                flags);
        strbuf_release(&buf);
        return ret;
 }
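
hash_object_file_literally() replaces hash_sha1_file_literally(), writing the result into a struct object_id. A hedged sketch of a direct call; HASH_WRITE_OBJECT is assumed to be the flag that requests the loose object actually be written, and the payload is made up:

	struct object_id oid;
	const char *payload = "hello, world\n";

	if (hash_object_file_literally(payload, strlen(payload), "blob",
				       &oid, HASH_WRITE_OBJECT) < 0)
		die("unable to hash object");
	printf("%s\n", oid_to_hex(&oid));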
index d3c8fc40820faa10f7c9004560159986b2900e9e..598867cfea40c6e0df25111c5484dde5d2cd8b47 100644 (file)
@@ -194,11 +194,11 @@ static void do_add_man_viewer_info(const char *name,
                                   size_t len,
                                   const char *value)
 {
-       struct man_viewer_info_list *new;
-       FLEX_ALLOC_MEM(new, name, name, len);
-       new->info = xstrdup(value);
-       new->next = man_viewer_info_list;
-       man_viewer_info_list = new;
+       struct man_viewer_info_list *new_man_viewer;
+       FLEX_ALLOC_MEM(new_man_viewer, name, name, len);
+       new_man_viewer->info = xstrdup(value);
+       new_man_viewer->next = man_viewer_info_list;
+       man_viewer_info_list = new_man_viewer;
 }
 
 static int add_man_viewer_path(const char *name,
index 1d6bc87b7694a3d63fdfb0e7e96ec1db7bb80056..d81473e722e7c99b9b05b34506fa4925196ff6d7 100644 (file)
@@ -50,6 +50,7 @@ struct thread_local {
        int pack_fd;
 };
 
+/* Remember to update object flag allocation in object.h */
 #define FLAG_LINK (1u<<20)
 #define FLAG_CHECKED (1u<<21)
 
@@ -59,7 +60,7 @@ struct ofs_delta_entry {
 };
 
 struct ref_delta_entry {
-       unsigned char sha1[20];
+       struct object_id oid;
        int obj_no;
 };
 
@@ -92,7 +93,7 @@ static unsigned int input_offset, input_len;
 static off_t consumed_bytes;
 static off_t max_input_size;
 static unsigned deepest_delta;
-static git_SHA_CTX input_ctx;
+static git_hash_ctx input_ctx;
 static uint32_t input_crc32;
 static int input_fd, output_fd;
 static const char *curr_pack;
@@ -222,14 +223,14 @@ static unsigned check_object(struct object *obj)
 
        if (!(obj->flags & FLAG_CHECKED)) {
                unsigned long size;
-               int type = sha1_object_info(obj->oid.hash, &size);
+               int type = oid_object_info(&obj->oid, &size);
                if (type <= 0)
                        die(_("did not receive expected object %s"),
                              oid_to_hex(&obj->oid));
                if (type != obj->type)
                        die(_("object %s: expected type %s, found %s"),
                            oid_to_hex(&obj->oid),
-                           typename(obj->type), typename(type));
+                           type_name(obj->type), type_name(type));
                obj->flags |= FLAG_CHECKED;
                return 1;
        }
@@ -254,7 +255,7 @@ static void flush(void)
        if (input_offset) {
                if (output_fd >= 0)
                        write_or_die(output_fd, input_buffer, input_offset);
-               git_SHA1_Update(&input_ctx, input_buffer, input_offset);
+               the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
                memmove(input_buffer, input_buffer + input_offset, input_len);
                input_offset = 0;
        }
@@ -327,7 +328,7 @@ static const char *open_pack_file(const char *pack_name)
                output_fd = -1;
                nothread_data.pack_fd = input_fd;
        }
-       git_SHA1_Init(&input_ctx);
+       the_hash_algo->init_fn(&input_ctx);
        return pack_name;
 }
 
@@ -438,22 +439,22 @@ static int is_delta_type(enum object_type type)
 }
 
 static void *unpack_entry_data(off_t offset, unsigned long size,
-                              enum object_type type, unsigned char *sha1)
+                              enum object_type type, struct object_id *oid)
 {
        static char fixed_buf[8192];
        int status;
        git_zstream stream;
        void *buf;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        char hdr[32];
        int hdrlen;
 
        if (!is_delta_type(type)) {
-               hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1;
-               git_SHA1_Init(&c);
-               git_SHA1_Update(&c, hdr, hdrlen);
+               hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1;
+               the_hash_algo->init_fn(&c);
+               the_hash_algo->update_fn(&c, hdr, hdrlen);
        } else
-               sha1 = NULL;
+               oid = NULL;
        if (type == OBJ_BLOB && size > big_file_threshold)
                buf = fixed_buf;
        else
@@ -470,8 +471,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
                stream.avail_in = input_len;
                status = git_inflate(&stream, 0);
                use(input_len - stream.avail_in);
-               if (sha1)
-                       git_SHA1_Update(&c, last_out, stream.next_out - last_out);
+               if (oid)
+                       the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
                if (buf == fixed_buf) {
                        stream.next_out = buf;
                        stream.avail_out = sizeof(fixed_buf);
@@ -480,15 +481,15 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
        if (stream.total_out != size || status != Z_STREAM_END)
                bad_object(offset, _("inflate returned %d"), status);
        git_inflate_end(&stream);
-       if (sha1)
-               git_SHA1_Final(sha1, &c);
+       if (oid)
+               the_hash_algo->final_fn(oid->hash, &c);
        return buf == fixed_buf ? NULL : buf;
 }
 
 static void *unpack_raw_entry(struct object_entry *obj,
                              off_t *ofs_offset,
-                             unsigned char *ref_sha1,
-                             unsigned char *sha1)
+                             struct object_id *ref_oid,
+                             struct object_id *oid)
 {
        unsigned char *p;
        unsigned long size, c;
@@ -516,8 +517,8 @@ static void *unpack_raw_entry(struct object_entry *obj,
 
        switch (obj->type) {
        case OBJ_REF_DELTA:
-               hashcpy(ref_sha1, fill(20));
-               use(20);
+               hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
+               use(the_hash_algo->rawsz);
                break;
        case OBJ_OFS_DELTA:
                p = fill(1);
@@ -547,7 +548,7 @@ static void *unpack_raw_entry(struct object_entry *obj,
        }
        obj->hdr_size = consumed_bytes - obj->idx.offset;
 
-       data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1);
+       data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
        obj->idx.crc32 = input_crc32;
        return data;
 }
@@ -672,18 +673,18 @@ static void find_ofs_delta_children(off_t offset,
        *last_index = last;
 }
 
-static int compare_ref_delta_bases(const unsigned char *sha1,
-                                  const unsigned char *sha2,
+static int compare_ref_delta_bases(const struct object_id *oid1,
+                                  const struct object_id *oid2,
                                   enum object_type type1,
                                   enum object_type type2)
 {
        int cmp = type1 - type2;
        if (cmp)
                return cmp;
-       return hashcmp(sha1, sha2);
+       return oidcmp(oid1, oid2);
 }
 
-static int find_ref_delta(const unsigned char *sha1, enum object_type type)
+static int find_ref_delta(const struct object_id *oid, enum object_type type)
 {
        int first = 0, last = nr_ref_deltas;
 
@@ -692,7 +693,7 @@ static int find_ref_delta(const unsigned char *sha1, enum object_type type)
                struct ref_delta_entry *delta = &ref_deltas[next];
                int cmp;
 
-               cmp = compare_ref_delta_bases(sha1, delta->sha1,
+               cmp = compare_ref_delta_bases(oid, &delta->oid,
                                              type, objects[delta->obj_no].type);
                if (!cmp)
                        return next;
@@ -705,11 +706,11 @@ static int find_ref_delta(const unsigned char *sha1, enum object_type type)
        return -first-1;
 }
 
-static void find_ref_delta_children(const unsigned char *sha1,
+static void find_ref_delta_children(const struct object_id *oid,
                                    int *first_index, int *last_index,
                                    enum object_type type)
 {
-       int first = find_ref_delta(sha1, type);
+       int first = find_ref_delta(oid, type);
        int last = first;
        int end = nr_ref_deltas - 1;
 
@@ -718,9 +719,9 @@ static void find_ref_delta_children(const unsigned char *sha1,
                *last_index = -1;
                return;
        }
-       while (first > 0 && !hashcmp(ref_deltas[first - 1].sha1, sha1))
+       while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid))
                --first;
-       while (last < end && !hashcmp(ref_deltas[last + 1].sha1, sha1))
+       while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid))
                ++last;
        *first_index = first;
        *last_index = last;
@@ -772,7 +773,7 @@ static int check_collison(struct object_entry *entry)
 
        memset(&data, 0, sizeof(data));
        data.entry = entry;
-       data.st = open_istream(entry->idx.oid.hash, &type, &size, NULL);
+       data.st = open_istream(&entry->idx.oid, &type, &size, NULL);
        if (!data.st)
                return -1;
        if (size != entry->size || type != entry->type)
@@ -811,12 +812,12 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                enum object_type has_type;
                unsigned long has_size;
                read_lock();
-               has_type = sha1_object_info(oid->hash, &has_size);
+               has_type = oid_object_info(oid, &has_size);
                if (has_type < 0)
                        die(_("cannot read existing object info %s"), oid_to_hex(oid));
                if (has_type != type || has_size != size)
                        die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
-               has_data = read_sha1_file(oid->hash, &has_type, &has_size);
+               has_data = read_object_file(oid, &has_type, &has_size);
                read_unlock();
                if (!data)
                        data = new_data = get_data_from_pack(obj_entry);
@@ -828,7 +829,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                free(has_data);
        }
 
-       if (strict) {
+       if (strict || do_fsck_object) {
                read_lock();
                if (type == OBJ_BLOB) {
                        struct blob *blob = lookup_blob(oid);
@@ -850,11 +851,11 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                        obj = parse_object_buffer(oid, type, size, buf,
                                                  &eaten);
                        if (!obj)
-                               die(_("invalid %s"), typename(type));
+                               die(_("invalid %s"), type_name(type));
                        if (do_fsck_object &&
                            fsck_object(obj, buf, size, &fsck_options))
                                die(_("Error in object"));
-                       if (fsck_walk(obj, NULL, &fsck_options))
+                       if (strict && fsck_walk(obj, NULL, &fsck_options))
                                die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
 
                        if (obj->type == OBJ_TREE) {
@@ -959,9 +960,8 @@ static void resolve_delta(struct object_entry *delta_obj,
        free(delta_data);
        if (!result->data)
                bad_object(delta_obj->idx.offset, _("failed to apply delta"));
-       hash_sha1_file(result->data, result->size,
-                      typename(delta_obj->real_type),
-                      delta_obj->idx.oid.hash);
+       hash_object_file(result->data, result->size,
+                        type_name(delta_obj->real_type), &delta_obj->idx.oid);
        sha1_object(result->data, NULL, result->size, delta_obj->real_type,
                    &delta_obj->idx.oid);
        counter_lock();
@@ -993,7 +993,7 @@ static struct base_data *find_unresolved_deltas_1(struct base_data *base,
                                                  struct base_data *prev_base)
 {
        if (base->ref_last == -1 && base->ofs_last == -1) {
-               find_ref_delta_children(base->obj->idx.oid.hash,
+               find_ref_delta_children(&base->obj->idx.oid,
                                        &base->ref_first, &base->ref_last,
                                        OBJ_REF_DELTA);
 
@@ -1077,7 +1077,7 @@ static int compare_ref_delta_entry(const void *a, const void *b)
        const struct ref_delta_entry *delta_a = a;
        const struct ref_delta_entry *delta_b = b;
 
-       return hashcmp(delta_a->sha1, delta_b->sha1);
+       return oidcmp(&delta_a->oid, &delta_b->oid);
 }
 
 static void resolve_base(struct object_entry *obj)
@@ -1120,11 +1120,11 @@ static void *threaded_second_pass(void *data)
  * - calculate SHA1 of all non-delta objects;
  * - remember base (SHA1 or offset) for all deltas.
  */
-static void parse_pack_objects(unsigned char *sha1)
+static void parse_pack_objects(unsigned char *hash)
 {
        int i, nr_delays = 0;
        struct ofs_delta_entry *ofs_delta = ofs_deltas;
-       unsigned char ref_delta_sha1[20];
+       struct object_id ref_delta_oid;
        struct stat st;
 
        if (verbose)
@@ -1134,8 +1134,8 @@ static void parse_pack_objects(unsigned char *sha1)
        for (i = 0; i < nr_objects; i++) {
                struct object_entry *obj = &objects[i];
                void *data = unpack_raw_entry(obj, &ofs_delta->offset,
-                                             ref_delta_sha1,
-                                             obj->idx.oid.hash);
+                                             &ref_delta_oid,
+                                             &obj->idx.oid);
                obj->real_type = obj->type;
                if (obj->type == OBJ_OFS_DELTA) {
                        nr_ofs_deltas++;
@@ -1143,7 +1143,7 @@ static void parse_pack_objects(unsigned char *sha1)
                        ofs_delta++;
                } else if (obj->type == OBJ_REF_DELTA) {
                        ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
-                       hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1);
+                       oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
                        ref_deltas[nr_ref_deltas].obj_no = i;
                        nr_ref_deltas++;
                } else if (!data) {
@@ -1161,10 +1161,10 @@ static void parse_pack_objects(unsigned char *sha1)
 
        /* Check pack integrity */
        flush();
-       git_SHA1_Final(sha1, &input_ctx);
-       if (hashcmp(fill(20), sha1))
+       the_hash_algo->final_fn(hash, &input_ctx);
+       if (hashcmp(fill(the_hash_algo->rawsz), hash))
                die(_("pack is corrupted (SHA1 mismatch)"));
-       use(20);
+       use(the_hash_algo->rawsz);
 
        /* If input_fd is a file, we should have reached its end now. */
        if (fstat(input_fd, &st))
@@ -1240,21 +1240,21 @@ static void resolve_deltas(void)
 /*
  * Third pass:
  * - append objects to convert thin pack to full pack if required
- * - write the final 20-byte SHA-1
+ * - write the final pack hash
  */
-static void fix_unresolved_deltas(struct sha1file *f);
-static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1)
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
 {
        if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
                stop_progress(&progress);
-               /* Flush remaining pack final 20-byte SHA1. */
+               /* Flush remaining pack final hash. */
                flush();
                return;
        }
 
        if (fix_thin_pack) {
-               struct sha1file *f;
-               unsigned char read_sha1[20], tail_sha1[20];
+               struct hashfile *f;
+               unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
                struct strbuf msg = STRBUF_INIT;
                int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
                int nr_objects_initial = nr_objects;
@@ -1263,7 +1263,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
                memset(objects + nr_objects + 1, 0,
                       nr_unresolved * sizeof(*objects));
-               f = sha1fd(output_fd, curr_pack);
+               f = hashfd(output_fd, curr_pack);
                fix_unresolved_deltas(f);
                strbuf_addf(&msg, Q_("completed with %d local object",
                                     "completed with %d local objects",
@@ -1271,12 +1271,12 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                            nr_objects - nr_objects_initial);
                stop_progress_msg(&progress, msg.buf);
                strbuf_release(&msg);
-               sha1close(f, tail_sha1, 0);
-               hashcpy(read_sha1, pack_sha1);
-               fixup_pack_header_footer(output_fd, pack_sha1,
+               hashclose(f, tail_hash, 0);
+               hashcpy(read_hash, pack_hash);
+               fixup_pack_header_footer(output_fd, pack_hash,
                                         curr_pack, nr_objects,
-                                        read_sha1, consumed_bytes-20);
-               if (hashcmp(read_sha1, tail_sha1) != 0)
+                                        read_hash, consumed_bytes-the_hash_algo->rawsz);
+               if (hashcmp(read_hash, tail_hash) != 0)
                        die(_("Unexpected tail checksum for %s "
                              "(disk corruption?)"), curr_pack);
        }
@@ -1287,7 +1287,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                    nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
 }
 
-static int write_compressed(struct sha1file *f, void *in, unsigned int size)
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
 {
        git_zstream stream;
        int status;
@@ -1301,7 +1301,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size)
                stream.next_out = outbuf;
                stream.avail_out = sizeof(outbuf);
                status = git_deflate(&stream, Z_FINISH);
-               sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out);
+               hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
        } while (status == Z_OK);
 
        if (status != Z_STREAM_END)
@@ -1311,7 +1311,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size)
        return size;
 }
 
-static struct object_entry *append_obj_to_pack(struct sha1file *f,
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
                               const unsigned char *sha1, void *buf,
                               unsigned long size, enum object_type type)
 {
@@ -1328,7 +1328,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f,
        }
        header[n++] = c;
        crc32_begin(f);
-       sha1write(f, header, n);
+       hashwrite(f, header, n);
        obj[0].size = size;
        obj[0].hdr_size = n;
        obj[0].type = type;
@@ -1336,7 +1336,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f,
        obj[1].idx.offset = obj[0].idx.offset + n;
        obj[1].idx.offset += write_compressed(f, buf, size);
        obj[0].idx.crc32 = crc32_end(f);
-       sha1flush(f);
+       hashflush(f);
        hashcpy(obj->idx.oid.hash, sha1);
        return obj;
 }
@@ -1348,7 +1348,7 @@ static int delta_pos_compare(const void *_a, const void *_b)
        return a->obj_no - b->obj_no;
 }
 
-static void fix_unresolved_deltas(struct sha1file *f)
+static void fix_unresolved_deltas(struct hashfile *f)
 {
        struct ref_delta_entry **sorted_by_pos;
        int i;
@@ -1375,14 +1375,15 @@ static void fix_unresolved_deltas(struct sha1file *f)
 
                if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
                        continue;
-               base_obj->data = read_sha1_file(d->sha1, &type, &base_obj->size);
+               base_obj->data = read_object_file(&d->oid, &type,
+                                                 &base_obj->size);
                if (!base_obj->data)
                        continue;
 
-               if (check_sha1_signature(d->sha1, base_obj->data,
-                               base_obj->size, typename(type)))
-                       die(_("local object %s is corrupt"), sha1_to_hex(d->sha1));
-               base_obj->obj = append_obj_to_pack(f, d->sha1,
+               if (check_object_signature(&d->oid, base_obj->data,
+                               base_obj->size, type_name(type)))
+                       die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
+               base_obj->obj = append_obj_to_pack(f, d->oid.hash,
                                        base_obj->data, base_obj->size, type);
                find_unresolved_deltas(base_obj);
                display_progress(progress, nr_resolved_deltas);
@@ -1404,7 +1405,7 @@ static const char *derive_filename(const char *pack_name, const char *suffix,
 }
 
 static void write_special_file(const char *suffix, const char *msg,
-                              const char *pack_name, const unsigned char *sha1,
+                              const char *pack_name, const unsigned char *hash,
                               const char **report)
 {
        struct strbuf name_buf = STRBUF_INIT;
@@ -1415,7 +1416,7 @@ static void write_special_file(const char *suffix, const char *msg,
        if (pack_name)
                filename = derive_filename(pack_name, suffix, &name_buf);
        else
-               filename = odb_pack_name(&name_buf, sha1, suffix);
+               filename = odb_pack_name(&name_buf, hash, suffix);
 
        fd = odb_pack_keep(filename);
        if (fd < 0) {
@@ -1439,7 +1440,7 @@ static void write_special_file(const char *suffix, const char *msg,
 static void final(const char *final_pack_name, const char *curr_pack_name,
                  const char *final_index_name, const char *curr_index_name,
                  const char *keep_msg, const char *promisor_msg,
-                 unsigned char *sha1)
+                 unsigned char *hash)
 {
        const char *report = "pack";
        struct strbuf pack_name = STRBUF_INIT;
@@ -1456,15 +1457,15 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
        }
 
        if (keep_msg)
-               write_special_file("keep", keep_msg, final_pack_name, sha1,
+               write_special_file("keep", keep_msg, final_pack_name, hash,
                                   &report);
        if (promisor_msg)
                write_special_file("promisor", promisor_msg, final_pack_name,
-                                  sha1, NULL);
+                                  hash, NULL);
 
        if (final_pack_name != curr_pack_name) {
                if (!final_pack_name)
-                       final_pack_name = odb_pack_name(&pack_name, sha1, "pack");
+                       final_pack_name = odb_pack_name(&pack_name, hash, "pack");
                if (finalize_object_file(curr_pack_name, final_pack_name))
                        die(_("cannot store pack file"));
        } else if (from_stdin)
@@ -1472,18 +1473,18 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
 
        if (final_index_name != curr_index_name) {
                if (!final_index_name)
-                       final_index_name = odb_pack_name(&index_name, sha1, "idx");
+                       final_index_name = odb_pack_name(&index_name, hash, "idx");
                if (finalize_object_file(curr_index_name, final_index_name))
                        die(_("cannot store index file"));
        } else
                chmod(final_index_name, 0444);
 
        if (!from_stdin) {
-               printf("%s\n", sha1_to_hex(sha1));
+               printf("%s\n", sha1_to_hex(hash));
        } else {
                struct strbuf buf = STRBUF_INIT;
 
-               strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1));
+               strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
                write_or_die(1, buf.buf, buf.len);
                strbuf_release(&buf);
 
@@ -1617,7 +1618,7 @@ static void show_pack_info(int stat_only)
                        continue;
                printf("%s %-6s %lu %lu %"PRIuMAX,
                       oid_to_hex(&obj->idx.oid),
-                      typename(obj->real_type), obj->size,
+                      type_name(obj->real_type), obj->size,
                       (unsigned long)(obj[1].idx.offset - obj->idx.offset),
                       (uintmax_t)obj->idx.offset);
                if (is_delta_type(obj->type)) {
@@ -1654,7 +1655,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
        struct strbuf index_name_buf = STRBUF_INIT;
        struct pack_idx_entry **idx_objects;
        struct pack_idx_option opts;
-       unsigned char pack_sha1[20];
+       unsigned char pack_hash[GIT_MAX_RAWSZ];
        unsigned foreign_nr = 1;        /* zero is a "good" value, assume bad */
        int report_end_of_input = 0;
 
@@ -1690,6 +1691,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
                        } else if (!strcmp(arg, "--check-self-contained-and-connected")) {
                                strict = 1;
                                check_self_contained_and_connected = 1;
+                       } else if (!strcmp(arg, "--fsck-objects")) {
+                               do_fsck_object = 1;
                        } else if (!strcmp(arg, "--verify")) {
                                verify = 1;
                        } else if (!strcmp(arg, "--verify-stat")) {
@@ -1791,11 +1794,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
        if (show_stat)
                obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
        ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
-       parse_pack_objects(pack_sha1);
+       parse_pack_objects(pack_hash);
        if (report_end_of_input)
                write_in_full(2, "\0", 1);
        resolve_deltas();
-       conclude_pack(fix_thin_pack, curr_pack, pack_sha1);
+       conclude_pack(fix_thin_pack, curr_pack, pack_hash);
        free(ofs_deltas);
        free(ref_deltas);
        if (strict)
@@ -1807,14 +1810,14 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
        ALLOC_ARRAY(idx_objects, nr_objects);
        for (i = 0; i < nr_objects; i++)
                idx_objects[i] = &objects[i].idx;
-       curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
+       curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
        free(idx_objects);
 
        if (!verify)
                final(pack_name, curr_pack,
                      index_name, curr_index,
                      keep_msg, promisor_msg,
-                     pack_sha1);
+                     pack_hash);
        else
                close(input_fd);
        free(objects);
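
The index-pack conversion above stops hard-coding SHA-1: git_SHA_CTX and the git_SHA1_* calls become the_hash_algo's function pointers, and literal 20s become the_hash_algo->rawsz, with buffers sized GIT_MAX_RAWSZ. The general pattern, as a minimal sketch (buf and len stand for arbitrary input already in memory):

	git_hash_ctx ctx;
	unsigned char hash[GIT_MAX_RAWSZ];

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, buf, len);
	the_hash_algo->final_fn(hash, &ctx);
	/* hash now holds the_hash_algo->rawsz significant bytes */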
index c9b7946bade9e10f799942137480e71ee3233701..68ff4ad75ace6566a233c1343fed93365c5abbe4 100644 (file)
@@ -24,11 +24,11 @@ static int init_is_bare_repository = 0;
 static int init_shared_repository = -1;
 static const char *init_db_template_dir;
 
-static void copy_templates_1(struct strbuf *path, struct strbuf *template,
+static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
                             DIR *dir)
 {
        size_t path_baselen = path->len;
-       size_t template_baselen = template->len;
+       size_t template_baselen = template_path->len;
        struct dirent *de;
 
        /* Note: if ".git/hooks" file exists in the repository being
@@ -44,12 +44,12 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template,
                int exists = 0;
 
                strbuf_setlen(path, path_baselen);
-               strbuf_setlen(template, template_baselen);
+               strbuf_setlen(template_path, template_baselen);
 
                if (de->d_name[0] == '.')
                        continue;
                strbuf_addstr(path, de->d_name);
-               strbuf_addstr(template, de->d_name);
+               strbuf_addstr(template_path, de->d_name);
                if (lstat(path->buf, &st_git)) {
                        if (errno != ENOENT)
                                die_errno(_("cannot stat '%s'"), path->buf);
@@ -57,36 +57,36 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template,
                else
                        exists = 1;
 
-               if (lstat(template->buf, &st_template))
-                       die_errno(_("cannot stat template '%s'"), template->buf);
+               if (lstat(template_path->buf, &st_template))
+                       die_errno(_("cannot stat template '%s'"), template_path->buf);
 
                if (S_ISDIR(st_template.st_mode)) {
-                       DIR *subdir = opendir(template->buf);
+                       DIR *subdir = opendir(template_path->buf);
                        if (!subdir)
-                               die_errno(_("cannot opendir '%s'"), template->buf);
+                               die_errno(_("cannot opendir '%s'"), template_path->buf);
                        strbuf_addch(path, '/');
-                       strbuf_addch(template, '/');
-                       copy_templates_1(path, template, subdir);
+                       strbuf_addch(template_path, '/');
+                       copy_templates_1(path, template_path, subdir);
                        closedir(subdir);
                }
                else if (exists)
                        continue;
                else if (S_ISLNK(st_template.st_mode)) {
                        struct strbuf lnk = STRBUF_INIT;
-                       if (strbuf_readlink(&lnk, template->buf, 0) < 0)
-                               die_errno(_("cannot readlink '%s'"), template->buf);
+                       if (strbuf_readlink(&lnk, template_path->buf, 0) < 0)
+                               die_errno(_("cannot readlink '%s'"), template_path->buf);
                        if (symlink(lnk.buf, path->buf))
                                die_errno(_("cannot symlink '%s' '%s'"),
                                          lnk.buf, path->buf);
                        strbuf_release(&lnk);
                }
                else if (S_ISREG(st_template.st_mode)) {
-                       if (copy_file(path->buf, template->buf, st_template.st_mode))
+                       if (copy_file(path->buf, template_path->buf, st_template.st_mode))
                                die_errno(_("cannot copy '%s' to '%s'"),
-                                         template->buf, path->buf);
+                                         template_path->buf, path->buf);
                }
                else
-                       error(_("ignoring template %s"), template->buf);
+                       error(_("ignoring template %s"), template_path->buf);
        }
 }
 
index 46b4ca13e5c11ff43c15d80e618914e671ff17a0..71f68a3e4f59d987c653fb8f32a4d8770e9c6654 100644 (file)
@@ -29,6 +29,8 @@
 #include "gpg-interface.h"
 #include "progress.h"
 
+#define MAIL_DEFAULT_WRAP 72
+
 /* Set a default date-time format for git log ("log.date" config variable) */
 static const char *default_date_mode = NULL;
 
@@ -516,7 +518,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
 {
        unsigned long size;
        enum object_type type;
-       char *buf = read_sha1_file(oid->hash, &type, &size);
+       char *buf = read_object_file(oid, &type, &size);
        int offset = 0;
 
        if (!buf)
@@ -539,7 +541,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
        return 0;
 }
 
-static int show_tree_object(const unsigned char *sha1,
+static int show_tree_object(const struct object_id *oid,
                struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
@@ -1044,7 +1046,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
 
        shortlog_init(&log);
        log.wrap_lines = 1;
-       log.wrap = 72;
+       log.wrap = MAIL_DEFAULT_WRAP;
        log.in1 = 2;
        log.in2 = 4;
        log.file = rev->diffopt.file;
@@ -1061,6 +1063,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
 
        memcpy(&opts, &rev->diffopt, sizeof(opts));
        opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+       opts.stat_width = MAIL_DEFAULT_WRAP;
 
        diff_setup_done(&opts);
 
@@ -1614,6 +1617,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
                (!rev.diffopt.output_format ||
                 rev.diffopt.output_format == DIFF_FORMAT_PATCH))
                rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY;
+       if (!rev.diffopt.stat_width)
+               rev.diffopt.stat_width = MAIL_DEFAULT_WRAP;
 
        /* Always generate a patch */
        rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
@@ -1868,12 +1873,12 @@ static void print_commit(char sign, struct commit *commit, int verbose,
 {
        if (!verbose) {
                fprintf(file, "%c %s\n", sign,
-                      find_unique_abbrev(commit->object.oid.hash, abbrev));
+                      find_unique_abbrev(&commit->object.oid, abbrev));
        } else {
                struct strbuf buf = STRBUF_INIT;
                pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
                fprintf(file, "%c %s %s\n", sign,
-                      find_unique_abbrev(commit->object.oid.hash, abbrev),
+                      find_unique_abbrev(&commit->object.oid, abbrev),
                       buf.buf);
                strbuf_release(&buf);
        }
index 2fc836e33086d5b70b86a3fc746f9e75706ef7ea..a71f6bd088a2666f0637463e1c168171dd319a96 100644 (file)
@@ -240,7 +240,7 @@ static void show_ce(struct repository *repo, struct dir_struct *dir,
                        printf("%s%06o %s %d\t",
                               tag,
                               ce->ce_mode,
-                              find_unique_abbrev(ce->oid.hash, abbrev),
+                              find_unique_abbrev(&ce->oid, abbrev),
                               ce_stage(ce));
                }
                write_eolinfo(repo->index, ce, fullname);
@@ -271,7 +271,7 @@ static void show_ru_info(const struct index_state *istate)
                        if (!ui->mode[i])
                                continue;
                        printf("%s%06o %s %d\t", tag_resolve_undo, ui->mode[i],
-                              find_unique_abbrev(ui->sha1[i], abbrev),
+                              find_unique_abbrev(&ui->oid[i], abbrev),
                               i + 1);
                        write_name(path);
                }
index c4be98ab9e84fdcde2842b88d4bb60600f7bf627..380c180270e263ed6701aa5a003ac999dfcfcec0 100644 (file)
@@ -2,6 +2,7 @@
 #include "cache.h"
 #include "transport.h"
 #include "remote.h"
+#include "refs.h"
 
 static const char * const ls_remote_usage[] = {
        N_("git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>]\n"
@@ -43,6 +44,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
        int show_symref_target = 0;
        const char *uploadpack = NULL;
        const char **pattern = NULL;
+       struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
 
        struct remote *remote;
        struct transport *transport;
@@ -60,8 +62,9 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
                OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
                OPT_BOOL(0, "get-url", &get_url,
                         N_("take url.<base>.insteadOf into account")),
-               OPT_SET_INT(0, "exit-code", &status,
-                           N_("exit with exit code 2 if no matching refs are found"), 2),
+               OPT_SET_INT_F(0, "exit-code", &status,
+                             N_("exit with exit code 2 if no matching refs are found"),
+                             2, PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "symref", &show_symref_target,
                         N_("show underlying ref in addition to the object pointed by it")),
                OPT_END()
@@ -74,8 +77,17 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
        if (argc > 1) {
                int i;
                pattern = xcalloc(argc, sizeof(const char *));
-               for (i = 1; i < argc; i++)
+               for (i = 1; i < argc; i++) {
+                       const char *glob;
                        pattern[i - 1] = xstrfmt("*/%s", argv[i]);
+
+                       glob = strchr(argv[i], '*');
+                       if (glob)
+                               argv_array_pushf(&ref_prefixes, "%.*s",
+                                                (int)(glob - argv[i]), argv[i]);
+                       else
+                               expand_ref_prefix(&ref_prefixes, argv[i]);
+               }
        }
 
        remote = remote_get(dest);
@@ -96,7 +108,7 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
        if (uploadpack != NULL)
                transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
 
-       ref = transport_get_remote_refs(transport);
+       ref = transport_get_remote_refs(transport, &ref_prefixes);
        if (transport_disconnect(transport))
                return 1;
 
index ef965408e8fc5d80fa9e9daf0264a91abccd978c..d44b4f9c27d31cfa331ccef7ae538d8a6e6c38c6 100644 (file)
@@ -60,7 +60,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
        return 0;
 }
 
-static int show_tree(const unsigned char *sha1, struct strbuf *base,
+static int show_tree(const struct object_id *oid, struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
        int retval = 0;
@@ -94,7 +94,7 @@ static int show_tree(const unsigned char *sha1, struct strbuf *base,
                        char size_text[24];
                        if (!strcmp(type, blob_type)) {
                                unsigned long size;
-                               if (sha1_object_info(sha1, &size) == OBJ_BAD)
+                               if (oid_object_info(oid, &size) == OBJ_BAD)
                                        xsnprintf(size_text, sizeof(size_text),
                                                  "BAD");
                                else
@@ -103,11 +103,11 @@ static int show_tree(const unsigned char *sha1, struct strbuf *base,
                        } else
                                xsnprintf(size_text, sizeof(size_text), "-");
                        printf("%06o %s %s %7s\t", mode, type,
-                              find_unique_abbrev(sha1, abbrev),
+                              find_unique_abbrev(oid, abbrev),
                               size_text);
                } else
                        printf("%06o %s %s\t", mode, type,
-                              find_unique_abbrev(sha1, abbrev));
+                              find_unique_abbrev(oid, abbrev));
        }
        baselen = base->len;
        strbuf_addstr(base, pathname);
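sha1_object_info() becomes oid_object_info() in this hunk, still reporting the type and (optionally) the size, but keyed on a struct object_id. A small illustrative sketch, assuming git's internal cache.h; describe_object() is a hypothetical helper:

static void describe_object(const struct object_id *oid)
{
        unsigned long size;
        enum object_type type = oid_object_info(oid, &size);

        if (type < 0)
                printf("%s unknown\n", oid_to_hex(oid));
        else
                printf("%s %s %lu\n", oid_to_hex(oid), type_name(type), size);
}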
index d01ddecf6602eabdca97a175e5c2a57bf1257865..32736e0b1011f575d2585fd2e012501d807f9e9b 100644 (file)
@@ -60,7 +60,7 @@ static void *result(struct merge_list *entry, unsigned long *size)
        const char *path = entry->path;
 
        if (!entry->stage)
-               return read_sha1_file(entry->blob->object.oid.hash, &type, size);
+               return read_object_file(&entry->blob->object.oid, &type, size);
        base = NULL;
        if (entry->stage == 1) {
                base = entry->blob;
@@ -82,7 +82,8 @@ static void *origin(struct merge_list *entry, unsigned long *size)
        enum object_type type;
        while (entry) {
                if (entry->stage == 2)
-                       return read_sha1_file(entry->blob->object.oid.hash, &type, size);
+                       return read_object_file(&entry->blob->object.oid,
+                                               &type, size);
                entry = entry->link;
        }
        return NULL;
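read_sha1_file() is replaced by read_object_file() here; it takes the object id directly and otherwise behaves the same, returning a newly allocated buffer (or NULL) and reporting the type and size. A minimal caller sketch, assuming git's internal cache.h; cat_blob() is a hypothetical helper:

static void cat_blob(const struct object_id *oid)
{
        enum object_type type;
        unsigned long size;
        void *buf = read_object_file(oid, &type, &size);

        if (!buf)
                die("unable to read %s", oid_to_hex(oid));
        if (type == OBJ_BLOB && size)
                write_or_die(1, buf, size);
        free(buf);
}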
index 96d56cbdd2f2bbb9a76d2027037b54f32c892c23..9db5a2cf16e189bb3bd0ceec7d34c6651d630225 100644 (file)
@@ -33,6 +33,7 @@
 #include "sequencer.h"
 #include "string-list.h"
 #include "packfile.h"
+#include "tag.h"
 
 #define DEFAULT_TWOHEAD (1<<0)
 #define DEFAULT_OCTOPUS (1<<1)
@@ -520,7 +521,7 @@ static void merge_name(const char *remote, struct strbuf *msg)
                if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
                        strbuf_addf(msg, "%s\t\t%s '%s'\n",
                                    oid_to_hex(&desc->obj->oid),
-                                   typename(desc->obj->type),
+                                   type_name(desc->obj->type),
                                    remote);
                        goto cleanup;
                }
@@ -638,7 +639,7 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
 
 static void write_tree_trivial(struct object_id *oid)
 {
-       if (write_cache_as_tree(oid->hash, 0, NULL))
+       if (write_cache_as_tree(oid, 0, NULL))
                die(_("git write-tree failed to write a tree"));
 }
 
@@ -651,10 +652,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
 
        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        refresh_cache(REFRESH_QUIET);
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return error(_("Unable to write index."));
-       rollback_lock_file(&lock);
 
        if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree")) {
                int clean, x;
@@ -691,10 +691,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
                                remoteheads->item, reversed, &result);
                if (clean < 0)
                        exit(128);
-               if (active_cache_changed &&
-                   write_locked_index(&the_index, &lock, COMMIT_LOCK))
+               if (write_locked_index(&the_index, &lock,
+                                      COMMIT_LOCK | SKIP_IF_UNCHANGED))
                        die (_("unable to write %s"), get_index_file());
-               rollback_lock_file(&lock);
                return clean ? 0 : 1;
        } else {
                return try_merge_command(strategy, xopts_nr, xopts,
@@ -810,18 +809,17 @@ static int merge_trivial(struct commit *head, struct commit_list *remoteheads)
 
        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        refresh_cache(REFRESH_QUIET);
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return error(_("Unable to write index."));
-       rollback_lock_file(&lock);
 
        write_tree_trivial(&result_tree);
        printf(_("Wonderful.\n"));
        pptr = commit_list_append(head, pptr);
        pptr = commit_list_append(remoteheads->item, pptr);
        prepare_to_commit(remoteheads);
-       if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents,
-                       result_commit.hash, NULL, sign_commit))
+       if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+                       &result_commit, NULL, sign_commit))
                die(_("failed to write commit object"));
        finish(head, remoteheads, &result_commit, "In-index merge");
        drop_save();
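The write_locked_index() calls in these hunks use the new SKIP_IF_UNCHANGED flag, which folds the old "check active_cache_changed, write, otherwise roll back the lock" sequence into the call itself. A sketch of the resulting idiom, assuming git's internal cache.h; write_index_if_changed() is a hypothetical wrapper:

static int write_index_if_changed(void)
{
        static struct lock_file lock;

        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        refresh_cache(REFRESH_QUIET);
        /* commits the lock only if the in-core index changed; otherwise the
         * lockfile is rolled back internally and 0 is returned */
        if (write_locked_index(&the_index, &lock, COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return error(_("Unable to write index."));
        return 0;
}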
@@ -845,8 +843,8 @@ static int finish_automerge(struct commit *head,
                commit_list_insert(head, &parents);
        strbuf_addch(&merge_msg, '\n');
        prepare_to_commit(remoteheads);
-       if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents,
-                       result_commit.hash, NULL, sign_commit))
+       if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+                       &result_commit, NULL, sign_commit))
                die(_("failed to write commit object"));
        strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
        finish(head, remoteheads, &result_commit, buf.buf);
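write_cache_as_tree() and commit_tree() now work in terms of struct object_id, as the merge_trivial() and finish_automerge() hunks above show. A hedged sketch of the combined pattern (write the index as a tree, then wrap it in a commit), assuming git's internal cache-tree.h and commit.h; commit_current_index() is a hypothetical helper:

static void commit_current_index(struct commit *parent, struct object_id *result)
{
        struct object_id tree_oid;
        struct commit_list *parents = NULL;
        struct strbuf msg = STRBUF_INIT;

        if (write_cache_as_tree(&tree_oid, 0, NULL))
                die(_("git write-tree failed to write a tree"));
        commit_list_insert(parent, &parents);
        strbuf_addstr(&msg, "example commit\n");
        if (commit_tree(msg.buf, msg.len, &tree_oid, parents, result,
                        NULL, NULL)) /* default author, no GPG signing */
                die(_("failed to write commit object"));
        strbuf_release(&msg);
}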
@@ -1125,6 +1123,43 @@ static struct commit_list *collect_parents(struct commit *head_commit,
        return remoteheads;
 }
 
+static int merging_a_throwaway_tag(struct commit *commit)
+{
+       char *tag_ref;
+       struct object_id oid;
+       int is_throwaway_tag = 0;
+
+       /* Are we merging a tag? */
+       if (!merge_remote_util(commit) ||
+           !merge_remote_util(commit)->obj ||
+           merge_remote_util(commit)->obj->type != OBJ_TAG)
+               return is_throwaway_tag;
+
+       /*
+        * Now we know we are merging a tag object.  Are we downstream
+        * and following the tags from upstream?  If so, we must have
+        * the tag object pointed at by "refs/tags/$T" where $T is the
+        * tagname recorded in the tag object.  We want to allow such
+        * a "just to catch up" merge to fast-forward.
+        *
+        * Otherwise, we are playing an integrator's role, making a
+        * merge with a throw-away tag from a contributor with
+        * something like "git pull $contributor $signed_tag".
+        * We want to forbid such a merge from fast-forwarding
+        * by default; otherwise we would not keep the signature
+        * anywhere.
+        */
+       tag_ref = xstrfmt("refs/tags/%s",
+                         ((struct tag *)merge_remote_util(commit)->obj)->tag);
+       if (!read_ref(tag_ref, &oid) &&
+           !oidcmp(&oid, &merge_remote_util(commit)->obj->oid))
+               is_throwaway_tag = 0;
+       else
+               is_throwaway_tag = 1;
+       free(tag_ref);
+       return is_throwaway_tag;
+}
+
 int cmd_merge(int argc, const char **argv, const char *prefix)
 {
        struct object_id result_tree, stash, head_oid;
@@ -1289,7 +1324,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
 
                        check_commit_signature(commit, &signature_check);
 
-                       find_unique_abbrev_r(hex, commit->object.oid.hash, DEFAULT_ABBREV);
+                       find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV);
                        switch (signature_check.result) {
                        case 'G':
                                break;
@@ -1322,10 +1357,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                            oid_to_hex(&commit->object.oid));
                setenv(buf.buf, merge_remote_util(commit)->name, 1);
                strbuf_reset(&buf);
-               if (fast_forward != FF_ONLY &&
-                   merge_remote_util(commit) &&
-                   merge_remote_util(commit)->obj &&
-                   merge_remote_util(commit)->obj->type == OBJ_TAG)
+               if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit))
                        fast_forward = FF_NO;
        }
 
@@ -1385,9 +1417,9 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
 
                if (verbosity >= 0) {
                        printf(_("Updating %s..%s\n"),
-                              find_unique_abbrev(head_commit->object.oid.hash,
+                              find_unique_abbrev(&head_commit->object.oid,
                                                  DEFAULT_ABBREV),
-                              find_unique_abbrev(remoteheads->item->object.oid.hash,
+                              find_unique_abbrev(&remoteheads->item->object.oid,
                                                  DEFAULT_ABBREV));
                }
                strbuf_addstr(&msg, "Fast-forward");
index 031b750f068dea5e1652129a5df6d684f6bca850..9f5a50a8fd5b0b3acf664df8d20ae5bdee288fe8 100644 (file)
 /*
  * We refuse to tag something we can't verify. Just because.
  */
-static int verify_object(const unsigned char *sha1, const char *expected_type)
+static int verify_object(const struct object_id *oid, const char *expected_type)
 {
        int ret = -1;
        enum object_type type;
        unsigned long size;
-       void *buffer = read_sha1_file(sha1, &type, &size);
-       const unsigned char *repl = lookup_replace_object(sha1);
+       void *buffer = read_object_file(oid, &type, &size);
+       const struct object_id *repl = lookup_replace_object(oid);
 
        if (buffer) {
                if (type == type_from_string(expected_type))
-                       ret = check_sha1_signature(repl, buffer, size, expected_type);
+                       ret = check_object_signature(repl, buffer, size, expected_type);
                free(buffer);
        }
        return ret;
@@ -38,8 +38,8 @@ static int verify_tag(char *buffer, unsigned long size)
 {
        int typelen;
        char type[20];
-       unsigned char sha1[20];
-       const char *object, *type_line, *tag_line, *tagger_line, *lb, *rb;
+       struct object_id oid;
+       const char *object, *type_line, *tag_line, *tagger_line, *lb, *rb, *p;
        size_t len;
 
        if (size < 84)
@@ -52,11 +52,11 @@ static int verify_tag(char *buffer, unsigned long size)
        if (memcmp(object, "object ", 7))
                return error("char%d: does not start with \"object \"", 0);
 
-       if (get_sha1_hex(object + 7, sha1))
+       if (parse_oid_hex(object + 7, &oid, &p))
                return error("char%d: could not get SHA1 hash", 7);
 
        /* Verify type line */
-       type_line = object + 48;
+       type_line = p + 1;
        if (memcmp(type_line - 1, "\ntype ", 6))
                return error("char%d: could not find \"\\ntype \"", 47);
 
@@ -80,8 +80,8 @@ static int verify_tag(char *buffer, unsigned long size)
        type[typelen] = 0;
 
        /* Verify that the object matches */
-       if (verify_object(sha1, type))
-               return error("char%d: could not verify object %s", 7, sha1_to_hex(sha1));
+       if (verify_object(&oid, type))
+               return error("char%d: could not verify object %s", 7, oid_to_hex(&oid));
 
        /* Verify the tag-name: we don't allow control characters or spaces in it */
        tag_line += 4;
@@ -151,7 +151,7 @@ static int verify_tag(char *buffer, unsigned long size)
 int cmd_mktag(int argc, const char **argv, const char *prefix)
 {
        struct strbuf buf = STRBUF_INIT;
-       unsigned char result_sha1[20];
+       struct object_id result;
 
        if (argc != 1)
                usage("git mktag");
@@ -165,10 +165,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
        if (verify_tag(buf.buf, buf.len) < 0)
                die("invalid tag signature file");
 
-       if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0)
+       if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0)
                die("unable to write tag file");
 
        strbuf_release(&buf);
-       printf("%s\n", sha1_to_hex(result_sha1));
+       printf("%s\n", oid_to_hex(&result));
        return 0;
 }
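mktag now reads the object name with parse_oid_hex(), which consumes however many hex digits the active hash uses and leaves an end pointer just past them, so the type line is located relative to that pointer instead of a hard-coded column 48. A sketch of the same parsing pattern, assuming git's internal cache.h; parse_object_line() is a hypothetical helper:

static int parse_object_line(const char *line, struct object_id *oid)
{
        const char *p;

        if (!skip_prefix(line, "object ", &line))
                return error("line does not start with \"object \"");
        if (parse_oid_hex(line, oid, &p))
                return error("could not parse object name");
        if (*p != '\n')
                return error("junk after object name");
        return 0;
}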
index da0fd8cd706659a8784da8112cd1b3acd306375f..263c530315a4fe435a88f2042c768350f72e837b 100644 (file)
 
 static struct treeent {
        unsigned mode;
-       unsigned char sha1[20];
+       struct object_id oid;
        int len;
        char name[FLEX_ARRAY];
 } **entries;
 static int alloc, used;
 
-static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
+static void append_to_tree(unsigned mode, struct object_id *oid, char *path)
 {
        struct treeent *ent;
        size_t len = strlen(path);
@@ -26,7 +26,7 @@ static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
        FLEX_ALLOC_MEM(ent, name, path, len);
        ent->mode = mode;
        ent->len = len;
-       hashcpy(ent->sha1, sha1);
+       oidcpy(&ent->oid, oid);
 
        ALLOC_GROW(entries, used + 1, alloc);
        entries[used++] = ent;
@@ -40,7 +40,7 @@ static int ent_compare(const void *a_, const void *b_)
                                 b->name, b->len, b->mode);
 }
 
-static void write_tree(unsigned char *sha1)
+static void write_tree(struct object_id *oid)
 {
        struct strbuf buf;
        size_t size;
@@ -54,10 +54,10 @@ static void write_tree(unsigned char *sha1)
        for (i = 0; i < used; i++) {
                struct treeent *ent = entries[i];
                strbuf_addf(&buf, "%o %s%c", ent->mode, ent->name, '\0');
-               strbuf_add(&buf, ent->sha1, 20);
+               strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz);
        }
 
-       write_sha1_file(buf.buf, buf.len, tree_type, sha1);
+       write_object_file(buf.buf, buf.len, tree_type, oid);
        strbuf_release(&buf);
 }
 
@@ -69,11 +69,12 @@ static const char *mktree_usage[] = {
 static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_missing)
 {
        char *ptr, *ntr;
+       const char *p;
        unsigned mode;
        enum object_type mode_type; /* object type derived from mode */
        enum object_type obj_type; /* object type derived from sha */
        char *path, *to_free = NULL;
-       unsigned char sha1[20];
+       struct object_id oid;
 
        ptr = buf;
        /*
@@ -85,9 +86,8 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
                die("input format error: %s", buf);
        ptr = ntr + 1; /* type */
        ntr = strchr(ptr, ' ');
-       if (!ntr || buf + len <= ntr + 40 ||
-           ntr[41] != '\t' ||
-           get_sha1_hex(ntr + 1, sha1))
+       if (!ntr || parse_oid_hex(ntr + 1, &oid, &p) ||
+           *p != '\t')
                die("input format error: %s", buf);
 
        /* It is perfectly normal if we do not have a commit from a submodule */
@@ -112,16 +112,16 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
        mode_type = object_type(mode);
        if (mode_type != type_from_string(ptr)) {
                die("entry '%s' object type (%s) doesn't match mode type (%s)",
-                       path, ptr, typename(mode_type));
+                       path, ptr, type_name(mode_type));
        }
 
        /* Check the type of object identified by sha1 */
-       obj_type = sha1_object_info(sha1, NULL);
+       obj_type = oid_object_info(&oid, NULL);
        if (obj_type < 0) {
                if (allow_missing) {
                        ; /* no problem - missing objects are presumed to be of the right type */
                } else {
-                       die("entry '%s' object %s is unavailable", path, sha1_to_hex(sha1));
+                       die("entry '%s' object %s is unavailable", path, oid_to_hex(&oid));
                }
        } else {
                if (obj_type != mode_type) {
@@ -131,18 +131,18 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
                         * because the new tree entry will never be correct.
                         */
                        die("entry '%s' object %s is a %s but specified type was (%s)",
-                               path, sha1_to_hex(sha1), typename(obj_type), typename(mode_type));
+                               path, oid_to_hex(&oid), type_name(obj_type), type_name(mode_type));
                }
        }
 
-       append_to_tree(mode, sha1, path);
+       append_to_tree(mode, &oid, path);
        free(to_free);
 }
 
 int cmd_mktree(int ac, const char **av, const char *prefix)
 {
        struct strbuf sb = STRBUF_INIT;
-       unsigned char sha1[20];
+       struct object_id oid;
        int nul_term_line = 0;
        int allow_missing = 0;
        int is_batch_mode = 0;
@@ -181,8 +181,8 @@ int cmd_mktree(int ac, const char **av, const char *prefix)
                         */
                        ; /* skip creating an empty tree */
                } else {
-                       write_tree(sha1);
-                       puts(sha1_to_hex(sha1));
+                       write_tree(&oid);
+                       puts(oid_to_hex(&oid));
                        fflush(stdout);
                }
                used=0; /* reset tree entry buffer for re-use in batch mode */
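In mktree the serialised entry now appends the_hash_algo->rawsz bytes of raw hash rather than a literal 20, and write_object_file() fills in a struct object_id. A compact sketch of that pattern, assuming git's internal cache.h; write_one_entry_tree() is a hypothetical helper:

static void write_one_entry_tree(const char *name, unsigned mode,
                                 const struct object_id *entry_oid,
                                 struct object_id *result)
{
        struct strbuf buf = STRBUF_INIT;

        strbuf_addf(&buf, "%o %s%c", mode, name, '\0');
        /* raw hash width comes from the active hash algorithm, not 20 */
        strbuf_add(&buf, entry_oid->hash, the_hash_algo->rawsz);
        if (write_object_file(buf.buf, buf.len, tree_type, result) < 0)
                die("unable to write tree object");
        strbuf_release(&buf);
}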
index cf3684d907a2fe3408581502c682d6290c790827..6d141f7a532c08e52f1f5f82330d046c60073f93 100644 (file)
@@ -122,7 +122,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
        struct option builtin_mv_options[] = {
                OPT__VERBOSE(&verbose, N_("be verbose")),
                OPT__DRY_RUN(&show_only, N_("dry run")),
-               OPT__FORCE(&force, N_("force move/rename even if target exists")),
+               OPT__FORCE(&force, N_("force move/rename even if target exists"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")),
                OPT_END(),
        };
@@ -286,15 +287,14 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
 
                pos = cache_name_pos(src, strlen(src));
                assert(pos >= 0);
-               if (!show_only)
-                       rename_cache_entry_at(pos, dst);
+               rename_cache_entry_at(pos, dst);
        }
 
        if (gitmodules_modified)
                stage_updated_gitmodules(&the_index);
 
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock_file,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                die(_("Unable to write new index file"));
 
        return 0;
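OPT__FORCE() has grown a flags argument, and the _F variants of the option macros (OPT_SET_INT_F, OPT_BOOL_F, seen in ls-remote and push) let a command tag individual options with PARSE_OPT_NOCOMPLETE so the shell-completion machinery can leave them out by default. A sketch of an option table using the flag, assuming git's internal parse-options.h; the names force, thin and example_options are illustrative only:

static int force, thin;

static struct option example_options[] = {
        OPT__FORCE(&force, N_("force the operation"), PARSE_OPT_NOCOMPLETE),
        OPT_BOOL_F(0, "thin", &thin, N_("use thin pack"), PARSE_OPT_NOCOMPLETE),
        OPT_END()
};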
index 9e088ebd11dced248640df9e17adbbd8b9a73ffb..387ddf85d21a443f060dbb212fe95a983d9e4f58 100644 (file)
@@ -328,7 +328,7 @@ static void show_name(const struct object *obj,
        else if (allow_undefined)
                printf("undefined\n");
        else if (always)
-               printf("%s\n", find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+               printf("%s\n", find_unique_abbrev(oid, DEFAULT_ABBREV));
        else
                die("cannot describe '%s'", oid_to_hex(oid));
        strbuf_release(&buf);
index 7c8176164561be1d57771ad2fdeb9260cc1d8c87..921e08d5bf545ac4124933459d257d95e2144409 100644 (file)
@@ -118,11 +118,11 @@ static int list_each_note(const struct object_id *object_oid,
        return 0;
 }
 
-static void copy_obj_to_fd(int fd, const unsigned char *sha1)
+static void copy_obj_to_fd(int fd, const struct object_id *oid)
 {
        unsigned long size;
        enum object_type type;
-       char *buf = read_sha1_file(sha1, &type, &size);
+       char *buf = read_object_file(oid, &type, &size);
        if (buf) {
                if (size)
                        write_or_die(fd, buf, size);
@@ -162,7 +162,7 @@ static void write_commented_object(int fd, const struct object_id *object)
 }
 
 static void prepare_note_data(const struct object_id *object, struct note_data *d,
-               const unsigned char *old_note)
+               const struct object_id *old_note)
 {
        if (d->use_editor || !d->given) {
                int fd;
@@ -198,9 +198,9 @@ static void prepare_note_data(const struct object_id *object, struct note_data *
        }
 }
 
-static void write_note_data(struct note_data *d, unsigned char *sha1)
+static void write_note_data(struct note_data *d, struct object_id *oid)
 {
-       if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) {
+       if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) {
                error(_("unable to write note object"));
                if (d->edit_path)
                        error(_("the note contents have been left in %s"),
@@ -253,7 +253,7 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset)
 
        if (get_oid(arg, &object))
                die(_("failed to resolve '%s' as a valid ref."), arg);
-       if (!(buf = read_sha1_file(object.hash, &type, &len))) {
+       if (!(buf = read_object_file(&object, &type, &len))) {
                free(buf);
                die(_("failed to read object '%s'."), arg);
        }
@@ -413,7 +413,7 @@ static int add(int argc, const char **argv, const char *prefix)
                        parse_reuse_arg},
                OPT_BOOL(0, "allow-empty", &allow_empty,
                        N_("allow storing empty note")),
-               OPT__FORCE(&force, N_("replace existing notes")),
+               OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
 
@@ -457,9 +457,9 @@ static int add(int argc, const char **argv, const char *prefix)
                        oid_to_hex(&object));
        }
 
-       prepare_note_data(&object, &d, note ? note->hash : NULL);
+       prepare_note_data(&object, &d, note);
        if (d.buf.len || allow_empty) {
-               write_note_data(&d, new_note.hash);
+               write_note_data(&d, &new_note);
                if (add_note(t, &object, &new_note, combine_notes_overwrite))
                        die("BUG: combine_notes_overwrite failed");
                commit_notes(t, "Notes added by 'git notes add'");
@@ -484,7 +484,7 @@ static int copy(int argc, const char **argv, const char *prefix)
        struct notes_tree *t;
        const char *rewrite_cmd = NULL;
        struct option options[] = {
-               OPT__FORCE(&force, N_("replace existing notes")),
+               OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "stdin", &from_stdin, N_("read objects from stdin")),
                OPT_STRING(0, "for-rewrite", &rewrite_cmd, N_("command"),
                           N_("load rewriting config for <command> (implies "
@@ -602,13 +602,13 @@ static int append_edit(int argc, const char **argv, const char *prefix)
        t = init_notes_check(argv[0], NOTES_INIT_WRITABLE);
        note = get_note(t, &object);
 
-       prepare_note_data(&object, &d, edit && note ? note->hash : NULL);
+       prepare_note_data(&object, &d, edit && note ? note : NULL);
 
        if (note && !edit) {
                /* Append buf to previous note contents */
                unsigned long size;
                enum object_type type;
-               char *prev_buf = read_sha1_file(note->hash, &type, &size);
+               char *prev_buf = read_object_file(note, &type, &size);
 
                strbuf_grow(&d.buf, size + 1);
                if (d.buf.len && prev_buf && size)
@@ -619,7 +619,7 @@ static int append_edit(int argc, const char **argv, const char *prefix)
        }
 
        if (d.buf.len || allow_empty) {
-               write_note_data(&d, new_note.hash);
+               write_note_data(&d, &new_note);
                if (add_note(t, &object, &new_note, combine_notes_overwrite))
                        die("BUG: combine_notes_overwrite failed");
                logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
index 2f49b03cb167ed79ba5936637d3d4b9f00da9c97..4bdae5a1d8f4c988064475c0583e19c06dabf101 100644 (file)
@@ -124,11 +124,10 @@ static void *get_delta(struct object_entry *entry)
        void *buf, *base_buf, *delta_buf;
        enum object_type type;
 
-       buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
+       buf = read_object_file(&entry->idx.oid, &type, &size);
        if (!buf)
                die("unable to read %s", oid_to_hex(&entry->idx.oid));
-       base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
-                                 &base_size);
+       base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
        if (!base_buf)
                die("unable to read %s",
                    oid_to_hex(&entry->delta->idx.oid));
@@ -166,7 +165,7 @@ static unsigned long do_compress(void **pptr, unsigned long size)
        return stream.total_out;
 }
 
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
                                           const struct object_id *oid)
 {
        git_zstream stream;
@@ -190,7 +189,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
                        stream.next_out = obuf;
                        stream.avail_out = sizeof(obuf);
                        zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
-                       sha1write(f, obuf, stream.next_out - obuf);
+                       hashwrite(f, obuf, stream.next_out - obuf);
                        olen += stream.next_out - obuf;
                }
                if (stream.avail_in)
@@ -235,7 +234,7 @@ static int check_pack_inflate(struct packed_git *p,
                stream.total_in == len) ? 0 : -1;
 }
 
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
                struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
@@ -248,14 +247,14 @@ static void copy_pack_data(struct sha1file *f,
                in = use_pack(p, w_curs, offset, &avail);
                if (avail > len)
                        avail = (unsigned long)len;
-               sha1write(f, in, avail);
+               hashwrite(f, in, avail);
                offset += avail;
                len -= avail;
        }
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
                                           unsigned long limit, int usable_delta)
 {
        unsigned long size, datalen;
@@ -269,11 +268,10 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
        if (!usable_delta) {
                if (entry->type == OBJ_BLOB &&
                    entry->size > big_file_threshold &&
-                   (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
+                   (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
-                       buf = read_sha1_file(entry->idx.oid.hash, &type,
-                                            &size);
+                       buf = read_object_file(&entry->idx.oid, &type, &size);
                        if (!buf)
                                die(_("unable to read %s"),
                                    oid_to_hex(&entry->idx.oid));
@@ -328,8 +326,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, dheader + pos, sizeof(dheader) - pos);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
        } else if (type == OBJ_REF_DELTA) {
                /*
@@ -342,8 +340,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
@@ -352,13 +350,13 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
+               hashwrite(f, header, hdrlen);
        }
        if (st) {
                datalen = write_large_blob_data(st, f, &entry->idx.oid);
                close_istream(st);
        } else {
-               sha1write(f, buf, datalen);
+               hashwrite(f, buf, datalen);
                free(buf);
        }
 
@@ -366,7 +364,7 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
 {
        struct packed_git *p = entry->in_pack;
@@ -417,8 +415,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, dheader + pos, sizeof(dheader) - pos);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
                reused_delta++;
        } else if (type == OBJ_REF_DELTA) {
@@ -426,8 +424,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
@@ -435,7 +433,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
+               hashwrite(f, header, hdrlen);
        }
        copy_pack_data(f, p, &w_curs, offset, datalen);
        unuse_pack(&w_curs);
@@ -444,7 +442,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
                          struct object_entry *entry,
                          off_t write_offset)
 {
@@ -517,7 +515,7 @@ enum write_one_status {
        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
 };
 
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
                                       struct object_entry *e,
                                       off_t *offset)
 {
@@ -736,7 +734,7 @@ static struct object_entry **compute_write_order(void)
        return wo;
 }
 
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
 {
        unsigned char buffer[8192];
        off_t to_write, total;
@@ -767,7 +765,7 @@ static off_t write_reused_pack(struct sha1file *f)
                if (read_pack > to_write)
                        read_pack = to_write;
 
-               sha1write(f, buffer, read_pack);
+               hashwrite(f, buffer, read_pack);
                to_write -= read_pack;
 
                /*
@@ -796,7 +794,7 @@ static const char no_split_warning[] = N_(
 static void write_pack_file(void)
 {
        uint32_t i = 0, j;
-       struct sha1file *f;
+       struct hashfile *f;
        off_t offset;
        uint32_t nr_remaining = nr_result;
        time_t last_mtime = 0;
@@ -812,7 +810,7 @@ static void write_pack_file(void)
                char *pack_tmp_name = NULL;
 
                if (pack_to_stdout)
-                       f = sha1fd_throughput(1, "<stdout>", progress_state);
+                       f = hashfd_throughput(1, "<stdout>", progress_state);
                else
                        f = create_tmp_packfile(&pack_tmp_name);
 
@@ -839,11 +837,11 @@ static void write_pack_file(void)
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
-                       sha1close(f, oid.hash, CSUM_CLOSE);
+                       hashclose(f, oid.hash, CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
-                       sha1close(f, oid.hash, CSUM_FSYNC);
+                       hashclose(f, oid.hash, CSUM_FSYNC);
                } else {
-                       int fd = sha1close(f, oid.hash, 0);
+                       int fd = hashclose(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
@@ -1192,7 +1190,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
-       data = read_sha1_file(oid->hash, &type, &size);
+       data = read_object_file(oid, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
@@ -1353,7 +1351,7 @@ static void add_preferred_base(struct object_id *oid)
        if (window <= num_preferred_base++)
                return;
 
-       data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
+       data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
        if (!data)
                return;
 
@@ -1381,10 +1379,10 @@ static void cleanup_preferred_base(void)
        it = pbase_tree;
        pbase_tree = NULL;
        while (it) {
-               struct pbase_tree *this = it;
-               it = this->next;
-               free(this->pcache.tree_data);
-               free(this);
+               struct pbase_tree *tmp = it;
+               it = tmp->next;
+               free(tmp->pcache.tree_data);
+               free(tmp);
        }
 
        for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
@@ -1518,7 +1516,7 @@ static void check_object(struct object_entry *entry)
                unuse_pack(&w_curs);
        }
 
-       entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
+       entry->type = oid_object_info(&entry->idx.oid, &entry->size);
        /*
         * The error condition is checked in prepare_pack().  This is
         * to permit a missing preferred base object to be ignored
@@ -1580,8 +1578,7 @@ static void drop_reused_delta(struct object_entry *entry)
                 * And if that fails, the error will be recorded in entry->type
                 * and dealt with in prepare_pack().
                 */
-               entry->type = sha1_object_info(entry->idx.oid.hash,
-                                              &entry->size);
+               entry->type = oid_object_info(&entry->idx.oid, &entry->size);
        }
 }
 
@@ -1873,8 +1870,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        /* Load data if not already done */
        if (!trg->data) {
                read_lock();
-               trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
-                                          &sz);
+               trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!trg->data)
                        die("object %s cannot be read",
@@ -1887,8 +1883,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        }
        if (!src->data) {
                read_lock();
-               src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
-                                          &sz);
+               src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!src->data) {
                        if (src_entry->preferred_base) {
@@ -2551,6 +2546,7 @@ static void read_object_list_from_stdin(void)
        }
 }
 
+/* Remember to update object flag allocation in object.h */
 #define OBJECT_ADDED (1u<<20)
 
 static void show_commit(struct commit *commit, void *data)
@@ -2710,7 +2706,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
 static int add_loose_object(const struct object_id *oid, const char *path,
                            void *data)
 {
-       enum object_type type = sha1_object_info(oid->hash, NULL);
+       enum object_type type = oid_object_info(oid, NULL);
 
        if (type < 0) {
                warning("loose object at %s could not be examined", path);
@@ -2796,7 +2792,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
                        if (!packlist_find(&to_pack, oid.hash, NULL) &&
                            !has_sha1_pack_kept_or_nonlocal(&oid) &&
                            !loosened_object_can_be_discarded(&oid, p->mtime))
-                               if (force_object_loose(oid.hash, p->mtime))
+                               if (force_object_loose(&oid, p->mtime))
                                        die("unable to force loose object");
                }
        }
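The pack-objects hunks are part of the csum-file rename from the sha1file API to the hash-agnostic hashfile API (sha1write to hashwrite, sha1close to hashclose, sha1fd_throughput to hashfd_throughput). A minimal sketch of writing a checksummed stream with the renamed API, assuming git's internal csum-file.h and assuming plain hashfd() as the counterpart of hashfd_throughput(); copy_to_hashed_stream() is a hypothetical helper, and trailing_hash must have room for the_hash_algo->rawsz bytes:

static void copy_to_hashed_stream(int fd, const void *data, unsigned int len,
                                  unsigned char *trailing_hash)
{
        struct hashfile *f = hashfd(fd, "<example>");

        hashwrite(f, data, len);
        /* appends the checksum of everything written, then closes fd */
        hashclose(f, trailing_hash, CSUM_CLOSE);
}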
index 710cd0fb6998672f371a3390cd13480836d03cfc..354478a12762d0b500cf7f56baf90ffc8d2b7c93 100644 (file)
@@ -50,17 +50,17 @@ static inline void llist_item_put(struct llist_item *item)
 
 static inline struct llist_item *llist_item_get(void)
 {
-       struct llist_item *new;
+       struct llist_item *new_item;
        if ( free_nodes ) {
-               new = free_nodes;
+               new_item = free_nodes;
                free_nodes = free_nodes->next;
        } else {
                int i = 1;
-               ALLOC_ARRAY(new, BLKSIZE);
+               ALLOC_ARRAY(new_item, BLKSIZE);
                for (; i < BLKSIZE; i++)
-                       llist_item_put(&new[i]);
+                       llist_item_put(&new_item[i]);
        }
-       return new;
+       return new_item;
 }
 
 static void llist_free(struct llist *list)
@@ -82,26 +82,26 @@ static inline void llist_init(struct llist **list)
 static struct llist * llist_copy(struct llist *list)
 {
        struct llist *ret;
-       struct llist_item *new, *old, *prev;
+       struct llist_item *new_item, *old_item, *prev;
 
        llist_init(&ret);
 
        if ((ret->size = list->size) == 0)
                return ret;
 
-       new = ret->front = llist_item_get();
-       new->sha1 = list->front->sha1;
+       new_item = ret->front = llist_item_get();
+       new_item->sha1 = list->front->sha1;
 
-       old = list->front->next;
-       while (old) {
-               prev = new;
-               new = llist_item_get();
-               prev->next = new;
-               new->sha1 = old->sha1;
-               old = old->next;
+       old_item = list->front->next;
+       while (old_item) {
+               prev = new_item;
+               new_item = llist_item_get();
+               prev->next = new_item;
+               new_item->sha1 = old_item->sha1;
+               old_item = old_item->next;
        }
-       new->next = NULL;
-       ret->back = new;
+       new_item->next = NULL;
+       ret->back = new_item;
 
        return ret;
 }
@@ -110,24 +110,24 @@ static inline struct llist_item *llist_insert(struct llist *list,
                                              struct llist_item *after,
                                               const unsigned char *sha1)
 {
-       struct llist_item *new = llist_item_get();
-       new->sha1 = sha1;
-       new->next = NULL;
+       struct llist_item *new_item = llist_item_get();
+       new_item->sha1 = sha1;
+       new_item->next = NULL;
 
        if (after != NULL) {
-               new->next = after->next;
-               after->next = new;
+               new_item->next = after->next;
+               after->next = new_item;
                if (after == list->back)
-                       list->back = new;
+                       list->back = new_item;
        } else {/* insert in front */
                if (list->size == 0)
-                       list->back = new;
+                       list->back = new_item;
                else
-                       new->next = list->front;
-               list->front = new;
+                       new_item->next = list->front;
+               list->front = new_item;
        }
        list->size++;
-       return new;
+       return new_item;
 }
 
 static inline struct llist_item *llist_insert_back(struct llist *list,
index 4cfec82f4076e357048bcb5816ceeb0f4b020192..38ced18dadff03836c6bed3fcbdefe8bd528a7c0 100644 (file)
@@ -50,9 +50,9 @@ static int prune_object(const struct object_id *oid, const char *fullpath,
        if (st.st_mtime > expire)
                return 0;
        if (show_only || verbose) {
-               enum object_type type = sha1_object_info(oid->hash, NULL);
+               enum object_type type = oid_object_info(oid, NULL);
                printf("%s %s\n", oid_to_hex(oid),
-                      (type > 0) ? typename(type) : "unknown");
+                      (type > 0) ? type_name(type) : "unknown");
        }
        if (!show_only)
                unlink_or_warn(fullpath);
index 511dbbe0f6e25d8f0e8c6ce511cc0ff734adc9bc..e32d6cd5b4c999bc45b961c1387af066c72a823a 100644 (file)
@@ -193,7 +193,7 @@ static struct option pull_options[] = {
        OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"),
                N_("path to upload pack on remote end"),
                0),
-       OPT__FORCE(&opt_force, N_("force overwrite of local branch")),
+       OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0),
        OPT_PASSTHRU('t', "tags", &opt_tags, NULL,
                N_("fetch all tags and associated objects"),
                PARSE_OPT_NOARG),
@@ -574,6 +574,7 @@ static int rebase_submodules(void)
        cp.no_stdin = 1;
        argv_array_pushl(&cp.args, "submodule", "update",
                                   "--recursive", "--rebase", NULL);
+       argv_push_verbosity(&cp.args);
 
        return run_command(&cp);
 }
@@ -586,6 +587,7 @@ static int update_submodules(void)
        cp.no_stdin = 1;
        argv_array_pushl(&cp.args, "submodule", "update",
                                   "--recursive", "--checkout", NULL);
+       argv_push_verbosity(&cp.args);
 
        return run_command(&cp);
 }
index 1c28427d82ee73631fb02b5f126c40ebd2cb1774..013c20d6164f61dc404b89271c0281d28b5069a7 100644 (file)
@@ -548,7 +548,7 @@ int cmd_push(int argc, const char **argv, const char *prefix)
                { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, "check|on-demand|no",
                        N_("control recursive pushing of submodules"),
                        PARSE_OPT_OPTARG, option_parse_recurse_submodules },
-               OPT_BOOL( 0 , "thin", &thin, N_("use thin pack")),
+               OPT_BOOL_F( 0 , "thin", &thin, N_("use thin pack"), PARSE_OPT_NOCOMPLETE),
                OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")),
                OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")),
                OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"),
index 00faf14d07403fedd07bfbbaf1123bea07e3e328..ad074705bb51d1de4221b3c5dfaa7229903c0ef0 100644 (file)
@@ -22,6 +22,8 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
        struct option options[] = {
                OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")),
                OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
+               OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
+                       N_("allow commits with empty messages")),
                OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
                                CONTINUE),
                OPT_CMDMODE(0, "abort", &command, N_("abort rebase"),
index 469b91670782c32d7bc3fb0cb82543ce60d53976..4fadf7e41e37bd0e6457c6bdbbec176b2152c070 100644 (file)
@@ -70,7 +70,7 @@ static int sent_capabilities;
 static int shallow_update;
 static const char *alt_shallow_file;
 static struct strbuf push_cert = STRBUF_INIT;
-static unsigned char push_cert_sha1[20];
+static struct object_id push_cert_oid;
 static struct signature_check sigcheck;
 static const char *push_cert_nonce;
 static const char *cert_nonce_seed;
@@ -634,8 +634,9 @@ static void prepare_push_cert_sha1(struct child_process *proc)
                int bogs /* beginning_of_gpg_sig */;
 
                already_done = 1;
-               if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1))
-                       hashclr(push_cert_sha1);
+               if (write_object_file(push_cert.buf, push_cert.len, "blob",
+                                     &push_cert_oid))
+                       oidclr(&push_cert_oid);
 
                memset(&sigcheck, '\0', sizeof(sigcheck));
                sigcheck.result = 'N';
@@ -656,9 +657,9 @@ static void prepare_push_cert_sha1(struct child_process *proc)
                strbuf_release(&gpg_status);
                nonce_status = check_nonce(push_cert.buf, bogs);
        }
-       if (!is_null_sha1(push_cert_sha1)) {
+       if (!is_null_oid(&push_cert_oid)) {
                argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
-                                sha1_to_hex(push_cert_sha1));
+                                oid_to_hex(&push_cert_oid));
                argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
                                 sigcheck.signer ? sigcheck.signer : "");
                argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
@@ -1242,11 +1243,11 @@ static void check_aliased_update(struct command *cmd, struct string_list *list)
        rp_error("refusing inconsistent update between symref '%s' (%s..%s) and"
                 " its target '%s' (%s..%s)",
                 cmd->ref_name,
-                find_unique_abbrev(cmd->old_oid.hash, DEFAULT_ABBREV),
-                find_unique_abbrev(cmd->new_oid.hash, DEFAULT_ABBREV),
+                find_unique_abbrev(&cmd->old_oid, DEFAULT_ABBREV),
+                find_unique_abbrev(&cmd->new_oid, DEFAULT_ABBREV),
                 dst_cmd->ref_name,
-                find_unique_abbrev(dst_cmd->old_oid.hash, DEFAULT_ABBREV),
-                find_unique_abbrev(dst_cmd->new_oid.hash, DEFAULT_ABBREV));
+                find_unique_abbrev(&dst_cmd->old_oid, DEFAULT_ABBREV),
+                find_unique_abbrev(&dst_cmd->new_oid, DEFAULT_ABBREV));
 
        cmd->error_string = dst_cmd->error_string =
                "inconsistent aliased update";
@@ -1964,6 +1965,12 @@ int cmd_receive_pack(int argc, const char **argv, const char *prefix)
                unpack_limit = receive_unpack_limit;
 
        switch (determine_protocol_version_server()) {
+       case protocol_v2:
+               /*
+                * push support for protocol v2 has not been implemented yet,
+                * so ignore the request to use v2 and fallback to using v0.
+                */
+               break;
        case protocol_v1:
                /*
                 * v1 is just the original protocol with a version string,
index 2233725315ba32832ec5c866b7dbdf72103ac74d..a89bd1dd25252ddfe5ad6b32ee89950d3a4b258f 100644 (file)
@@ -52,6 +52,7 @@ struct collect_reflog_cb {
        int nr;
 };
 
+/* Remember to update object flag allocation in object.h */
 #define INCOMPLETE     (1u<<10)
 #define STUDYING       (1u<<11)
 #define REACHABLE      (1u<<12)
@@ -74,7 +75,7 @@ static int tree_is_complete(const struct object_id *oid)
        if (!tree->buffer) {
                enum object_type type;
                unsigned long size;
-               void *data = read_sha1_file(oid->hash, &type, &size);
+               void *data = read_object_file(oid, &type, &size);
                if (!data) {
                        tree->object.flags |= INCOMPLETE;
                        return 0;
@@ -289,20 +290,20 @@ static int should_expire_reflog_ent(struct object_id *ooid, struct object_id *no
                                    const char *message, void *cb_data)
 {
        struct expire_reflog_policy_cb *cb = cb_data;
-       struct commit *old, *new;
+       struct commit *old_commit, *new_commit;
 
        if (timestamp < cb->cmd.expire_total)
                return 1;
 
-       old = new = NULL;
+       old_commit = new_commit = NULL;
        if (cb->cmd.stalefix &&
-           (!keep_entry(&old, ooid) || !keep_entry(&new, noid)))
+           (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid)))
                return 1;
 
        if (timestamp < cb->cmd.expire_unreachable) {
                if (cb->unreachable_expire_kind == UE_ALWAYS)
                        return 1;
-               if (unreachable(cb, old, ooid) || unreachable(cb, new, noid))
+               if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid))
                        return 1;
        }
 
index d95bf904c3b3fb40438335198a2729e45a9897cb..8708e584e9e8793f3a5e44861d9fb9f049027fa3 100644 (file)
@@ -168,7 +168,7 @@ static int add(int argc, const char **argv)
                OPT_STRING('m', "master", &master, N_("branch"), N_("master branch")),
                { OPTION_CALLBACK, 0, "mirror", &mirror, N_("push|fetch"),
                        N_("set up remote as a mirror to push to or fetch from"),
-                       PARSE_OPT_OPTARG, parse_mirror_opt },
+                       PARSE_OPT_OPTARG | PARSE_OPT_COMP_ARG, parse_mirror_opt },
                OPT_END()
        };
 
@@ -322,7 +322,7 @@ static void read_branches(void)
 
 struct ref_states {
        struct remote *remote;
-       struct string_list new, stale, tracked, heads, push;
+       struct string_list new_refs, stale, tracked, heads, push;
        int queried;
 };
 
@@ -337,12 +337,12 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
                        die(_("Could not get fetch map for refspec %s"),
                                states->remote->fetch_refspec[i]);
 
-       states->new.strdup_strings = 1;
+       states->new_refs.strdup_strings = 1;
        states->tracked.strdup_strings = 1;
        states->stale.strdup_strings = 1;
        for (ref = fetch_map; ref; ref = ref->next) {
                if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
-                       string_list_append(&states->new, abbrev_branch(ref->name));
+                       string_list_append(&states->new_refs, abbrev_branch(ref->name));
                else
                        string_list_append(&states->tracked, abbrev_branch(ref->name));
        }
@@ -356,7 +356,7 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
        free_refs(stale_refs);
        free_refs(fetch_map);
 
-       string_list_sort(&states->new);
+       string_list_sort(&states->new_refs);
        string_list_sort(&states->tracked);
        string_list_sort(&states->stale);
 
@@ -546,8 +546,8 @@ static int add_branch_for_removal(const char *refname,
 }
 
 struct rename_info {
-       const char *old;
-       const char *new;
+       const char *old_name;
+       const char *new_name;
        struct string_list *remote_branches;
 };
 
@@ -560,7 +560,7 @@ static int read_remote_branches(const char *refname,
        int flag;
        const char *symref;
 
-       strbuf_addf(&buf, "refs/remotes/%s/", rename->old);
+       strbuf_addf(&buf, "refs/remotes/%s/", rename->old_name);
        if (starts_with(refname, buf.buf)) {
                item = string_list_append(rename->remote_branches, xstrdup(refname));
                symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING,
@@ -615,36 +615,36 @@ static int mv(int argc, const char **argv)
        if (argc != 3)
                usage_with_options(builtin_remote_rename_usage, options);
 
-       rename.old = argv[1];
-       rename.new = argv[2];
+       rename.old_name = argv[1];
+       rename.new_name = argv[2];
        rename.remote_branches = &remote_branches;
 
-       oldremote = remote_get(rename.old);
+       oldremote = remote_get(rename.old_name);
        if (!remote_is_configured(oldremote, 1))
-               die(_("No such remote: %s"), rename.old);
+               die(_("No such remote: %s"), rename.old_name);
 
-       if (!strcmp(rename.old, rename.new) && oldremote->origin != REMOTE_CONFIG)
+       if (!strcmp(rename.old_name, rename.new_name) && oldremote->origin != REMOTE_CONFIG)
                return migrate_file(oldremote);
 
-       newremote = remote_get(rename.new);
+       newremote = remote_get(rename.new_name);
        if (remote_is_configured(newremote, 1))
-               die(_("remote %s already exists."), rename.new);
+               die(_("remote %s already exists."), rename.new_name);
 
-       strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new);
+       strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new_name);
        if (!valid_fetch_refspec(buf.buf))
-               die(_("'%s' is not a valid remote name"), rename.new);
+               die(_("'%s' is not a valid remote name"), rename.new_name);
 
        strbuf_reset(&buf);
-       strbuf_addf(&buf, "remote.%s", rename.old);
-       strbuf_addf(&buf2, "remote.%s", rename.new);
+       strbuf_addf(&buf, "remote.%s", rename.old_name);
+       strbuf_addf(&buf2, "remote.%s", rename.new_name);
        if (git_config_rename_section(buf.buf, buf2.buf) < 1)
                return error(_("Could not rename config section '%s' to '%s'"),
                                buf.buf, buf2.buf);
 
        strbuf_reset(&buf);
-       strbuf_addf(&buf, "remote.%s.fetch", rename.new);
+       strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
        git_config_set_multivar(buf.buf, NULL, NULL, 1);
-       strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old);
+       strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
        for (i = 0; i < oldremote->fetch_refspec_nr; i++) {
                char *ptr;
 
@@ -655,8 +655,8 @@ static int mv(int argc, const char **argv)
                        refspec_updated = 1;
                        strbuf_splice(&buf2,
                                      ptr-buf2.buf + strlen(":refs/remotes/"),
-                                     strlen(rename.old), rename.new,
-                                     strlen(rename.new));
+                                     strlen(rename.old_name), rename.new_name,
+                                     strlen(rename.new_name));
                } else
                        warning(_("Not updating non-default fetch refspec\n"
                                  "\t%s\n"
@@ -670,10 +670,10 @@ static int mv(int argc, const char **argv)
        for (i = 0; i < branch_list.nr; i++) {
                struct string_list_item *item = branch_list.items + i;
                struct branch_info *info = item->util;
-               if (info->remote_name && !strcmp(info->remote_name, rename.old)) {
+               if (info->remote_name && !strcmp(info->remote_name, rename.old_name)) {
                        strbuf_reset(&buf);
                        strbuf_addf(&buf, "branch.%s.remote", item->string);
-                       git_config_set(buf.buf, rename.new);
+                       git_config_set(buf.buf, rename.new_name);
                }
        }
 
@@ -703,8 +703,8 @@ static int mv(int argc, const char **argv)
                        continue;
                strbuf_reset(&buf);
                strbuf_addstr(&buf, item->string);
-               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old),
-                               rename.new, strlen(rename.new));
+               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+                               rename.new_name, strlen(rename.new_name));
                strbuf_reset(&buf2);
                strbuf_addf(&buf2, "remote: renamed %s to %s",
                                item->string, buf.buf);
@@ -718,12 +718,12 @@ static int mv(int argc, const char **argv)
                        continue;
                strbuf_reset(&buf);
                strbuf_addstr(&buf, item->string);
-               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old),
-                               rename.new, strlen(rename.new));
+               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+                               rename.new_name, strlen(rename.new_name));
                strbuf_reset(&buf2);
                strbuf_addstr(&buf2, item->util);
-               strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old),
-                               rename.new, strlen(rename.new));
+               strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old_name),
+                               rename.new_name, strlen(rename.new_name));
                strbuf_reset(&buf3);
                strbuf_addf(&buf3, "remote: renamed %s to %s",
                                item->string, buf.buf);
@@ -822,7 +822,7 @@ static void clear_push_info(void *util, const char *string)
 
 static void free_remote_ref_states(struct ref_states *states)
 {
-       string_list_clear(&states->new, 0);
+       string_list_clear(&states->new_refs, 0);
        string_list_clear(&states->stale, 1);
        string_list_clear(&states->tracked, 0);
        string_list_clear(&states->heads, 0);
@@ -862,7 +862,7 @@ static int get_remote_ref_states(const char *name,
        if (query) {
                transport = transport_get(states->remote, states->remote->url_nr > 0 ?
                        states->remote->url[0] : NULL);
-               remote_refs = transport_get_remote_refs(transport);
+               remote_refs = transport_get_remote_refs(transport, NULL);
                transport_disconnect(transport);
 
                states->queried = 1;
@@ -907,7 +907,7 @@ static int show_remote_info_item(struct string_list_item *item, void *cb_data)
        if (states->queried) {
                const char *fmt = "%s";
                const char *arg = "";
-               if (string_list_has_string(&states->new, name)) {
+               if (string_list_has_string(&states->new_refs, name)) {
                        fmt = _(" new (next fetch will store in remotes/%s)");
                        arg = states->remote->name;
                } else if (string_list_has_string(&states->tracked, name))
@@ -1176,7 +1176,7 @@ static int show(int argc, const char **argv)
 
                /* remote branch info */
                info.width = 0;
-               for_each_string_list(&states.new, add_remote_to_show_info, &info);
+               for_each_string_list(&states.new_refs, add_remote_to_show_info, &info);
                for_each_string_list(&states.tracked, add_remote_to_show_info, &info);
                for_each_string_list(&states.stale, add_remote_to_show_info, &info);
                if (info.list->nr)
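
Note on the remote.c hunks above: they are a mechanical rename (the struct rename_info fields "old"/"new" become "old_name"/"new_name", and "states->new" becomes "states->new_refs", presumably to steer clear of C/C++ keywords); the refspec-rewriting logic itself is untouched and still relies on strbuf_splice() to swap the remote name inside a longer string. A minimal sketch of that helper, assuming only git's internal strbuf API and an illustrative function name:

    #include "cache.h"

    /* Replace the remote-name component of "refs/remotes/<old>/..." in place. */
    static void rename_tracking_ref(struct strbuf *ref,
                                    const char *old_name, const char *new_name)
    {
            strbuf_splice(ref, strlen("refs/remotes/"), strlen(old_name),
                          new_name, strlen(new_name));
    }

Called as rename_tracking_ref(&buf, rename.old_name, rename.new_name), this is the same splice performed in the @@ -703 and @@ -718 hunks above.
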
index 10078ae37136f154486d8163b6b905ad163c7047..935647be6bdf2ffc2b460038a10e63c2eb387a5c 100644 (file)
@@ -53,11 +53,11 @@ static int show_reference(const char *refname, const struct object_id *oid,
                        if (get_oid(refname, &object))
                                return error("Failed to resolve '%s' as a valid ref.", refname);
 
-                       obj_type = sha1_object_info(object.hash, NULL);
-                       repl_type = sha1_object_info(oid->hash, NULL);
+                       obj_type = oid_object_info(&object, NULL);
+                       repl_type = oid_object_info(oid, NULL);
 
-                       printf("%s (%s) -> %s (%s)\n", refname, typename(obj_type),
-                              oid_to_hex(oid), typename(repl_type));
+                       printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
+                              oid_to_hex(oid), type_name(repl_type));
                }
        }
 
@@ -162,14 +162,14 @@ static int replace_object_oid(const char *object_ref,
        struct ref_transaction *transaction;
        struct strbuf err = STRBUF_INIT;
 
-       obj_type = sha1_object_info(object->hash, NULL);
-       repl_type = sha1_object_info(repl->hash, NULL);
+       obj_type = oid_object_info(object, NULL);
+       repl_type = oid_object_info(repl, NULL);
        if (!force && obj_type != repl_type)
                die("Objects must be of the same type.\n"
                    "'%s' points to a replaced object of type '%s'\n"
                    "while '%s' points to a replacement object of type '%s'.",
-                   object_ref, typename(obj_type),
-                   replace_ref, typename(repl_type));
+                   object_ref, type_name(obj_type),
+                   replace_ref, type_name(repl_type));
 
        check_ref_valid(object, &prev, &ref, force);
 
@@ -215,7 +215,7 @@ static void export_object(const struct object_id *oid, enum object_type type,
        argv_array_push(&cmd.args, "--no-replace-objects");
        argv_array_push(&cmd.args, "cat-file");
        if (raw)
-               argv_array_push(&cmd.args, typename(type));
+               argv_array_push(&cmd.args, type_name(type));
        else
                argv_array_push(&cmd.args, "-p");
        argv_array_push(&cmd.args, oid_to_hex(oid));
@@ -284,30 +284,30 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
 {
        char *tmpfile = git_pathdup("REPLACE_EDITOBJ");
        enum object_type type;
-       struct object_id old, new, prev;
+       struct object_id old_oid, new_oid, prev;
        struct strbuf ref = STRBUF_INIT;
 
-       if (get_oid(object_ref, &old) < 0)
+       if (get_oid(object_ref, &old_oid) < 0)
                die("Not a valid object name: '%s'", object_ref);
 
-       type = sha1_object_info(old.hash, NULL);
+       type = oid_object_info(&old_oid, NULL);
        if (type < 0)
-               die("unable to get object type for %s", oid_to_hex(&old));
+               die("unable to get object type for %s", oid_to_hex(&old_oid));
 
-       check_ref_valid(&old, &prev, &ref, force);
+       check_ref_valid(&old_oid, &prev, &ref, force);
        strbuf_release(&ref);
 
-       export_object(&old, type, raw, tmpfile);
+       export_object(&old_oid, type, raw, tmpfile);
        if (launch_editor(tmpfile, NULL, NULL) < 0)
                die("editing object file failed");
-       import_object(&new, type, raw, tmpfile);
+       import_object(&new_oid, type, raw, tmpfile);
 
        free(tmpfile);
 
-       if (!oidcmp(&old, &new))
-               return error("new object is the same as the old one: '%s'", oid_to_hex(&old));
+       if (!oidcmp(&old_oid, &new_oid))
+               return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid));
 
-       return replace_object_oid(object_ref, &old, "replacement", &new, force);
+       return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
 }
 
 static void replace_parents(struct strbuf *buf, int argc, const char **argv)
@@ -355,7 +355,7 @@ static void check_one_mergetag(struct commit *commit,
        struct tag *tag;
        int i;
 
-       hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), tag_oid.hash);
+       hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
        tag = lookup_tag(&tag_oid);
        if (!tag)
                die(_("bad mergetag in commit '%s'"), ref);
@@ -386,16 +386,16 @@ static void check_mergetags(struct commit *commit, int argc, const char **argv)
 
 static int create_graft(int argc, const char **argv, int force)
 {
-       struct object_id old, new;
+       struct object_id old_oid, new_oid;
        const char *old_ref = argv[0];
        struct commit *commit;
        struct strbuf buf = STRBUF_INIT;
        const char *buffer;
        unsigned long size;
 
-       if (get_oid(old_ref, &old) < 0)
+       if (get_oid(old_ref, &old_oid) < 0)
                die(_("Not a valid object name: '%s'"), old_ref);
-       commit = lookup_commit_or_die(&old, old_ref);
+       commit = lookup_commit_or_die(&old_oid, old_ref);
 
        buffer = get_commit_buffer(commit, &size);
        strbuf_add(&buf, buffer, size);
@@ -410,15 +410,15 @@ static int create_graft(int argc, const char **argv, int force)
 
        check_mergetags(commit, argc, argv);
 
-       if (write_sha1_file(buf.buf, buf.len, commit_type, new.hash))
+       if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
                die(_("could not write replacement commit for: '%s'"), old_ref);
 
        strbuf_release(&buf);
 
-       if (!oidcmp(&old, &new))
-               return error("new commit is the same as the old one: '%s'", oid_to_hex(&old));
+       if (!oidcmp(&old_oid, &new_oid))
+               return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
 
-       return replace_object_oid(old_ref, &old, "replacement", &new, force);
+       return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
 }
 
 int cmd_replace(int argc, const char **argv, const char *prefix)
@@ -439,7 +439,8 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
                OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE),
                OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT),
                OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT),
-               OPT_BOOL('f', "force", &force, N_("replace the ref if it exists")),
+               OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")),
                OPT_STRING(0, "format", &format, N_("format"), N_("use this format")),
                OPT_END()
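
The replace.c hunks switch from the sha1-based helpers (sha1_object_info(), hash_sha1_file(), write_sha1_file(), typename()) to their struct object_id counterparts. A small sketch of how the new entry points fit together, assuming git's internal headers; the function names here are illustrative, the calls are the ones used above:

    #include "cache.h"

    static void show_object_info(const struct object_id *oid)
    {
            unsigned long size;
            enum object_type type = oid_object_info(oid, &size);

            if (type < 0)
                    die("unable to get object info for %s", oid_to_hex(oid));
            printf("%s %s %lu\n", oid_to_hex(oid), type_name(type), size);
    }

    static void store_commit_buffer(struct strbuf *buf, struct object_id *result)
    {
            /* hash_object_file() only computes the ID a buffer would get... */
            hash_object_file(buf->buf, buf->len, commit_type, result);
            /* ...write_object_file() also stores the object in the object database */
            if (write_object_file(buf->buf, buf->len, commit_type, result))
                    die("could not write object");
    }

The renames are purely about the API type; the underlying hash is still SHA-1 at this point.
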
index e15f595799c40933e31f909715e35b4d0a665fc0..7f1c3f02a302128d6c00c35b8783c1a62353b37a 100644 (file)
@@ -106,24 +106,16 @@ static int reset_index(const struct object_id *oid, int reset_type, int quiet)
 
 static void print_new_head_line(struct commit *commit)
 {
-       const char *hex, *body;
-       const char *msg;
-
-       hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
-       printf(_("HEAD is now at %s"), hex);
-       msg = logmsg_reencode(commit, NULL, get_log_output_encoding());
-       body = strstr(msg, "\n\n");
-       if (body) {
-               const char *eol;
-               size_t len;
-               body = skip_blank_lines(body + 2);
-               eol = strchr(body, '\n');
-               len = eol ? eol - body : strlen(body);
-               printf(" %.*s\n", (int) len, body);
-       }
-       else
-               printf("\n");
-       unuse_commit_buffer(commit, msg);
+       struct strbuf buf = STRBUF_INIT;
+
+       printf(_("HEAD is now at %s"),
+               find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+
+       pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+       if (buf.len > 0)
+               printf(" %s", buf.buf);
+       putchar('\n');
+       strbuf_release(&buf);
 }
 
 static void update_index_from_diff(struct diff_queue_struct *q,
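
print_new_head_line() in reset.c now delegates subject extraction to the pretty-printing code instead of scanning the commit buffer by hand; with CMIT_FMT_ONELINE, pp_commit_easy() produces just the subject line. Combined with the object_id-based find_unique_abbrev(), the pattern looks roughly like this (sketch, illustrative function name):

    #include "cache.h"
    #include "commit.h"

    static void print_oneline(struct commit *commit)
    {
            struct strbuf subject = STRBUF_INIT;

            pp_commit_easy(CMIT_FMT_ONELINE, commit, &subject);
            printf("%s %s\n",
                   find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV),
                   subject.buf);
            strbuf_release(&subject);
    }
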
index 48300d9e11733396610e81fbb00ee7d17a7dba66..fadd3ec14cbf0469c332a85278e5d1b4932ef788 100644 (file)
@@ -108,7 +108,7 @@ static void show_commit(struct commit *commit, void *data)
        if (!revs->graph)
                fputs(get_revision_mark(revs, commit), stdout);
        if (revs->abbrev_commit && revs->abbrev)
-               fputs(find_unique_abbrev(commit->object.oid.hash, revs->abbrev),
+               fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
                      stdout);
        else
                fputs(oid_to_hex(&commit->object.oid), stdout);
@@ -134,7 +134,7 @@ static void show_commit(struct commit *commit, void *data)
        else
                putchar('\n');
 
-       if (revs->verbose_header && get_cached_commit_buffer(commit, NULL)) {
+       if (revs->verbose_header) {
                struct strbuf buf = STRBUF_INIT;
                struct pretty_print_context ctx = {0};
                ctx.abbrev = revs->abbrev;
@@ -536,7 +536,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
                mark_edges_uninteresting(&revs, show_edge);
 
        if (bisect_list) {
-               int reaches = reaches, all = all;
+               int reaches, all;
 
                find_bisection(&revs.commits, &reaches, &all, bisect_find_all);
 
index 96d06a5d01d6189a647baae4f8c401d8435fff37..36b208778280e6019d9bc4fb4063dff3d44f08e6 100644 (file)
@@ -159,7 +159,7 @@ static void show_rev(int type, const struct object_id *oid, const char *name)
                }
        }
        else if (abbrev)
-               show_with_type(type, find_unique_abbrev(oid->hash, abbrev));
+               show_with_type(type, find_unique_abbrev(oid, abbrev));
        else
                show_with_type(type, oid_to_hex(oid));
 }
@@ -243,28 +243,28 @@ static int show_file(const char *arg, int output_prefix)
 static int try_difference(const char *arg)
 {
        char *dotdot;
-       struct object_id oid;
-       struct object_id end;
-       const char *next;
-       const char *this;
+       struct object_id start_oid;
+       struct object_id end_oid;
+       const char *end;
+       const char *start;
        int symmetric;
        static const char head_by_default[] = "HEAD";
 
        if (!(dotdot = strstr(arg, "..")))
                return 0;
-       next = dotdot + 2;
-       this = arg;
-       symmetric = (*next == '.');
+       end = dotdot + 2;
+       start = arg;
+       symmetric = (*end == '.');
 
        *dotdot = 0;
-       next += symmetric;
+       end += symmetric;
 
-       if (!*next)
-               next = head_by_default;
+       if (!*end)
+               end = head_by_default;
        if (dotdot == arg)
-               this = head_by_default;
+               start = head_by_default;
 
-       if (this == head_by_default && next == head_by_default &&
+       if (start == head_by_default && end == head_by_default &&
            !symmetric) {
                /*
                 * Just ".."?  That is not a range but the
@@ -274,14 +274,14 @@ static int try_difference(const char *arg)
                return 0;
        }
 
-       if (!get_oid_committish(this, &oid) && !get_oid_committish(next, &end)) {
-               show_rev(NORMAL, &end, next);
-               show_rev(symmetric ? NORMAL : REVERSED, &oid, this);
+       if (!get_oid_committish(start, &start_oid) && !get_oid_committish(end, &end_oid)) {
+               show_rev(NORMAL, &end_oid, end);
+               show_rev(symmetric ? NORMAL : REVERSED, &start_oid, start);
                if (symmetric) {
                        struct commit_list *exclude;
                        struct commit *a, *b;
-                       a = lookup_commit_reference(&oid);
-                       b = lookup_commit_reference(&end);
+                       a = lookup_commit_reference(&start_oid);
+                       b = lookup_commit_reference(&end_oid);
                        exclude = get_merge_bases(a, b);
                        while (exclude) {
                                struct commit *commit = pop_commit(&exclude);
index 4a2fcca27b3f722ca520c2411b80e6984ecf780a..5b6fc7ee818be4a4f060dc06f12fb45a25a2ea9b 100644 (file)
@@ -178,7 +178,7 @@ static int check_local_mod(struct object_id *head, int index_only)
                 * way as changed from the HEAD.
                 */
                if (no_head
-                    || get_tree_entry(head->hash, name, oid.hash, &mode)
+                    || get_tree_entry(head, name, &oid, &mode)
                     || ce->ce_mode != create_ce_mode(mode)
                     || oidcmp(&ce->oid, &oid))
                        staged_changes = 1;
@@ -242,7 +242,7 @@ static struct option builtin_rm_options[] = {
        OPT__DRY_RUN(&show_only, N_("dry run")),
        OPT__QUIET(&quiet, N_("do not list removed files")),
        OPT_BOOL( 0 , "cached",         &index_only, N_("only remove from the index")),
-       OPT__FORCE(&force, N_("override the up-to-date check")),
+       OPT__FORCE(&force, N_("override the up-to-date check"), PARSE_OPT_NOCOMPLETE),
        OPT_BOOL('r', NULL,             &recursive,  N_("allow recursive removal")),
        OPT_BOOL( 0 , "ignore-unmatch", &ignore_unmatch,
                                N_("exit with a zero status even if nothing matched")),
@@ -385,10 +385,9 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
                        stage_updated_gitmodules(&the_index);
        }
 
-       if (active_cache_changed) {
-               if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
-                       die(_("Unable to write new index file"));
-       }
+       if (write_locked_index(&the_index, &lock_file,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
+               die(_("Unable to write new index file"));
 
        return 0;
 }
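
The rm.c hunk drops the explicit active_cache_changed check in favour of the SKIP_IF_UNCHANGED flag, which lets write_locked_index() itself skip the write (and release the lock) when the in-core index was never modified. The usual calling pattern, sketched against git's internal API with an illustrative function name:

    #include "cache.h"

    static struct lock_file lock_file;

    static int rewrite_index(void)
    {
            hold_locked_index(&lock_file, LOCK_DIE_ON_ERROR);

            /* ... modify the_index; whether it changed is tracked internally ... */

            if (write_locked_index(&the_index, &lock_file,
                                   COMMIT_LOCK | SKIP_IF_UNCHANGED))
                    return error(_("Unable to write new index file"));
            return 0;
    }
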
index fc4f0bb5fbc033604a13a147094c0d1bc661db17..b5427f75e34901ad8fa876cc6c53066211e4a2a0 100644 (file)
@@ -14,6 +14,7 @@
 #include "sha1-array.h"
 #include "gpg-interface.h"
 #include "gettext.h"
+#include "protocol.h"
 
 static const char * const send_pack_usage[] = {
        N_("git send-pack [--all | --mirror] [--dry-run] [--force] "
@@ -154,6 +155,7 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix)
        int progress = -1;
        int from_stdin = 0;
        struct push_cas_option cas = {0};
+       struct packet_reader reader;
 
        struct option options[] = {
                OPT__VERBOSITY(&verbose),
@@ -256,8 +258,22 @@ int cmd_send_pack(int argc, const char **argv, const char *prefix)
                        args.verbose ? CONNECT_VERBOSE : 0);
        }
 
-       get_remote_heads(fd[0], NULL, 0, &remote_refs, REF_NORMAL,
-                        &extra_have, &shallow);
+       packet_reader_init(&reader, fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       switch (discover_version(&reader)) {
+       case protocol_v2:
+               die("support for protocol v2 not implemented yet");
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &remote_refs, REF_NORMAL,
+                                &extra_have, &shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
 
        transport_verify_remote_names(nr_refspecs, refspecs);
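
send-pack now performs protocol-version discovery itself before requesting the ref advertisement, and get_remote_heads() reads from a packet_reader rather than a raw file descriptor. The same dispatch, restated in isolation (a sketch using only the calls that appear in the hunk; includes abbreviated, protocol v2 still rejected here):

    #include "cache.h"
    #include "pkt-line.h"
    #include "protocol.h"
    #include "connect.h"
    #include "sha1-array.h"
    #include "remote.h"

    static struct ref *read_advertisement(int fd)
    {
            struct ref *refs = NULL;
            struct oid_array extra_have = OID_ARRAY_INIT;
            struct oid_array shallow = OID_ARRAY_INIT;
            struct packet_reader reader;

            packet_reader_init(&reader, fd, NULL, 0,
                               PACKET_READ_CHOMP_NEWLINE |
                               PACKET_READ_GENTLE_ON_EOF);

            switch (discover_version(&reader)) {
            case protocol_v2:
                    die("support for protocol v2 not implemented yet");
            case protocol_v1:       /* v0 plus a leading "version 1" packet */
            case protocol_v0:
                    get_remote_heads(&reader, &refs, REF_NORMAL,
                                     &extra_have, &shallow);
                    break;
            case protocol_unknown_version:
                    BUG("unknown protocol version");
            }
            return refs;
    }
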
 
diff --git a/builtin/serve.c b/builtin/serve.c
new file mode 100644 (file)
index 0000000..d3fd240
--- /dev/null
+++ b/builtin/serve.c
@@ -0,0 +1,30 @@
+#include "cache.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "serve.h"
+
+static char const * const serve_usage[] = {
+       N_("git serve [<options>]"),
+       NULL
+};
+
+int cmd_serve(int argc, const char **argv, const char *prefix)
+{
+       struct serve_options opts = SERVE_OPTIONS_INIT;
+
+       struct option options[] = {
+               OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+                        N_("quit after a single request/response exchange")),
+               OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities,
+                        N_("exit immediately after advertising capabilities")),
+               OPT_END()
+       };
+
+       /* ignore all unknown cmdline switches for now */
+       argc = parse_options(argc, argv, prefix, options, serve_usage,
+                            PARSE_OPT_KEEP_DASHDASH |
+                            PARSE_OPT_KEEP_UNKNOWN);
+       serve(&opts);
+
+       return 0;
+}
index e29875b84389b25237e39e0112eafa8ac34599ee..608d6ba77bdfb4673513444651053d0e8e789020 100644 (file)
@@ -11,7 +11,8 @@
 #include "parse-options.h"
 
 static char const * const shortlog_usage[] = {
-       N_("git shortlog [<options>] [<revision-range>] [[--] [<path>...]]"),
+       N_("git shortlog [<options>] [<revision-range>] [[--] <path>...]"),
+       N_("git log --pretty=short | git shortlog [<options>]"),
        NULL
 };
 
@@ -283,6 +284,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
        for (;;) {
                switch (parse_options_step(&ctx, options, shortlog_usage)) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_DONE:
                        goto parse_done;
@@ -292,6 +294,11 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
 parse_done:
        argc = parse_options_end(&ctx);
 
+       if (nongit && argc > 1) {
+               error(_("too many arguments given outside repository"));
+               usage_with_options(shortlog_usage, options);
+       }
+
        if (setup_revisions(argc, argv, &rev, NULL) != 1) {
                error(_("unrecognized argument: %s"), argv[1]);
                usage_with_options(shortlog_usage, options);
index e8a4aa40cb4b6cf8787af3dd35d833a92a85bba3..6c2148b71db593af1a4ef8d2d1bdbdfe16661851 100644 (file)
@@ -292,7 +292,7 @@ static void show_one_commit(struct commit *commit, int no_name)
                }
                else
                        printf("[%s] ",
-                              find_unique_abbrev(commit->object.oid.hash,
+                              find_unique_abbrev(&commit->object.oid,
                                                  DEFAULT_ABBREV));
        }
        puts(pretty_str);
index 41e5e71cad660d26ddc90ffeaa383fd7bf10f79f..f2eb1a7724058bb1db237a6199d16e5ff1ef495a 100644 (file)
@@ -29,7 +29,7 @@ static void show_one(const char *refname, const struct object_id *oid)
        if (quiet)
                return;
 
-       hex = find_unique_abbrev(oid->hash, abbrev);
+       hex = find_unique_abbrev(oid, abbrev);
        if (hash_only)
                printf("%s\n", hex);
        else
@@ -39,7 +39,7 @@ static void show_one(const char *refname, const struct object_id *oid)
                return;
 
        if (!peel_ref(refname, &peeled)) {
-               hex = find_unique_abbrev(peeled.hash, abbrev);
+               hex = find_unique_abbrev(&peeled, abbrev);
                printf("%s %s^{}\n", hex, refname);
        }
 }
index 6d8e002be707f4e2d189f3353c1fb9eacb581b19..a404df3ea494d4e4753e5ca95be06b7eed14f617 100644 (file)
@@ -655,9 +655,13 @@ static void status_submodule(const char *path, const struct object_id *ce_oid,
                             displaypath);
        } else if (!(flags & OPT_CACHED)) {
                struct object_id oid;
+               struct ref_store *refs = get_submodule_ref_store(path);
 
-               if (refs_head_ref(get_submodule_ref_store(path),
-                                 handle_submodule_head_ref, &oid))
+               if (!refs) {
+                       print_status(flags, '-', path, ce_oid, displaypath);
+                       goto cleanup;
+               }
+               if (refs_head_ref(refs, handle_submodule_head_ref, &oid))
                        die(_("could not resolve HEAD ref inside the "
                              "submodule '%s'"), path);
 
@@ -1020,7 +1024,7 @@ static int module_deinit(int argc, const char **argv, const char *prefix)
 
        struct option module_deinit_options[] = {
                OPT__QUIET(&quiet, N_("Suppress submodule status output")),
-               OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes")),
+               OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes"), 0),
                OPT_BOOL(0, "all", &all, N_("Unregister all submodules")),
                OPT_END()
        };
@@ -1043,7 +1047,7 @@ static int module_deinit(int argc, const char **argv, const char *prefix)
                die(_("Use '--all' if you really want to deinitialize all submodules"));
 
        if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
-               BUG("module_list_compute should not choke on empty pathspec");
+               return 1;
 
        info.prefix = prefix;
        if (quiet)
index a7e6a5b0f234a95fb45a71d7e9aa7f0baa2b47f8..8cff6d0b727a572a34adcfe72246953b93c79edd 100644 (file)
@@ -99,7 +99,8 @@ static int delete_tag(const char *name, const char *ref,
 {
        if (delete_ref(NULL, ref, oid, 0))
                return 1;
-       printf(_("Deleted tag '%s' (was %s)\n"), name, find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+       printf(_("Deleted tag '%s' (was %s)\n"), name,
+              find_unique_abbrev(oid, DEFAULT_ABBREV));
        return 0;
 }
 
@@ -167,7 +168,7 @@ static void write_tag_body(int fd, const struct object_id *oid)
        enum object_type type;
        char *buf, *sp;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return;
        /* skip header */
@@ -187,13 +188,14 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu
 {
        if (sign && do_sign(buf) < 0)
                return error(_("unable to sign the tag"));
-       if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0)
+       if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
                return error(_("unable to write tag file"));
        return 0;
 }
 
 struct create_tag_options {
        unsigned int message_given:1;
+       unsigned int use_editor:1;
        unsigned int sign;
        enum {
                CLEANUP_NONE,
@@ -210,7 +212,7 @@ static void create_tag(const struct object_id *object, const char *tag,
        struct strbuf header = STRBUF_INIT;
        char *path = NULL;
 
-       type = sha1_object_info(object->hash, NULL);
+       type = oid_object_info(object, NULL);
        if (type <= OBJ_NONE)
            die(_("bad object type."));
 
@@ -220,11 +222,11 @@ static void create_tag(const struct object_id *object, const char *tag,
                    "tag %s\n"
                    "tagger %s\n\n",
                    oid_to_hex(object),
-                   typename(type),
+                   type_name(type),
                    tag,
                    git_committer_info(IDENT_STRICT));
 
-       if (!opt->message_given) {
+       if (!opt->message_given || opt->use_editor) {
                int fd;
 
                /* write the template message before editing: */
@@ -233,7 +235,10 @@ static void create_tag(const struct object_id *object, const char *tag,
                if (fd < 0)
                        die_errno(_("could not create file '%s'"), path);
 
-               if (!is_null_oid(prev)) {
+               if (opt->message_given) {
+                       write_or_die(fd, buf->buf, buf->len);
+                       strbuf_reset(buf);
+               } else if (!is_null_oid(prev)) {
                        write_tag_body(fd, prev);
                } else {
                        struct strbuf buf = STRBUF_INIT;
@@ -289,17 +294,17 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
                strbuf_addstr(sb, rla);
        } else {
                strbuf_addstr(sb, "tag: tagging ");
-               strbuf_add_unique_abbrev(sb, oid->hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(sb, oid, DEFAULT_ABBREV);
        }
 
        strbuf_addstr(sb, " (");
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        switch (type) {
        default:
                strbuf_addstr(sb, "object of unknown type");
                break;
        case OBJ_COMMIT:
-               if ((buf = read_sha1_file(oid->hash, &type, &size)) != NULL) {
+               if ((buf = read_object_file(oid, &type, &size)) != NULL) {
                        subject_len = find_commit_subject(buf, &subject_start);
                        strbuf_insert(sb, sb->len, subject_start, subject_len);
                } else {
@@ -372,6 +377,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
        static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting;
        struct ref_format format = REF_FORMAT_INIT;
        int icase = 0;
+       int edit_flag = 0;
        struct option options[] = {
                OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'),
                { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"),
@@ -386,12 +392,13 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                OPT_CALLBACK('m', "message", &msg, N_("message"),
                             N_("tag message"), parse_msg_arg),
                OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
+               OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
                OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
                OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"),
                        N_("how to strip spaces and #comments from message")),
                OPT_STRING('u', "local-user", &keyid, N_("key-id"),
                                        N_("use another key to sign the tag")),
-               OPT__FORCE(&force, N_("replace the tag if exists")),
+               OPT__FORCE(&force, N_("replace the tag if exists"), 0),
                OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")),
 
                OPT_GROUP(N_("Tag listing options")),
@@ -524,6 +531,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                die(_("tag '%s' already exists"), tag);
 
        opt.message_given = msg.given || msgfile;
+       opt.use_editor = edit_flag;
 
        if (!cleanup_arg || !strcmp(cleanup_arg, "strip"))
                opt.cleanup_mode = CLEANUP_ALL;
@@ -551,7 +559,8 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                die("%s", err.buf);
        ref_transaction_free(transaction);
        if (force && !is_null_oid(&prev) && oidcmp(&prev, &object))
-               printf(_("Updated tag '%s' (was %s)\n"), tag, find_unique_abbrev(prev.hash, DEFAULT_ABBREV));
+               printf(_("Updated tag '%s' (was %s)\n"), tag,
+                      find_unique_abbrev(&prev, DEFAULT_ABBREV));
 
        UNLEAK(buf);
        UNLEAK(ref);
index 32e01555774c838e489fd33c675488e754c3e8e2..300eb59657e29cace38798029a9170834cac7c9e 100644 (file)
@@ -9,7 +9,7 @@ static char *create_temp_file(struct object_id *oid)
        unsigned long size;
        int fd;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf || type != OBJ_BLOB)
                die("unable to read blob object %s", oid_to_hex(oid));
 
index 62ea264c46783374d0f1968c19ea7581498a1f87..b7755c6cc5a05a536666d0ae2fd9d0c3171a6cfd 100644 (file)
@@ -21,7 +21,7 @@ static unsigned char buffer[4096];
 static unsigned int offset, len;
 static off_t consumed_bytes;
 static off_t max_input_size;
-static git_SHA_CTX ctx;
+static git_hash_ctx ctx;
 static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
 
 /*
@@ -62,7 +62,7 @@ static void *fill(int min)
        if (min > sizeof(buffer))
                die("cannot fill %d bytes", min);
        if (offset) {
-               git_SHA1_Update(&ctx, buffer, offset);
+               the_hash_algo->update_fn(&ctx, buffer, offset);
                memmove(buffer, buffer + offset, len);
                offset = 0;
        }
@@ -158,6 +158,7 @@ struct obj_info {
        struct object *obj;
 };
 
+/* Remember to update object flag allocation in object.h */
 #define FLAG_OPEN (1u<<20)
 #define FLAG_WRITTEN (1u<<21)
 
@@ -172,7 +173,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
 {
        struct object_id oid;
 
-       if (write_sha1_file(obj_buf->buffer, obj_buf->size, typename(obj->type), oid.hash) < 0)
+       if (write_object_file(obj_buf->buffer, obj_buf->size,
+                             type_name(obj->type), &oid) < 0)
                die("failed to write object %s", oid_to_hex(&obj->oid));
        obj->flags |= FLAG_WRITTEN;
 }
@@ -197,7 +199,7 @@ static int check_object(struct object *obj, int type, void *data, struct fsck_op
 
        if (!(obj->flags & FLAG_OPEN)) {
                unsigned long size;
-               int type = sha1_object_info(obj->oid.hash, &size);
+               int type = oid_object_info(&obj->oid, &size);
                if (type != obj->type || type <= 0)
                        die("object of unexpected type");
                obj->flags |= FLAG_WRITTEN;
@@ -237,14 +239,16 @@ static void write_object(unsigned nr, enum object_type type,
                         void *buf, unsigned long size)
 {
        if (!strict) {
-               if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+               if (write_object_file(buf, size, type_name(type),
+                                     &obj_list[nr].oid) < 0)
                        die("failed to write object");
                added_object(nr, type, buf, size);
                free(buf);
                obj_list[nr].obj = NULL;
        } else if (type == OBJ_BLOB) {
                struct blob *blob;
-               if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+               if (write_object_file(buf, size, type_name(type),
+                                     &obj_list[nr].oid) < 0)
                        die("failed to write object");
                added_object(nr, type, buf, size);
                free(buf);
@@ -258,12 +262,12 @@ static void write_object(unsigned nr, enum object_type type,
        } else {
                struct object *obj;
                int eaten;
-               hash_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash);
+               hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
                added_object(nr, type, buf, size);
                obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
                                          &eaten);
                if (!obj)
-                       die("invalid %s", typename(type));
+                       die("invalid %s", type_name(type));
                add_object_buffer(obj, buf, size);
                obj->flags |= FLAG_OPEN;
                obj_list[nr].obj = obj;
@@ -345,8 +349,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
        struct object_id base_oid;
 
        if (type == OBJ_REF_DELTA) {
-               hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ));
-               use(GIT_SHA1_RAWSZ);
+               hashcpy(base_oid.hash, fill(the_hash_algo->rawsz));
+               use(the_hash_algo->rawsz);
                delta_data = get_data(delta_size);
                if (dry_run || !delta_data) {
                        free(delta_data);
@@ -419,7 +423,7 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
        if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
                return;
 
-       base = read_sha1_file(base_oid.hash, &type, &base_size);
+       base = read_object_file(&base_oid, &type, &base_size);
        if (!base) {
                error("failed to read delta-pack base object %s",
                      oid_to_hex(&base_oid));
@@ -564,15 +568,15 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
                /* We don't take any non-flag arguments now.. Maybe some day */
                usage(unpack_usage);
        }
-       git_SHA1_Init(&ctx);
+       the_hash_algo->init_fn(&ctx);
        unpack_all();
-       git_SHA1_Update(&ctx, buffer, offset);
-       git_SHA1_Final(oid.hash, &ctx);
+       the_hash_algo->update_fn(&ctx, buffer, offset);
+       the_hash_algo->final_fn(oid.hash, &ctx);
        if (strict)
                write_rest();
-       if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash))
+       if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
                die("final sha1 did not match");
-       use(GIT_SHA1_RAWSZ);
+       use(the_hash_algo->rawsz);
 
        /* Write the last part of the buffer to stdout */
        while (len) {
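
unpack-objects no longer hard-codes SHA-1: git_SHA_CTX and GIT_SHA1_RAWSZ give way to the pluggable the_hash_algo vtable introduced for the hash-function transition. The general shape of hashing through that abstraction (sketch, illustrative function name):

    #include "cache.h"

    static void hash_buffer(const void *buf, size_t len,
                            unsigned char *hash /* the_hash_algo->rawsz bytes */)
    {
            git_hash_ctx ctx;

            the_hash_algo->init_fn(&ctx);
            the_hash_algo->update_fn(&ctx, buf, len);
            the_hash_algo->final_fn(hash, &ctx);
    }

With the default algorithm this is still SHA-1, so rawsz is 20 and the pack format on disk is unchanged.
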
index 58d1c2d2827d61899d73f1ea7632c5ee219f3ace..10d070a76fb1b0b94c058f60934bb05db37a4164 100644 (file)
@@ -592,7 +592,7 @@ static struct cache_entry *read_one_ent(const char *which,
        int size;
        struct cache_entry *ce;
 
-       if (get_tree_entry(ent->hash, path, oid.hash, &mode)) {
+       if (get_tree_entry(ent, path, &oid, &mode)) {
                if (which)
                        error("%s: not in %s branch.", path, which);
                return NULL;
@@ -1059,6 +1059,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
                        break;
                switch (parseopt_state) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_NON_OPTION:
                case PARSE_OPT_DONE:
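
get_tree_entry() now takes struct object_id pointers for both the tree-ish and the result, as the read_one_ent() hunk above shows. A hypothetical lookup against the new signature (names are illustrative):

    #include "cache.h"

    static int lookup_path_in_tree(struct object_id *treeish, const char *path)
    {
            struct object_id entry_oid;
            unsigned mode;

            if (get_tree_entry(treeish, path, &entry_oid, &mode))
                    return error("'%s' not found in %s", path, oid_to_hex(treeish));
            printf("%06o %s\t%s\n", mode, oid_to_hex(&entry_oid), path);
            return 0;
    }
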
index 873070e517090cbb21bf53cbc03088da6f85ec1f..4321a344567ed83e6828be41eb150d90c4d7756a 100644 (file)
@@ -12,7 +12,7 @@ int cmd_update_server_info(int argc, const char **argv, const char *prefix)
 {
        int force = 0;
        struct option options[] = {
-               OPT__FORCE(&force, N_("update the info files from scratch")),
+               OPT__FORCE(&force, N_("update the info files from scratch"), 0),
                OPT_END()
        };
 
diff --git a/builtin/upload-pack.c b/builtin/upload-pack.c
new file mode 100644 (file)
index 0000000..a757df8
--- /dev/null
+++ b/builtin/upload-pack.c
@@ -0,0 +1,74 @@
+#include "cache.h"
+#include "builtin.h"
+#include "exec_cmd.h"
+#include "pkt-line.h"
+#include "parse-options.h"
+#include "protocol.h"
+#include "upload-pack.h"
+#include "serve.h"
+
+static const char * const upload_pack_usage[] = {
+       N_("git upload-pack [<options>] <dir>"),
+       NULL
+};
+
+int cmd_upload_pack(int argc, const char **argv, const char *prefix)
+{
+       const char *dir;
+       int strict = 0;
+       struct upload_pack_options opts = { 0 };
+       struct serve_options serve_opts = SERVE_OPTIONS_INIT;
+       struct option options[] = {
+               OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+                        N_("quit after a single request/response exchange")),
+               OPT_BOOL(0, "advertise-refs", &opts.advertise_refs,
+                        N_("exit immediately after initial ref advertisement")),
+               OPT_BOOL(0, "strict", &strict,
+                        N_("do not try <directory>/.git/ if <directory> is no Git directory")),
+               OPT_INTEGER(0, "timeout", &opts.timeout,
+                           N_("interrupt transfer after <n> seconds of inactivity")),
+               OPT_END()
+       };
+
+       packet_trace_identity("upload-pack");
+       check_replace_refs = 0;
+
+       argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+
+       if (argc != 1)
+               usage_with_options(upload_pack_usage, options);
+
+       if (opts.timeout)
+               opts.daemon_mode = 1;
+
+       setup_path();
+
+       dir = argv[0];
+
+       if (!enter_repo(dir, strict))
+               die("'%s' does not appear to be a git repository", dir);
+
+       switch (determine_protocol_version_server()) {
+       case protocol_v2:
+               serve_opts.advertise_capabilities = opts.advertise_refs;
+               serve_opts.stateless_rpc = opts.stateless_rpc;
+               serve(&serve_opts);
+               break;
+       case protocol_v1:
+               /*
+                * v1 is just the original protocol with a version string,
+                * so just fall through after writing the version string.
+                */
+               if (opts.advertise_refs || !opts.stateless_rpc)
+                       packet_write_fmt(1, "version 1\n");
+
+               /* fallthrough */
+       case protocol_v0:
+               upload_pack(&opts);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
+
+       return 0;
+}
index ba38ac9b1518884693e2c89ec19cc9e00fce9fa3..dcdaada111071c84b56022d9050ae06ffafbc25f 100644 (file)
@@ -44,12 +44,12 @@ static int verify_commit(const char *name, unsigned flags)
        if (get_oid(name, &oid))
                return error("commit '%s' not found.", name);
 
-       buf = read_sha1_file(oid.hash, &type, &size);
+       buf = read_object_file(&oid, &type, &size);
        if (!buf)
                return error("%s: unable to read file.", name);
        if (type != OBJ_COMMIT)
                return error("%s: cannot verify a non-commit object of type %s.",
-                               name, typename(type));
+                               name, type_name(type));
 
        ret = run_gpg_verify(&oid, buf, size, flags);
 
index 7cef5b120b7786d107282d03052c5e06d3a2c85a..40a438ed6ce802e0745495ba611e49c23e2eefce 100644 (file)
 #include "worktree.h"
 
 static const char * const worktree_usage[] = {
-       N_("git worktree add [<options>] <path> [<branch>]"),
+       N_("git worktree add [<options>] <path> [<commit-ish>]"),
        N_("git worktree list [<options>]"),
        N_("git worktree lock [<options>] <path>"),
+       N_("git worktree move <worktree> <new-path>"),
        N_("git worktree prune [<options>]"),
+       N_("git worktree remove [<options>] <worktree>"),
        N_("git worktree unlock <path>"),
        NULL
 };
@@ -99,16 +101,9 @@ static int prune_worktree(const char *id, struct strbuf *reason)
        }
        path[len] = '\0';
        if (!file_exists(path)) {
-               struct stat st_link;
                free(path);
-               /*
-                * the repo is moved manually and has not been
-                * accessed since?
-                */
-               if (!stat(git_path("worktrees/%s/link", id), &st_link) &&
-                   st_link.st_nlink > 1)
-                       return 0;
-               if (st.st_mtime <= expire) {
+               if (stat(git_path("worktrees/%s/index", id), &st) ||
+                   st.st_mtime <= expire) {
                        strbuf_addf(reason, _("Removing worktrees/%s: gitdir file points to non-existent location"), id);
                        return 1;
                } else {
@@ -345,9 +340,23 @@ static int add_worktree(const char *path, const char *refname,
         * Hook failure does not warrant worktree deletion, so run hook after
         * is_junk is cleared, but do return appropriate code when hook fails.
         */
-       if (!ret && opts->checkout)
-               ret = run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid),
-                                 oid_to_hex(&commit->object.oid), "1", NULL);
+       if (!ret && opts->checkout) {
+               const char *hook = find_hook("post-checkout");
+               if (hook) {
+                       const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL };
+                       cp.git_cmd = 0;
+                       cp.no_stdin = 1;
+                       cp.stdout_to_stderr = 1;
+                       cp.dir = path;
+                       cp.env = env;
+                       cp.argv = NULL;
+                       argv_array_pushl(&cp.args, absolute_path(hook),
+                                        oid_to_hex(&null_oid),
+                                        oid_to_hex(&commit->object.oid),
+                                        "1", NULL);
+                       ret = run_command(&cp);
+               }
+       }
 
        argv_array_clear(&child_env);
        strbuf_release(&sb);
@@ -365,7 +374,9 @@ static int add(int ac, const char **av, const char *prefix)
        const char *branch;
        const char *opt_track = NULL;
        struct option options[] = {
-               OPT__FORCE(&opts.force, N_("checkout <branch> even if already checked out in other worktree")),
+               OPT__FORCE(&opts.force,
+                          N_("checkout <branch> even if already checked out in other worktree"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
                           N_("create a new branch")),
                OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
@@ -484,7 +495,7 @@ static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
                strbuf_addstr(&sb, "(bare)");
        else {
                strbuf_addf(&sb, "%-*s ", abbrev_len,
-                               find_unique_abbrev(wt->head_oid.hash, DEFAULT_ABBREV));
+                               find_unique_abbrev(&wt->head_oid, DEFAULT_ABBREV));
                if (wt->is_detached)
                        strbuf_addstr(&sb, "(detached HEAD)");
                else if (wt->head_ref) {
@@ -509,7 +520,7 @@ static void measure_widths(struct worktree **wt, int *abbrev, int *maxlen)
 
                if (path_len > *maxlen)
                        *maxlen = path_len;
-               sha1_len = strlen(find_unique_abbrev(wt[i]->head_oid.hash, *abbrev));
+               sha1_len = strlen(find_unique_abbrev(&wt[i]->head_oid, *abbrev));
                if (sha1_len > *abbrev)
                        *abbrev = sha1_len;
        }
@@ -605,6 +616,220 @@ static int unlock_worktree(int ac, const char **av, const char *prefix)
        return ret;
 }
 
+static void validate_no_submodules(const struct worktree *wt)
+{
+       struct index_state istate = { NULL };
+       int i, found_submodules = 0;
+
+       if (read_index_from(&istate, worktree_git_path(wt, "index"),
+                           get_worktree_git_dir(wt)) > 0) {
+               for (i = 0; i < istate.cache_nr; i++) {
+                       struct cache_entry *ce = istate.cache[i];
+
+                       if (S_ISGITLINK(ce->ce_mode)) {
+                               found_submodules = 1;
+                               break;
+                       }
+               }
+       }
+       discard_index(&istate);
+
+       if (found_submodules)
+               die(_("working trees containing submodules cannot be moved or removed"));
+}
+
+static int move_worktree(int ac, const char **av, const char *prefix)
+{
+       struct option options[] = {
+               OPT_END()
+       };
+       struct worktree **worktrees, *wt;
+       struct strbuf dst = STRBUF_INIT;
+       struct strbuf errmsg = STRBUF_INIT;
+       const char *reason;
+       char *path;
+
+       ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+       if (ac != 2)
+               usage_with_options(worktree_usage, options);
+
+       path = prefix_filename(prefix, av[1]);
+       strbuf_addstr(&dst, path);
+       free(path);
+
+       worktrees = get_worktrees(0);
+       wt = find_worktree(worktrees, prefix, av[0]);
+       if (!wt)
+               die(_("'%s' is not a working tree"), av[0]);
+       if (is_main_worktree(wt))
+               die(_("'%s' is a main working tree"), av[0]);
+       if (is_directory(dst.buf)) {
+               const char *sep = find_last_dir_sep(wt->path);
+
+               if (!sep)
+                       die(_("could not figure out destination name from '%s'"),
+                           wt->path);
+               strbuf_trim_trailing_dir_sep(&dst);
+               strbuf_addstr(&dst, sep);
+       }
+       if (file_exists(dst.buf))
+               die(_("target '%s' already exists"), dst.buf);
+
+       validate_no_submodules(wt);
+
+       reason = is_worktree_locked(wt);
+       if (reason) {
+               if (*reason)
+                       die(_("cannot move a locked working tree, lock reason: %s"),
+                           reason);
+               die(_("cannot move a locked working tree"));
+       }
+       if (validate_worktree(wt, &errmsg, 0))
+               die(_("validation failed, cannot move working tree: %s"),
+                   errmsg.buf);
+       strbuf_release(&errmsg);
+
+       if (rename(wt->path, dst.buf) == -1)
+               die_errno(_("failed to move '%s' to '%s'"), wt->path, dst.buf);
+
+       update_worktree_location(wt, dst.buf);
+
+       strbuf_release(&dst);
+       free_worktrees(worktrees);
+       return 0;
+}
+
+/*
+ * Note, "git status --porcelain" is used to determine if it's safe to
+ * delete a whole worktree. "git status" does not ignore user
+ * configuration, so if a normal "git status" shows "clean" for the
+ * user, then it's ok to remove it.
+ *
+ * This assumption may be a bad one. We may want to ignore
+ * (potentially bad) user settings and only delete a worktree when
+ * it's absolutely safe to do so from _our_ point of view because we
+ * know better.
+ */
+static void check_clean_worktree(struct worktree *wt,
+                                const char *original_path)
+{
+       struct argv_array child_env = ARGV_ARRAY_INIT;
+       struct child_process cp;
+       char buf[1];
+       int ret;
+
+       /*
+        * Until we sort this out, all submodules are "dirty" and
+        * will abort this function.
+        */
+       validate_no_submodules(wt);
+
+       argv_array_pushf(&child_env, "%s=%s/.git",
+                        GIT_DIR_ENVIRONMENT, wt->path);
+       argv_array_pushf(&child_env, "%s=%s",
+                        GIT_WORK_TREE_ENVIRONMENT, wt->path);
+       memset(&cp, 0, sizeof(cp));
+       argv_array_pushl(&cp.args, "status",
+                        "--porcelain", "--ignore-submodules=none",
+                        NULL);
+       cp.env = child_env.argv;
+       cp.git_cmd = 1;
+       cp.dir = wt->path;
+       cp.out = -1;
+       ret = start_command(&cp);
+       if (ret)
+               die_errno(_("failed to run 'git status' on '%s'"),
+                         original_path);
+       ret = xread(cp.out, buf, sizeof(buf));
+       if (ret)
+               die(_("'%s' is dirty, use --force to delete it"),
+                   original_path);
+       close(cp.out);
+       ret = finish_command(&cp);
+       if (ret)
+               die_errno(_("failed to run 'git status' on '%s', code %d"),
+                         original_path, ret);
+}
+
+static int delete_git_work_tree(struct worktree *wt)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int ret = 0;
+
+       strbuf_addstr(&sb, wt->path);
+       if (remove_dir_recursively(&sb, 0)) {
+               error_errno(_("failed to delete '%s'"), sb.buf);
+               ret = -1;
+       }
+       strbuf_release(&sb);
+       return ret;
+}
+
+static int delete_git_dir(struct worktree *wt)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int ret = 0;
+
+       strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id));
+       if (remove_dir_recursively(&sb, 0)) {
+               error_errno(_("failed to delete '%s'"), sb.buf);
+               ret = -1;
+       }
+       strbuf_release(&sb);
+       return ret;
+}
+
+static int remove_worktree(int ac, const char **av, const char *prefix)
+{
+       int force = 0;
+       struct option options[] = {
+               OPT_BOOL(0, "force", &force,
+                        N_("force removing even if the worktree is dirty")),
+               OPT_END()
+       };
+       struct worktree **worktrees, *wt;
+       struct strbuf errmsg = STRBUF_INIT;
+       const char *reason;
+       int ret = 0;
+
+       ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+       if (ac != 1)
+               usage_with_options(worktree_usage, options);
+
+       worktrees = get_worktrees(0);
+       wt = find_worktree(worktrees, prefix, av[0]);
+       if (!wt)
+               die(_("'%s' is not a working tree"), av[0]);
+       if (is_main_worktree(wt))
+               die(_("'%s' is a main working tree"), av[0]);
+       reason = is_worktree_locked(wt);
+       if (reason) {
+               if (*reason)
+                       die(_("cannot remove a locked working tree, lock reason: %s"),
+                           reason);
+               die(_("cannot remove a locked working tree"));
+       }
+       if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
+               die(_("validation failed, cannot remove working tree: %s"),
+                   errmsg.buf);
+       strbuf_release(&errmsg);
+
+       if (file_exists(wt->path)) {
+               if (!force)
+                       check_clean_worktree(wt, av[0]);
+
+               ret |= delete_git_work_tree(wt);
+       }
+       /*
+        * continue on even if ret is non-zero, there's no going back
+        * from here.
+        */
+       ret |= delete_git_dir(wt);
+
+       free_worktrees(worktrees);
+       return ret;
+}
+
 int cmd_worktree(int ac, const char **av, const char *prefix)
 {
        struct option options[] = {
@@ -627,5 +852,9 @@ int cmd_worktree(int ac, const char **av, const char *prefix)
                return lock_worktree(ac - 1, av + 1, prefix);
        if (!strcmp(av[1], "unlock"))
                return unlock_worktree(ac - 1, av + 1, prefix);
+       if (!strcmp(av[1], "move"))
+               return move_worktree(ac - 1, av + 1, prefix);
+       if (!strcmp(av[1], "remove"))
+               return remove_worktree(ac - 1, av + 1, prefix);
        usage_with_options(worktree_usage, options);
 }
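
builtin/worktree.c grows two subcommands, wired up at the end of cmd_worktree(): "move" relocates a working tree (refusing locked trees and trees that contain submodules), and "remove" deletes one after checking it is clean via "git status --porcelain" unless --force is given. Going by the usage strings and options added above, the invocations would be roughly:

    $ git worktree move <worktree> <new-path>
    $ git worktree remove [--force] <worktree>
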
index bd0a78aa3c56b7e817c7ddf6fcaba703d6d5fecc..c9d3c544e79f46bab9e5fd50079d1bb574b722f2 100644 (file)
@@ -19,7 +19,7 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
 {
        int flags = 0, ret;
        const char *prefix = NULL;
-       unsigned char sha1[20];
+       struct object_id oid;
        const char *me = "git-write-tree";
        struct option write_tree_options[] = {
                OPT_BIT(0, "missing-ok", &flags, N_("allow missing objects"),
@@ -38,10 +38,10 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
        argc = parse_options(argc, argv, unused_prefix, write_tree_options,
                             write_tree_usage, 0);
 
-       ret = write_cache_as_tree(sha1, flags, prefix);
+       ret = write_cache_as_tree(&oid, flags, prefix);
        switch (ret) {
        case 0:
-               printf("%s\n", sha1_to_hex(sha1));
+               printf("%s\n", oid_to_hex(&oid));
                break;
        case WRITE_TREE_UNREADABLE_INDEX:
                die("%s: error reading the index", me);
index eadc2d5172052878c2b9276313cef752baedd839..de1f4040c788f683a0cca3695c10b163811dd945 100644 (file)
@@ -13,7 +13,7 @@ static struct bulk_checkin_state {
        unsigned plugged:1;
 
        char *pack_tmp_name;
-       struct sha1file *f;
+       struct hashfile *f;
        off_t offset;
        struct pack_idx_option pack_idx_opts;
 
@@ -36,9 +36,9 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
                unlink(state->pack_tmp_name);
                goto clear_exit;
        } else if (state->nr_written == 1) {
-               sha1close(state->f, oid.hash, CSUM_FSYNC);
+               hashclose(state->f, oid.hash, CSUM_FSYNC);
        } else {
-               int fd = sha1close(state->f, oid.hash, 0);
+               int fd = hashclose(state->f, oid.hash, 0);
                fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
                                         state->nr_written, oid.hash,
                                         state->offset);
@@ -61,17 +61,17 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
        reprepare_packed_git(the_repository);
 }
 
-static int already_written(struct bulk_checkin_state *state, unsigned char sha1[])
+static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
 {
        int i;
 
        /* The object may already exist in the repository */
-       if (has_sha1_file(sha1))
+       if (has_sha1_file(oid->hash))
                return 1;
 
        /* Might want to keep the list sorted */
        for (i = 0; i < state->nr_written; i++)
-               if (!hashcmp(state->written[i]->oid.hash, sha1))
+               if (!oidcmp(&state->written[i]->oid, oid))
                        return 1;
 
        /* This is a new object we need to keep */
@@ -94,7 +94,7 @@ static int already_written(struct bulk_checkin_state *state, unsigned char sha1[
  * with a new pack.
  */
 static int stream_to_pack(struct bulk_checkin_state *state,
-                         git_SHA_CTX *ctx, off_t *already_hashed_to,
+                         git_hash_ctx *ctx, off_t *already_hashed_to,
                          int fd, size_t size, enum object_type type,
                          const char *path, unsigned flags)
 {
@@ -128,7 +128,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
                                if (rsize < hsize)
                                        hsize = rsize;
                                if (hsize)
-                                       git_SHA1_Update(ctx, ibuf, hsize);
+                                       the_hash_algo->update_fn(ctx, ibuf, hsize);
                                *already_hashed_to = offset;
                        }
                        s.next_in = ibuf;
@@ -150,7 +150,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
                                        return -1;
                                }
 
-                               sha1write(state->f, obuf, written);
+                               hashwrite(state->f, obuf, written);
                                state->offset += written;
                        }
                        s.next_out = obuf;
@@ -187,16 +187,16 @@ static void prepare_to_stream(struct bulk_checkin_state *state,
 }
 
 static int deflate_to_pack(struct bulk_checkin_state *state,
-                          unsigned char result_sha1[],
+                          struct object_id *result_oid,
                           int fd, size_t size,
                           enum object_type type, const char *path,
                           unsigned flags)
 {
        off_t seekback, already_hashed_to;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
        unsigned char obuf[16384];
        unsigned header_len;
-       struct sha1file_checkpoint checkpoint;
+       struct hashfile_checkpoint checkpoint;
        struct pack_idx_entry *idx = NULL;
 
        seekback = lseek(fd, 0, SEEK_CUR);
@@ -204,9 +204,9 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
                return error("cannot find the current offset");
 
        header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
-                              typename(type), (uintmax_t)size) + 1;
-       git_SHA1_Init(&ctx);
-       git_SHA1_Update(&ctx, obuf, header_len);
+                              type_name(type), (uintmax_t)size) + 1;
+       the_hash_algo->init_fn(&ctx);
+       the_hash_algo->update_fn(&ctx, obuf, header_len);
 
        /* Note: idx is non-NULL when we are writing */
        if ((flags & HASH_WRITE_OBJECT) != 0)
@@ -217,7 +217,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
        while (1) {
                prepare_to_stream(state, flags);
                if (idx) {
-                       sha1file_checkpoint(state->f, &checkpoint);
+                       hashfile_checkpoint(state->f, &checkpoint);
                        idx->offset = state->offset;
                        crc32_begin(state->f);
                }
@@ -231,23 +231,23 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
                 */
                if (!idx)
                        die("BUG: should not happen");
-               sha1file_truncate(state->f, &checkpoint);
+               hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                finish_bulk_checkin(state);
                if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
                        return error("cannot seek back");
        }
-       git_SHA1_Final(result_sha1, &ctx);
+       the_hash_algo->final_fn(result_oid->hash, &ctx);
        if (!idx)
                return 0;
 
        idx->crc32 = crc32_end(state->f);
-       if (already_written(state, result_sha1)) {
-               sha1file_truncate(state->f, &checkpoint);
+       if (already_written(state, result_oid)) {
+               hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                free(idx);
        } else {
-               hashcpy(idx->oid.hash, result_sha1);
+               oidcpy(&idx->oid, result_oid);
                ALLOC_GROW(state->written,
                           state->nr_written + 1,
                           state->alloc_written);
@@ -256,11 +256,11 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
        return 0;
 }
 
-int index_bulk_checkin(unsigned char *sha1,
+int index_bulk_checkin(struct object_id *oid,
                       int fd, size_t size, enum object_type type,
                       const char *path, unsigned flags)
 {
-       int status = deflate_to_pack(&state, sha1, fd, size, type,
+       int status = deflate_to_pack(&state, oid, fd, size, type,
                                     path, flags);
        if (!state.plugged)
                finish_bulk_checkin(&state);
index fbd40fc98c955c192a6de698a75b8d56af766f09..a85527318b15b36bb60b0b6b166569b4fcaa9dcf 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef BULK_CHECKIN_H
 #define BULK_CHECKIN_H
 
-extern int index_bulk_checkin(unsigned char sha1[],
+extern int index_bulk_checkin(struct object_id *oid,
                              int fd, size_t size, enum object_type type,
                              const char *path, unsigned flags);
 
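The bulk-checkin conversion above replaces the hard-coded SHA-1 primitives (git_SHA_CTX, git_SHA1_Init/Update/Final) with the git_hash_ctx union and the function pointers reached through the_hash_algo. A minimal sketch of the resulting hashing pattern, assuming it is built inside git's own source tree; the helper name and the "blob" header are illustrative:

    #include "cache.h"

    /* Hash an object header plus payload through the abstracted hash API. */
    static void hash_payload_sketch(const void *data, size_t len,
                                    struct object_id *oid)
    {
            git_hash_ctx ctx;
            char hdr[32];
            int hdrlen = xsnprintf(hdr, sizeof(hdr), "blob %" PRIuMAX,
                                   (uintmax_t)len) + 1;

            the_hash_algo->init_fn(&ctx);
            the_hash_algo->update_fn(&ctx, hdr, hdrlen);
            the_hash_algo->update_fn(&ctx, data, len);
            the_hash_algo->final_fn(oid->hash, &ctx);
    }

Because the context type and the update/final entry points are looked up through the_hash_algo, call sites such as deflate_to_pack() above stay unchanged when the hash function (and its output size) changes.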
index efe547e25fe2a53bd0ef7954cf3bec6d55218365..902c9b54485be2000696a697472fa10d97b36153 100644 (file)
--- a/bundle.c
+++ b/bundle.c
@@ -222,7 +222,7 @@ static int is_tag_in_date_range(struct object *tag, struct rev_info *revs)
        if (revs->max_age == -1 && revs->min_age == -1)
                goto out;
 
-       buf = read_sha1_file(tag->oid.hash, &type, &size);
+       buf = read_object_file(&tag->oid, &type, &size);
        if (!buf)
                goto out;
        line = memmem(buf, size, "\ntagger ", 8);
index 3841cef0c064b19722cb60742ed3bd95b9cde520..6a555f4d431f9f6dbf8dad06d75a4ec81a4254fd 100644 (file)
@@ -320,7 +320,7 @@ static int update_one(struct cache_tree *it,
                struct cache_tree_sub *sub = NULL;
                const char *path, *slash;
                int pathlen, entlen;
-               const unsigned char *sha1;
+               const struct object_id *oid;
                unsigned mode;
                int expected_missing = 0;
                int contains_ita = 0;
@@ -338,7 +338,7 @@ static int update_one(struct cache_tree *it,
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
-                       sha1 = sub->cache_tree->oid.hash;
+                       oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        contains_ita = sub->cache_tree->entry_count < 0;
                        if (contains_ita) {
@@ -347,19 +347,19 @@ static int update_one(struct cache_tree *it,
                        }
                }
                else {
-                       sha1 = ce->oid.hash;
+                       oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }
 
-               if (is_null_sha1(sha1) ||
-                   (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1))) {
+               if (is_null_oid(oid) ||
+                   (mode != S_IFGITLINK && !missing_ok && !has_object_file(oid))) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
-                               mode, sha1_to_hex(sha1), entlen+baselen, path);
+                               mode, oid_to_hex(oid), entlen+baselen, path);
                }
 
                /*
@@ -385,12 +385,12 @@ static int update_one(struct cache_tree *it,
                /*
                 * "sub" can be an empty tree if all subentries are i-t-a.
                 */
-               if (contains_ita && !hashcmp(sha1, EMPTY_TREE_SHA1_BIN))
+               if (contains_ita && !oidcmp(oid, &empty_tree_oid))
                        continue;
 
                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
-               strbuf_add(&buffer, sha1, 20);
+               strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);
 
 #if DEBUG
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
@@ -399,16 +399,16 @@ static int update_one(struct cache_tree *it,
        }
 
        if (repair) {
-               unsigned char sha1[20];
-               hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
-               if (has_sha1_file(sha1))
-                       hashcpy(it->oid.hash, sha1);
+               struct object_id oid;
+               hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+               if (has_object_file(&oid))
+                       oidcpy(&it->oid, &oid);
                else
                        to_invalidate = 1;
-       } else if (dryrun)
-               hash_sha1_file(buffer.buf, buffer.len, tree_type,
-                              it->oid.hash);
-       else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) {
+       } else if (dryrun) {
+               hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+       } else if (write_object_file(buffer.buf, buffer.len, tree_type,
+                                    &it->oid)) {
                strbuf_release(&buffer);
                return -1;
        }
@@ -465,7 +465,7 @@ static void write_one(struct strbuf *buffer, struct cache_tree *it,
 #endif
 
        if (0 <= it->entry_count) {
-               strbuf_add(buffer, it->oid.hash, 20);
+               strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
@@ -492,6 +492,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;
+       const unsigned rawsz = the_hash_algo->rawsz;
 
        it = NULL;
        /* skip name, but make sure name exists */
@@ -520,11 +521,11 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
-               if (size < 20)
+               if (size < rawsz)
                        goto free_return;
-               hashcpy(it->oid.hash, (const unsigned char*)buf);
-               buf += 20;
-               size -= 20;
+               memcpy(it->oid.hash, (const unsigned char*)buf, rawsz);
+               buf += rawsz;
+               size -= rawsz;
        }
 
 #if DEBUG
@@ -599,7 +600,7 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
        return it;
 }
 
-int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
+int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
 {
        int entries, was_valid;
        struct lock_file lock_file = LOCK_INIT;
@@ -640,19 +641,19 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
                        ret = WRITE_TREE_PREFIX_ERROR;
                        goto out;
                }
-               hashcpy(sha1, subtree->oid.hash);
+               oidcpy(oid, &subtree->oid);
        }
        else
-               hashcpy(sha1, index_state->cache_tree->oid.hash);
+               oidcpy(oid, &index_state->cache_tree->oid);
 
 out:
        rollback_lock_file(&lock_file);
        return ret;
 }
 
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix)
 {
-       return write_index_as_tree(sha1, &the_index, get_index_file(), flags, prefix);
+       return write_index_as_tree(oid, &the_index, get_index_file(), flags, prefix);
 }
 
 static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
index f7b9cab7ee87dd04cecbd25f34101a58fc002014..cfd5328cc93694e23037e15148241e17bd4f3a04 100644 (file)
@@ -47,8 +47,8 @@ int update_main_cache_tree(int);
 #define WRITE_TREE_UNMERGED_INDEX (-2)
 #define WRITE_TREE_PREFIX_ERROR (-3)
 
-int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix);
+int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
+int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix);
 void prime_cache_tree(struct index_state *, struct tree *);
 
 extern int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info);
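With the two prototypes above converted, callers hand in a struct object_id rather than a bare unsigned char array, matching the builtin/write-tree.c hunk earlier in this patch. A small sketch of the updated calling convention (helper name illustrative, error handling trimmed):

    #include "cache.h"
    #include "cache-tree.h"

    static void print_index_tree_sketch(void)
    {
            struct object_id oid;

            if (write_cache_as_tree(&oid, 0, NULL))    /* no flags, no prefix */
                    die("unable to write a tree from the index");
            printf("%s\n", oid_to_hex(&oid));
    }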
diff --git a/cache.h b/cache.h
index 720664e394510ee0194d8dccf42d5ee5a003903b..bbaf5c349ab893a6f0f4549b61bcc70eff50745f 100644 (file)
--- a/cache.h
+++ b/cache.h
 #include "sha1-array.h"
 #include "repository.h"
 
-#ifndef platform_SHA_CTX
-/*
- * platform's underlying implementation of SHA-1; could be OpenSSL,
- * blk_SHA, Apple CommonCrypto, etc...  Note that including
- * SHA1_HEADER may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
- * the default for OpenSSL compatible SHA-1 implementations here.
- */
-#define platform_SHA_CTX       SHA_CTX
-#define platform_SHA1_Init     SHA1_Init
-#define platform_SHA1_Update   SHA1_Update
-#define platform_SHA1_Final            SHA1_Final
-#endif
-
-#define git_SHA_CTX            platform_SHA_CTX
-#define git_SHA1_Init          platform_SHA1_Init
-#define git_SHA1_Update                platform_SHA1_Update
-#define git_SHA1_Final         platform_SHA1_Final
-
-#ifdef SHA1_MAX_BLOCK_SIZE
-#include "compat/sha1-chunked.h"
-#undef git_SHA1_Update
-#define git_SHA1_Update                git_SHA1_Update_Chunked
-#endif
-
 #include <zlib.h>
 typedef struct git_zstream {
        z_stream z;
@@ -624,6 +599,7 @@ extern int read_index_unmerged(struct index_state *);
 
 /* For use with `write_locked_index()`. */
 #define COMMIT_LOCK            (1 << 0)
+#define SKIP_IF_UNCHANGED      (1 << 1)
 
 /*
  * Write the index while holding an already-taken lock. Close the lock,
@@ -640,6 +616,9 @@ extern int read_index_unmerged(struct index_state *);
  * With `COMMIT_LOCK`, the lock is always committed or rolled back.
  * Without it, the lock is closed, but neither committed nor rolled
  * back.
+ *
+ * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
+ * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
  */
 extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
 
@@ -970,14 +949,14 @@ extern void check_repository_format(void);
  * more calls to find_unique_abbrev are made.
  *
  * The `_r` variant writes to a buffer supplied by the caller, which must be at
- * least `GIT_SHA1_HEXSZ + 1` bytes. The return value is the number of bytes
+ * least `GIT_MAX_HEXSZ + 1` bytes. The return value is the number of bytes
  * written (excluding the NUL terminator).
  *
  * Note that while this version avoids the static buffer, it is not fully
  * reentrant, as it calls into other non-reentrant git code.
  */
-extern const char *find_unique_abbrev(const unsigned char *sha1, int len);
-extern int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len);
+extern const char *find_unique_abbrev(const struct object_id *oid, int len);
+extern int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len);
 
 extern const unsigned char null_sha1[GIT_MAX_RAWSZ];
 extern const struct object_id null_oid;
@@ -1026,7 +1005,7 @@ static inline void hashclr(unsigned char *hash)
 
 static inline void oidclr(struct object_id *oid)
 {
-       hashclr(oid->hash);
+       memset(oid->hash, 0, GIT_MAX_RAWSZ);
 }
 
 
@@ -1044,8 +1023,6 @@ extern const struct object_id empty_tree_oid;
        "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
        "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
 extern const struct object_id empty_blob_oid;
-#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash)
-
 
 static inline int is_empty_blob_sha1(const unsigned char *sha1)
 {
@@ -1206,19 +1183,19 @@ extern char *xdg_config_home(const char *filename);
  */
 extern char *xdg_cache_home(const char *filename);
 
-extern void *read_sha1_file_extended(const unsigned char *sha1,
-                                    enum object_type *type,
-                                    unsigned long *size, int lookup_replace);
-static inline void *read_sha1_file(const unsigned char *sha1, enum object_type *type, unsigned long *size)
+extern void *read_object_file_extended(const struct object_id *oid,
+                                      enum object_type *type,
+                                      unsigned long *size, int lookup_replace);
+static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
 {
-       return read_sha1_file_extended(sha1, type, size, 1);
+       return read_object_file_extended(oid, type, size, 1);
 }
 
 /*
  * This internal function is only declared here for the benefit of
  * lookup_replace_object().  Please do not call it directly.
  */
-extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1);
+extern const struct object_id *do_lookup_replace_object(const struct object_id *oid);
 
 /*
  * If object sha1 should be replaced, return the replacement object's
@@ -1226,38 +1203,49 @@ extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1);
  * either sha1 or a pointer to a permanently-allocated value.  When
  * object replacement is suppressed, always return sha1.
  */
-static inline const unsigned char *lookup_replace_object(const unsigned char *sha1)
+static inline const struct object_id *lookup_replace_object(const struct object_id *oid)
 {
        if (!check_replace_refs)
-               return sha1;
-       return do_lookup_replace_object(sha1);
+               return oid;
+       return do_lookup_replace_object(oid);
 }
 
-/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
-extern int sha1_object_info(const unsigned char *, unsigned long *);
-extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
-extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags);
-extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
-extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+/* Read and unpack an object file into memory, write memory to an object file */
+extern int oid_object_info(const struct object_id *, unsigned long *);
+
+extern int hash_object_file(const void *buf, unsigned long len,
+                           const char *type, struct object_id *oid);
+
+extern int write_object_file(const void *buf, unsigned long len,
+                            const char *type, struct object_id *oid);
+
+extern int hash_object_file_literally(const void *buf, unsigned long len,
+                                     const char *type, struct object_id *oid,
+                                     unsigned flags);
+
+extern int pretend_object_file(void *, unsigned long, enum object_type,
+                              struct object_id *oid);
+
+extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
 extern int git_open_cloexec(const char *name, int flags);
 #define git_open(name) git_open_cloexec(name, O_RDONLY)
 extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
 extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
 
-extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type);
+extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
 
 extern int finalize_object_file(const char *tmpfile, const char *filename);
 
 /*
- * Open the loose object at path, check its sha1, and return the contents,
+ * Open the loose object at path, check its hash, and return the contents,
  * type, and size. If the object is a blob, then "contents" may return NULL,
  * to allow streaming of large blobs.
  *
  * Returns 0 on success, negative on error (details may be written to stderr).
  */
 int read_loose_object(const char *path,
-                     const unsigned char *expected_sha1,
+                     const struct object_id *expected_oid,
                      enum object_type *type,
                      unsigned long *size,
                      void **contents);
@@ -1284,7 +1272,7 @@ extern int has_object_file_with_flags(const struct object_id *oid, int flags);
  */
 extern int has_loose_object_nonlocal(const unsigned char *sha1);
 
-extern void assert_sha1_type(const unsigned char *sha1, enum object_type expect);
+extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
 
 /* Helper to check and "touch" a file */
 extern int check_and_freshen_file(const char *fn, int freshen);
@@ -1440,10 +1428,10 @@ extern int df_name_compare(const char *name1, int len1, int mode1, const char *n
 extern int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
 extern int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
 
-extern void *read_object_with_reference(const unsigned char *sha1,
+extern void *read_object_with_reference(const struct object_id *oid,
                                        const char *required_type,
                                        unsigned long *size,
-                                       unsigned char *sha1_ret);
+                                       struct object_id *oid_ret);
 
 extern struct object *peel_to_type(const char *name, int namelen,
                                   struct object *o, enum object_type);
@@ -1590,7 +1578,7 @@ struct pack_entry {
  * usual "XXXXXX" trailer, and the resulting filename is written into the
  * "template" buffer. Returns the open descriptor.
  */
-extern int odb_mkstemp(struct strbuf *template, const char *pattern);
+extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
 
 /*
  * Create a pack .keep file named "name" (which should generally be the output
@@ -1661,7 +1649,7 @@ struct object_info {
        unsigned long *sizep;
        off_t *disk_sizep;
        unsigned char *delta_base_sha1;
-       struct strbuf *typename;
+       struct strbuf *type_name;
        void **contentp;
 
        /* Response */
@@ -1702,7 +1690,9 @@ struct object_info {
 #define OBJECT_INFO_SKIP_CACHED 4
 /* Do not retry packed storage after checking packed and loose storage */
 #define OBJECT_INFO_QUICK 8
-extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/* Do not check loose object */
+#define OBJECT_INFO_IGNORE_LOOSE 16
+extern int oid_object_info_extended(const struct object_id *, struct object_info *, unsigned flags);
 
 /*
  * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
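The cache.h hunks above rename the whole object-access surface from sha1-based names (read_sha1_file, write_sha1_file, sha1_object_info) to object_id-based ones (read_object_file, write_object_file, oid_object_info). A short sketch of the renamed read path, again assuming git's internal headers; the helper is illustrative:

    #include "cache.h"
    #include "object.h"

    /* Print an object's type and size via the oid-based API. */
    static void describe_object_sketch(const struct object_id *oid)
    {
            enum object_type type;
            unsigned long size;
            void *buf = read_object_file(oid, &type, &size);

            if (!buf)
                    die("unable to read object %s", oid_to_hex(oid));
            printf("%s %s %lu\n", oid_to_hex(oid), type_name(type), size);
            free(buf);
    }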
index 1efee55ef79cdcf2ca07135eed76073b91d91ff9..109ef280da9884027f17fe7f233b6b818bd71393 100755 (executable)
@@ -97,7 +97,7 @@ fi
 export DEVELOPER=1
 export DEFAULT_TEST_TARGET=prove
 export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save"
-export GIT_TEST_OPTS="--verbose-log"
+export GIT_TEST_OPTS="--verbose-log -x"
 export GIT_TEST_CLONE_2GB=YesPlease
 
 case "$jobname" in
diff --git a/ci/run-build-and-tests.sh b/ci/run-build-and-tests.sh
new file mode 100755 (executable)
index 0000000..3735ce4
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# Build and test Git
+#
+
+. ${0%/*}/lib-travisci.sh
+
+ln -s "$cache_dir/.prove" t/.prove
+
+make --jobs=2
+make --quiet test
+if test "$jobname" = "linux-gcc"
+then
+       GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test
+fi
+
+check_unignored_build_artifacts
+
+save_good_tree
diff --git a/ci/run-build.sh b/ci/run-build.sh
deleted file mode 100755 (executable)
index 4f940d1..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-#
-# Build Git
-#
-
-. ${0%/*}/lib-travisci.sh
-
-make --jobs=2
diff --git a/ci/run-tests.sh b/ci/run-tests.sh
deleted file mode 100755 (executable)
index 73e273f..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-#
-# Test Git
-#
-
-. ${0%/*}/lib-travisci.sh
-
-ln -s "$cache_dir/.prove" t/.prove
-
-make --quiet test
-if test "$jobname" = "linux-gcc"
-then
-       GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test
-fi
-
-check_unignored_build_artifacts
-
-save_good_tree
diff --git a/color.c b/color.c
index d48dd947c987cdca13f103fd8b4a2e56eceab238..f277e72e4ce04815f71c949dfdf7c89c9462c5b7 100644 (file)
--- a/color.c
+++ b/color.c
@@ -161,11 +161,6 @@ int color_parse(const char *value, char *dst)
        return color_parse_mem(value, strlen(value), dst);
 }
 
-void color_set(char *dst, const char *color_bytes)
-{
-       xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
-}
-
 /*
  * Write the ANSI color codes for "c" to "out"; the string should
  * already have the ANSI escape code in it. "out" should have enough
@@ -399,8 +394,6 @@ static int color_vfprintf(FILE *fp, const char *color, const char *fmt,
        return r;
 }
 
-
-
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
 {
        va_list args;
diff --git a/color.h b/color.h
index fd2b688dfbccbe4eef4c1f79b680db2a01e2f9ef..cd0bcedd084f3741fad55569b18ec15e12d75cf8 100644 (file)
--- a/color.h
+++ b/color.h
@@ -76,22 +76,46 @@ int git_color_config(const char *var, const char *value, void *cb);
 int git_color_default_config(const char *var, const char *value, void *cb);
 
 /*
- * Set the color buffer (which must be COLOR_MAXLEN bytes)
- * to the raw color bytes; this is useful for initializing
- * default color variables.
+ * Parse a config option, which can be a boolean or one of
+ * "never", "auto", "always". Return a constant of
+ * GIT_COLOR_NEVER for "never" or negative boolean,
+ * GIT_COLOR_ALWAYS for "always" or a positive boolean,
+ * and GIT_COLOR_AUTO for "auto".
  */
-void color_set(char *dst, const char *color_bytes);
-
 int git_config_colorbool(const char *var, const char *value);
+
+/*
+ * Return a boolean whether to use color, where the argument 'var' is
+ * one of GIT_COLOR_UNKNOWN, GIT_COLOR_NEVER, GIT_COLOR_ALWAYS, GIT_COLOR_AUTO.
+ */
 int want_color(int var);
+
+/*
+ * Translate a Git color from 'value' into a string that the terminal can
+ * interpret and store it into 'dst'. The Git color values are of the form
+ * "foreground [background] [attr]" where fore- and background can be a color
+ * name ("red"), an RGB code (#0xFF0000) or a 256-color-mode from the terminal.
+ */
 int color_parse(const char *value, char *dst);
 int color_parse_mem(const char *value, int len, char *dst);
+
+/*
+ * Output the formatted string in the specified color (and then reset to normal
+ * color so subsequent output is uncolored). Omits the color encapsulation if
+ * `color` is NULL. The `color_fprintf_ln` prints a new line after resetting
+ * the color.  The `color_print_strbuf` prints the contents of the given
+ * strbuf (BUG: but only up to its first NUL character).
+ */
 __attribute__((format (printf, 3, 4)))
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
 __attribute__((format (printf, 3, 4)))
 int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
 void color_print_strbuf(FILE *fp, const char *color, const struct strbuf *sb);
 
+/*
+ * Check if the given color is GIT_COLOR_NIL that means "no color selected".
+ * The caller needs to replace the color with the actual desired color.
+ */
 int color_is_nil(const char *color);
 
 #endif /* COLOR_H */
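With color_set() removed, code that needs a ready-to-print color string goes through color_parse()/color_parse_mem(), as the new header comments describe. A minimal usage sketch; the color value and message are illustrative:

    #include "cache.h"
    #include "color.h"

    static void warn_in_red_sketch(const char *msg)
    {
            char col[COLOR_MAXLEN];

            /* A name ("red"), an RGB value ("#ff0000") or a 256-color index parses. */
            if (color_parse("red", col) < 0)
                    die("invalid color value");
            color_fprintf_ln(stderr, col, "warning: %s", msg);
    }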
index 18c74dad51daf4cd7fb0694ad9ace62098fefac4..2ef495963fc1cf2e092778d35f1965912d7eaac0 100644 (file)
@@ -162,7 +162,7 @@ enum coalesce_direction { MATCH, BASE, NEW };
 
 /* Coalesce new lines into base by finding LCS */
 static struct lline *coalesce_lines(struct lline *base, int *lenbase,
-                                   struct lline *new, int lennew,
+                                   struct lline *newline, int lennew,
                                    unsigned long parent, long flags)
 {
        int **lcs;
@@ -170,12 +170,12 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
        struct lline *baseend, *newend = NULL;
        int i, j, origbaselen = *lenbase;
 
-       if (new == NULL)
+       if (newline == NULL)
                return base;
 
        if (base == NULL) {
                *lenbase = lennew;
-               return new;
+               return newline;
        }
 
        /*
@@ -200,7 +200,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
                directions[0][j] = NEW;
 
        for (i = 1, baseend = base; i < origbaselen + 1; i++) {
-               for (j = 1, newend = new; j < lennew + 1; j++) {
+               for (j = 1, newend = newline; j < lennew + 1; j++) {
                        if (match_string_spaces(baseend->line, baseend->len,
                                                newend->line, newend->len, flags)) {
                                lcs[i][j] = lcs[i - 1][j - 1] + 1;
@@ -241,7 +241,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
                        if (lline->prev)
                                lline->prev->next = lline->next;
                        else
-                               new = lline->next;
+                               newline = lline->next;
                        if (lline->next)
                                lline->next->prev = lline->prev;
 
@@ -270,7 +270,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
                }
        }
 
-       newend = new;
+       newend = newline;
        while (newend) {
                struct lline *lline = newend;
                newend = newend->next;
@@ -306,7 +306,7 @@ static char *grab_blob(const struct object_id *oid, unsigned int mode,
                *size = fill_textconv(textconv, df, &blob);
                free_filespec(df);
        } else {
-               blob = read_sha1_file(oid->hash, &type, size);
+               blob = read_object_file(oid, &type, size);
                if (type != OBJ_BLOB)
                        die("object '%s' is not a blob!", oid_to_hex(oid));
        }
@@ -915,11 +915,11 @@ static void show_combined_header(struct combine_diff_path *elem,
                         "", elem->path, line_prefix, c_meta, c_reset);
        printf("%s%sindex ", line_prefix, c_meta);
        for (i = 0; i < num_parent; i++) {
-               abb = find_unique_abbrev(elem->parent[i].oid.hash,
+               abb = find_unique_abbrev(&elem->parent[i].oid,
                                         abbrev);
                printf("%s%s", i ? "," : "", abb);
        }
-       abb = find_unique_abbrev(elem->oid.hash, abbrev);
+       abb = find_unique_abbrev(&elem->oid, abbrev);
        printf("..%s%s\n", abb, c_reset);
 
        if (mode_differs) {
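The call-site change here follows the find_unique_abbrev() prototype change in the cache.h hunk above: the abbreviation helpers now take a struct object_id. A tiny self-contained sketch (helper name illustrative):

    #include "cache.h"

    static void print_abbrev_sketch(const struct object_id *oid)
    {
            printf("%s\n", find_unique_abbrev(oid, DEFAULT_ABBREV));
    }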
index c948c8b0e46a4b13d1e3045944ca1cea05d75517..ca474a7c112855a49b77cce7cdfb83937fa17fae 100644 (file)
--- a/commit.c
+++ b/commit.c
@@ -266,13 +266,13 @@ const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
        if (!ret) {
                enum object_type type;
                unsigned long size;
-               ret = read_sha1_file(commit->object.oid.hash, &type, &size);
+               ret = read_object_file(&commit->object.oid, &type, &size);
                if (!ret)
                        die("cannot read commit object %s",
                            oid_to_hex(&commit->object.oid));
                if (type != OBJ_COMMIT)
                        die("expected commit for %s, got %s",
-                           oid_to_hex(&commit->object.oid), typename(type));
+                           oid_to_hex(&commit->object.oid), type_name(type));
                if (sizep)
                        *sizep = size;
        }
@@ -383,7 +383,7 @@ int parse_commit_gently(struct commit *item, int quiet_on_missing)
                return -1;
        if (item->object.parsed)
                return 0;
-       buffer = read_sha1_file(item->object.oid.hash, &type, &size);
+       buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
@@ -859,19 +859,19 @@ struct commit_list *get_octopus_merge_bases(struct commit_list *in)
        commit_list_insert(in->item, &ret);
 
        for (i = in->next; i; i = i->next) {
-               struct commit_list *new = NULL, *end = NULL;
+               struct commit_list *new_commits = NULL, *end = NULL;
 
                for (j = ret; j; j = j->next) {
                        struct commit_list *bases;
                        bases = get_merge_bases(i->item, j->item);
-                       if (!new)
-                               new = bases;
+                       if (!new_commits)
+                               new_commits = bases;
                        else
                                end->next = bases;
                        for (k = bases; k; k = k->next)
                                end = k;
                }
-               ret = new;
+               ret = new_commits;
        }
        return ret;
 }
@@ -1206,7 +1206,7 @@ static void handle_signed_tag(struct commit *parent, struct commit_extra_header
        desc = merge_remote_util(parent);
        if (!desc || !desc->obj)
                return;
-       buf = read_sha1_file(desc->obj->oid.hash, &type, &size);
+       buf = read_object_file(&desc->obj->oid, &type, &size);
        if (!buf || type != OBJ_TAG)
                goto free_return;
        len = parse_signature(buf, size);
@@ -1378,9 +1378,8 @@ void free_commit_extra_headers(struct commit_extra_header *extra)
        }
 }
 
-int commit_tree(const char *msg, size_t msg_len,
-               const unsigned char *tree,
-               struct commit_list *parents, unsigned char *ret,
+int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree,
+               struct commit_list *parents, struct object_id *ret,
                const char *author, const char *sign_commit)
 {
        struct commit_extra_header *extra = NULL, **tail = &extra;
@@ -1509,8 +1508,8 @@ N_("Warning: commit message did not conform to UTF-8.\n"
    "variable i18n.commitencoding to the encoding your project uses.\n");
 
 int commit_tree_extended(const char *msg, size_t msg_len,
-                        const unsigned char *tree,
-                        struct commit_list *parents, unsigned char *ret,
+                        const struct object_id *tree,
+                        struct commit_list *parents, struct object_id *ret,
                         const char *author, const char *sign_commit,
                         struct commit_extra_header *extra)
 {
@@ -1518,7 +1517,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
        int encoding_is_utf8;
        struct strbuf buffer;
 
-       assert_sha1_type(tree, OBJ_TREE);
+       assert_oid_type(tree, OBJ_TREE);
 
        if (memchr(msg, '\0', msg_len))
                return error("a NUL byte in commit log message not allowed.");
@@ -1527,7 +1526,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
        encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
 
        strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */
-       strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree));
+       strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree));
 
        /*
         * NOTE! This ordering means that the same exact tree merged with a
@@ -1566,7 +1565,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
                goto out;
        }
 
-       result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
+       result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
 out:
        strbuf_release(&buffer);
        return result;
@@ -1615,11 +1614,11 @@ struct commit *get_merge_parent(const char *name)
 struct commit_list **commit_list_append(struct commit *commit,
                                        struct commit_list **next)
 {
-       struct commit_list *new = xmalloc(sizeof(struct commit_list));
-       new->item = commit;
-       *next = new;
-       new->next = NULL;
-       return &new->next;
+       struct commit_list *new_commit = xmalloc(sizeof(struct commit_list));
+       new_commit->item = commit;
+       *next = new_commit;
+       new_commit->next = NULL;
+       return &new_commit->next;
 }
 
 const char *find_commit_header(const char *msg, const char *key, size_t *out_len)
index 425f4027752fb47190a53ddc72a64ebf9d928633..0fb8271665c6c98ccca803fbe002327bf38fcfb3 100644 (file)
--- a/commit.h
+++ b/commit.h
@@ -262,14 +262,15 @@ extern void append_merge_tag_headers(struct commit_list *parents,
                                     struct commit_extra_header ***tail);
 
 extern int commit_tree(const char *msg, size_t msg_len,
-                      const unsigned char *tree,
-                      struct commit_list *parents, unsigned char *ret,
+                      const struct object_id *tree,
+                      struct commit_list *parents, struct object_id *ret,
                       const char *author, const char *sign_commit);
 
 extern int commit_tree_extended(const char *msg, size_t msg_len,
-                               const unsigned char *tree,
-                               struct commit_list *parents, unsigned char *ret,
-                               const char *author, const char *sign_commit,
+                               const struct object_id *tree,
+                               struct commit_list *parents,
+                               struct object_id *ret, const char *author,
+                               const char *sign_commit,
                                struct commit_extra_header *);
 
 extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
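commit_tree() and commit_tree_extended() now take the tree as a struct object_id and return the new commit's id the same way. A hedged sketch of a caller under the new signatures; the message is a placeholder, a NULL author means "use the configured identity", and a NULL sign_commit disables signing:

    #include "cache.h"
    #include "commit.h"

    static void write_commit_sketch(const struct object_id *tree,
                                    struct commit_list *parents)
    {
            struct object_id commit_oid;
            struct strbuf msg = STRBUF_INIT;

            strbuf_addstr(&msg, "illustrative commit message\n");
            if (commit_tree(msg.buf, msg.len, tree, parents, &commit_oid,
                            NULL, NULL))
                    die("failed to write commit object");
            printf("%s\n", oid_to_hex(&commit_oid));
            strbuf_release(&msg);
    }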
index 2d44d21aca8d31f67b16cb9a90245b4db526ff76..a67872babf332b7d8177e8477c2ee595d8cbbd3f 100644 (file)
@@ -761,6 +761,17 @@ int mingw_utime (const char *file_name, const struct utimbuf *times)
        return rc;
 }
 
+#undef strftime
+size_t mingw_strftime(char *s, size_t max,
+                     const char *format, const struct tm *tm)
+{
+       size_t ret = strftime(s, max, format, tm);
+
+       if (!ret && errno == EINVAL)
+               die("invalid strftime format: '%s'", format);
+       return ret;
+}
+
 unsigned int sleep (unsigned int seconds)
 {
        Sleep(seconds*1000);
index e03aecfe2e6556e1ef513922104557373eaa9260..571019d0bddceaf3245e15dbcc4ebfb70a501d17 100644 (file)
@@ -361,6 +361,9 @@ int mingw_fstat(int fd, struct stat *buf);
 
 int mingw_utime(const char *file_name, const struct utimbuf *times);
 #define utime mingw_utime
+size_t mingw_strftime(char *s, size_t max,
+                  const char *format, const struct tm *tm);
+#define strftime mingw_strftime
 
 pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **env,
                     const char *dir,
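The wrapper above dies when strftime() returns 0 with errno set to EINVAL, which is how the Windows C runtime reports a format it rejects; redefining the macro lets portable callers keep the usual pattern and get a clear die() on Windows instead of silently empty output. A standard-C sketch of such a caller (no git internals, helper name illustrative):

    #include <stdio.h>
    #include <time.h>

    static void print_timestamp_sketch(void)
    {
            char out[64];
            time_t t = time(NULL);
            const struct tm *tm = localtime(&t);

            /* In git on Windows this resolves to mingw_strftime(), which dies
             * on an invalid format instead of returning 0. */
            if (strftime(out, sizeof(out), "%Y-%m-%d %H:%M:%S", tm))
                    printf("%s\n", out);
    }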
index b0c20e6cb8ab1f5dc3cd3b4573b7e9d6dbf8cd2b..c698988f5e11cd416bef9a77c998d94b9ef87930 100644 (file)
--- a/config.c
+++ b/config.c
@@ -1488,7 +1488,7 @@ int git_config_from_blob_oid(config_fn_t fn,
        unsigned long size;
        int ret;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return error("unable to load config blob object '%s'", name);
        if (type != OBJ_BLOB) {
index 685a80d13843800be3c633539b0a22e88652444a..6a1d0de0cc571f395eeeb8f149d4377a1a5e1602 100644 (file)
@@ -182,7 +182,6 @@ ifeq ($(uname_O),Cygwin)
        NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
        X = .exe
        UNRELIABLE_FSTAT = UnfortunatelyYes
-       SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield
        OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
        MMAP_PREVENTS_DELETE = UnfortunatelyYes
        COMPAT_OBJS += compat/cygwin.o
index 7f8415140f309e0522310fac3160b5a01cefc7cd..6f1fd9df35ef7d477ddb75de454a4708176ad634 100644 (file)
@@ -254,25 +254,25 @@ GIT_PARSE_WITH([openssl]))
 # Perl-compatible regular expressions instead of standard or extended
 # POSIX regular expressions.
 #
-# Currently USE_LIBPCRE is a synonym for USE_LIBPCRE1, define
-# USE_LIBPCRE2 instead if you'd like to use version 2 of the PCRE
-# library. The USE_LIBPCRE flag will likely be changed to mean v2 by
-# default in future releases.
+# USE_LIBPCRE is a synonym for USE_LIBPCRE2, define USE_LIBPCRE1
+# instead if you'd like to use the legacy version 1 of the PCRE
+# library. Support for version 1 will likely be removed in some future
+# release of Git, as upstream has all but abandoned it.
 #
 # Define LIBPCREDIR=/foo/bar if your PCRE header and library files are in
 # /foo/bar/include and /foo/bar/lib directories.
 #
 AC_ARG_WITH(libpcre,
-AS_HELP_STRING([--with-libpcre],[synonym for --with-libpcre1]),
+AS_HELP_STRING([--with-libpcre],[synonym for --with-libpcre2]),
     if test "$withval" = "no"; then
-       USE_LIBPCRE1=
+       USE_LIBPCRE2=
     elif test "$withval" = "yes"; then
-       USE_LIBPCRE1=YesPlease
+       USE_LIBPCRE2=YesPlease
     else
-       USE_LIBPCRE1=YesPlease
+       USE_LIBPCRE2=YesPlease
        LIBPCREDIR=$withval
        AC_MSG_NOTICE([Setting LIBPCREDIR to $LIBPCREDIR])
-        dnl USE_LIBPCRE1 can still be modified below, so don't substitute
+        dnl USE_LIBPCRE2 can still be modified below, so don't substitute
         dnl it yet.
        GIT_CONF_SUBST([LIBPCREDIR])
     fi)
@@ -296,6 +296,10 @@ AS_HELP_STRING([],           [ARG can be also prefix for libpcre library and hea
 AC_ARG_WITH(libpcre2,
 AS_HELP_STRING([--with-libpcre2],[support Perl-compatible regexes via libpcre2 (default is NO)])
 AS_HELP_STRING([],           [ARG can be also prefix for libpcre library and headers]),
+    if test -n "$USE_LIBPCRE2"; then
+        AC_MSG_ERROR([Only supply one of --with-libpcre or its synonym --with-libpcre2!])
+    fi
+
     if test -n "$USE_LIBPCRE1"; then
         AC_MSG_ERROR([Only supply one of --with-libpcre1 or --with-libpcre2!])
     fi
@@ -549,8 +553,8 @@ if test -n "$USE_LIBPCRE1"; then
 GIT_STASH_FLAGS($LIBPCREDIR)
 
 AC_CHECK_LIB([pcre], [pcre_version],
-[USE_LIBPCRE=YesPlease],
-[USE_LIBPCRE=])
+[USE_LIBPCRE1=YesPlease],
+[USE_LIBPCRE1=])
 
 GIT_UNSTASH_FLAGS($LIBPCREDIR)
 
index c3a014c5babf72ee4c0d135fec264afb37b040de..54971166ac4ba6646ea52051f8131dc16ea2e0ae 100644 (file)
--- a/connect.c
+++ b/connect.c
 #include "sha1-array.h"
 #include "transport.h"
 #include "strbuf.h"
+#include "version.h"
 #include "protocol.h"
 
-static char *server_capabilities;
+static char *server_capabilities_v1;
+static struct argv_array server_capabilities_v2 = ARGV_ARRAY_INIT;
 static const char *parse_feature_value(const char *, const char *, int *);
 
 static int check_ref(const char *name, unsigned int flags)
@@ -48,6 +50,12 @@ int check_ref_type(const struct ref *ref, int flags)
 
 static void die_initial_contact(int unexpected)
 {
+       /*
+        * A hang-up after seeing some response from the other end
+        * means that it is unexpected, as we know the other end is
+        * willing to talk to us.  A hang-up before seeing any
+        * response does not necessarily mean an ACL problem, though.
+        */
        if (unexpected)
                die(_("The remote end hung up upon initial contact"));
        else
@@ -56,6 +64,92 @@ static void die_initial_contact(int unexpected)
                      "and the repository exists."));
 }
 
+/* Checks if the server supports the capability 'c' */
+int server_supports_v2(const char *c, int die_on_error)
+{
+       int i;
+
+       for (i = 0; i < server_capabilities_v2.argc; i++) {
+               const char *out;
+               if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+                   (!*out || *out == '='))
+                       return 1;
+       }
+
+       if (die_on_error)
+               die("server doesn't support '%s'", c);
+
+       return 0;
+}
+
+int server_supports_feature(const char *c, const char *feature,
+                           int die_on_error)
+{
+       int i;
+
+       for (i = 0; i < server_capabilities_v2.argc; i++) {
+               const char *out;
+               if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+                   (!*out || *(out++) == '=')) {
+                       if (parse_feature_request(out, feature))
+                               return 1;
+                       else
+                               break;
+               }
+       }
+
+       if (die_on_error)
+               die("server doesn't support feature '%s'", feature);
+
+       return 0;
+}
+
+static void process_capabilities_v2(struct packet_reader *reader)
+{
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL)
+               argv_array_push(&server_capabilities_v2, reader->line);
+
+       if (reader->status != PACKET_READ_FLUSH)
+               die("expected flush after capabilities");
+}
+
+enum protocol_version discover_version(struct packet_reader *reader)
+{
+       enum protocol_version version = protocol_unknown_version;
+
+       /*
+        * Peek the first line of the server's response to
+        * determine the protocol version the server is speaking.
+        */
+       switch (packet_reader_peek(reader)) {
+       case PACKET_READ_EOF:
+               die_initial_contact(0);
+       case PACKET_READ_FLUSH:
+       case PACKET_READ_DELIM:
+               version = protocol_v0;
+               break;
+       case PACKET_READ_NORMAL:
+               version = determine_protocol_version_client(reader->line);
+               break;
+       }
+
+       switch (version) {
+       case protocol_v2:
+               process_capabilities_v2(reader);
+               break;
+       case protocol_v1:
+               /* Read the peeked version line */
+               packet_reader_read(reader);
+               break;
+       case protocol_v0:
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
+
+       return version;
+}
+
 static void parse_one_symref_info(struct string_list *symref, const char *val, int len)
 {
        char *sym, *target;
@@ -85,7 +179,7 @@ static void parse_one_symref_info(struct string_list *symref, const char *val, i
 static void annotate_refs_with_symref_info(struct ref *ref)
 {
        struct string_list symref = STRING_LIST_INIT_DUP;
-       const char *feature_list = server_capabilities;
+       const char *feature_list = server_capabilities_v1;
 
        while (feature_list) {
                int len;
@@ -109,60 +203,21 @@ static void annotate_refs_with_symref_info(struct ref *ref)
        string_list_clear(&symref, 0);
 }
 
-/*
- * Read one line of a server's ref advertisement into packet_buffer.
- */
-static int read_remote_ref(int in, char **src_buf, size_t *src_len,
-                          int *responded)
+static void process_capabilities(const char *line, int *len)
 {
-       int len = packet_read(in, src_buf, src_len,
-                             packet_buffer, sizeof(packet_buffer),
-                             PACKET_READ_GENTLE_ON_EOF |
-                             PACKET_READ_CHOMP_NEWLINE);
-       const char *arg;
-       if (len < 0)
-               die_initial_contact(*responded);
-       if (len > 4 && skip_prefix(packet_buffer, "ERR ", &arg))
-               die("remote error: %s", arg);
-
-       *responded = 1;
-
-       return len;
-}
-
-#define EXPECTING_PROTOCOL_VERSION 0
-#define EXPECTING_FIRST_REF 1
-#define EXPECTING_REF 2
-#define EXPECTING_SHALLOW 3
-
-/* Returns 1 if packet_buffer is a protocol version pkt-line, 0 otherwise. */
-static int process_protocol_version(void)
-{
-       switch (determine_protocol_version_client(packet_buffer)) {
-       case protocol_v1:
-               return 1;
-       case protocol_v0:
-               return 0;
-       default:
-               die("server is speaking an unknown protocol");
-       }
-}
-
-static void process_capabilities(int *len)
-{
-       int nul_location = strlen(packet_buffer);
+       int nul_location = strlen(line);
        if (nul_location == *len)
                return;
-       server_capabilities = xstrdup(packet_buffer + nul_location + 1);
+       server_capabilities_v1 = xstrdup(line + nul_location + 1);
        *len = nul_location;
 }
 
-static int process_dummy_ref(void)
+static int process_dummy_ref(const char *line)
 {
        struct object_id oid;
        const char *name;
 
-       if (parse_oid_hex(packet_buffer, &oid, &name))
+       if (parse_oid_hex(line, &oid, &name))
                return 0;
        if (*name != ' ')
                return 0;
@@ -171,20 +226,20 @@ static int process_dummy_ref(void)
        return !oidcmp(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
 }
 
-static void check_no_capabilities(int len)
+static void check_no_capabilities(const char *line, int len)
 {
-       if (strlen(packet_buffer) != len)
+       if (strlen(line) != len)
                warning("Ignoring capabilities after first line '%s'",
-                       packet_buffer + strlen(packet_buffer));
+                       line + strlen(line));
 }
 
-static int process_ref(int len, struct ref ***list, unsigned int flags,
-                      struct oid_array *extra_have)
+static int process_ref(const char *line, int len, struct ref ***list,
+                      unsigned int flags, struct oid_array *extra_have)
 {
        struct object_id old_oid;
        const char *name;
 
-       if (parse_oid_hex(packet_buffer, &old_oid, &name))
+       if (parse_oid_hex(line, &old_oid, &name))
                return 0;
        if (*name != ' ')
                return 0;
@@ -200,16 +255,17 @@ static int process_ref(int len, struct ref ***list, unsigned int flags,
                **list = ref;
                *list = &ref->next;
        }
-       check_no_capabilities(len);
+       check_no_capabilities(line, len);
        return 1;
 }
 
-static int process_shallow(int len, struct oid_array *shallow_points)
+static int process_shallow(const char *line, int len,
+                          struct oid_array *shallow_points)
 {
        const char *arg;
        struct object_id old_oid;
 
-       if (!skip_prefix(packet_buffer, "shallow ", &arg))
+       if (!skip_prefix(line, "shallow ", &arg))
                return 0;
 
        if (get_oid_hex(arg, &old_oid))
@@ -217,60 +273,68 @@ static int process_shallow(int len, struct oid_array *shallow_points)
        if (!shallow_points)
                die("repository on the other end cannot be shallow");
        oid_array_append(shallow_points, &old_oid);
-       check_no_capabilities(len);
+       check_no_capabilities(line, len);
        return 1;
 }
 
+enum get_remote_heads_state {
+       EXPECTING_FIRST_REF = 0,
+       EXPECTING_REF,
+       EXPECTING_SHALLOW,
+       EXPECTING_DONE,
+};
+
 /*
  * Read all the refs from the other end
  */
-struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
+struct ref **get_remote_heads(struct packet_reader *reader,
                              struct ref **list, unsigned int flags,
                              struct oid_array *extra_have,
                              struct oid_array *shallow_points)
 {
        struct ref **orig_list = list;
-
-       /*
-        * A hang-up after seeing some response from the other end
-        * means that it is unexpected, as we know the other end is
-        * willing to talk to us.  A hang-up before seeing any
-        * response does not necessarily mean an ACL problem, though.
-        */
-       int responded = 0;
-       int len;
-       int state = EXPECTING_PROTOCOL_VERSION;
+       int len = 0;
+       enum get_remote_heads_state state = EXPECTING_FIRST_REF;
+       const char *arg;
 
        *list = NULL;
 
-       while ((len = read_remote_ref(in, &src_buf, &src_len, &responded))) {
+       while (state != EXPECTING_DONE) {
+               switch (packet_reader_read(reader)) {
+               case PACKET_READ_EOF:
+                       die_initial_contact(1);
+               case PACKET_READ_NORMAL:
+                       len = reader->pktlen;
+                       if (len > 4 && skip_prefix(reader->line, "ERR ", &arg))
+                               die("remote error: %s", arg);
+                       break;
+               case PACKET_READ_FLUSH:
+                       state = EXPECTING_DONE;
+                       break;
+               case PACKET_READ_DELIM:
+                       die("invalid packet");
+               }
+
                switch (state) {
-               case EXPECTING_PROTOCOL_VERSION:
-                       if (process_protocol_version()) {
-                               state = EXPECTING_FIRST_REF;
-                               break;
-                       }
-                       state = EXPECTING_FIRST_REF;
-                       /* fallthrough */
                case EXPECTING_FIRST_REF:
-                       process_capabilities(&len);
-                       if (process_dummy_ref()) {
+                       process_capabilities(reader->line, &len);
+                       if (process_dummy_ref(reader->line)) {
                                state = EXPECTING_SHALLOW;
                                break;
                        }
                        state = EXPECTING_REF;
                        /* fallthrough */
                case EXPECTING_REF:
-                       if (process_ref(len, &list, flags, extra_have))
+                       if (process_ref(reader->line, len, &list, flags, extra_have))
                                break;
                        state = EXPECTING_SHALLOW;
                        /* fallthrough */
                case EXPECTING_SHALLOW:
-                       if (process_shallow(len, shallow_points))
+                       if (process_shallow(reader->line, len, shallow_points))
                                break;
-                       die("protocol error: unexpected '%s'", packet_buffer);
-               default:
-                       die("unexpected state %d", state);
+                       die("protocol error: unexpected '%s'", reader->line);
+               case EXPECTING_DONE:
+                       break;
                }
        }
 
@@ -279,6 +343,105 @@ struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
        return list;
 }
 
+/* Returns 1 when a valid ref has been added to `list`, 0 otherwise */
+static int process_ref_v2(const char *line, struct ref ***list)
+{
+       int ret = 1;
+       int i = 0;
+       struct object_id old_oid;
+       struct ref *ref;
+       struct string_list line_sections = STRING_LIST_INIT_DUP;
+       const char *end;
+
+       /*
+        * Ref lines have a number of fields which are space delimited.  The
+        * first field is the OID of the ref.  The second field is the ref
+        * name.  Subsequent fields (symref-target and peeled) are optional and
+        * don't have a particular order.
+        */
+       if (string_list_split(&line_sections, line, ' ', -1) < 2) {
+               ret = 0;
+               goto out;
+       }
+
+       if (parse_oid_hex(line_sections.items[i++].string, &old_oid, &end) ||
+           *end) {
+               ret = 0;
+               goto out;
+       }
+
+       ref = alloc_ref(line_sections.items[i++].string);
+
+       oidcpy(&ref->old_oid, &old_oid);
+       **list = ref;
+       *list = &ref->next;
+
+       for (; i < line_sections.nr; i++) {
+               const char *arg = line_sections.items[i].string;
+               if (skip_prefix(arg, "symref-target:", &arg))
+                       ref->symref = xstrdup(arg);
+
+               if (skip_prefix(arg, "peeled:", &arg)) {
+                       struct object_id peeled_oid;
+                       char *peeled_name;
+                       struct ref *peeled;
+                       if (parse_oid_hex(arg, &peeled_oid, &end) || *end) {
+                               ret = 0;
+                               goto out;
+                       }
+
+                       peeled_name = xstrfmt("%s^{}", ref->name);
+                       peeled = alloc_ref(peeled_name);
+
+                       oidcpy(&peeled->old_oid, &peeled_oid);
+                       **list = peeled;
+                       *list = &peeled->next;
+
+                       free(peeled_name);
+               }
+       }
+
+out:
+       string_list_clear(&line_sections, 0);
+       return ret;
+}
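
For illustration only, the comment above describes the shape of a protocol v2 ls-refs response line: an object ID, the ref name, and then optional space-separated attributes such as symref-target:<ref> or peeled:<oid>, in no particular order. The standalone C sketch below (not git code; the object ID and ref names are invented) just splits one such hypothetical line on spaces, which is essentially what string_list_split() does for process_ref_v2() before the attribute prefixes are examined.

/*
 * Standalone sketch: split a hypothetical ls-refs response line on
 * spaces.  The object ID and ref names are made up for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[] = "7217a7c7e582c46cec22a130adf4b9d7d950fba0 "
		      "HEAD symref-target:refs/heads/master";
	char *field = strtok(line, " ");
	int i = 0;

	while (field) {
		printf("field %d: %s\n", i++, field);
		field = strtok(NULL, " ");
	}
	return 0;
}
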
+
+struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+                            struct ref **list, int for_push,
+                            const struct argv_array *ref_prefixes)
+{
+       int i;
+       *list = NULL;
+
+       if (server_supports_v2("ls-refs", 1))
+               packet_write_fmt(fd_out, "command=ls-refs\n");
+
+       if (server_supports_v2("agent", 0))
+               packet_write_fmt(fd_out, "agent=%s", git_user_agent_sanitized());
+
+       packet_delim(fd_out);
+       /* When pushing we don't want to request the peeled tags */
+       if (!for_push)
+               packet_write_fmt(fd_out, "peel\n");
+       packet_write_fmt(fd_out, "symrefs\n");
+       for (i = 0; ref_prefixes && i < ref_prefixes->argc; i++) {
+               packet_write_fmt(fd_out, "ref-prefix %s\n",
+                                ref_prefixes->argv[i]);
+       }
+       packet_flush(fd_out);
+
+       /* Process response from server */
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+               if (!process_ref_v2(reader->line, &list))
+                       die("invalid ls-refs response: %s", reader->line);
+       }
+
+       if (reader->status != PACKET_READ_FLUSH)
+               die("expected flush after ref listing");
+
+       return list;
+}
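
As a rough, hedged illustration of what get_remote_refs() writes to fd_out, the standalone sketch below prints the pkt-line framing of a minimal ls-refs request: each data packet carries a four-hex-digit length that counts the four prefix bytes themselves, "0001" is the delim-pkt separating the command and capabilities from the arguments, and "0000" is the closing flush-pkt. The agent capability is omitted here and the ref-prefix value is invented.

/*
 * Sketch only: emit the pkt-line stream for a minimal ls-refs request.
 * The agent capability is left out and the ref-prefix is illustrative.
 */
#include <stdio.h>
#include <string.h>

static void pkt_line(const char *payload)
{
	/* the four-hex-digit length counts itself plus the payload */
	printf("%04x", (unsigned int)(strlen(payload) + 4));
	fputs(payload, stdout);
}

int main(void)
{
	pkt_line("command=ls-refs\n");
	fputs("0001", stdout);		/* delim-pkt */
	pkt_line("peel\n");
	pkt_line("symrefs\n");
	pkt_line("ref-prefix refs/heads/\n");
	fputs("0000", stdout);		/* flush-pkt */
	return 0;
}

The first packet comes out as "0014command=ls-refs" followed by a newline, which is the same framing that packet_write_fmt() produces.
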
+
 static const char *parse_feature_value(const char *feature_list, const char *feature, int *lenp)
 {
        int len;
@@ -323,7 +486,7 @@ int parse_feature_request(const char *feature_list, const char *feature)
 
 const char *server_feature_value(const char *feature, int *len)
 {
-       return parse_feature_value(server_capabilities, feature, len);
+       return parse_feature_value(server_capabilities_v1, feature, len);
 }
 
 int server_supports(const char *feature)
@@ -872,6 +1035,7 @@ static enum ssh_variant determine_ssh_variant(const char *ssh_command,
  */
 static struct child_process *git_connect_git(int fd[2], char *hostandport,
                                             const char *path, const char *prog,
+                                            enum protocol_version version,
                                             int flags)
 {
        struct child_process *conn;
@@ -910,10 +1074,10 @@ static struct child_process *git_connect_git(int fd[2], char *hostandport,
                    target_host, 0);
 
        /* If using a new protocol version, put it here after a second null byte */
-       if (get_protocol_version_config() > 0) {
+       if (version > 0) {
                strbuf_addch(&request, '\0');
                strbuf_addf(&request, "version=%d%c",
-                           get_protocol_version_config(), '\0');
+                           version, '\0');
        }
 
        packet_write(fd[1], request.buf, request.len);
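
To make the "second null byte" comment above concrete: for protocol version 2, the request handed to packet_write() ends up looking roughly like the buffer in the standalone sketch below, where the host name and repository path are invented for illustration. The extra NUL after "host=..." separates the original request from the appended "version=<n>" field, and the whole buffer is framed as a single pkt-line.

/*
 * Standalone sketch (host and path are made up): the git:// request
 * with "version=2" appended after a second NUL byte, framed as a
 * single pkt-line.
 */
#include <stdio.h>

int main(void)
{
	static const char request[] =
		"git-upload-pack /project.git\0host=example.com\0\0version=2\0";
	size_t len = sizeof(request) - 1;	/* keep embedded NULs, drop the terminator */

	printf("%04x", (unsigned int)(len + 4));	/* pkt-line length prefix */
	fwrite(request, 1, len, stdout);
	return 0;
}
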
@@ -929,14 +1093,14 @@ static struct child_process *git_connect_git(int fd[2], char *hostandport,
  */
 static void push_ssh_options(struct argv_array *args, struct argv_array *env,
                             enum ssh_variant variant, const char *port,
-                            int flags)
+                            enum protocol_version version, int flags)
 {
        if (variant == VARIANT_SSH &&
-           get_protocol_version_config() > 0) {
+           version > 0) {
                argv_array_push(args, "-o");
                argv_array_push(args, "SendEnv=" GIT_PROTOCOL_ENVIRONMENT);
                argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
-                                get_protocol_version_config());
+                                version);
        }
 
        if (flags & CONNECT_IPV4) {
@@ -989,7 +1153,8 @@ static void push_ssh_options(struct argv_array *args, struct argv_array *env,
 
 /* Prepare a child_process for use by Git's SSH-tunneled transport. */
 static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
-                         const char *port, int flags)
+                         const char *port, enum protocol_version version,
+                         int flags)
 {
        const char *ssh;
        enum ssh_variant variant;
@@ -1023,14 +1188,14 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
                argv_array_push(&detect.args, ssh);
                argv_array_push(&detect.args, "-G");
                push_ssh_options(&detect.args, &detect.env_array,
-                                VARIANT_SSH, port, flags);
+                                VARIANT_SSH, port, version, flags);
                argv_array_push(&detect.args, ssh_host);
 
                variant = run_command(&detect) ? VARIANT_SIMPLE : VARIANT_SSH;
        }
 
        argv_array_push(&conn->args, ssh);
-       push_ssh_options(&conn->args, &conn->env_array, variant, port, flags);
+       push_ssh_options(&conn->args, &conn->env_array, variant, port, version, flags);
        argv_array_push(&conn->args, ssh_host);
 }
 
@@ -1051,6 +1216,15 @@ struct child_process *git_connect(int fd[2], const char *url,
        char *hostandport, *path;
        struct child_process *conn;
        enum protocol protocol;
+       enum protocol_version version = get_protocol_version_config();
+
+       /*
+        * NEEDSWORK: If we are trying to use protocol v2 and we are planning
+        * to perform a push, then fallback to v0 since the client doesn't know
+        * how to push yet using v2.
+        */
+       if (version == protocol_v2 && !strcmp("git-receive-pack", prog))
+               version = protocol_v0;
 
        /* Without this we cannot rely on waitpid() to tell
         * what happened to our children.
@@ -1065,7 +1239,7 @@ struct child_process *git_connect(int fd[2], const char *url,
                printf("Diag: path=%s\n", path ? path : "NULL");
                conn = NULL;
        } else if (protocol == PROTO_GIT) {
-               conn = git_connect_git(fd, hostandport, path, prog, flags);
+               conn = git_connect_git(fd, hostandport, path, prog, version, flags);
        } else {
                struct strbuf cmd = STRBUF_INIT;
                const char *const *var;
@@ -1108,12 +1282,12 @@ struct child_process *git_connect(int fd[2], const char *url,
                                strbuf_release(&cmd);
                                return NULL;
                        }
-                       fill_ssh_args(conn, ssh_host, port, flags);
+                       fill_ssh_args(conn, ssh_host, port, version, flags);
                } else {
                        transport_check_allowed("file");
-                       if (get_protocol_version_config() > 0) {
+                       if (version > 0) {
                                argv_array_pushf(&conn->env_array, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
-                                                get_protocol_version_config());
+                                                version);
                        }
                }
                argv_array_push(&conn->args, cmd.buf);
index 01f14cdf3fa4e6b6c8cd3b4c9ec3c3d55e7fc04f..0e69c6709c9fdb83b4888443f490d79ef504c8a3 100644 (file)
--- a/connect.h
+++ b/connect.h
@@ -13,4 +13,11 @@ extern int parse_feature_request(const char *features, const char *feature);
 extern const char *server_feature_value(const char *feature, int *len_ret);
 extern int url_is_local_not_ssh(const char *url);
 
+struct packet_reader;
+extern enum protocol_version discover_version(struct packet_reader *reader);
+
+extern int server_supports_v2(const char *c, int die_on_error);
+extern int server_supports_feature(const char *c, const char *feature,
+                                  int die_on_error);
+
 #endif
index 6fe8727421101efc04c15d8c6def448984eff74d..e34eada1ad52933230c7fcace595677747f9e1d6 100644 (file)
@@ -1,21 +1,6 @@
 @ strbuf_addf_with_format_only @
 expression E;
-constant fmt;
-@@
-  strbuf_addf(E,
-(
-  fmt
-|
-  _(fmt)
-)
-  );
-
-@ script:python @
-fmt << strbuf_addf_with_format_only.fmt;
-@@
-cocci.include_match("%" not in fmt)
-
-@ extends strbuf_addf_with_format_only @
+constant fmt !~ "%";
 @@
 - strbuf_addf
 + strbuf_addstr
index 88813e91244d820b4aa2c0fb5e60a44db03c3e59..a7570739454ac793430b7f0edb8ba229f3ff50d8 100644 (file)
@@ -29,6 +29,8 @@
 # tell the completion to use commit completion.  This also works with aliases
 # of form "!sh -c '...'".  For example, "!sh -c ': git commit ; ... '".
 #
+# Compatible with bash 3.2.57.
+#
 # You can set the following environment variables to influence the behavior of
 # the completion routines:
 #
@@ -280,6 +282,43 @@ __gitcomp ()
        esac
 }
 
+# Clear the variables caching builtins' options when (re-)sourcing
+# the completion script.
+unset $(set |sed -ne 's/^\(__gitcomp_builtin_[a-zA-Z0-9_][a-zA-Z0-9_]*\)=.*/\1/p') 2>/dev/null
+
+# This function is equivalent to
+#
+#    __gitcomp "$(git xxx --git-completion-helper) ..."
+#
+# except that the output is cached. Accepts 1-3 arguments:
+# 1: the git command to execute; this is also the cache key
+# 2: extra options to be added on top (e.g. negative forms)
+# 3: options to be excluded
+__gitcomp_builtin ()
+{
+       # spaces must be replaced with underscore for multi-word
+       # commands, e.g. "git remote add" becomes remote_add.
+       local cmd="$1"
+       local incl="$2"
+       local excl="$3"
+
+       local var=__gitcomp_builtin_"${cmd/-/_}"
+       local options
+       eval "options=\$$var"
+
+       if [ -z "$options" ]; then
+               # leading and trailing spaces are significant to make
+               # option removal work correctly.
+               options=" $(__git ${cmd/_/ } --git-completion-helper) $incl "
+               for i in $excl; do
+                       options="${options/ $i / }"
+               done
+               eval "$var=\"$options\""
+       fi
+
+       __gitcomp "$options"
+}
+
 # Variation of __gitcomp_nl () that appends to the existing list of
 # completion candidates, COMPREPLY.
 __gitcomp_nl_append ()
@@ -439,7 +478,7 @@ __git_refs ()
                        track=""
                        ;;
                *)
-                       for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do
+                       for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do
                                case "$i" in
                                $match*)
                                        if [ -e "$dir/$i" ]; then
@@ -1072,12 +1111,13 @@ __git_count_arguments ()
 }
 
 __git_whitespacelist="nowarn warn error error-all fix"
+__git_am_inprogress_options="--skip --continue --resolved --abort --quit --show-current-patch"
 
 _git_am ()
 {
        __git_find_repo_path
        if [ -d "$__git_repo_path"/rebase-apply ]; then
-               __gitcomp "--skip --continue --resolved --abort"
+               __gitcomp "$__git_am_inprogress_options"
                return
        fi
        case "$cur" in
@@ -1086,12 +1126,8 @@ _git_am ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --3way --committer-date-is-author-date --ignore-date
-                       --ignore-whitespace --ignore-space-change
-                       --interactive --keep --no-utf8 --signoff --utf8
-                       --whitespace= --scissors
-                       "
+               __gitcomp_builtin am "--no-utf8" \
+                       "$__git_am_inprogress_options"
                return
        esac
 }
@@ -1104,14 +1140,7 @@ _git_apply ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --stat --numstat --summary --check --index
-                       --cached --index-info --reverse --reject --unidiff-zero
-                       --apply --no-add --exclude=
-                       --ignore-whitespace --ignore-space-change
-                       --whitespace= --inaccurate-eof --verbose
-                       --recount --directory=
-                       "
+               __gitcomp_builtin apply
                return
        esac
 }
@@ -1120,10 +1149,7 @@ _git_add ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --interactive --refresh --patch --update --dry-run
-                       --ignore-errors --intent-to-add --force --edit --chmod=
-                       "
+               __gitcomp_builtin add
                return
        esac
 
@@ -1200,12 +1226,8 @@ _git_branch ()
                __git_complete_refs --cur="${cur##--set-upstream-to=}"
                ;;
        --*)
-               __gitcomp "
-                       --color --no-color --verbose --abbrev= --no-abbrev
-                       --track --no-track --contains --no-contains --merged --no-merged
-                       --set-upstream-to= --edit-description --list
-                       --unset-upstream --delete --move --copy --remotes
-                       --column --no-column --sort= --points-at
+               __gitcomp_builtin branch "--no-color --no-abbrev
+                       --no-track --no-column
                        "
                ;;
        *)
@@ -1247,11 +1269,7 @@ _git_checkout ()
                __gitcomp "diff3 merge" "" "${cur##--conflict=}"
                ;;
        --*)
-               __gitcomp "
-                       --quiet --ours --theirs --track --no-track --merge
-                       --conflict= --orphan --patch --detach --ignore-skip-worktree-bits
-                       --recurse-submodules --no-recurse-submodules
-                       "
+               __gitcomp_builtin checkout "--no-track --no-recurse-submodules"
                ;;
        *)
                # check if --track, --no-track, or --no-guess was specified
@@ -1268,19 +1286,28 @@ _git_checkout ()
 
 _git_cherry ()
 {
+       case "$cur" in
+       --*)
+               __gitcomp_builtin cherry
+               return
+       esac
+
        __git_complete_refs
 }
 
+__git_cherry_pick_inprogress_options="--continue --quit --abort"
+
 _git_cherry_pick ()
 {
        __git_find_repo_path
        if [ -f "$__git_repo_path"/CHERRY_PICK_HEAD ]; then
-               __gitcomp "--continue --quit --abort"
+               __gitcomp "$__git_cherry_pick_inprogress_options"
                return
        fi
        case "$cur" in
        --*)
-               __gitcomp "--edit --no-commit --signoff --strategy= --mainline"
+               __gitcomp_builtin cherry-pick "" \
+                       "$__git_cherry_pick_inprogress_options"
                ;;
        *)
                __git_complete_refs
@@ -1292,7 +1319,7 @@ _git_clean ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--dry-run --quiet"
+               __gitcomp_builtin clean
                return
                ;;
        esac
@@ -1305,26 +1332,7 @@ _git_clone ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --local
-                       --no-hardlinks
-                       --shared
-                       --reference
-                       --quiet
-                       --no-checkout
-                       --bare
-                       --mirror
-                       --origin
-                       --upload-pack
-                       --template=
-                       --depth
-                       --single-branch
-                       --no-tags
-                       --branch
-                       --recurse-submodules
-                       --no-single-branch
-                       --shallow-submodules
-                       "
+               __gitcomp_builtin clone "--no-single-branch"
                return
                ;;
        esac
@@ -1357,16 +1365,7 @@ _git_commit ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --all --author= --signoff --verify --no-verify
-                       --edit --no-edit
-                       --amend --include --only --interactive
-                       --dry-run --reuse-message= --reedit-message=
-                       --reset-author --file= --message= --template=
-                       --cleanup= --untracked-files --untracked-files=
-                       --verbose --quiet --fixup= --squash=
-                       --patch --short --date --allow-empty
-                       "
+               __gitcomp_builtin commit "--no-edit --verify"
                return
        esac
 
@@ -1382,11 +1381,7 @@ _git_describe ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --all --tags --contains --abbrev= --candidates=
-                       --exact-match --debug --long --match --always --first-parent
-                       --exclude --dirty --broken
-                       "
+               __gitcomp_builtin describe
                return
        esac
        __git_complete_refs
@@ -1411,7 +1406,7 @@ __git_diff_common_options="--stat --numstat --shortstat --summary
                        --dirstat --dirstat= --dirstat-by-file
                        --dirstat-by-file= --cumulative
                        --diff-algorithm=
-                       --submodule --submodule=
+                       --submodule --submodule= --ignore-submodules
 "
 
 _git_diff ()
@@ -1452,11 +1447,11 @@ _git_difftool ()
                return
                ;;
        --*)
-               __gitcomp "--cached --staged --pickaxe-all --pickaxe-regex
-                       --base --ours --theirs
-                       --no-renames --diff-filter= --find-copies-harder
-                       --relative --ignore-submodules
-                       --tool="
+               __gitcomp_builtin difftool "$__git_diff_common_options
+                                       --base --cached --ours --theirs
+                                       --pickaxe-all --pickaxe-regex
+                                       --relative --staged
+                                       "
                return
                ;;
        esac
@@ -1465,12 +1460,6 @@ _git_difftool ()
 
 __git_fetch_recurse_submodules="yes on-demand no"
 
-__git_fetch_options="
-       --quiet --verbose --append --upload-pack --force --keep --depth=
-       --tags --no-tags --all --prune --dry-run --recurse-submodules=
-       --unshallow --update-shallow
-"
-
 _git_fetch ()
 {
        case "$cur" in
@@ -1479,7 +1468,7 @@ _git_fetch ()
                return
                ;;
        --*)
-               __gitcomp "$__git_fetch_options"
+               __gitcomp_builtin fetch "--no-tags"
                return
                ;;
        esac
@@ -1516,20 +1505,7 @@ _git_fsck ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --tags --root --unreachable --cache --no-reflogs --full
-                       --strict --verbose --lost-found --name-objects
-                       "
-               return
-               ;;
-       esac
-}
-
-_git_gc ()
-{
-       case "$cur" in
-       --*)
-               __gitcomp "--prune --aggressive"
+               __gitcomp_builtin fsck "--no-reflogs"
                return
                ;;
        esac
@@ -1585,21 +1561,7 @@ _git_grep ()
 
        case "$cur" in
        --*)
-               __gitcomp "
-                       --cached
-                       --text --ignore-case --word-regexp --invert-match
-                       --full-name --line-number
-                       --extended-regexp --basic-regexp --fixed-strings
-                       --perl-regexp
-                       --threads
-                       --files-with-matches --name-only
-                       --files-without-match
-                       --max-depth
-                       --count
-                       --and --or --not --all-match
-                       --break --heading --show-function --function-context
-                       --untracked --no-index
-                       "
+               __gitcomp_builtin grep
                return
                ;;
        esac
@@ -1617,7 +1579,7 @@ _git_help ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--all --guides --info --man --web"
+               __gitcomp_builtin help
                return
                ;;
        esac
@@ -1640,7 +1602,7 @@ _git_init ()
                return
                ;;
        --*)
-               __gitcomp "--quiet --bare --template= --shared --shared="
+               __gitcomp_builtin init
                return
                ;;
        esac
@@ -1650,13 +1612,7 @@ _git_ls_files ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--cached --deleted --modified --others --ignored
-                       --stage --directory --no-empty-directory --unmerged
-                       --killed --exclude= --exclude-from=
-                       --exclude-per-directory= --exclude-standard
-                       --error-unmatch --with-tree= --full-name
-                       --abbrev --ignored --exclude-per-directory
-                       "
+               __gitcomp_builtin ls-files "--no-empty-directory"
                return
                ;;
        esac
@@ -1670,7 +1626,7 @@ _git_ls_remote ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--heads --tags --refs --get-url --symref"
+               __gitcomp_builtin ls-remote
                return
                ;;
        esac
@@ -1679,6 +1635,13 @@ _git_ls_remote ()
 
 _git_ls_tree ()
 {
+       case "$cur" in
+       --*)
+               __gitcomp_builtin ls-tree
+               return
+               ;;
+       esac
+
        __git_complete_file
 }
 
@@ -1794,22 +1757,18 @@ _git_log ()
        __git_complete_revlist
 }
 
-# Common merge options shared by git-merge(1) and git-pull(1).
-__git_merge_options="
-       --no-commit --no-stat --log --no-log --squash --strategy
-       --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit
-       --verify-signatures --no-verify-signatures --gpg-sign
-       --quiet --verbose --progress --no-progress
-"
-
 _git_merge ()
 {
        __git_complete_strategy && return
 
        case "$cur" in
        --*)
-               __gitcomp "$__git_merge_options
-                       --rerere-autoupdate --no-rerere-autoupdate --abort --continue"
+               __gitcomp_builtin merge "--no-rerere-autoupdate
+                               --no-commit --no-edit --no-ff
+                               --no-log --no-progress
+                               --no-squash --no-stat
+                               --no-verify-signatures
+                               "
                return
        esac
        __git_complete_refs
@@ -1833,7 +1792,7 @@ _git_merge_base ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--octopus --independent --is-ancestor --fork-point"
+               __gitcomp_builtin merge-base
                return
                ;;
        esac
@@ -1844,7 +1803,7 @@ _git_mv ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--dry-run"
+               __gitcomp_builtin mv
                return
                ;;
        esac
@@ -1858,19 +1817,14 @@ _git_mv ()
        fi
 }
 
-_git_name_rev ()
-{
-       __gitcomp "--tags --all --stdin"
-}
-
 _git_notes ()
 {
-       local subcommands='add append copy edit list prune remove show'
+       local subcommands='add append copy edit get-ref list merge prune remove show'
        local subcommand="$(__git_find_on_cmdline "$subcommands")"
 
        case "$subcommand,$cur" in
        ,--*)
-               __gitcomp '--ref'
+               __gitcomp_builtin notes
                ;;
        ,*)
                case "$prev" in
@@ -1882,21 +1836,14 @@ _git_notes ()
                        ;;
                esac
                ;;
-       add,--reuse-message=*|append,--reuse-message=*|\
-       add,--reedit-message=*|append,--reedit-message=*)
+       *,--reuse-message=*|*,--reedit-message=*)
                __git_complete_refs --cur="${cur#*=}"
                ;;
-       add,--*|append,--*)
-               __gitcomp '--file= --message= --reedit-message=
-                               --reuse-message='
+       *,--*)
+               __gitcomp_builtin notes_$subcommand
                ;;
-       copy,--*)
-               __gitcomp '--stdin'
-               ;;
-       prune,--*)
-               __gitcomp '--dry-run --verbose'
-               ;;
-       prune,*)
+       prune,*|get-ref,*)
+               # this command does not take a ref, do not complete it
                ;;
        *)
                case "$prev" in
@@ -1920,12 +1867,11 @@ _git_pull ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --rebase --no-rebase
-                       --autostash --no-autostash
-                       $__git_merge_options
-                       $__git_fetch_options
-               "
+               __gitcomp_builtin pull "--no-autostash --no-commit --no-edit
+                                       --no-ff --no-log --no-progress --no-rebase
+                                       --no-squash --no-stat --no-tags
+                                       --no-verify-signatures"
+
                return
                ;;
        esac
@@ -1976,12 +1922,7 @@ _git_push ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --all --mirror --tags --dry-run --force --verbose
-                       --quiet --prune --delete --follow-tags
-                       --receive-pack= --repo= --set-upstream
-                       --force-with-lease --force-with-lease= --recurse-submodules=
-               "
+               __gitcomp_builtin push
                return
                ;;
        esac
@@ -1992,11 +1933,11 @@ _git_rebase ()
 {
        __git_find_repo_path
        if [ -f "$__git_repo_path"/rebase-merge/interactive ]; then
-               __gitcomp "--continue --skip --abort --quit --edit-todo"
+               __gitcomp "--continue --skip --abort --quit --edit-todo --show-current-patch"
                return
        elif [ -d "$__git_repo_path"/rebase-apply ] || \
             [ -d "$__git_repo_path"/rebase-merge ]; then
-               __gitcomp "--continue --skip --abort --quit"
+               __gitcomp "--continue --skip --abort --quit --show-current-patch"
                return
        fi
        __git_complete_strategy && return
@@ -2016,6 +1957,7 @@ _git_rebase ()
                        --autostash --no-autostash
                        --verify --no-verify
                        --keep-empty --root --force-rebase --no-ff
+                       --rerere-autoupdate
                        --exec
                        "
 
@@ -2081,7 +2023,7 @@ _git_send_email ()
                        --compose --confirm= --dry-run --envelope-sender
                        --from --identity
                        --in-reply-to --no-chain-reply-to --no-signed-off-by-cc
-                       --no-suppress-from --no-thread --quiet
+                       --no-suppress-from --no-thread --quiet --reply-to
                        --signed-off-by-cc --smtp-pass --smtp-server
                        --smtp-server-port --smtp-encryption= --smtp-user
                        --subject --suppress-cc= --suppress-from --thread --to
@@ -2119,11 +2061,7 @@ _git_status ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --short --branch --porcelain --long --verbose
-                       --untracked-files= --ignore-submodules= --ignored
-                       --column= --no-column
-                       "
+               __gitcomp_builtin status "--no-column"
                return
                ;;
        esac
@@ -2265,14 +2203,7 @@ _git_config ()
        esac
        case "$cur" in
        --*)
-               __gitcomp "
-                       --system --global --local --file=
-                       --list --replace-all
-                       --get --get-all --get-regexp
-                       --add --unset --unset-all
-                       --remove-section --rename-section
-                       --name-only
-                       "
+               __gitcomp_builtin config
                return
                ;;
        branch.*.*)
@@ -2672,7 +2603,7 @@ _git_remote ()
        if [ -z "$subcommand" ]; then
                case "$cur" in
                --*)
-                       __gitcomp "--verbose"
+                       __gitcomp_builtin remote
                        ;;
                *)
                        __gitcomp "$subcommands"
@@ -2683,33 +2614,33 @@ _git_remote ()
 
        case "$subcommand,$cur" in
        add,--*)
-               __gitcomp "--track --master --fetch --tags --no-tags --mirror="
+               __gitcomp_builtin remote_add "--no-tags"
                ;;
        add,*)
                ;;
        set-head,--*)
-               __gitcomp "--auto --delete"
+               __gitcomp_builtin remote_set-head
                ;;
        set-branches,--*)
-               __gitcomp "--add"
+               __gitcomp_builtin remote_set-branches
                ;;
        set-head,*|set-branches,*)
                __git_complete_remote_or_refspec
                ;;
        update,--*)
-               __gitcomp "--prune"
+               __gitcomp_builtin remote_update
                ;;
        update,*)
                __gitcomp "$(__git_get_config_variables "remotes")"
                ;;
        set-url,--*)
-               __gitcomp "--push --add --delete"
+               __gitcomp_builtin remote_set-url
                ;;
        get-url,--*)
-               __gitcomp "--push --all"
+               __gitcomp_builtin remote_get-url
                ;;
        prune,--*)
-               __gitcomp "--dry-run"
+               __gitcomp_builtin remote_prune
                ;;
        *)
                __gitcomp_nl "$(__git_remotes)"
@@ -2721,7 +2652,7 @@ _git_replace ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--edit --graft --format= --list --delete"
+               __gitcomp_builtin replace
                return
                ;;
        esac
@@ -2745,26 +2676,26 @@ _git_reset ()
 
        case "$cur" in
        --*)
-               __gitcomp "--merge --mixed --hard --soft --patch --keep"
+               __gitcomp_builtin reset
                return
                ;;
        esac
        __git_complete_refs
 }
 
+__git_revert_inprogress_options="--continue --quit --abort"
+
 _git_revert ()
 {
        __git_find_repo_path
        if [ -f "$__git_repo_path"/REVERT_HEAD ]; then
-               __gitcomp "--continue --quit --abort"
+               __gitcomp "$__git_revert_inprogress_options"
                return
        fi
        case "$cur" in
        --*)
-               __gitcomp "
-                       --edit --mainline --no-edit --no-commit --signoff
-                       --strategy= --strategy-option=
-                       "
+               __gitcomp_builtin revert "--no-edit" \
+                       "$__git_revert_inprogress_options"
                return
                ;;
        esac
@@ -2775,7 +2706,7 @@ _git_rm ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--cached --dry-run --ignore-unmatch --quiet"
+               __gitcomp_builtin rm
                return
                ;;
        esac
@@ -2833,12 +2764,7 @@ _git_show_branch ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --all --remotes --topo-order --date-order --current --more=
-                       --list --independent --merge-base --no-name
-                       --color --no-color
-                       --sha1-name --sparse --topics --reflog
-                       "
+               __gitcomp_builtin show-branch "--no-color"
                return
                ;;
        esac
@@ -3045,7 +2971,7 @@ _git_tag ()
        while [ $c -lt $cword ]; do
                i="${words[c]}"
                case "$i" in
-               -d|-v)
+               -d|--delete|-v|--verify)
                        __gitcomp_direct "$(__git_tags "" "$cur" " ")"
                        return
                        ;;
@@ -3071,11 +2997,7 @@ _git_tag ()
 
        case "$cur" in
        --*)
-               __gitcomp "
-                       --list --delete --verify --annotate --message --file
-                       --sign --cleanup --local-user --force --column --sort=
-                       --contains --no-contains --points-at --merged --no-merged --create-reflog
-                       "
+               __gitcomp_builtin tag
                ;;
        esac
 }
@@ -3087,23 +3009,26 @@ _git_whatchanged ()
 
 _git_worktree ()
 {
-       local subcommands="add list lock prune unlock"
+       local subcommands="add list lock move prune remove unlock"
        local subcommand="$(__git_find_on_cmdline "$subcommands")"
        if [ -z "$subcommand" ]; then
                __gitcomp "$subcommands"
        else
                case "$subcommand,$cur" in
                add,--*)
-                       __gitcomp "--detach"
+                       __gitcomp_builtin worktree_add
                        ;;
                list,--*)
-                       __gitcomp "--porcelain"
+                       __gitcomp_builtin worktree_list
                        ;;
                lock,--*)
-                       __gitcomp "--reason"
+                       __gitcomp_builtin worktree_lock
                        ;;
                prune,--*)
-                       __gitcomp "--dry-run --expire --verbose"
+                       __gitcomp_builtin worktree_prune
+                       ;;
+               remove,--*)
+                       __gitcomp "--force"
                        ;;
                *)
                        ;;
@@ -3111,6 +3036,45 @@ _git_worktree ()
        fi
 }
 
+__git_complete_common () {
+       local command="$1"
+
+       case "$cur" in
+       --*)
+               __gitcomp_builtin "$command"
+               ;;
+       esac
+}
+
+__git_cmds_with_parseopt_helper=
+__git_support_parseopt_helper () {
+       test -n "$__git_cmds_with_parseopt_helper" ||
+               __git_cmds_with_parseopt_helper="$(__git --list-parseopt-builtins)"
+
+       case " $__git_cmds_with_parseopt_helper " in
+       *" $1 "*)
+               return 0
+               ;;
+       *)
+               return 1
+               ;;
+       esac
+}
+
+__git_complete_command () {
+       local command="$1"
+       local completion_func="_git_${command//-/_}"
+       if declare -f $completion_func >/dev/null 2>/dev/null; then
+               $completion_func
+               return 0
+       elif __git_support_parseopt_helper "$command"; then
+               __git_complete_common "$command"
+               return 0
+       else
+               return 1
+       fi
+}
+
 __git_main ()
 {
        local i c=1 command __git_dir __git_repo_path
@@ -3170,14 +3134,12 @@ __git_main ()
                return
        fi
 
-       local completion_func="_git_${command//-/_}"
-       declare -f $completion_func >/dev/null 2>/dev/null && $completion_func && return
+       __git_complete_command "$command" && return
 
        local expansion=$(__git_aliased_command "$command")
        if [ -n "$expansion" ]; then
                words[1]=$expansion
-               completion_func="_git_${expansion//-/_}"
-               declare -f $completion_func >/dev/null 2>/dev/null && $completion_func
+               __git_complete_command "$expansion"
        fi
 }
 
index 663992e530c82f891361ccc9b952099aff3d56ec..536754583b59945e984ad487b6e524e5d1de7056 100644 (file)
@@ -21,37 +21,82 @@ package DiffHighlight;
 my $COLOR = qr/\x1b\[[0-9;]*m/;
 my $BORING = qr/$COLOR|\s/;
 
-# The patch portion of git log -p --graph should only ever have preceding | and
-# not / or \ as merge history only shows up on the commit line.
-my $GRAPH = qr/$COLOR?\|$COLOR?\s+/;
-
 my @removed;
 my @added;
 my $in_hunk;
+my $graph_indent = 0;
 
 our $line_cb = sub { print @_ };
 our $flush_cb = sub { local $| = 1 };
 
-sub handle_line {
+# Count the visible width of a string, excluding any terminal color sequences.
+sub visible_width {
        local $_ = shift;
+       my $ret = 0;
+       while (length) {
+               if (s/^$COLOR//) {
+                       # skip colors
+               } elsif (s/^.//) {
+                       $ret++;
+               }
+       }
+       return $ret;
+}
+
+# Return a substring of $str, omitting $len visible characters from the
+# beginning, where terminal color sequences do not count as visible.
+sub visible_substr {
+       my ($str, $len) = @_;
+       while ($len > 0) {
+               if ($str =~ s/^$COLOR//) {
+                       next
+               }
+               $str =~ s/^.//;
+               $len--;
+       }
+       return $str;
+}
+
+sub handle_line {
+       my $orig = shift;
+       local $_ = $orig;
+
+       # match a graph line that begins a commit
+       if (/^(?:$COLOR?\|$COLOR?[ ])* # zero or more leading "|" with space
+                $COLOR?\*$COLOR?[ ]   # a "*" with its trailing space
+             (?:$COLOR?\|$COLOR?[ ])* # zero or more trailing "|"
+                                [ ]*  # trailing whitespace for merges
+           /x) {
+               my $graph_prefix = $&;
+
+               # We must flush before setting graph indent, since the
+               # new commit may be indented differently from what we
+               # queued.
+               flush();
+               $graph_indent = visible_width($graph_prefix);
+
+       } elsif ($graph_indent) {
+               if (length($_) < $graph_indent) {
+                       $graph_indent = 0;
+               } else {
+                       $_ = visible_substr($_, $graph_indent);
+               }
+       }
 
        if (!$in_hunk) {
-               $line_cb->($_);
-               $in_hunk = /^$GRAPH*$COLOR*\@\@ /;
+               $line_cb->($orig);
+               $in_hunk = /^$COLOR*\@\@ /;
        }
-       elsif (/^$GRAPH*$COLOR*-/) {
-               push @removed, $_;
+       elsif (/^$COLOR*-/) {
+               push @removed, $orig;
        }
-       elsif (/^$GRAPH*$COLOR*\+/) {
-               push @added, $_;
+       elsif (/^$COLOR*\+/) {
+               push @added, $orig;
        }
        else {
-               show_hunk(\@removed, \@added);
-               @removed = ();
-               @added = ();
-
-               $line_cb->($_);
-               $in_hunk = /^$GRAPH*$COLOR*[\@ ]/;
+               flush();
+               $line_cb->($orig);
+               $in_hunk = /^$COLOR*[\@ ]/;
        }
 
        # Most of the time there is enough output to keep things streaming,
@@ -71,6 +116,8 @@ sub flush {
        # Flush any queued hunk (this can happen when there is no trailing
        # context in the final diff of the input).
        show_hunk(\@removed, \@added);
+       @removed = ();
+       @added = ();
 }
 
 sub highlight_stdin {
@@ -226,8 +273,8 @@ sub is_pair_interesting {
        my $suffix_a = join('', @$a[($sa+1)..$#$a]);
        my $suffix_b = join('', @$b[($sb+1)..$#$b]);
 
-       return $prefix_a !~ /^$GRAPH*$COLOR*-$BORING*$/ ||
-              $prefix_b !~ /^$GRAPH*$COLOR*\+$BORING*$/ ||
+       return visible_substr($prefix_a, $graph_indent) !~ /^$COLOR*-$BORING*$/ ||
+              visible_substr($prefix_b, $graph_indent) !~ /^$COLOR*\+$BORING*$/ ||
               $suffix_a !~ /^$BORING*$/ ||
               $suffix_b !~ /^$BORING*$/;
 }
index 3b43dbed7488c5f4a5f05809725ffd0bcd7e61b5..f6f5195d00f6ca01b0751fc1c1b58055f0ef25ee 100755 (executable)
@@ -52,15 +52,17 @@ test_strip_patch_header () {
 # dh_test_setup_history generates a contrived graph such that we have at least
 # 1 nesting (E) and 2 nestings (F).
 #
-#            A branch
-#           /
-#      D---E---F master
+#        A---B master
+#       /
+#      D---E---F branch
 #
 #      git log --all --graph
 #      * commit
-#      |    A
+#      |    B
 #      | * commit
 #      | |    F
+#      * | commit
+#      | |    A
 #      | * commit
 #      |/
 #      |    E
@@ -68,24 +70,30 @@ test_strip_patch_header () {
 #           D
 #
 dh_test_setup_history () {
-       echo "file1" >file1 &&
-       echo "file2" >file2 &&
-       echo "file3" >file3 &&
-
-       cat file1 >file &&
+       echo file1 >file &&
        git add file &&
+       test_tick &&
        git commit -m "D" &&
 
        git checkout -b branch &&
-       cat file2 >file &&
-       git commit -a -m "A" &&
+       echo file2 >file &&
+       test_tick &&
+       git commit -a -m "E" &&
 
        git checkout master &&
-       cat file2 >file &&
-       git commit -a -m "E" &&
+       echo file2 >file &&
+       test_tick &&
+       git commit -a -m "A" &&
 
-       cat file3 >file &&
-       git commit -a -m "F"
+       git checkout branch &&
+       echo file3 >file &&
+       test_tick &&
+       git commit -a -m "F" &&
+
+       git checkout master &&
+       echo file3 >file &&
+       test_tick &&
+       git commit -a -m "B"
 }
 
 left_trim () {
@@ -246,16 +254,25 @@ test_expect_failure 'diff-highlight treats combining code points as a unit' '
 test_expect_success 'diff-highlight works with the --graph option' '
        dh_test_setup_history &&
 
-       # topo-order so that the order of the commits is the same as with --graph
+       # date-order so that the commits are interleaved for both
        # trim graph elements so we can do a diff
        # trim leading space because our trim_graph is not perfect
-       git log --branches -p --topo-order |
+       git log --branches -p --date-order |
                "$DIFF_HIGHLIGHT" | left_trim >graph.exp &&
-       git log --branches -p --graph |
+       git log --branches -p --date-order --graph |
                "$DIFF_HIGHLIGHT" | trim_graph | left_trim >graph.act &&
        test_cmp graph.exp graph.act
 '
 
+# Just reuse the previous graph test, but with --color.  Our trimming
+# doesn't know about color, so just sanity check that something got
+# highlighted.
+test_expect_success 'diff-highlight works with color graph' '
+       git log --branches -p --date-order --graph --color |
+               "$DIFF_HIGHLIGHT" | trim_graph | left_trim >graph &&
+       grep "\[7m" graph
+'
+
 # Most combined diffs won't meet diff-highlight's line-number filter. So we
 # create one here where one side drops a line and the other modifies it. That
 # should result in a diff like:
@@ -293,4 +310,32 @@ test_expect_success 'diff-highlight ignores combined diffs' '
        test_cmp expect actual
 '
 
+test_expect_success 'diff-highlight handles --graph with leading dash' '
+       cat >file <<-\EOF &&
+       before
+       the old line
+       -leading dash
+       EOF
+       git add file &&
+       git commit -m before &&
+
+       sed s/old/new/ <file >file.tmp &&
+       mv file.tmp file &&
+       git add file &&
+       git commit -m after &&
+
+       cat >expect <<-EOF &&
+       --- a/file
+       +++ b/file
+       @@ -1,3 +1,3 @@
+        before
+       -the ${CW}old${CR} line
+       +the ${CW}new${CR} line
+        -leading dash
+       EOF
+       git log --graph -p -1 | "$DIFF_HIGHLIGHT" >actual.raw &&
+       trim_graph <actual.raw | sed -n "/^---/,\$p" >actual &&
+       test_cmp expect actual
+'
+
 test_done
diff --git a/contrib/emacs/.gitignore b/contrib/emacs/.gitignore
deleted file mode 100644 (file)
index c531d98..0000000
+++ /dev/null
@@ -1 +0,0 @@
-*.elc
diff --git a/contrib/emacs/Makefile b/contrib/emacs/Makefile
deleted file mode 100644 (file)
index 24d9312..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-## Build and install stuff
-
-EMACS = emacs
-
-ELC = git.elc git-blame.elc
-INSTALL ?= install
-INSTALL_ELC = $(INSTALL) -m 644
-prefix ?= $(HOME)
-emacsdir = $(prefix)/share/emacs/site-lisp
-RM ?= rm -f
-
-all: $(ELC)
-
-install: all
-       $(INSTALL) -d $(DESTDIR)$(emacsdir)
-       $(INSTALL_ELC) $(ELC:.elc=.el) $(ELC) $(DESTDIR)$(emacsdir)
-
-%.elc: %.el
-       $(EMACS) -batch -f batch-byte-compile $<
-
-clean:; $(RM) $(ELC)
index 82368bdbfff199465ff8e8cbea49d99e5485e1d7..977a16f1e339faca937dfd1a60bb10395bfd59c4 100644 (file)
@@ -1,30 +1,24 @@
-This directory contains various modules for Emacs support.
+This directory used to contain various modules for Emacs support.
 
-To make the modules available to Emacs, you should add this directory
-to your load-path, and then require the modules you want. This can be
-done by adding to your .emacs something like this:
+These were added shortly after Git was first released. Since then,
+Emacs's own support for Git has become better than what these modes
+offered. There are also popular third-party Git modes such as Magit
+which offer replacements for these.
 
-  (add-to-list 'load-path ".../git/contrib/emacs")
-  (require 'git)
-  (require 'git-blame)
-
-
-The following modules are available:
+The following modules were available, and can be dug up from the Git
+history:
 
 * git.el:
 
-  Status manager that displays the state of all the files of the
-  project, and provides easy access to the most frequently used git
-  commands. The user interface is as far as possible compatible with
-  the pcl-cvs mode. It can be started with `M-x git-status'.
+  Wrapper for "git status" that provided access to other git commands.
+
+  Modern alternatives to this include Magit, and VC mode that ships
+  with Emacs.
 
 * git-blame.el:
 
-  Emacs implementation of incremental git-blame.  When you turn it on
-  while viewing a file, the editor buffer will be updated by setting
-  the background of individual lines to a color that reflects which
-  commit it comes from.  And when you move around the buffer, a
-  one-line summary will be shown in the echo area.
+  A wrapper for "git blame" written before Emacs's own vc-annotate
+  mode learned to invoke git-blame, which can be done via C-x v g.
 
 * vc-git.el:
 
diff --git a/contrib/emacs/git-blame.el b/contrib/emacs/git-blame.el
deleted file mode 100644 (file)
index 510e0f7..0000000
+++ /dev/null
@@ -1,483 +0,0 @@
-;;; git-blame.el --- Minor mode for incremental blame for Git  -*- coding: utf-8 -*-
-;;
-;; Copyright (C) 2007  David Kågedal
-;;
-;; Authors:    David Kågedal <davidk@lysator.liu.se>
-;; Created:    31 Jan 2007
-;; Message-ID: <87iren2vqx.fsf@morpheus.local>
-;; License:    GPL
-;; Keywords:   git, version control, release management
-;;
-;; Compatibility: Emacs21, Emacs22 and EmacsCVS
-;;                Git 1.5 and up
-
-;; This file is *NOT* part of GNU Emacs.
-;; This file is distributed under the same terms as GNU Emacs.
-
-;; This program is free software; you can redistribute it and/or
-;; modify it under the terms of the GNU General Public License as
-;; published by the Free Software Foundation; either version 2 of
-;; the License, or (at your option) any later version.
-
-;; This program is distributed in the hope that it will be
-;; useful, but WITHOUT ANY WARRANTY; without even the implied
-;; warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-;; PURPOSE.  See the GNU General Public License for more details.
-
-;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, see
-;; <http://www.gnu.org/licenses/>.
-
-;; http://www.fsf.org/copyleft/gpl.html
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;;; Commentary:
-;;
-;; Here is an Emacs implementation of incremental git-blame.  When you
-;; turn it on while viewing a file, the editor buffer will be updated by
-;; setting the background of individual lines to a color that reflects
-;; which commit it comes from.  And when you move around the buffer, a
-;; one-line summary will be shown in the echo area.
-
-;;; Installation:
-;;
-;; To use this package, put it somewhere in `load-path' (or add
-;; directory with git-blame.el to `load-path'), and add the following
-;; line to your .emacs:
-;;
-;;    (require 'git-blame)
-;;
-;; If you do not want to load this package before it is necessary, you
-;; can make use of the `autoload' feature, e.g. by adding to your .emacs
-;; the following lines
-;;
-;;    (autoload 'git-blame-mode "git-blame"
-;;              "Minor mode for incremental blame for Git." t)
-;;
-;; Then first use of `M-x git-blame-mode' would load the package.
-
-;;; Compatibility:
-;;
-;; It requires GNU Emacs 21 or later and Git 1.5.0 and up
-;;
-;; If you'are using Emacs 20, try changing this:
-;;
-;;            (overlay-put ovl 'face (list :background
-;;                                         (cdr (assq 'color (cddddr info)))))
-;;
-;; to
-;;
-;;            (overlay-put ovl 'face (cons 'background-color
-;;                                         (cdr (assq 'color (cddddr info)))))
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;;; Code:
-
-(eval-when-compile (require 'cl))                            ; to use `push', `pop'
-(require 'format-spec)
-
-(defface git-blame-prefix-face
-  '((((background dark)) (:foreground "gray"
-                          :background "black"))
-    (((background light)) (:foreground "gray"
-                           :background "white"))
-    (t (:weight bold)))
-  "The face used for the hash prefix."
-  :group 'git-blame)
-
-(defgroup git-blame nil
-  "A minor mode showing Git blame information."
-  :group 'git
-  :link '(function-link git-blame-mode))
-
-
-(defcustom git-blame-use-colors t
-  "Use colors to indicate commits in `git-blame-mode'."
-  :type 'boolean
-  :group 'git-blame)
-
-(defcustom git-blame-prefix-format
-  "%h %20A:"
-  "The format of the prefix added to each line in `git-blame'
-mode. The format is passed to `format-spec' with the following format keys:
-
-  %h - the abbreviated hash
-  %H - the full hash
-  %a - the author name
-  %A - the author email
-  %c - the committer name
-  %C - the committer email
-  %s - the commit summary
-"
-  :group 'git-blame)
-
-(defcustom git-blame-mouseover-format
-  "%h %a %A: %s"
-  "The format of the description shown when pointing at a line in
-`git-blame' mode. The format string is passed to `format-spec'
-with the following format keys:
-
-  %h - the abbreviated hash
-  %H - the full hash
-  %a - the author name
-  %A - the author email
-  %c - the committer name
-  %C - the committer email
-  %s - the commit summary
-"
-  :group 'git-blame)
-
-
-(defun git-blame-color-scale (&rest elements)
-  "Given a list, returns a list of triples formed with each
-elements of the list.
-
-a b => bbb bba bab baa abb aba aaa aab"
-  (let (result)
-    (dolist (a elements)
-      (dolist (b elements)
-        (dolist (c elements)
-          (setq result (cons (format "#%s%s%s" a b c) result)))))
-    result))
-
-;; (git-blame-color-scale "0c" "04" "24" "1c" "2c" "34" "14" "3c") =>
-;; ("#3c3c3c" "#3c3c14" "#3c3c34" "#3c3c2c" "#3c3c1c" "#3c3c24"
-;; "#3c3c04" "#3c3c0c" "#3c143c" "#3c1414" "#3c1434" "#3c142c" ...)
-
-(defmacro git-blame-random-pop (l)
-  "Select a random element from L and returns it. Also remove
-selected element from l."
-  ;; only works on lists with unique elements
-  `(let ((e (elt ,l (random (length ,l)))))
-     (setq ,l (remove e ,l))
-     e))
-
-(defvar git-blame-log-oneline-format
-  "format:[%cr] %cn: %s"
-  "*Formatting option used for describing current line in the minibuffer.
-
-This option is used to pass to git log --pretty= command-line option,
-and describe which commit the current line was made.")
-
-(defvar git-blame-dark-colors
-  (git-blame-color-scale "0c" "04" "24" "1c" "2c" "34" "14" "3c")
-  "*List of colors (format #RGB) to use in a dark environment.
-
-To check out the list, evaluate (list-colors-display git-blame-dark-colors).")
-
-(defvar git-blame-light-colors
-  (git-blame-color-scale "c4" "d4" "cc" "dc" "f4" "e4" "fc" "ec")
-  "*List of colors (format #RGB) to use in a light environment.
-
-To check out the list, evaluate (list-colors-display git-blame-light-colors).")
-
-(defvar git-blame-colors '()
-  "Colors used by git-blame. The list is built once when activating git-blame
-minor mode.")
-
-(defvar git-blame-ancient-color "dark green"
-  "*Color to be used for ancient commit.")
-
-(defvar git-blame-autoupdate t
-  "*Automatically update the blame display while editing")
-
-(defvar git-blame-proc nil
-  "The running git-blame process")
-(make-variable-buffer-local 'git-blame-proc)
-
-(defvar git-blame-overlays nil
-  "The git-blame overlays used in the current buffer.")
-(make-variable-buffer-local 'git-blame-overlays)
-
-(defvar git-blame-cache nil
-  "A cache of git-blame information for the current buffer")
-(make-variable-buffer-local 'git-blame-cache)
-
-(defvar git-blame-idle-timer nil
-  "An idle timer that updates the blame")
-(make-variable-buffer-local 'git-blame-cache)
-
-(defvar git-blame-update-queue nil
-  "A queue of update requests")
-(make-variable-buffer-local 'git-blame-update-queue)
-
-;; FIXME: docstrings
-(defvar git-blame-file nil)
-(defvar git-blame-current nil)
-
-(defvar git-blame-mode nil)
-(make-variable-buffer-local 'git-blame-mode)
-
-(defvar git-blame-mode-line-string " blame"
-  "String to display on the mode line when git-blame is active.")
-
-(or (assq 'git-blame-mode minor-mode-alist)
-    (setq minor-mode-alist
-         (cons '(git-blame-mode git-blame-mode-line-string) minor-mode-alist)))
-
-;;;###autoload
-(defun git-blame-mode (&optional arg)
-  "Toggle minor mode for displaying Git blame
-
-With prefix ARG, turn the mode on if ARG is positive."
-  (interactive "P")
-  (cond
-   ((null arg)
-    (if git-blame-mode (git-blame-mode-off) (git-blame-mode-on)))
-   ((> (prefix-numeric-value arg) 0) (git-blame-mode-on))
-   (t (git-blame-mode-off))))
-
-(defun git-blame-mode-on ()
-  "Turn on git-blame mode.
-
-See also function `git-blame-mode'."
-  (make-local-variable 'git-blame-colors)
-  (if git-blame-autoupdate
-      (add-hook 'after-change-functions 'git-blame-after-change nil t)
-    (remove-hook 'after-change-functions 'git-blame-after-change t))
-  (git-blame-cleanup)
-  (let ((bgmode (cdr (assoc 'background-mode (frame-parameters)))))
-    (if (eq bgmode 'dark)
-       (setq git-blame-colors git-blame-dark-colors)
-      (setq git-blame-colors git-blame-light-colors)))
-  (setq git-blame-cache (make-hash-table :test 'equal))
-  (setq git-blame-mode t)
-  (git-blame-run))
-
-(defun git-blame-mode-off ()
-  "Turn off git-blame mode.
-
-See also function `git-blame-mode'."
-  (git-blame-cleanup)
-  (if git-blame-idle-timer (cancel-timer git-blame-idle-timer))
-  (setq git-blame-mode nil))
-
-;;;###autoload
-(defun git-reblame ()
-  "Recalculate all blame information in the current buffer"
-  (interactive)
-  (unless git-blame-mode
-    (error "Git-blame is not active"))
-
-  (git-blame-cleanup)
-  (git-blame-run))
-
-(defun git-blame-run (&optional startline endline)
-  (if git-blame-proc
-      ;; Should maybe queue up a new run here
-      (message "Already running git blame")
-    (let ((display-buf (current-buffer))
-          (blame-buf (get-buffer-create
-                      (concat " git blame for " (buffer-name))))
-          (args '("--incremental" "--contents" "-")))
-      (if startline
-          (setq args (append args
-                             (list "-L" (format "%d,%d" startline endline)))))
-      (setq args (append args
-                         (list (file-name-nondirectory buffer-file-name))))
-      (setq git-blame-proc
-            (apply 'start-process
-                   "git-blame" blame-buf
-                   "git" "blame"
-                   args))
-      (with-current-buffer blame-buf
-        (erase-buffer)
-        (make-local-variable 'git-blame-file)
-        (make-local-variable 'git-blame-current)
-        (setq git-blame-file display-buf)
-        (setq git-blame-current nil))
-      (set-process-filter git-blame-proc 'git-blame-filter)
-      (set-process-sentinel git-blame-proc 'git-blame-sentinel)
-      (process-send-region git-blame-proc (point-min) (point-max))
-      (process-send-eof git-blame-proc))))
-
-(defun remove-git-blame-text-properties (start end)
-  (let ((modified (buffer-modified-p))
-        (inhibit-read-only t))
-    (remove-text-properties start end '(point-entered nil))
-    (set-buffer-modified-p modified)))
-
-(defun git-blame-cleanup ()
-  "Remove all blame properties"
-    (mapc 'delete-overlay git-blame-overlays)
-    (setq git-blame-overlays nil)
-    (remove-git-blame-text-properties (point-min) (point-max)))
-
-(defun git-blame-update-region (start end)
-  "Rerun blame to get updates between START and END"
-  (let ((overlays (overlays-in start end)))
-    (while overlays
-      (let ((overlay (pop overlays)))
-        (if (< (overlay-start overlay) start)
-            (setq start (overlay-start overlay)))
-        (if (> (overlay-end overlay) end)
-            (setq end (overlay-end overlay)))
-        (setq git-blame-overlays (delete overlay git-blame-overlays))
-        (delete-overlay overlay))))
-  (remove-git-blame-text-properties start end)
-  ;; We can be sure that start and end are at line breaks
-  (git-blame-run (1+ (count-lines (point-min) start))
-                 (count-lines (point-min) end)))
-
-(defun git-blame-sentinel (proc status)
-  (with-current-buffer (process-buffer proc)
-    (with-current-buffer git-blame-file
-      (setq git-blame-proc nil)
-      (if git-blame-update-queue
-          (git-blame-delayed-update))))
-  ;;(kill-buffer (process-buffer proc))
-  ;;(message "git blame finished")
-  )
-
-(defvar in-blame-filter nil
-  "Non-nil while `git-blame-filter' is parsing output, to prevent reentrancy.")
-
-(defun git-blame-filter (proc str)
-  (with-current-buffer (process-buffer proc)
-    (save-excursion
-      (goto-char (process-mark proc))
-      (insert-before-markers str)
-      (goto-char (point-min))
-      (unless in-blame-filter
-        (let ((more t)
-              (in-blame-filter t))
-          (while more
-            (setq more (git-blame-parse))))))))
-
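-;; `git blame --incremental' emits one group per blame entry: a header
-;; line "<40-hex commit> <source line> <result line> <num lines>",
-;; followed by "key value" lines (author, summary, ...), terminated by
-;; the "filename" line.  Each call below consumes one such line from
-;; the process buffer.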
-(defun git-blame-parse ()
-  (cond ((looking-at "\\([0-9a-f]\\{40\\}\\) \\([0-9]+\\) \\([0-9]+\\) \\([0-9]+\\)\n")
-         (let ((hash (match-string 1))
-               (src-line (string-to-number (match-string 2)))
-               (res-line (string-to-number (match-string 3)))
-               (num-lines (string-to-number (match-string 4))))
-           (delete-region (point) (match-end 0))
-           (setq git-blame-current (list (git-blame-new-commit hash)
-                                         src-line res-line num-lines)))
-         t)
-        ((looking-at "\\([a-z-]+\\) \\(.+\\)\n")
-         (let ((key (match-string 1))
-               (value (match-string 2)))
-           (delete-region (point) (match-end 0))
-           (git-blame-add-info (car git-blame-current) key value)
-           (when (string= key "filename")
-             (git-blame-create-overlay (car git-blame-current)
-                                       (caddr git-blame-current)
-                                       (cadddr git-blame-current))
-             (setq git-blame-current nil)))
-         t)
-        (t
-         nil)))
-
-(defun git-blame-new-commit (hash)
-  (with-current-buffer git-blame-file
-    (or (gethash hash git-blame-cache)
-        ;; Assign a random color to each new commit info
-        ;; Take care not to select the same color multiple times
-        (let* ((color (if git-blame-colors
-                          (git-blame-random-pop git-blame-colors)
-                        git-blame-ancient-color))
-               (info `(,hash (color . ,color))))
-          (puthash hash info git-blame-cache)
-          info))))
-
-(defun git-blame-create-overlay (info start-line num-lines)
-  (with-current-buffer git-blame-file
-    (save-excursion
-      (let ((inhibit-point-motion-hooks t)
-            (inhibit-modification-hooks t))
-        (goto-char (point-min))
-        (forward-line (1- start-line))
-        (let* ((start (point))
-               (end (progn (forward-line num-lines) (point)))
-               (ovl (make-overlay start end))
-               (hash (car info))
-               (spec `((?h . ,(substring hash 0 6))
-                       (?H . ,hash)
-                       (?a . ,(git-blame-get-info info 'author))
-                       (?A . ,(git-blame-get-info info 'author-mail))
-                       (?c . ,(git-blame-get-info info 'committer))
-                       (?C . ,(git-blame-get-info info 'committer-mail))
-                       (?s . ,(git-blame-get-info info 'summary)))))
-          (push ovl git-blame-overlays)
-          (overlay-put ovl 'git-blame info)
-          (overlay-put ovl 'help-echo
-                       (format-spec git-blame-mouseover-format spec))
-          (if git-blame-use-colors
-              (overlay-put ovl 'face (list :background
-                                           (cdr (assq 'color (cdr info))))))
-          (overlay-put ovl 'line-prefix
-                       (propertize (format-spec git-blame-prefix-format spec)
-                                   'face 'git-blame-prefix-face)))))))
-
-(defun git-blame-add-info (info key value)
-  (nconc info (list (cons (intern key) value))))
-
-(defun git-blame-get-info (info key)
-  (cdr (assq key (cdr info))))
-
-(defun git-blame-current-commit ()
-  (let ((info (get-char-property (point) 'git-blame)))
-    (if info
-        (car info)
-      (error "No commit info"))))
-
-(defun git-describe-commit (hash)
-  (with-temp-buffer
-    (call-process "git" nil t nil
-                  "log" "-1"
-                 (concat "--pretty=" git-blame-log-oneline-format)
-                  hash)
-    (buffer-substring (point-min) (point-max))))
-
-(defvar git-blame-last-identification nil)
-(make-variable-buffer-local 'git-blame-last-identification)
-(defun git-blame-identify (&optional hash)
-  (interactive)
-  (let ((info (gethash (or hash (git-blame-current-commit)) git-blame-cache)))
-    (when (and info (not (eq info git-blame-last-identification)))
-      (message "%s" (nth 4 info))
-      (setq git-blame-last-identification info))))
-
-;; (defun git-blame-after-save ()
-;;   (when git-blame-mode
-;;     (git-blame-cleanup)
-;;     (git-blame-run)))
-;; (add-hook 'after-save-hook 'git-blame-after-save)
-
-(defun git-blame-after-change (start end length)
-  (when git-blame-mode
-    (git-blame-enq-update start end)))
-
-(defvar git-blame-last-update nil)
-(make-variable-buffer-local 'git-blame-last-update)
-(defun git-blame-enq-update (start end)
-  "Mark the region between START and END as needing blame update"
-  ;; Try to be smart and avoid multiple callouts for sequential
-  ;; editing
-  (cond ((and git-blame-last-update
-              (= start (cdr git-blame-last-update)))
-         (setcdr git-blame-last-update end))
-        ((and git-blame-last-update
-              (= end (car git-blame-last-update)))
-         (setcar git-blame-last-update start))
-        (t
-         (setq git-blame-last-update (cons start end))
-         (setq git-blame-update-queue (nconc git-blame-update-queue
-                                             (list git-blame-last-update)))))
-  (unless (or git-blame-proc git-blame-idle-timer)
-    (setq git-blame-idle-timer
-          (run-with-idle-timer 0.5 nil 'git-blame-delayed-update))))
-
-(defun git-blame-delayed-update ()
-  (setq git-blame-idle-timer nil)
-  (if git-blame-update-queue
-      (let ((first (pop git-blame-update-queue))
-            (inhibit-point-motion-hooks t))
-        (git-blame-update-region (car first) (cdr first)))))
-
-(provide 'git-blame)
-
-;;; git-blame.el ends here
diff --git a/contrib/emacs/git.el b/contrib/emacs/git.el
deleted file mode 100644 (file)
index 97919f2..0000000
+++ /dev/null
@@ -1,1704 +0,0 @@
-;;; git.el --- A user interface for git
-
-;; Copyright (C) 2005, 2006, 2007, 2008, 2009 Alexandre Julliard <julliard@winehq.org>
-
-;; Version: 1.0
-
-;; This program is free software; you can redistribute it and/or
-;; modify it under the terms of the GNU General Public License as
-;; published by the Free Software Foundation; either version 2 of
-;; the License, or (at your option) any later version.
-;;
-;; This program is distributed in the hope that it will be
-;; useful, but WITHOUT ANY WARRANTY; without even the implied
-;; warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-;; PURPOSE.  See the GNU General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, see
-;; <http://www.gnu.org/licenses/>.
-
-;;; Commentary:
-
-;; This file contains an interface for the git version control
-;; system. It provides easy access to the most frequently used git
-;; commands. The user interface is as far as possible identical to
-;; that of the PCL-CVS mode.
-;;
-;; To install: put this file on the load-path and place the following
-;; in your .emacs file:
-;;
-;;    (require 'git)
-;;
-;; To start: `M-x git-status'
-;;
-;; TODO
-;;  - diff against other branch
-;;  - renaming files from the status buffer
-;;  - creating tags
-;;  - fetch/pull
-;;  - revlist browser
-;;  - git-show-branch browser
-;;
-
-;;; Compatibility:
-;;
-;; This file works on GNU Emacs 21 or later. It may work on older
-;; versions but this is not guaranteed.
-;;
-;; It may work on XEmacs 21, provided that you first install the ewoc
-;; and log-edit packages.
-;;
-
-(eval-when-compile (require 'cl))
-(require 'ewoc)
-(require 'log-edit)
-(require 'easymenu)
-
-
-;;;; Customizations
-;;;; ------------------------------------------------------------
-
-(defgroup git nil
-  "A user interface for the git versioning system."
-  :group 'tools)
-
-(defcustom git-committer-name nil
-  "User name to use for commits.
-The default is to fall back to the repository config,
-then to `add-log-full-name' and then to `user-full-name'."
-  :group 'git
-  :type '(choice (const :tag "Default" nil)
-                 (string :tag "Name")))
-
-(defcustom git-committer-email nil
-  "Email address to use for commits.
-The default is to fall back to the git repository config,
-then to `add-log-mailing-address' and then to `user-mail-address'."
-  :group 'git
-  :type '(choice (const :tag "Default" nil)
-                 (string :tag "Email")))
-
-(defcustom git-commits-coding-system nil
-  "Default coding system for the log message of git commits."
-  :group 'git
-  :type '(choice (const :tag "From repository config" nil)
-                 (coding-system)))
-
-(defcustom git-append-signed-off-by nil
-  "Whether to append a Signed-off-by line to the commit message before editing."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-reuse-status-buffer t
-  "Whether `git-status' should try to reuse an existing buffer
-if there is already one that displays the same directory."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-per-dir-ignore-file ".gitignore"
-  "Name of the per-directory ignore file."
-  :group 'git
-  :type 'string)
-
-(defcustom git-show-uptodate nil
-  "Whether to display up-to-date files."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-show-ignored nil
-  "Whether to display ignored files."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-show-unknown t
-  "Whether to display unknown files."
-  :group 'git
-  :type 'boolean)
-
-
-(defface git-status-face
-  '((((class color) (background light)) (:foreground "purple"))
-    (((class color) (background dark)) (:foreground "salmon")))
-  "Git mode face used to highlight added and modified files."
-  :group 'git)
-
-(defface git-unmerged-face
-  '((((class color) (background light)) (:foreground "red" :bold t))
-    (((class color) (background dark)) (:foreground "red" :bold t)))
-  "Git mode face used to highlight unmerged files."
-  :group 'git)
-
-(defface git-unknown-face
-  '((((class color) (background light)) (:foreground "goldenrod" :bold t))
-    (((class color) (background dark)) (:foreground "goldenrod" :bold t)))
-  "Git mode face used to highlight unknown files."
-  :group 'git)
-
-(defface git-uptodate-face
-  '((((class color) (background light)) (:foreground "grey60"))
-    (((class color) (background dark)) (:foreground "grey40")))
-  "Git mode face used to highlight up-to-date files."
-  :group 'git)
-
-(defface git-ignored-face
-  '((((class color) (background light)) (:foreground "grey60"))
-    (((class color) (background dark)) (:foreground "grey40")))
-  "Git mode face used to highlight ignored files."
-  :group 'git)
-
-(defface git-mark-face
-  '((((class color) (background light)) (:foreground "red" :bold t))
-    (((class color) (background dark)) (:foreground "tomato" :bold t)))
-  "Git mode face used for the file marks."
-  :group 'git)
-
-(defface git-header-face
-  '((((class color) (background light)) (:foreground "blue"))
-    (((class color) (background dark)) (:foreground "blue")))
-  "Git mode face used for commit headers."
-  :group 'git)
-
-(defface git-separator-face
-  '((((class color) (background light)) (:foreground "brown"))
-    (((class color) (background dark)) (:foreground "brown")))
-  "Git mode face used for commit separator."
-  :group 'git)
-
-(defface git-permission-face
-  '((((class color) (background light)) (:foreground "green" :bold t))
-    (((class color) (background dark)) (:foreground "green" :bold t)))
-  "Git mode face used for permission changes."
-  :group 'git)
-
-
-;;;; Utilities
-;;;; ------------------------------------------------------------
-
-(defconst git-log-msg-separator "--- log message follows this line ---")
-
-(defvar git-log-edit-font-lock-keywords
-  `(("^\\(Author:\\|Date:\\|Merge:\\|Signed-off-by:\\)\\(.*\\)$"
-     (1 font-lock-keyword-face)
-     (2 font-lock-function-name-face))
-    (,(concat "^\\(" (regexp-quote git-log-msg-separator) "\\)$")
-     (1 font-lock-comment-face))))
-
-(defun git-get-env-strings (env)
-  "Build a list of NAME=VALUE strings from a list of environment strings."
-  (mapcar (lambda (entry) (concat (car entry) "=" (cdr entry))) env))
-
-(defun git-call-process (buffer &rest args)
-  "Wrapper for call-process that sets environment strings."
-  (apply #'call-process "git" nil buffer nil args))
-
-(defun git-call-process-display-error (&rest args)
-  "Wrapper for call-process that displays error messages."
-  (let* ((dir default-directory)
-         (buffer (get-buffer-create "*Git Command Output*"))
-         (ok (with-current-buffer buffer
-               (let ((default-directory dir)
-                     (buffer-read-only nil))
-                 (erase-buffer)
-                 (eq 0 (apply #'git-call-process (list buffer t) args))))))
-    (unless ok (display-message-or-buffer buffer))
-    ok))
-
-(defun git-call-process-string (&rest args)
-  "Wrapper for call-process that returns the process output as a string,
-or nil if the git command failed."
-  (with-temp-buffer
-    (and (eq 0 (apply #'git-call-process t args))
-         (buffer-string))))
-
-(defun git-call-process-string-display-error (&rest args)
-  "Wrapper for call-process that displays error message and returns
-the process output as a string, or nil if the git command failed."
-  (with-temp-buffer
-    (if (eq 0 (apply #'git-call-process (list t t) args))
-        (buffer-string)
-      (display-message-or-buffer (current-buffer))
-      nil)))
-
-(defun git-run-process-region (buffer start end program args)
-  "Run a git process with a buffer region as input."
-  (let ((output-buffer (current-buffer))
-        (dir default-directory))
-    (with-current-buffer buffer
-      (cd dir)
-      (apply #'call-process-region start end program
-             nil (list output-buffer t) nil args))))
-
-(defun git-run-command-buffer (buffer-name &rest args)
-  "Run a git command, sending the output to a buffer named BUFFER-NAME."
-  (let ((dir default-directory)
-        (buffer (get-buffer-create buffer-name)))
-    (message "Running git %s..." (car args))
-    (with-current-buffer buffer
-      (let ((default-directory dir)
-            (buffer-read-only nil))
-        (erase-buffer)
-        (apply #'git-call-process buffer args)))
-    (message "Running git %s...done" (car args))
-    buffer))
-
-(defun git-run-command-region (buffer start end env &rest args)
-  "Run a git command with specified buffer region as input."
-  (with-temp-buffer
-    (if (eq 0 (if env
-                  (git-run-process-region
-                   buffer start end "env"
-                   (append (git-get-env-strings env) (list "git") args))
-                (git-run-process-region buffer start end "git" args)))
-        (buffer-string)
-      (display-message-or-buffer (current-buffer))
-      nil)))
-
-(defun git-run-hook (hook env &rest args)
-  "Run a git hook and display its output if any."
-  (let ((dir default-directory)
-        (hook-name (expand-file-name (concat ".git/hooks/" hook))))
-    (or (not (file-executable-p hook-name))
-        (let (status (buffer (get-buffer-create "*Git Hook Output*")))
-          (with-current-buffer buffer
-            (erase-buffer)
-            (cd dir)
-            (setq status
-                  (if env
-                      (apply #'call-process "env" nil (list buffer t) nil
-                             (append (git-get-env-strings env) (list hook-name) args))
-                    (apply #'call-process hook-name nil (list buffer t) nil args))))
-          (display-message-or-buffer buffer)
-          (eq 0 status)))))
-
-(defun git-get-string-sha1 (string)
-  "Read a SHA1 from the specified string."
-  (and string
-       (string-match "[0-9a-f]\\{40\\}" string)
-       (match-string 0 string)))
-
-(defun git-get-committer-name ()
-  "Return the name to use as GIT_COMMITTER_NAME."
-  ; copied from log-edit
-  (or git-committer-name
-      (git-config "user.name")
-      (and (boundp 'add-log-full-name) add-log-full-name)
-      (and (fboundp 'user-full-name) (user-full-name))
-      (and (boundp 'user-full-name) user-full-name)))
-
-(defun git-get-committer-email ()
-  "Return the email address to use as GIT_COMMITTER_EMAIL."
-  ; copied from log-edit
-  (or git-committer-email
-      (git-config "user.email")
-      (and (boundp 'add-log-mailing-address) add-log-mailing-address)
-      (and (fboundp 'user-mail-address) (user-mail-address))
-      (and (boundp 'user-mail-address) user-mail-address)))
-
-(defun git-get-commits-coding-system ()
-  "Return the coding system to use for commits."
-  (let ((repo-config (git-config "i18n.commitencoding")))
-    (or git-commits-coding-system
-        (and repo-config
-             (fboundp 'locale-charset-to-coding-system)
-             (locale-charset-to-coding-system repo-config))
-      'utf-8)))
-
-(defun git-get-logoutput-coding-system ()
-  "Return the coding system used for git-log output."
-  (let ((repo-config (or (git-config "i18n.logoutputencoding")
-                         (git-config "i18n.commitencoding"))))
-    (or git-commits-coding-system
-        (and repo-config
-             (fboundp 'locale-charset-to-coding-system)
-             (locale-charset-to-coding-system repo-config))
-      'utf-8)))
-
-(defun git-escape-file-name (name)
-  "Escape a file name if necessary."
-  (if (string-match "[\n\t\"\\]" name)
-      (concat "\""
-              (mapconcat (lambda (c)
-                   (case c
-                     (?\n "\\n")
-                     (?\t "\\t")
-                     (?\\ "\\\\")
-                     (?\" "\\\"")
-                     (t (char-to-string c))))
-                 name "")
-              "\"")
-    name))
-
-(defun git-success-message (text files)
-  "Print a success message after having handled FILES."
-  (let ((n (length files)))
-    (if (equal n 1)
-        (message "%s %s" text (car files))
-      (message "%s %d files" text n))))
-
-(defun git-get-top-dir (dir)
-  "Retrieve the top-level directory of a git tree."
-  (let ((cdup (with-output-to-string
-                (with-current-buffer standard-output
-                  (cd dir)
-                  (unless (eq 0 (git-call-process t "rev-parse" "--show-cdup"))
-                    (error "cannot find top-level git tree for %s." dir))))))
-    (expand-file-name (concat (file-name-as-directory dir)
-                              (car (split-string cdup "\n"))))))
-
-;stolen from pcl-cvs
-(defun git-append-to-ignore (file)
-  "Add a file name to the ignore file in its directory."
-  (let* ((fullname (expand-file-name file))
-         (dir (file-name-directory fullname))
-         (name (file-name-nondirectory fullname))
-         (ignore-name (expand-file-name git-per-dir-ignore-file dir))
-         (created (not (file-exists-p ignore-name))))
-  (save-window-excursion
-    (set-buffer (find-file-noselect ignore-name))
-    (goto-char (point-max))
-    (unless (zerop (current-column)) (insert "\n"))
-    (insert "/" name "\n")
-    (sort-lines nil (point-min) (point-max))
-    (save-buffer))
-  (when created
-    (git-call-process nil "update-index" "--add" "--" (file-relative-name ignore-name)))
-  (git-update-status-files (list (file-relative-name ignore-name)))))
-
-; propertize definition for XEmacs, stolen from erc-compat
-(eval-when-compile
-  (unless (fboundp 'propertize)
-    (defun propertize (string &rest props)
-      (let ((string (copy-sequence string)))
-        (while props
-          (put-text-property 0 (length string) (nth 0 props) (nth 1 props) string)
-          (setq props (cddr props)))
-        string))))
-
-;;;; Wrappers for basic git commands
-;;;; ------------------------------------------------------------
-
-(defun git-rev-parse (rev)
-  "Parse a revision name and return its SHA1."
-  (git-get-string-sha1
-   (git-call-process-string "rev-parse" rev)))
-
-(defun git-config (key)
-  "Retrieve the value associated to KEY in the git repository config file."
-  (let ((str (git-call-process-string "config" key)))
-    (and str (car (split-string str "\n")))))
-
-(defun git-symbolic-ref (ref)
-  "Wrapper for the git-symbolic-ref command."
-  (let ((str (git-call-process-string "symbolic-ref" ref)))
-    (and str (car (split-string str "\n")))))
-
-(defun git-update-ref (ref newval &optional oldval reason)
-  "Update a reference by calling git-update-ref."
-  (let ((args (and oldval (list oldval))))
-    (when newval (push newval args))
-    (push ref args)
-    (when reason
-     (push reason args)
-     (push "-m" args))
-    (unless newval (push "-d" args))
-    (apply 'git-call-process-display-error "update-ref" args)))
-
-(defun git-for-each-ref (&rest specs)
-  "Return a list of refs using git-for-each-ref.
-Each entry is a cons of (SHORT-NAME . FULL-NAME)."
-  (let (refs)
-    (with-temp-buffer
-      (apply #'git-call-process t "for-each-ref" "--format=%(refname)" specs)
-      (goto-char (point-min))
-      (while (re-search-forward "^[^/\n]+/[^/\n]+/\\(.+\\)$" nil t)
-       (push (cons (match-string 1) (match-string 0)) refs)))
-    (nreverse refs)))
-
-(defun git-read-tree (tree &optional index-file)
-  "Read a tree into the index file."
-  (let ((process-environment
-         (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file))) process-environment)))
-    (apply 'git-call-process-display-error "read-tree" (if tree (list tree)))))
-
-(defun git-write-tree (&optional index-file)
-  "Call git-write-tree and return the resulting tree SHA1 as a string."
-  (let ((process-environment
-         (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file))) process-environment)))
-    (git-get-string-sha1
-     (git-call-process-string-display-error "write-tree"))))
-
-(defun git-commit-tree (buffer tree parent)
-  "Create a commit and possibly update HEAD.
-Create a commit with the message in BUFFER using the tree with hash TREE.
-Use PARENT as the parent of the new commit. If PARENT is the current \"HEAD\",
-update the \"HEAD\" reference to the new commit."
-  (let ((author-name (git-get-committer-name))
-        (author-email (git-get-committer-email))
-        (subject "commit (initial): ")
-        author-date log-start log-end args coding-system-for-write)
-    (when parent
-      (setq subject "commit: ")
-      (push "-p" args)
-      (push parent args))
-    (with-current-buffer buffer
-      (goto-char (point-min))
-      (if
-          (setq log-start (re-search-forward (concat "^" (regexp-quote git-log-msg-separator) "\n") nil t))
-          (save-restriction
-            (narrow-to-region (point-min) log-start)
-            (goto-char (point-min))
-            (when (re-search-forward "^Author: +\\(.*?\\) *<\\(.*\\)> *$" nil t)
-              (setq author-name (match-string 1)
-                    author-email (match-string 2)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Date: +\\(.*\\)$" nil t)
-              (setq author-date (match-string 1)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Merge: +\\(.*\\)" nil t)
-              (setq subject "commit (merge): ")
-              (dolist (parent (split-string (match-string 1) " +" t))
-                (push "-p" args)
-                (push parent args))))
-        (setq log-start (point-min)))
-      (setq log-end (point-max))
-      (goto-char log-start)
-      (when (re-search-forward ".*$" nil t)
-        (setq subject (concat subject (match-string 0))))
-      (setq coding-system-for-write buffer-file-coding-system))
-    (let ((commit
-           (git-get-string-sha1
-            (let ((env `(("GIT_AUTHOR_NAME" . ,author-name)
-                         ("GIT_AUTHOR_EMAIL" . ,author-email)
-                         ("GIT_COMMITTER_NAME" . ,(git-get-committer-name))
-                         ("GIT_COMMITTER_EMAIL" . ,(git-get-committer-email)))))
-              (when author-date (push `("GIT_AUTHOR_DATE" . ,author-date) env))
-              (apply #'git-run-command-region
-                     buffer log-start log-end env
-                     "commit-tree" tree (nreverse args))))))
-      (when commit (git-update-ref "HEAD" commit parent subject))
-      commit)))
-
-(defun git-empty-db-p ()
-  "Check if the git db is empty (no commit done yet)."
-  (not (eq 0 (git-call-process nil "rev-parse" "--verify" "HEAD"))))
-
-(defun git-get-merge-heads ()
-  "Retrieve the merge heads from the MERGE_HEAD file if present."
-  (let (heads)
-    (when (file-readable-p ".git/MERGE_HEAD")
-      (with-temp-buffer
-        (insert-file-contents ".git/MERGE_HEAD" nil nil nil t)
-        (goto-char (point-min))
-        (while (re-search-forward "[0-9a-f]\\{40\\}" nil t)
-          (push (match-string 0) heads))))
-    (nreverse heads)))
-
-(defun git-get-commit-description (commit)
-  "Get a one-line description of COMMIT."
-  (let ((coding-system-for-read (git-get-logoutput-coding-system)))
-    (let ((descr (git-call-process-string "log" "--max-count=1" "--pretty=oneline" commit)))
-      (if (and descr (string-match "\\`\\([0-9a-f]\\{40\\}\\) *\\(.*\\)$" descr))
-          (concat (substring (match-string 1 descr) 0 10) " - " (match-string 2 descr))
-        descr))))
-
-;;;; File info structure
-;;;; ------------------------------------------------------------
-
-; fileinfo structure stolen from pcl-cvs
-(defstruct (git-fileinfo
-            (:copier nil)
-            (:constructor git-create-fileinfo (state name &optional old-perm new-perm rename-state orig-name marked))
-            (:conc-name git-fileinfo->))
-  marked              ;; t/nil
-  state               ;; current state
-  name                ;; file name
-  old-perm new-perm   ;; permission flags
-  rename-state        ;; rename or copy state
-  orig-name           ;; original name for renames or copies
-  needs-update        ;; whether file needs to be updated
-  needs-refresh)      ;; whether file needs to be refreshed
-
-(defvar git-status nil)
-
-(defun git-set-fileinfo-state (info state)
-  "Set the state of a file info."
-  (unless (eq (git-fileinfo->state info) state)
-    (setf (git-fileinfo->state info) state
-         (git-fileinfo->new-perm info) (git-fileinfo->old-perm info)
-          (git-fileinfo->rename-state info) nil
-          (git-fileinfo->orig-name info) nil
-          (git-fileinfo->needs-update info) nil
-          (git-fileinfo->needs-refresh info) t)))
-
-(defun git-status-filenames-map (status func files &rest args)
-  "Apply FUNC to the status files names in the FILES list.
-The list must be sorted."
-  (when files
-    (let ((file (pop files))
-          (node (ewoc-nth status 0)))
-      (while (and file node)
-        (let* ((info (ewoc-data node))
-               (name (git-fileinfo->name info)))
-          (if (string-lessp name file)
-              (setq node (ewoc-next status node))
-            (if (string-equal name file)
-                (apply func info args))
-            (setq file (pop files))))))))
-
-(defun git-set-filenames-state (status files state)
-  "Set the state of a list of named files. The list must be sorted"
-  (when files
-    (git-status-filenames-map status #'git-set-fileinfo-state files state)
-    (unless state  ;; delete files whose state has been set to nil
-      (ewoc-filter status (lambda (info) (git-fileinfo->state info))))))
-
-(defun git-state-code (code)
-  "Convert from a string to a added/deleted/modified state."
-  (case (string-to-char code)
-    (?M 'modified)
-    (?? 'unknown)
-    (?A 'added)
-    (?D 'deleted)
-    (?U 'unmerged)
-    (?T 'modified)
-    (t nil)))
-
-(defun git-status-code-as-string (code)
-  "Format a git status code as string."
-  (case code
-    ('modified (propertize "Modified" 'face 'git-status-face))
-    ('unknown  (propertize "Unknown " 'face 'git-unknown-face))
-    ('added    (propertize "Added   " 'face 'git-status-face))
-    ('deleted  (propertize "Deleted " 'face 'git-status-face))
-    ('unmerged (propertize "Unmerged" 'face 'git-unmerged-face))
-    ('uptodate (propertize "Uptodate" 'face 'git-uptodate-face))
-    ('ignored  (propertize "Ignored " 'face 'git-ignored-face))
-    (t "?       ")))
-
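-;; In git tree-entry modes the object type lives in the high bits:
-;; (lsh mode -9) gives 64 for a regular file (100644/100755), 80 for a
-;; symlink (120000) and 112 for a subproject/gitlink (160000).  The
-;; value 72, produced with (lsh ?\110 9) in git-run-ls-files, is an
-;; internal marker for directories, not a real git mode.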
-(defun git-file-type-as-string (old-perm new-perm)
-  "Return a string describing the file type based on its permissions."
-  (let* ((old-type (lsh (or old-perm 0) -9))
-        (new-type (lsh (or new-perm 0) -9))
-        (str (case new-type
-               (64  ;; file
-                (case old-type
-                  (64 nil)
-                  (80 "   (type change symlink -> file)")
-                  (112 "   (type change subproject -> file)")))
-                (80  ;; symlink
-                 (case old-type
-                   (64 "   (type change file -> symlink)")
-                   (112 "   (type change subproject -> symlink)")
-                   (t "   (symlink)")))
-                 (112  ;; subproject
-                  (case old-type
-                    (64 "   (type change file -> subproject)")
-                    (80 "   (type change symlink -> subproject)")
-                    (t "   (subproject)")))
-                  (72 nil)  ;; directory (internal, not a real git state)
-                 (0  ;; deleted or unknown
-                  (case old-type
-                    (80 "   (symlink)")
-                    (112 "   (subproject)")))
-                 (t (format "   (unknown type %o)" new-type)))))
-    (cond (str (propertize str 'face 'git-status-face))
-          ((eq new-type 72) "/")
-          (t ""))))
-
-(defun git-rename-as-string (info)
-  "Return a string describing the copy or rename associated with INFO, or an empty string if none."
-  (let ((state (git-fileinfo->rename-state info)))
-    (if state
-        (propertize
-         (concat "   ("
-                 (if (eq state 'copy) "copied from "
-                   (if (eq (git-fileinfo->state info) 'added) "renamed from "
-                     "renamed to "))
-                 (git-escape-file-name (git-fileinfo->orig-name info))
-                 ")") 'face 'git-status-face)
-      "")))
-
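-;; ?\111 is the character whose code is octal 111, i.e. the mask 0111
-;; covering the three execute bits of a file mode.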
-(defun git-permissions-as-string (old-perm new-perm)
-  "Format a permission change as string."
-  (propertize
-   (if (or (not old-perm)
-           (not new-perm)
-           (eq 0 (logand ?\111 (logxor old-perm new-perm))))
-       "  "
-     (if (eq 0 (logand ?\111 old-perm)) "+x" "-x"))
-  'face 'git-permission-face))
-
-(defun git-fileinfo-prettyprint (info)
-  "Pretty-printer for the git-fileinfo structure."
-  (let ((old-perm (git-fileinfo->old-perm info))
-       (new-perm (git-fileinfo->new-perm info)))
-    (insert (concat "   " (if (git-fileinfo->marked info) (propertize "*" 'face 'git-mark-face) " ")
-                   " " (git-status-code-as-string (git-fileinfo->state info))
-                   " " (git-permissions-as-string old-perm new-perm)
-                   "  " (git-escape-file-name (git-fileinfo->name info))
-                   (git-file-type-as-string old-perm new-perm)
-                   (git-rename-as-string info)))))
-
-(defun git-update-node-fileinfo (node info)
-  "Update the fileinfo of the specified node. The names are assumed to match already."
-  (let ((data (ewoc-data node)))
-    (setf
-     ;; preserve the marked flag
-     (git-fileinfo->marked info) (git-fileinfo->marked data)
-     (git-fileinfo->needs-update data) nil)
-    (when (not (equal info data))
-      (setf (git-fileinfo->needs-refresh info) t
-            (ewoc-data node) info))))
-
-(defun git-insert-info-list (status infolist files)
-  "Insert a sorted list of file infos in the status buffer, replacing existing ones if any."
-  (let* ((info (pop infolist))
-         (node (ewoc-nth status 0))
-         (name (and info (git-fileinfo->name info)))
-         remaining)
-    (while info
-      (let ((nodename (and node (git-fileinfo->name (ewoc-data node)))))
-        (while (and files (string-lessp (car files) name))
-          (push (pop files) remaining))
-        (when (and files (string-equal (car files) name))
-          (setq files (cdr files)))
-        (cond ((not nodename)
-               (setq node (ewoc-enter-last status info))
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info))))
-              ((string-lessp nodename name)
-               (setq node (ewoc-next status node)))
-              ((string-equal nodename name)
-               ;; preserve the marked flag
-               (git-update-node-fileinfo node info)
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info))))
-              (t
-               (setq node (ewoc-enter-before status node info))
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info)))))))
-    (nconc (nreverse remaining) files)))
-
-(defun git-run-diff-index (status files)
-  "Run git-diff-index on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "diff-index" "-z" "-M" "HEAD" "--" files)
-      (goto-char (point-min))
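-      ;; With -z, diff-index prints ":<old mode> <new mode> <old sha1> <new sha1> <status>"
-      ;; followed by NUL-terminated paths; rename/copy entries (R/C) carry
-      ;; a similarity score and two paths (source, then destination).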
-      (while (re-search-forward
-             ":\\([0-7]\\{6\\}\\) \\([0-7]\\{6\\}\\) [0-9a-f]\\{40\\} [0-9a-f]\\{40\\} \\(\\([ADMUT]\\)\0\\([^\0]+\\)\\|\\([CR]\\)[0-9]*\0\\([^\0]+\\)\0\\([^\0]+\\)\\)\0"
-              nil t 1)
-        (let ((old-perm (string-to-number (match-string 1) 8))
-              (new-perm (string-to-number (match-string 2) 8))
-              (state (or (match-string 4) (match-string 6)))
-              (name (or (match-string 5) (match-string 7)))
-              (new-name (match-string 8)))
-          (if new-name  ; copy or rename
-              (if (eq ?C (string-to-char state))
-                  (push (git-create-fileinfo 'added new-name old-perm new-perm 'copy name) infolist)
-                (push (git-create-fileinfo 'deleted name 0 0 'rename new-name) infolist)
-                (push (git-create-fileinfo 'added new-name old-perm new-perm 'rename name) infolist))
-            (push (git-create-fileinfo (git-state-code state) name old-perm new-perm) infolist)))))
-    (setq infolist (sort (nreverse infolist)
-                         (lambda (info1 info2)
-                           (string-lessp (git-fileinfo->name info1)
-                                         (git-fileinfo->name info2)))))
-    (git-insert-info-list status infolist files)))
-
-(defun git-find-status-file (status file)
-  "Find a given file in the status ewoc and return its node."
-  (let ((node (ewoc-nth status 0)))
-    (while (and node (not (string= file (git-fileinfo->name (ewoc-data node)))))
-      (setq node (ewoc-next status node)))
-    node))
-
-(defun git-run-ls-files (status files default-state &rest options)
-  "Run git-ls-files on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "ls-files" "-z" (append options (list "--") files))
-      (goto-char (point-min))
-      (while (re-search-forward "\\([^\0]*?\\)\\(/?\\)\0" nil t 1)
-        (let ((name (match-string 1)))
-          (push (git-create-fileinfo default-state name 0
-                                     (if (string-equal "/" (match-string 2)) (lsh ?\110 9) 0))
-                infolist))))
-    (setq infolist (nreverse infolist))  ;; assume it is sorted already
-    (git-insert-info-list status infolist files)))
-
-(defun git-run-ls-files-cached (status files default-state)
-  "Run git-ls-files -c on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "ls-files" "-z" "-s" "-c" "--" files)
-      (goto-char (point-min))
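-      ;; "ls-files -s -z" entries look like "<mode> <sha1> <stage>\t<name>"
-      ;; followed by a NUL; only stage-0 (non-conflicted) entries are matched here.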
-      (while (re-search-forward "\\([0-7]\\{6\\}\\) [0-9a-f]\\{40\\} 0\t\\([^\0]+\\)\0" nil t)
-       (let* ((new-perm (string-to-number (match-string 1) 8))
-              (old-perm (if (eq default-state 'added) 0 new-perm))
-              (name (match-string 2)))
-         (push (git-create-fileinfo default-state name old-perm new-perm) infolist))))
-    (setq infolist (nreverse infolist))  ;; assume it is sorted already
-    (git-insert-info-list status infolist files)))
-
-(defun git-run-ls-unmerged (status files)
-  "Run git-ls-files -u on FILES and parse the results into STATUS."
-  (with-temp-buffer
-    (apply #'git-call-process t "ls-files" "-z" "-u" "--" files)
-    (goto-char (point-min))
-    (let (unmerged-files)
-      (while (re-search-forward "[0-7]\\{6\\} [0-9a-f]\\{40\\} [123]\t\\([^\0]+\\)\0" nil t)
-        (push (match-string 1) unmerged-files))
-      (setq unmerged-files (nreverse unmerged-files))  ;; assume it is sorted already
-      (git-set-filenames-state status unmerged-files 'unmerged))))
-
-(defun git-get-exclude-files ()
-  "Get the list of exclude files to pass to git-ls-files."
-  (let (files
-        (config (git-config "core.excludesfile")))
-    (when (file-readable-p ".git/info/exclude")
-      (push ".git/info/exclude" files))
-    (when (and config (file-readable-p config))
-      (push config files))
-    files))
-
-(defun git-run-ls-files-with-excludes (status files default-state &rest options)
-  "Run git-ls-files on FILES with appropriate --exclude-from options."
-  (let ((exclude-files (git-get-exclude-files)))
-    (apply #'git-run-ls-files status files default-state "--directory" "--no-empty-directory"
-           (concat "--exclude-per-directory=" git-per-dir-ignore-file)
-           (append options (mapcar (lambda (f) (concat "--exclude-from=" f)) exclude-files)))))
-
-(defun git-update-status-files (&optional files mark-files)
-  "Update the status of FILES from the index.
-The FILES list must be sorted."
-  (unless git-status (error "Not in git-status buffer."))
-  ;; set the needs-update flag on existing files
-  (if files
-      (git-status-filenames-map
-       git-status (lambda (info) (setf (git-fileinfo->needs-update info) t)) files)
-    (ewoc-map (lambda (info) (setf (git-fileinfo->needs-update info) t) nil) git-status)
-    (git-call-process nil "update-index" "--refresh")
-    (when git-show-uptodate
-      (git-run-ls-files-cached git-status nil 'uptodate)))
-  (let ((remaining-files
-          (if (git-empty-db-p) ; we need some special handling for an empty db
-             (git-run-ls-files-cached git-status files 'added)
-            (git-run-diff-index git-status files))))
-    (git-run-ls-unmerged git-status files)
-    (when (or remaining-files (and git-show-unknown (not files)))
-      (setq remaining-files (git-run-ls-files-with-excludes git-status remaining-files 'unknown "-o")))
-    (when (or remaining-files (and git-show-ignored (not files)))
-      (setq remaining-files (git-run-ls-files-with-excludes git-status remaining-files 'ignored "-o" "-i")))
-    (unless files
-      (setq remaining-files (git-get-filenames (ewoc-collect git-status #'git-fileinfo->needs-update))))
-    (when remaining-files
-      (setq remaining-files (git-run-ls-files-cached git-status remaining-files 'uptodate)))
-    (git-set-filenames-state git-status remaining-files nil)
-    (when mark-files (git-mark-files git-status files))
-    (git-refresh-files)
-    (git-refresh-ewoc-hf git-status)))
-
-(defun git-mark-files (status files)
-  "Mark all the specified FILES, and unmark the others."
-  (let ((file (and files (pop files)))
-        (node (ewoc-nth status 0)))
-    (while node
-      (let ((info (ewoc-data node)))
-        (if (and file (string-equal (git-fileinfo->name info) file))
-            (progn
-              (unless (git-fileinfo->marked info)
-                (setf (git-fileinfo->marked info) t)
-                (setf (git-fileinfo->needs-refresh info) t))
-              (setq file (pop files))
-              (setq node (ewoc-next status node)))
-          (when (git-fileinfo->marked info)
-            (setf (git-fileinfo->marked info) nil)
-            (setf (git-fileinfo->needs-refresh info) t))
-          (if (and file (string-lessp file (git-fileinfo->name info)))
-              (setq file (pop files))
-            (setq node (ewoc-next status node))))))))
-
-(defun git-marked-files ()
-  "Return a list of all marked files, or if none a list containing just the file at cursor position."
-  (unless git-status (error "Not in git-status buffer."))
-  (or (ewoc-collect git-status (lambda (info) (git-fileinfo->marked info)))
-      (list (ewoc-data (ewoc-locate git-status)))))
-
-(defun git-marked-files-state (&rest states)
-  "Return a sorted list of marked files that are in the specified states."
-  (let ((files (git-marked-files))
-        result)
-    (dolist (info files)
-      (when (memq (git-fileinfo->state info) states)
-        (push info result)))
-    (nreverse result)))
-
-(defun git-refresh-files ()
-  "Refresh all files that need it and clear the needs-refresh flag."
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map
-   (lambda (info)
-     (let ((refresh (git-fileinfo->needs-refresh info)))
-       (setf (git-fileinfo->needs-refresh info) nil)
-       refresh))
-   git-status)
-  ; move back to goal column
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-refresh-ewoc-hf (status)
-  "Refresh the ewoc header and footer."
-  (let ((branch (git-symbolic-ref "HEAD"))
-        (head (if (git-empty-db-p) "Nothing committed yet"
-                (git-get-commit-description "HEAD")))
-        (merge-heads (git-get-merge-heads)))
-    (ewoc-set-hf status
-                 (format "Directory:  %s\nBranch:     %s\nHead:       %s%s\n"
-                         default-directory
-                         (if branch
-                             (if (string-match "^refs/heads/" branch)
-                                 (substring branch (match-end 0))
-                               branch)
-                           "none (detached HEAD)")
-                         head
-                         (if merge-heads
-                             (concat "\nMerging:    "
-                                     (mapconcat (lambda (str) (git-get-commit-description str)) merge-heads "\n            "))
-                           ""))
-                 (if (ewoc-nth status 0) "" "    No changes."))))
-
-(defun git-get-filenames (files)
-  "Return the names of the fileinfo entries in FILES."
-  (mapcar (lambda (info) (git-fileinfo->name info)) files))
-
-(defun git-update-index (index-file files)
-  "Run git-update-index on a list of files."
-  (let ((process-environment (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file)))
-                                     process-environment))
-        added deleted modified)
-    (dolist (info files)
-      (case (git-fileinfo->state info)
-        ('added (push info added))
-        ('deleted (push info deleted))
-        ('modified (push info modified))))
-    (and
-     (or (not added) (apply #'git-call-process-display-error "update-index" "--add" "--" (git-get-filenames added)))
-     (or (not deleted) (apply #'git-call-process-display-error "update-index" "--remove" "--" (git-get-filenames deleted)))
-     (or (not modified) (apply #'git-call-process-display-error "update-index" "--" (git-get-filenames modified))))))
-
-(defun git-run-pre-commit-hook ()
-  "Run the pre-commit hook if any."
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((files (git-marked-files-state 'added 'deleted 'modified)))
-    (or (not files)
-        (not (file-executable-p ".git/hooks/pre-commit"))
-        (let ((index-file (make-temp-file "gitidx")))
-          (unwind-protect
-            (let ((head-tree (unless (git-empty-db-p) (git-rev-parse "HEAD^{tree}"))))
-              (git-read-tree head-tree index-file)
-              (git-update-index index-file files)
-              (git-run-hook "pre-commit" `(("GIT_INDEX_FILE" . ,index-file))))
-          (delete-file index-file))))))
-
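-;; Committing goes through a temporary index file: HEAD's tree is read
-;; into it and only the marked files are staged on top of it, so the new
-;; commit contains exactly those files.  The default index is updated in
-;; parallel so the working tree state stays consistent.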
-(defun git-do-commit ()
-  "Perform the actual commit using the current buffer as log message."
-  (interactive)
-  (let ((buffer (current-buffer))
-        (index-file (make-temp-file "gitidx")))
-    (with-current-buffer log-edit-parent-buffer
-      (if (git-marked-files-state 'unmerged)
-          (message "You cannot commit unmerged files, resolve them first.")
-        (unwind-protect
-            (let ((files (git-marked-files-state 'added 'deleted 'modified))
-                  head tree head-tree)
-              (unless (git-empty-db-p)
-                (setq head (git-rev-parse "HEAD")
-                      head-tree (git-rev-parse "HEAD^{tree}")))
-              (message "Running git commit...")
-              (when
-                  (and
-                   (git-read-tree head-tree index-file)
-                   (git-update-index nil files)         ;update both the default index
-                   (git-update-index index-file files)  ;and the temporary one
-                   (setq tree (git-write-tree index-file)))
-                (if (or (not (string-equal tree head-tree))
-                        (yes-or-no-p "The tree was not modified, do you really want to perform an empty commit? "))
-                    (let ((commit (git-commit-tree buffer tree head)))
-                      (when commit
-                        (condition-case nil (delete-file ".git/MERGE_HEAD") (error nil))
-                        (condition-case nil (delete-file ".git/MERGE_MSG") (error nil))
-                        (with-current-buffer buffer (erase-buffer))
-                        (git-update-status-files (git-get-filenames files))
-                        (git-call-process nil "rerere")
-                        (git-call-process nil "gc" "--auto")
-                        (message "Committed %s." commit)
-                        (git-run-hook "post-commit" nil)))
-                  (message "Commit aborted."))))
-          (delete-file index-file))))))
-
-
-;;;; Interactive functions
-;;;; ------------------------------------------------------------
-
-(defun git-mark-file ()
-  "Mark the file that the cursor is on and move to the next one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) t)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-next git-status 1)))
-
-(defun git-unmark-file ()
-  "Unmark the file that the cursor is on and move to the next one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) nil)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-next git-status 1)))
-
-(defun git-unmark-file-up ()
-  "Unmark the file that the cursor is on and move to the previous one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) nil)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-prev git-status 1)))
-
-(defun git-mark-all ()
-  "Mark all files."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (unless (git-fileinfo->marked info)
-                             (setf (git-fileinfo->marked info) t))) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-unmark-all ()
-  "Unmark all files."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (when (git-fileinfo->marked info)
-                             (setf (git-fileinfo->marked info) nil)
-                             t)) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-toggle-all-marks ()
-  "Toggle all file marks."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (setf (git-fileinfo->marked info) (not (git-fileinfo->marked info))) t) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-next-file (&optional n)
-  "Move the selection down N files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-goto-next git-status n))
-
-(defun git-prev-file (&optional n)
-  "Move the selection up N files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-goto-prev git-status n))
-
-(defun git-next-unmerged-file (&optional n)
-  "Move the selection down N unmerged files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((last (ewoc-locate git-status))
-         (node (ewoc-next git-status last)))
-    (while (and node (> n 0))
-      (when (eq 'unmerged (git-fileinfo->state (ewoc-data node)))
-        (setq n (1- n))
-        (setq last node))
-      (setq node (ewoc-next git-status node)))
-    (ewoc-goto-node git-status last)))
-
-(defun git-prev-unmerged-file (&optional n)
-  "Move the selection up N unmerged files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((last (ewoc-locate git-status))
-         (node (ewoc-prev git-status last)))
-    (while (and node (> n 0))
-      (when (eq 'unmerged (git-fileinfo->state (ewoc-data node)))
-        (setq n (1- n))
-        (setq last node))
-      (setq node (ewoc-prev git-status node)))
-    (ewoc-goto-node git-status last)))
-
-(defun git-insert-file (file)
-  "Insert file(s) into the git-status buffer."
-  (interactive "fInsert file: ")
-  (git-update-status-files (list (file-relative-name file))))
-
-(defun git-add-file ()
-  "Add marked file(s) to the index cache."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'unknown 'ignored 'unmerged))))
-    ;; FIXME: add support for directories
-    (unless files
-      (push (file-relative-name (read-file-name "File to add: " nil nil t)) files))
-    (when (apply 'git-call-process-display-error "update-index" "--add" "--" files)
-      (git-update-status-files files)
-      (git-success-message "Added" files))))
-
-(defun git-ignore-file ()
-  "Add marked file(s) to the ignore list."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'unknown))))
-    (unless files
-      (push (file-relative-name (read-file-name "File to ignore: " nil nil t)) files))
-    (dolist (f files) (git-append-to-ignore f))
-    (git-update-status-files files)
-    (git-success-message "Ignored" files)))
-
-(defun git-remove-file ()
-  "Remove the marked file(s)."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'added 'modified 'unknown 'uptodate 'ignored))))
-    (unless files
-      (push (file-relative-name (read-file-name "File to remove: " nil nil t)) files))
-    (if (yes-or-no-p
-         (if (cdr files)
-             (format "Remove %d files? " (length files))
-           (format "Remove %s? " (car files))))
-        (progn
-          (dolist (name files)
-            (ignore-errors
-              (if (file-directory-p name)
-                  (delete-directory name)
-                (delete-file name))))
-          (when (apply 'git-call-process-display-error "update-index" "--remove" "--" files)
-            (git-update-status-files files)
-            (git-success-message "Removed" files)))
-      (message "Aborting"))))
-
-(defun git-revert-file ()
-  "Revert changes to the marked file(s)."
-  (interactive)
-  (let ((files (git-marked-files-state 'added 'deleted 'modified 'unmerged))
-        added modified)
-    (when (and files
-               (yes-or-no-p
-                (if (cdr files)
-                    (format "Revert %d files? " (length files))
-                  (format "Revert %s? " (git-fileinfo->name (car files))))))
-      (dolist (info files)
-        (case (git-fileinfo->state info)
-          ('added (push (git-fileinfo->name info) added))
-          ('deleted (push (git-fileinfo->name info) modified))
-          ('unmerged (push (git-fileinfo->name info) modified))
-          ('modified (push (git-fileinfo->name info) modified))))
-      ;; check if a buffer contains one of the files and isn't saved
-      (dolist (file modified)
-        (let ((buffer (get-file-buffer file)))
-          (when (and buffer (buffer-modified-p buffer))
-            (error "Buffer %s is modified. Please kill or save modified buffers before reverting." (buffer-name buffer)))))
-      (let ((ok (and
-                 (or (not added)
-                     (apply 'git-call-process-display-error "update-index" "--force-remove" "--" added))
-                 (or (not modified)
-                     (apply 'git-call-process-display-error "checkout" "HEAD" modified))))
-            (names (git-get-filenames files)))
-        (git-update-status-files names)
-        (when ok
-          (dolist (file modified)
-            (let ((buffer (get-file-buffer file)))
-              (when buffer (with-current-buffer buffer (revert-buffer t t t)))))
-          (git-success-message "Reverted" names))))))
-
-(defun git-remove-handled ()
-  "Remove handled files from the status list."
-  (interactive)
-  (ewoc-filter git-status
-               (lambda (info)
-                 (case (git-fileinfo->state info)
-                   ('ignored git-show-ignored)
-                   ('uptodate git-show-uptodate)
-                   ('unknown git-show-unknown)
-                   (t t))))
-  (unless (ewoc-nth git-status 0)  ; refresh header if list is empty
-    (git-refresh-ewoc-hf git-status)))
-
-(defun git-toggle-show-uptodate ()
-  "Toggle the option for showing up-to-date files."
-  (interactive)
-  (if (setq git-show-uptodate (not git-show-uptodate))
-      (git-refresh-status)
-    (git-remove-handled)))
-
-(defun git-toggle-show-ignored ()
-  "Toggle the option for showing ignored files."
-  (interactive)
-  (if (setq git-show-ignored (not git-show-ignored))
-      (progn
-        (message "Inserting ignored files...")
-        (git-run-ls-files-with-excludes git-status nil 'ignored "-o" "-i")
-        (git-refresh-files)
-        (git-refresh-ewoc-hf git-status)
-        (message "Inserting ignored files...done"))
-    (git-remove-handled)))
-
-(defun git-toggle-show-unknown ()
-  "Toggle the option for showing unknown files."
-  (interactive)
-  (if (setq git-show-unknown (not git-show-unknown))
-      (progn
-        (message "Inserting unknown files...")
-        (git-run-ls-files-with-excludes git-status nil 'unknown "-o")
-        (git-refresh-files)
-        (git-refresh-ewoc-hf git-status)
-        (message "Inserting unknown files...done"))
-    (git-remove-handled)))
-
-(defun git-expand-directory (info)
-  "Expand the directory represented by INFO to list its files."
-  (when (eq (lsh (git-fileinfo->new-perm info) -9) ?\110)
-    (let ((dir (git-fileinfo->name info)))
-      (git-set-filenames-state git-status (list dir) nil)
-      (git-run-ls-files-with-excludes git-status (list (concat dir "/")) 'unknown "-o")
-      (git-refresh-files)
-      (git-refresh-ewoc-hf git-status)
-      t)))
-
-(defun git-setup-diff-buffer (buffer)
-  "Setup a buffer for displaying a diff."
-  (let ((dir default-directory))
-    (with-current-buffer buffer
-      (diff-mode)
-      (goto-char (point-min))
-      (setq default-directory dir)
-      (setq buffer-read-only t)))
-  (display-buffer buffer)
-  ; shrink window only if it displays the status buffer
-  (when (eq (window-buffer) (current-buffer))
-    (shrink-window-if-larger-than-buffer)))
-
-(defun git-diff-file ()
-  "Diff the marked file(s) against HEAD."
-  (interactive)
-  (let ((files (git-marked-files)))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M" "HEAD" "--" (git-get-filenames files)))))
-
-(defun git-diff-file-merge-head (arg)
-  "Diff the marked file(s) against the first merge head (or the nth one with a numeric prefix)."
-  (interactive "p")
-  (let ((files (git-marked-files))
-        (merge-heads (git-get-merge-heads)))
-    (unless merge-heads (error "No merge in progress"))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M"
-            (or (nth (1- arg) merge-heads) "HEAD") "--" (git-get-filenames files)))))
-
-(defun git-diff-unmerged-file (stage)
-  "Diff the marked unmerged file(s) against the specified stage."
-  (let ((files (git-marked-files)))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-files" "-p" stage "--" (git-get-filenames files)))))
-
-(defun git-diff-file-base ()
-  "Diff the marked unmerged file(s) against the common base file."
-  (interactive)
-  (git-diff-unmerged-file "-1"))
-
-(defun git-diff-file-mine ()
-  "Diff the marked unmerged file(s) against my pre-merge version."
-  (interactive)
-  (git-diff-unmerged-file "-2"))
-
-(defun git-diff-file-other ()
-  "Diff the marked unmerged file(s) against the other's pre-merge version."
-  (interactive)
-  (git-diff-unmerged-file "-3"))
-
-(defun git-diff-file-combined ()
-  "Do a combined diff of the marked unmerged file(s)."
-  (interactive)
-  (git-diff-unmerged-file "-c"))
-
-(defun git-diff-file-idiff ()
-  "Perform an interactive diff on the current file."
-  (interactive)
-  (let ((files (git-marked-files-state 'added 'deleted 'modified)))
-    (unless (eq 1 (length files))
-      (error "Cannot perform an interactive diff on multiple files."))
-    (let* ((filename (car (git-get-filenames files)))
-           (buff1 (find-file-noselect filename))
-           (buff2 (git-run-command-buffer (concat filename ".~HEAD~") "cat-file" "blob" (concat "HEAD:" filename))))
-      (ediff-buffers buff1 buff2))))
-
-(defun git-log-file ()
-  "Display a log of changes to the marked file(s)."
-  (interactive)
-  (let* ((files (git-marked-files))
-         (coding-system-for-read git-commits-coding-system)
-         (buffer (apply #'git-run-command-buffer "*git-log*" "rev-list" "--pretty" "HEAD" "--" (git-get-filenames files))))
-    (with-current-buffer buffer
-      ; (git-log-mode)  FIXME: implement log mode
-      (goto-char (point-min))
-      (setq buffer-read-only t))
-    (display-buffer buffer)))
-
-(defun git-log-edit-files ()
-  "Return a list of marked files for use in the log-edit buffer."
-  (with-current-buffer log-edit-parent-buffer
-    (git-get-filenames (git-marked-files-state 'added 'deleted 'modified))))
-
-(defun git-log-edit-diff ()
-  "Run a diff of the current files being committed from a log-edit buffer."
-  (with-current-buffer log-edit-parent-buffer
-    (git-diff-file)))
-
-(defun git-append-sign-off (name email)
-  "Append a Signed-off-by entry to the current buffer, avoiding duplicates."
-  (let ((sign-off (format "Signed-off-by: %s <%s>" name email))
-        (case-fold-search t))
-    (goto-char (point-min))
-    (unless (re-search-forward (concat "^" (regexp-quote sign-off)) nil t)
-      (goto-char (point-min))
-      (unless (re-search-forward "^Signed-off-by: " nil t)
-        (setq sign-off (concat "\n" sign-off)))
-      (goto-char (point-max))
-      (insert sign-off "\n"))))
-
-(defun git-setup-log-buffer (buffer &optional merge-heads author-name author-email subject date msg)
-  "Setup the log buffer for a commit."
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((dir default-directory)
-        (committer-name (git-get-committer-name))
-        (committer-email (git-get-committer-email))
-        (sign-off git-append-signed-off-by))
-    (with-current-buffer buffer
-      (cd dir)
-      (erase-buffer)
-      (insert
-       (propertize
-        (format "Author: %s <%s>\n%s%s"
-                (or author-name committer-name)
-                (or author-email committer-email)
-                (if date (format "Date: %s\n" date) "")
-                (if merge-heads
-                    (format "Merge: %s\n"
-                            (mapconcat 'identity merge-heads " "))
-                  ""))
-        'face 'git-header-face)
-       (propertize git-log-msg-separator 'face 'git-separator-face)
-       "\n")
-      (when subject (insert subject "\n\n"))
-      (cond (msg (insert msg "\n"))
-            ((file-readable-p ".git/rebase-apply/msg")
-             (insert-file-contents ".git/rebase-apply/msg"))
-            ((file-readable-p ".git/MERGE_MSG")
-             (insert-file-contents ".git/MERGE_MSG")))
-      ; delete empty lines at end
-      (goto-char (point-min))
-      (when (re-search-forward "\n+\\'" nil t)
-        (replace-match "\n" t t))
-      (when sign-off (git-append-sign-off committer-name committer-email)))
-    buffer))
-
-(define-derived-mode git-log-edit-mode log-edit-mode "Git-Log-Edit"
-  "Major mode for editing git log messages.
-
-Set up git-specific `font-lock-keywords' for `log-edit-mode'."
-  (set (make-local-variable 'font-lock-defaults)
-       '(git-log-edit-font-lock-keywords t t)))
-
-(defun git-commit-file ()
-  "Commit the marked file(s), asking for a commit message."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (when (git-run-pre-commit-hook)
-    (let ((buffer (get-buffer-create "*git-commit*"))
-          (coding-system (git-get-commits-coding-system))
-          author-name author-email subject date)
-      (when (eq 0 (buffer-size buffer))
-        (when (file-readable-p ".git/rebase-apply/info")
-          (with-temp-buffer
-            (insert-file-contents ".git/rebase-apply/info")
-            (goto-char (point-min))
-            (when (re-search-forward "^Author: \\(.*\\)\nEmail: \\(.*\\)$" nil t)
-              (setq author-name (match-string 1))
-              (setq author-email (match-string 2)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Subject: \\(.*\\)$" nil t)
-              (setq subject (match-string 1)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Date: \\(.*\\)$" nil t)
-              (setq date (match-string 1)))))
-        (git-setup-log-buffer buffer (git-get-merge-heads) author-name author-email subject date))
-      (if (boundp 'log-edit-diff-function)
-         (log-edit 'git-do-commit nil '((log-edit-listfun . git-log-edit-files)
-                                        (log-edit-diff-function . git-log-edit-diff)) buffer 'git-log-edit-mode)
-       (log-edit 'git-do-commit nil 'git-log-edit-files buffer
-                  'git-log-edit-mode))
-      (setq paragraph-separate (concat (regexp-quote git-log-msg-separator) "$\\|Author: \\|Date: \\|Merge: \\|Signed-off-by: \\|\f\\|[        ]*$"))
-      (setq buffer-file-coding-system coding-system)
-      (re-search-forward (regexp-quote (concat git-log-msg-separator "\n")) nil t))))
-
-(defun git-setup-commit-buffer (commit)
-  "Setup the commit buffer with the contents of COMMIT."
-  (let (parents author-name author-email subject date msg)
-    (with-temp-buffer
-      (let ((coding-system (git-get-logoutput-coding-system)))
-        (git-call-process t "log" "-1" "--pretty=medium" "--abbrev=40" commit)
-        (goto-char (point-min))
-        (when (re-search-forward "^Merge: *\\(.*\\)$" nil t)
-          (setq parents (cdr (split-string (match-string 1) " +"))))
-        (when (re-search-forward "^Author: *\\(.*\\) <\\(.*\\)>$" nil t)
-          (setq author-name (match-string 1))
-          (setq author-email (match-string 2)))
-        (when (re-search-forward "^Date: *\\(.*\\)$" nil t)
-          (setq date (match-string 1)))
-        (while (re-search-forward "^    \\(.*\\)$" nil t)
-          (push (match-string 1) msg))
-        (setq msg (nreverse msg))
-        (setq subject (pop msg))
-        (while (and msg (zerop (length (car msg))) (pop msg)))))
-    (git-setup-log-buffer (get-buffer-create "*git-commit*")
-                          parents author-name author-email subject date
-                          (mapconcat #'identity msg "\n"))))
-
-(defun git-get-commit-files (commit)
-  "Retrieve a sorted list of files modified by COMMIT."
-  (let (files)
-    (with-temp-buffer
-      (git-call-process t "diff-tree" "-m" "-r" "-z" "--name-only" "--no-commit-id" "--root" commit)
-      (goto-char (point-min))
-      (while (re-search-forward "\\([^\0]*\\)\0" nil t 1)
-        (push (match-string 1) files)))
-    (sort files #'string-lessp)))
-
-(defun git-read-commit-name (prompt &optional default)
-  "Ask for a commit name, with completion for local branch, remote branch and tag."
-  (completing-read prompt
-                   (list* "HEAD" "ORIG_HEAD" "FETCH_HEAD" (mapcar #'car (git-for-each-ref)))
-                  nil nil nil nil default))
-
-(defun git-checkout (branch &optional merge)
-  "Checkout a branch, tag, or any commit.
-Use a prefix arg if git should merge while checking out."
-  (interactive
-   (list (git-read-commit-name "Checkout: ")
-         current-prefix-arg))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((args (list branch "--")))
-    (when merge (push "-m" args))
-    (when (apply #'git-call-process-display-error "checkout" args)
-      (git-update-status-files))))
-
-(defun git-branch (branch)
-  "Create a branch from the current HEAD and switch to it."
-  (interactive (list (git-read-commit-name "Branch: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (if (git-rev-parse (concat "refs/heads/" branch))
-      (if (yes-or-no-p (format "Branch %s already exists, replace it? " branch))
-          (and (git-call-process-display-error "branch" "-f" branch)
-               (git-call-process-display-error "checkout" branch))
-        (message "Canceled."))
-    (git-call-process-display-error "checkout" "-b" branch))
-    (git-refresh-ewoc-hf git-status))
-
-(defun git-amend-commit ()
-  "Undo the last commit on HEAD, and set things up to commit an
-amended version of it."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (when (git-empty-db-p) (error "No commit to amend."))
-  (let* ((commit (git-rev-parse "HEAD"))
-         (files (git-get-commit-files commit)))
-    (when (if (git-rev-parse "HEAD^")
-              (git-call-process-display-error "reset" "--soft" "HEAD^")
-            (and (git-update-ref "ORIG_HEAD" commit)
-                 (git-update-ref "HEAD" nil commit)))
-      (git-update-status-files files t)
-      (git-setup-commit-buffer commit)
-      (git-commit-file))))
-
-(defun git-cherry-pick-commit (arg)
-  "Cherry-pick a commit."
-  (interactive (list (git-read-commit-name "Cherry-pick commit: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((commit (git-rev-parse (concat arg "^0"))))
-    (unless commit (error "Not a valid commit '%s'." arg))
-    (when (git-rev-parse (concat commit "^2"))
-      (error "Cannot cherry-pick a merge commit."))
-    (let ((files (git-get-commit-files commit))
-          (ok (git-call-process-display-error "cherry-pick" "-n" commit)))
-      (git-update-status-files files ok)
-      (with-current-buffer (git-setup-commit-buffer commit)
-        (goto-char (point-min))
-        (if (re-search-forward "^\n*Signed-off-by:" nil t 1)
-            (goto-char (match-beginning 0))
-          (goto-char (point-max)))
-        (insert "(cherry picked from commit " commit ")\n"))
-      (when ok (git-commit-file)))))
-
-(defun git-revert-commit (arg)
-  "Revert a commit."
-  (interactive (list (git-read-commit-name "Revert commit: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((commit (git-rev-parse (concat arg "^0"))))
-    (unless commit (error "Not a valid commit '%s'." arg))
-    (when (git-rev-parse (concat commit "^2"))
-      (error "Cannot revert a merge commit."))
-    (let ((files (git-get-commit-files commit))
-          (subject (git-get-commit-description commit))
-          (ok (git-call-process-display-error "revert" "-n" commit)))
-      (git-update-status-files files ok)
-      (when (string-match "^[0-9a-f]+ - \\(.*\\)$" subject)
-        (setq subject (match-string 1 subject)))
-      (git-setup-log-buffer (get-buffer-create "*git-commit*")
-                            (git-get-merge-heads) nil nil (format "Revert \"%s\"" subject) nil
-                            (format "This reverts commit %s.\n" commit))
-      (when ok (git-commit-file)))))
-
-(defun git-find-file ()
-  "Visit the current file in its own buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (unless (git-expand-directory info)
-      (find-file (git-fileinfo->name info))
-      (when (eq 'unmerged (git-fileinfo->state info))
-        (smerge-mode 1)))))
-
-(defun git-find-file-other-window ()
-  "Visit the current file in its own buffer in another window."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (find-file-other-window (git-fileinfo->name info))
-    (when (eq 'unmerged (git-fileinfo->state info))
-      (smerge-mode))))
-
-(defun git-find-file-imerge ()
-  "Visit the current file in interactive merge mode."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (find-file (git-fileinfo->name info))
-    (smerge-ediff)))
-
-(defun git-view-file ()
-  "View the current file in its own buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (view-file (git-fileinfo->name info))))
-
-(defun git-refresh-status ()
-  "Refresh the git status buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (message "Refreshing git status...")
-  (git-update-status-files)
-  (message "Refreshing git status...done"))
-
-(defun git-status-quit ()
-  "Quit git-status mode."
-  (interactive)
-  (bury-buffer))
-
-;;;; Major Mode
-;;;; ------------------------------------------------------------
-
-(defvar git-status-mode-hook nil
-  "Run after `git-status-mode' is setup.")
-
-(defvar git-status-mode-map nil
-  "Keymap for git major mode.")
-
-(defvar git-status nil
-  "List of all files managed by the git-status mode.")
-
-(unless git-status-mode-map
-  (let ((map (make-keymap))
-        (commit-map (make-sparse-keymap))
-        (diff-map (make-sparse-keymap))
-        (toggle-map (make-sparse-keymap)))
-    (suppress-keymap map)
-    (define-key map "?"   'git-help)
-    (define-key map "h"   'git-help)
-    (define-key map " "   'git-next-file)
-    (define-key map "a"   'git-add-file)
-    (define-key map "c"   'git-commit-file)
-    (define-key map "\C-c" commit-map)
-    (define-key map "d"    diff-map)
-    (define-key map "="   'git-diff-file)
-    (define-key map "f"   'git-find-file)
-    (define-key map "\r"  'git-find-file)
-    (define-key map "g"   'git-refresh-status)
-    (define-key map "i"   'git-ignore-file)
-    (define-key map "I"   'git-insert-file)
-    (define-key map "l"   'git-log-file)
-    (define-key map "m"   'git-mark-file)
-    (define-key map "M"   'git-mark-all)
-    (define-key map "n"   'git-next-file)
-    (define-key map "N"   'git-next-unmerged-file)
-    (define-key map "o"   'git-find-file-other-window)
-    (define-key map "p"   'git-prev-file)
-    (define-key map "P"   'git-prev-unmerged-file)
-    (define-key map "q"   'git-status-quit)
-    (define-key map "r"   'git-remove-file)
-    (define-key map "t"    toggle-map)
-    (define-key map "T"   'git-toggle-all-marks)
-    (define-key map "u"   'git-unmark-file)
-    (define-key map "U"   'git-revert-file)
-    (define-key map "v"   'git-view-file)
-    (define-key map "x"   'git-remove-handled)
-    (define-key map "\C-?" 'git-unmark-file-up)
-    (define-key map "\M-\C-?" 'git-unmark-all)
-    ; the commit submap
-    (define-key commit-map "\C-a" 'git-amend-commit)
-    (define-key commit-map "\C-b" 'git-branch)
-    (define-key commit-map "\C-o" 'git-checkout)
-    (define-key commit-map "\C-p" 'git-cherry-pick-commit)
-    (define-key commit-map "\C-v" 'git-revert-commit)
-    ; the diff submap
-    (define-key diff-map "b" 'git-diff-file-base)
-    (define-key diff-map "c" 'git-diff-file-combined)
-    (define-key diff-map "=" 'git-diff-file)
-    (define-key diff-map "e" 'git-diff-file-idiff)
-    (define-key diff-map "E" 'git-find-file-imerge)
-    (define-key diff-map "h" 'git-diff-file-merge-head)
-    (define-key diff-map "m" 'git-diff-file-mine)
-    (define-key diff-map "o" 'git-diff-file-other)
-    ; the toggle submap
-    (define-key toggle-map "u" 'git-toggle-show-uptodate)
-    (define-key toggle-map "i" 'git-toggle-show-ignored)
-    (define-key toggle-map "k" 'git-toggle-show-unknown)
-    (define-key toggle-map "m" 'git-toggle-all-marks)
-    (setq git-status-mode-map map))
-  (easy-menu-define git-menu git-status-mode-map
-    "Git Menu"
-    `("Git"
-      ["Refresh" git-refresh-status t]
-      ["Commit" git-commit-file t]
-      ["Checkout..." git-checkout t]
-      ["New Branch..." git-branch t]
-      ["Cherry-pick Commit..." git-cherry-pick-commit t]
-      ["Revert Commit..." git-revert-commit t]
-      ("Merge"
-       ["Next Unmerged File" git-next-unmerged-file t]
-       ["Prev Unmerged File" git-prev-unmerged-file t]
-       ["Interactive Merge File" git-find-file-imerge t]
-       ["Diff Against Common Base File" git-diff-file-base t]
-       ["Diff Combined" git-diff-file-combined t]
-       ["Diff Against Merge Head" git-diff-file-merge-head t]
-       ["Diff Against Mine" git-diff-file-mine t]
-       ["Diff Against Other" git-diff-file-other t])
-      "--------"
-      ["Add File" git-add-file t]
-      ["Revert File" git-revert-file t]
-      ["Ignore File" git-ignore-file t]
-      ["Remove File" git-remove-file t]
-      ["Insert File" git-insert-file t]
-      "--------"
-      ["Find File" git-find-file t]
-      ["View File" git-view-file t]
-      ["Diff File" git-diff-file t]
-      ["Interactive Diff File" git-diff-file-idiff t]
-      ["Log" git-log-file t]
-      "--------"
-      ["Mark" git-mark-file t]
-      ["Mark All" git-mark-all t]
-      ["Unmark" git-unmark-file t]
-      ["Unmark All" git-unmark-all t]
-      ["Toggle All Marks" git-toggle-all-marks t]
-      ["Hide Handled Files" git-remove-handled t]
-      "--------"
-      ["Show Uptodate Files" git-toggle-show-uptodate :style toggle :selected git-show-uptodate]
-      ["Show Ignored Files" git-toggle-show-ignored :style toggle :selected git-show-ignored]
-      ["Show Unknown Files" git-toggle-show-unknown :style toggle :selected git-show-unknown]
-      "--------"
-      ["Quit" git-status-quit t])))
-
-
-;; git mode should only run in the *git status* buffer
-(put 'git-status-mode 'mode-class 'special)
-
-(defun git-status-mode ()
-  "Major mode for interacting with Git.
-Commands:
-\\{git-status-mode-map}"
-  (kill-all-local-variables)
-  (buffer-disable-undo)
-  (setq mode-name "git status"
-        major-mode 'git-status-mode
-        goal-column 17
-        buffer-read-only t)
-  (use-local-map git-status-mode-map)
-  (let ((buffer-read-only nil))
-    (erase-buffer)
-  (let ((status (ewoc-create 'git-fileinfo-prettyprint "" "")))
-    (set (make-local-variable 'git-status) status))
-  (set (make-local-variable 'list-buffers-directory) default-directory)
-  (make-local-variable 'git-show-uptodate)
-  (make-local-variable 'git-show-ignored)
-  (make-local-variable 'git-show-unknown)
-  (run-hooks 'git-status-mode-hook)))
-
-(defun git-find-status-buffer (dir)
-  "Find the git status buffer handling a specified directory."
-  (let ((list (buffer-list))
-        (fulldir (expand-file-name dir))
-        found)
-    (while (and list (not found))
-      (let ((buffer (car list)))
-        (with-current-buffer buffer
-          (when (and list-buffers-directory
-                     (string-equal fulldir (expand-file-name list-buffers-directory))
-                    (eq major-mode 'git-status-mode))
-            (setq found buffer))))
-      (setq list (cdr list)))
-    found))
-
-(defun git-status (dir)
-  "Entry point into git-status mode."
-  (interactive "DSelect directory: ")
-  (setq dir (git-get-top-dir dir))
-  (if (file-exists-p (concat (file-name-as-directory dir) ".git"))
-      (let ((buffer (or (and git-reuse-status-buffer (git-find-status-buffer dir))
-                        (create-file-buffer (expand-file-name "*git-status*" dir)))))
-        (switch-to-buffer buffer)
-        (cd dir)
-        (git-status-mode)
-        (git-refresh-status)
-        (goto-char (point-min))
-        (add-hook 'after-save-hook 'git-update-saved-file))
-    (message "%s is not a git working tree." dir)))
-
-(defun git-update-saved-file ()
-  "Update the corresponding git-status buffer when a file is saved.
-Meant to be used in `after-save-hook'."
-  (let* ((file (expand-file-name buffer-file-name))
-         (dir (condition-case nil (git-get-top-dir (file-name-directory file)) (error nil)))
-         (buffer (and dir (git-find-status-buffer dir))))
-    (when buffer
-      (with-current-buffer buffer
-        (let ((filename (file-relative-name file dir)))
-          ; skip files located inside the .git directory
-          (unless (string-match "^\\.git/" filename)
-            (git-call-process nil "add" "--refresh" "--" filename)
-            (git-update-status-files (list filename))))))))
-
-(defun git-help ()
-  "Display help for Git mode."
-  (interactive)
-  (describe-function 'git-status-mode))
-
-(provide 'git)
-;;; git.el ends here
index 6946f3dd2ad924639f03b3779635c759c938c1e4..18bc60b021be00f69d2b5a784996bbc1e98c82c1 100644 (file)
@@ -1,3 +1,20 @@
-These are original scripted implementations, kept primarily for their
-reference value to any aspiring plumbing users who want to learn how
-pieces can be fit together.
+This directory used to contain scripted implementations of builtins
+that have since been rewritten in C.
+
+They have now been removed, but can still be retrieved from the history
+just prior to the commit that removed them from this directory.
+
+They're interesting for their reference value to any aspiring plumbing
+users who want to learn how pieces can be fit together, but in many
+cases they have drifted far enough from the implementations Git actually
+uses that they may no longer be instructive.
+
+Other things that can be useful:
+
+ * Some commands such as git-gc wrap other commands, and what they're
+   doing behind the scenes can be seen by running them under
+   GIT_TRACE=1 (see the example after this list).
+
+ * Doing `git log` on paths matching '*--helper.c' will show
+   incremental effort in the direction of moving existing shell
+   scripts to C.
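
For example, in a clone of git.git, both suggestions above can be tried
roughly like this (the exact commands below are only an illustration):

    # Show the sub-commands that `git gc` spawns behind the scenes.
    GIT_TRACE=1 git gc

    # Follow the incremental conversion of shell scripts to C builtins
    # by listing the commits that touched the *--helper.c sources.
    git log --oneline -- '*--helper.c'
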
diff --git a/contrib/examples/builtin-fetch--tool.c b/contrib/examples/builtin-fetch--tool.c
deleted file mode 100644 (file)
index a3eb19d..0000000
+++ /dev/null
@@ -1,575 +0,0 @@
-#include "builtin.h"
-#include "cache.h"
-#include "refs.h"
-#include "commit.h"
-#include "sigchain.h"
-
-static char *get_stdin(void)
-{
-       struct strbuf buf = STRBUF_INIT;
-       if (strbuf_read(&buf, 0, 1024) < 0) {
-               die_errno("error reading standard input");
-       }
-       return strbuf_detach(&buf, NULL);
-}
-
-static void show_new(enum object_type type, unsigned char *sha1_new)
-{
-       fprintf(stderr, "  %s: %s\n", typename(type),
-               find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
-}
-
-static int update_ref_env(const char *action,
-                     const char *refname,
-                     unsigned char *sha1,
-                     unsigned char *oldval)
-{
-       char msg[1024];
-       const char *rla = getenv("GIT_REFLOG_ACTION");
-
-       if (!rla)
-               rla = "(reflog update)";
-       if (snprintf(msg, sizeof(msg), "%s: %s", rla, action) >= sizeof(msg))
-               warning("reflog message too long: %.*s...", 50, msg);
-       return update_ref(msg, refname, sha1, oldval, 0,
-                         UPDATE_REFS_QUIET_ON_ERR);
-}
-
-static int update_local_ref(const char *name,
-                           const char *new_head,
-                           const char *note,
-                           int verbose, int force)
-{
-       unsigned char sha1_old[20], sha1_new[20];
-       char oldh[41], newh[41];
-       struct commit *current, *updated;
-       enum object_type type;
-
-       if (get_sha1_hex(new_head, sha1_new))
-               die("malformed object name %s", new_head);
-
-       type = sha1_object_info(sha1_new, NULL);
-       if (type < 0)
-               die("object %s not found", new_head);
-
-       if (!*name) {
-               /* Not storing */
-               if (verbose) {
-                       fprintf(stderr, "* fetched %s\n", note);
-                       show_new(type, sha1_new);
-               }
-               return 0;
-       }
-
-       if (get_sha1(name, sha1_old)) {
-               const char *msg;
-       just_store:
-               /* new ref */
-               if (!strncmp(name, "refs/tags/", 10))
-                       msg = "storing tag";
-               else
-                       msg = "storing head";
-               fprintf(stderr, "* %s: storing %s\n",
-                       name, note);
-               show_new(type, sha1_new);
-               return update_ref_env(msg, name, sha1_new, NULL);
-       }
-
-       if (!hashcmp(sha1_old, sha1_new)) {
-               if (verbose) {
-                       fprintf(stderr, "* %s: same as %s\n", name, note);
-                       show_new(type, sha1_new);
-               }
-               return 0;
-       }
-
-       if (!strncmp(name, "refs/tags/", 10)) {
-               fprintf(stderr, "* %s: updating with %s\n", name, note);
-               show_new(type, sha1_new);
-               return update_ref_env("updating tag", name, sha1_new, NULL);
-       }
-
-       current = lookup_commit_reference(sha1_old);
-       updated = lookup_commit_reference(sha1_new);
-       if (!current || !updated)
-               goto just_store;
-
-       strcpy(oldh, find_unique_abbrev(current->object.sha1, DEFAULT_ABBREV));
-       strcpy(newh, find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
-
-       if (in_merge_bases(current, updated)) {
-               fprintf(stderr, "* %s: fast-forward to %s\n",
-                       name, note);
-               fprintf(stderr, "  old..new: %s..%s\n", oldh, newh);
-               return update_ref_env("fast-forward", name, sha1_new, sha1_old);
-       }
-       if (!force) {
-               fprintf(stderr,
-                       "* %s: not updating to non-fast-forward %s\n",
-                       name, note);
-               fprintf(stderr,
-                       "  old...new: %s...%s\n", oldh, newh);
-               return 1;
-       }
-       fprintf(stderr,
-               "* %s: forcing update to non-fast-forward %s\n",
-               name, note);
-       fprintf(stderr, "  old...new: %s...%s\n", oldh, newh);
-       return update_ref_env("forced-update", name, sha1_new, sha1_old);
-}
-
-static int append_fetch_head(FILE *fp,
-                            const char *head, const char *remote,
-                            const char *remote_name, const char *remote_nick,
-                            const char *local_name, int not_for_merge,
-                            int verbose, int force)
-{
-       struct commit *commit;
-       int remote_len, i, note_len;
-       unsigned char sha1[20];
-       char note[1024];
-       const char *what, *kind;
-
-       if (get_sha1(head, sha1))
-               return error("Not a valid object name: %s", head);
-       commit = lookup_commit_reference_gently(sha1, 1);
-       if (!commit)
-               not_for_merge = 1;
-
-       if (!strcmp(remote_name, "HEAD")) {
-               kind = "";
-               what = "";
-       }
-       else if (!strncmp(remote_name, "refs/heads/", 11)) {
-               kind = "branch";
-               what = remote_name + 11;
-       }
-       else if (!strncmp(remote_name, "refs/tags/", 10)) {
-               kind = "tag";
-               what = remote_name + 10;
-       }
-       else if (!strncmp(remote_name, "refs/remotes/", 13)) {
-               kind = "remote-tracking branch";
-               what = remote_name + 13;
-       }
-       else {
-               kind = "";
-               what = remote_name;
-       }
-
-       remote_len = strlen(remote);
-       for (i = remote_len - 1; remote[i] == '/' && 0 <= i; i--)
-               ;
-       remote_len = i + 1;
-       if (4 < i && !strncmp(".git", remote + i - 3, 4))
-               remote_len = i - 3;
-
-       note_len = 0;
-       if (*what) {
-               if (*kind)
-                       note_len += sprintf(note + note_len, "%s ", kind);
-               note_len += sprintf(note + note_len, "'%s' of ", what);
-       }
-       note_len += sprintf(note + note_len, "%.*s", remote_len, remote);
-       fprintf(fp, "%s\t%s\t%s\n",
-               sha1_to_hex(commit ? commit->object.sha1 : sha1),
-               not_for_merge ? "not-for-merge" : "",
-               note);
-       return update_local_ref(local_name, head, note, verbose, force);
-}
-
-static char *keep;
-static void remove_keep(void)
-{
-       if (keep && *keep)
-               unlink(keep);
-}
-
-static void remove_keep_on_signal(int signo)
-{
-       remove_keep();
-       sigchain_pop(signo);
-       raise(signo);
-}
-
-static char *find_local_name(const char *remote_name, const char *refs,
-                            int *force_p, int *not_for_merge_p)
-{
-       const char *ref = refs;
-       int len = strlen(remote_name);
-
-       while (ref) {
-               const char *next;
-               int single_force, not_for_merge;
-
-               while (*ref == '\n')
-                       ref++;
-               if (!*ref)
-                       break;
-               next = strchr(ref, '\n');
-
-               single_force = not_for_merge = 0;
-               if (*ref == '+') {
-                       single_force = 1;
-                       ref++;
-               }
-               if (*ref == '.') {
-                       not_for_merge = 1;
-                       ref++;
-                       if (*ref == '+') {
-                               single_force = 1;
-                               ref++;
-                       }
-               }
-               if (!strncmp(remote_name, ref, len) && ref[len] == ':') {
-                       const char *local_part = ref + len + 1;
-                       int retlen;
-
-                       if (!next)
-                               retlen = strlen(local_part);
-                       else
-                               retlen = next - local_part;
-                       *force_p = single_force;
-                       *not_for_merge_p = not_for_merge;
-                       return xmemdupz(local_part, retlen);
-               }
-               ref = next;
-       }
-       return NULL;
-}
-
-static int fetch_native_store(FILE *fp,
-                             const char *remote,
-                             const char *remote_nick,
-                             const char *refs,
-                             int verbose, int force)
-{
-       char buffer[1024];
-       int err = 0;
-
-       sigchain_push_common(remove_keep_on_signal);
-       atexit(remove_keep);
-
-       while (fgets(buffer, sizeof(buffer), stdin)) {
-               int len;
-               char *cp;
-               char *local_name;
-               int single_force, not_for_merge;
-
-               for (cp = buffer; *cp && !isspace(*cp); cp++)
-                       ;
-               if (*cp)
-                       *cp++ = 0;
-               len = strlen(cp);
-               if (len && cp[len-1] == '\n')
-                       cp[--len] = 0;
-               if (!strcmp(buffer, "failed"))
-                       die("Fetch failure: %s", remote);
-               if (!strcmp(buffer, "pack"))
-                       continue;
-               if (!strcmp(buffer, "keep")) {
-                       char *od = get_object_directory();
-                       int len = strlen(od) + strlen(cp) + 50;
-                       keep = xmalloc(len);
-                       sprintf(keep, "%s/pack/pack-%s.keep", od, cp);
-                       continue;
-               }
-
-               local_name = find_local_name(cp, refs,
-                                            &single_force, &not_for_merge);
-               if (!local_name)
-                       continue;
-               err |= append_fetch_head(fp,
-                                        buffer, remote, cp, remote_nick,
-                                        local_name, not_for_merge,
-                                        verbose, force || single_force);
-       }
-       return err;
-}
-
-static int parse_reflist(const char *reflist)
-{
-       const char *ref;
-
-       printf("refs='");
-       for (ref = reflist; ref; ) {
-               const char *next;
-               while (*ref && isspace(*ref))
-                       ref++;
-               if (!*ref)
-                       break;
-               for (next = ref; *next && !isspace(*next); next++)
-                       ;
-               printf("\n%.*s", (int)(next - ref), ref);
-               ref = next;
-       }
-       printf("'\n");
-
-       printf("rref='");
-       for (ref = reflist; ref; ) {
-               const char *next, *colon;
-               while (*ref && isspace(*ref))
-                       ref++;
-               if (!*ref)
-                       break;
-               for (next = ref; *next && !isspace(*next); next++)
-                       ;
-               if (*ref == '.')
-                       ref++;
-               if (*ref == '+')
-                       ref++;
-               colon = strchr(ref, ':');
-               putchar('\n');
-               printf("%.*s", (int)((colon ? colon : next) - ref), ref);
-               ref = next;
-       }
-       printf("'\n");
-       return 0;
-}
-
-static int expand_refs_wildcard(const char *ls_remote_result, int numrefs,
-                               const char **refs)
-{
-       int i, matchlen, replacelen;
-       int found_one = 0;
-       const char *remote = *refs++;
-       numrefs--;
-
-       if (numrefs == 0) {
-               fprintf(stderr, "Nothing specified for fetching with remote.%s.fetch\n",
-                       remote);
-               printf("empty\n");
-       }
-
-       for (i = 0; i < numrefs; i++) {
-               const char *ref = refs[i];
-               const char *lref = ref;
-               const char *colon;
-               const char *tail;
-               const char *ls;
-               const char *next;
-
-               if (*lref == '+')
-                       lref++;
-               colon = strchr(lref, ':');
-               tail = lref + strlen(lref);
-               if (!(colon &&
-                     2 < colon - lref &&
-                     colon[-1] == '*' &&
-                     colon[-2] == '/' &&
-                     2 < tail - (colon + 1) &&
-                     tail[-1] == '*' &&
-                     tail[-2] == '/')) {
-                       /* not a glob */
-                       if (!found_one++)
-                               printf("explicit\n");
-                       printf("%s\n", ref);
-                       continue;
-               }
-
-               /* glob */
-               if (!found_one++)
-                       printf("glob\n");
-
-               /* lref to colon-2 is remote hierarchy name;
-                * colon+1 to tail-2 is local.
-                */
-               matchlen = (colon-1) - lref;
-               replacelen = (tail-1) - (colon+1);
-               for (ls = ls_remote_result; ls; ls = next) {
-                       const char *eol;
-                       unsigned char sha1[20];
-                       int namelen;
-
-                       while (*ls && isspace(*ls))
-                               ls++;
-                       next = strchr(ls, '\n');
-                       eol = !next ? (ls + strlen(ls)) : next;
-                       if (!memcmp("^{}", eol-3, 3))
-                               continue;
-                       if (eol - ls < 40)
-                               continue;
-                       if (get_sha1_hex(ls, sha1))
-                               continue;
-                       ls += 40;
-                       while (ls < eol && isspace(*ls))
-                               ls++;
-                       /* ls to next (or eol) is the name.
-                        * is it identical to lref to colon-2?
-                        */
-                       if ((eol - ls) <= matchlen ||
-                           strncmp(ls, lref, matchlen))
-                               continue;
-
-                       /* Yes, it is a match */
-                       namelen = eol - ls;
-                       if (lref != ref)
-                               putchar('+');
-                       printf("%.*s:%.*s%.*s\n",
-                              namelen, ls,
-                              replacelen, colon + 1,
-                              namelen - matchlen, ls + matchlen);
-               }
-       }
-       return 0;
-}
-
-static int pick_rref(int sha1_only, const char *rref, const char *ls_remote_result)
-{
-       int err = 0;
-       int lrr_count = lrr_count, i, pass;
-       const char *cp;
-       struct lrr {
-               const char *line;
-               const char *name;
-               int namelen;
-               int shown;
-       } *lrr_list = lrr_list;
-
-       for (pass = 0; pass < 2; pass++) {
-               /* pass 0 counts and allocates, pass 1 fills... */
-               cp = ls_remote_result;
-               i = 0;
-               while (1) {
-                       const char *np;
-                       while (*cp && isspace(*cp))
-                               cp++;
-                       if (!*cp)
-                               break;
-                       np = strchrnul(cp, '\n');
-                       if (pass) {
-                               lrr_list[i].line = cp;
-                               lrr_list[i].name = cp + 41;
-                               lrr_list[i].namelen = np - (cp + 41);
-                       }
-                       i++;
-                       cp = np;
-               }
-               if (!pass) {
-                       lrr_count = i;
-                       lrr_list = xcalloc(lrr_count, sizeof(*lrr_list));
-               }
-       }
-
-       while (1) {
-               const char *next;
-               int rreflen;
-               int i;
-
-               while (*rref && isspace(*rref))
-                       rref++;
-               if (!*rref)
-                       break;
-               next = strchrnul(rref, '\n');
-               rreflen = next - rref;
-
-               for (i = 0; i < lrr_count; i++) {
-                       struct lrr *lrr = &(lrr_list[i]);
-
-                       if (rreflen == lrr->namelen &&
-                           !memcmp(lrr->name, rref, rreflen)) {
-                               if (!lrr->shown)
-                                       printf("%.*s\n",
-                                              sha1_only ? 40 : lrr->namelen + 41,
-                                              lrr->line);
-                               lrr->shown = 1;
-                               break;
-                       }
-               }
-               if (lrr_count <= i) {
-                       error("pick-rref: %.*s not found", rreflen, rref);
-                       err = 1;
-               }
-               rref = next;
-       }
-       free(lrr_list);
-       return err;
-}
-
-int cmd_fetch__tool(int argc, const char **argv, const char *prefix)
-{
-       int verbose = 0;
-       int force = 0;
-       int sopt = 0;
-
-       while (1 < argc) {
-               const char *arg = argv[1];
-               if (!strcmp("-v", arg))
-                       verbose = 1;
-               else if (!strcmp("-f", arg))
-                       force = 1;
-               else if (!strcmp("-s", arg))
-                       sopt = 1;
-               else
-                       break;
-               argc--;
-               argv++;
-       }
-
-       if (argc <= 1)
-               return error("Missing subcommand");
-
-       if (!strcmp("append-fetch-head", argv[1])) {
-               int result;
-               FILE *fp;
-               char *filename;
-
-               if (argc != 8)
-                       return error("append-fetch-head takes 6 args");
-               filename = git_path_fetch_head();
-               fp = fopen(filename, "a");
-               if (!fp)
-                       return error("cannot open %s: %s", filename, strerror(errno));
-               result = append_fetch_head(fp, argv[2], argv[3],
-                                          argv[4], argv[5],
-                                          argv[6], !!argv[7][0],
-                                          verbose, force);
-               fclose(fp);
-               return result;
-       }
-       if (!strcmp("native-store", argv[1])) {
-               int result;
-               FILE *fp;
-               char *filename;
-
-               if (argc != 5)
-                       return error("fetch-native-store takes 3 args");
-               filename = git_path_fetch_head();
-               fp = fopen(filename, "a");
-               if (!fp)
-                       return error("cannot open %s: %s", filename, strerror(errno));
-               result = fetch_native_store(fp, argv[2], argv[3], argv[4],
-                                           verbose, force);
-               fclose(fp);
-               return result;
-       }
-       if (!strcmp("parse-reflist", argv[1])) {
-               const char *reflist;
-               if (argc != 3)
-                       return error("parse-reflist takes 1 arg");
-               reflist = argv[2];
-               if (!strcmp(reflist, "-"))
-                       reflist = get_stdin();
-               return parse_reflist(reflist);
-       }
-       if (!strcmp("pick-rref", argv[1])) {
-               const char *ls_remote_result;
-               if (argc != 4)
-                       return error("pick-rref takes 2 args");
-               ls_remote_result = argv[3];
-               if (!strcmp(ls_remote_result, "-"))
-                       ls_remote_result = get_stdin();
-               return pick_rref(sopt, argv[2], ls_remote_result);
-       }
-       if (!strcmp("expand-refs-wildcard", argv[1])) {
-               const char *reflist;
-               if (argc < 4)
-                       return error("expand-refs-wildcard takes at least 2 args");
-               reflist = argv[2];
-               if (!strcmp(reflist, "-"))
-                       reflist = get_stdin();
-               return expand_refs_wildcard(reflist, argc - 3, argv + 3);
-       }
-
-       return error("Unknown subcommand: %s", argv[1]);
-}
diff --git a/contrib/examples/git-am.sh b/contrib/examples/git-am.sh
deleted file mode 100755 (executable)
index dd539f1..0000000
+++ /dev/null
@@ -1,975 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005, 2006 Junio C Hamano
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=t
-OPTIONS_SPEC="\
-git am [options] [(<mbox>|<Maildir>)...]
-git am [options] (--continue | --skip | --abort)
---
-i,interactive   run interactively
-b,binary*       (historical option -- no-op)
-3,3way          allow fall back on 3way merging if needed
-q,quiet         be quiet
-s,signoff       add a Signed-off-by line to the commit message
-u,utf8          recode into utf8 (default)
-k,keep          pass -k flag to git-mailinfo
-keep-non-patch  pass -b flag to git-mailinfo
-m,message-id    pass -m flag to git-mailinfo
-keep-cr         pass --keep-cr flag to git-mailsplit for mbox format
-no-keep-cr      do not pass --keep-cr flag to git-mailsplit independent of am.keepcr
-c,scissors      strip everything before a scissors line
-whitespace=     pass it through git-apply
-ignore-space-change pass it through git-apply
-ignore-whitespace pass it through git-apply
-directory=      pass it through git-apply
-exclude=        pass it through git-apply
-include=        pass it through git-apply
-C=              pass it through git-apply
-p=              pass it through git-apply
-patch-format=   format the patch(es) are in
-reject          pass it through git-apply
-resolvemsg=     override error message when patch failure occurs
-continue        continue applying patches after resolving a conflict
-r,resolved      synonyms for --continue
-skip            skip the current patch
-abort           restore the original branch and abort the patching operation.
-committer-date-is-author-date    lie about committer date
-ignore-date     use current timestamp for author date
-rerere-autoupdate update the index with reused conflict resolution if possible
-S,gpg-sign?     GPG-sign commits
-rebasing*       (internal use for git-rebase)"
-
-. git-sh-setup
-. git-sh-i18n
-prefix=$(git rev-parse --show-prefix)
-set_reflog_action am
-require_work_tree
-cd_to_toplevel
-
-git var GIT_COMMITTER_IDENT >/dev/null ||
-       die "$(gettext "You need to set your committer info first")"
-
-if git rev-parse --verify -q HEAD >/dev/null
-then
-       HAS_HEAD=yes
-else
-       HAS_HEAD=
-fi
-
-cmdline="git am"
-if test '' != "$interactive"
-then
-       cmdline="$cmdline -i"
-fi
-if test '' != "$threeway"
-then
-       cmdline="$cmdline -3"
-fi
-
-empty_tree=4b825dc642cb6eb9a060e54bf8d69288fbee4904
-
-sq () {
-       git rev-parse --sq-quote "$@"
-}
-
-stop_here () {
-    echo "$1" >"$dotest/next"
-    git rev-parse --verify -q HEAD >"$dotest/abort-safety"
-    exit 1
-}
-
-safe_to_abort () {
-       if test -f "$dotest/dirtyindex"
-       then
-               return 1
-       fi
-
-       if ! test -f "$dotest/abort-safety"
-       then
-               return 0
-       fi
-
-       abort_safety=$(cat "$dotest/abort-safety")
-       if test "z$(git rev-parse --verify -q HEAD)" = "z$abort_safety"
-       then
-               return 0
-       fi
-       gettextln "You seem to have moved HEAD since the last 'am' failure.
-Not rewinding to ORIG_HEAD" >&2
-       return 1
-}
-
-stop_here_user_resolve () {
-    if [ -n "$resolvemsg" ]; then
-           printf '%s\n' "$resolvemsg"
-           stop_here $1
-    fi
-    eval_gettextln "When you have resolved this problem, run \"\$cmdline --continue\".
-If you prefer to skip this patch, run \"\$cmdline --skip\" instead.
-To restore the original branch and stop patching, run \"\$cmdline --abort\"."
-
-    stop_here $1
-}
-
-go_next () {
-       rm -f "$dotest/$msgnum" "$dotest/msg" "$dotest/msg-clean" \
-               "$dotest/patch" "$dotest/info"
-       echo "$next" >"$dotest/next"
-       this=$next
-}
-
-cannot_fallback () {
-       echo "$1"
-       gettextln "Cannot fall back to three-way merge."
-       exit 1
-}
-
-fall_back_3way () {
-    O_OBJECT=$(cd "$GIT_OBJECT_DIRECTORY" && pwd)
-
-    rm -fr "$dotest"/patch-merge-*
-    mkdir "$dotest/patch-merge-tmp-dir"
-
-    # First see if the patch records the index info that we can use.
-    cmd="git apply $git_apply_opt --build-fake-ancestor" &&
-    cmd="$cmd "'"$dotest/patch-merge-tmp-index" "$dotest/patch"' &&
-    eval "$cmd" &&
-    GIT_INDEX_FILE="$dotest/patch-merge-tmp-index" \
-    git write-tree >"$dotest/patch-merge-base+" ||
-    cannot_fallback "$(gettext "Repository lacks necessary blobs to fall back on 3-way merge.")"
-
-    say "$(gettext "Using index info to reconstruct a base tree...")"
-
-    cmd='GIT_INDEX_FILE="$dotest/patch-merge-tmp-index"'
-
-    if test -z "$GIT_QUIET"
-    then
-       eval "$cmd git diff-index --cached --diff-filter=AM --name-status HEAD"
-    fi
-
-    cmd="$cmd git apply --cached $git_apply_opt"' <"$dotest/patch"'
-    if eval "$cmd"
-    then
-       mv "$dotest/patch-merge-base+" "$dotest/patch-merge-base"
-       mv "$dotest/patch-merge-tmp-index" "$dotest/patch-merge-index"
-    else
-       cannot_fallback "$(gettext "Did you hand edit your patch?
-It does not apply to blobs recorded in its index.")"
-    fi
-
-    test -f "$dotest/patch-merge-index" &&
-    his_tree=$(GIT_INDEX_FILE="$dotest/patch-merge-index" git write-tree) &&
-    orig_tree=$(cat "$dotest/patch-merge-base") &&
-    rm -fr "$dotest"/patch-merge-* || exit 1
-
-    say "$(gettext "Falling back to patching base and 3-way merge...")"
-
-    # This is not so wrong.  Depending on which base we picked,
-    # orig_tree may be wildly different from ours, but his_tree
-    # has the same set of wildly different changes in parts the
-    # patch did not touch, so recursive ends up canceling them,
-    # saying that we reverted all those changes.
-
-    eval GITHEAD_$his_tree='"$FIRSTLINE"'
-    export GITHEAD_$his_tree
-    if test -n "$GIT_QUIET"
-    then
-           GIT_MERGE_VERBOSITY=0 && export GIT_MERGE_VERBOSITY
-    fi
-    our_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree)
-    git-merge-recursive $orig_tree -- $our_tree $his_tree || {
-           git rerere $allow_rerere_autoupdate
-           die "$(gettext "Failed to merge in the changes.")"
-    }
-    unset GITHEAD_$his_tree
-}
-
-clean_abort () {
-       test $# = 0 || echo >&2 "$@"
-       rm -fr "$dotest"
-       exit 1
-}
-
-patch_format=
-
-check_patch_format () {
-       # early return if patch_format was set from the command line
-       if test -n "$patch_format"
-       then
-               return 0
-       fi
-
-       # we default to mbox format if input is from stdin and for
-       # directories
-       if test $# = 0 || test "x$1" = "x-" || test -d "$1"
-       then
-               patch_format=mbox
-               return 0
-       fi
-
-       # otherwise, check the first few non-blank lines of the first
-       # patch to try to detect its format
-       {
-               # Start from first line containing non-whitespace
-               l1=
-               while test -z "$l1"
-               do
-                       read l1 || break
-               done
-               read l2
-               read l3
-               case "$l1" in
-               "From "* | "From: "*)
-                       patch_format=mbox
-                       ;;
-               '# This series applies on GIT commit'*)
-                       patch_format=stgit-series
-                       ;;
-               "# HG changeset patch")
-                       patch_format=hg
-                       ;;
-               *)
-                       # if the second line is empty and the third is
-                       # a From, Author or Date entry, this is very
-                       # likely an StGIT patch
-                       case "$l2,$l3" in
-                       ,"From: "* | ,"Author: "* | ,"Date: "*)
-                               patch_format=stgit
-                               ;;
-                       *)
-                               ;;
-                       esac
-                       ;;
-               esac
-               if test -z "$patch_format" &&
-                       test -n "$l1" &&
-                       test -n "$l2" &&
-                       test -n "$l3"
-               then
-                       # This begins with three non-empty lines.  Is this a
-                       # piece of e-mail a-la RFC2822?  Grab all the headers,
-                       # discarding the indented remainder of folded lines,
-                       # and see if it looks like that they all begin with the
-                       # header field names...
-                       tr -d '\015' <"$1" |
-                       sed -n -e '/^$/q' -e '/^[       ]/d' -e p |
-                       sane_egrep -v '^[!-9;-~]+:' >/dev/null ||
-                       patch_format=mbox
-               fi
-       } < "$1" || clean_abort
-}
-
-split_patches () {
-       case "$patch_format" in
-       mbox)
-               if test t = "$keepcr"
-               then
-                   keep_cr=--keep-cr
-               else
-                   keep_cr=
-               fi
-               git mailsplit -d"$prec" -o"$dotest" -b $keep_cr -- "$@" > "$dotest/last" ||
-               clean_abort
-               ;;
-       stgit-series)
-               if test $# -ne 1
-               then
-                       clean_abort "$(gettext "Only one StGIT patch series can be applied at once")"
-               fi
-               series_dir=$(dirname "$1")
-               series_file="$1"
-               shift
-               {
-                       set x
-                       while read filename
-                       do
-                               set "$@" "$series_dir/$filename"
-                       done
-                       # remove the safety x
-                       shift
-                       # remove the arg coming from the first-line comment
-                       shift
-               } < "$series_file" || clean_abort
-               # set the patch format appropriately
-               patch_format=stgit
-               # now handle the actual StGIT patches
-               split_patches "$@"
-               ;;
-       stgit)
-               this=0
-               test 0 -eq "$#" && set -- -
-               for stgit in "$@"
-               do
-                       this=$(expr "$this" + 1)
-                       msgnum=$(printf "%0${prec}d" $this)
-                       # Perl version of StGIT parse_patch. The first nonemptyline
-                       # not starting with Author, From or Date is the
-                       # subject, and the body starts with the next nonempty
-                       # line not starting with Author, From or Date
-                       @@PERL@@ -ne 'BEGIN { $subject = 0 }
-                               if ($subject > 1) { print ; }
-                               elsif (/^\s+$/) { next ; }
-                               elsif (/^Author:/) { s/Author/From/ ; print ;}
-                               elsif (/^(From|Date)/) { print ; }
-                               elsif ($subject) {
-                                       $subject = 2 ;
-                                       print "\n" ;
-                                       print ;
-                               } else {
-                                       print "Subject: ", $_ ;
-                                       $subject = 1;
-                               }
-                       ' -- "$stgit" >"$dotest/$msgnum" || clean_abort
-               done
-               echo "$this" > "$dotest/last"
-               this=
-               msgnum=
-               ;;
-       hg)
-               this=0
-               test 0 -eq "$#" && set -- -
-               for hg in "$@"
-               do
-                       this=$(( $this + 1 ))
-                       msgnum=$(printf "%0${prec}d" $this)
-                       # hg stores changeset metadata in #-commented lines preceding
-                       # the commit message and diff(s). The only metadata we care about
-                       # are the User and Date (Node ID and Parent are hashes which are
-                       # only relevant to the hg repository and thus not useful to us)
-                       # Since we cannot guarantee that the commit message is in
-                       # git-friendly format, we put no Subject: line and just consume
-                       # all of the message as the body
-                       LANG=C LC_ALL=C @@PERL@@ -M'POSIX qw(strftime)' -ne 'BEGIN { $subject = 0 }
-                               if ($subject) { print ; }
-                               elsif (/^\# User /) { s/\# User/From:/ ; print ; }
-                               elsif (/^\# Date /) {
-                                       my ($hashsign, $str, $time, $tz) = split ;
-                                       $tz_str = sprintf "%+05d", (0-$tz)/36;
-                                       print "Date: " .
-                                             strftime("%a, %d %b %Y %H:%M:%S ",
-                                                      gmtime($time-$tz))
-                                             . "$tz_str\n";
-                               } elsif (/^\# /) { next ; }
-                               else {
-                                       print "\n", $_ ;
-                                       $subject = 1;
-                               }
-                       ' -- "$hg" >"$dotest/$msgnum" || clean_abort
-               done
-               echo "$this" >"$dotest/last"
-               this=
-               msgnum=
-               ;;
-       *)
-               if test -n "$patch_format"
-               then
-                       clean_abort "$(eval_gettext "Patch format \$patch_format is not supported.")"
-               else
-                       clean_abort "$(gettext "Patch format detection failed.")"
-               fi
-               ;;
-       esac
-}
-
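To illustrate the stgit branch above: the Perl filter promotes the first line that is
not Author/From/Date to the Subject and rewrites Author to From, so a hypothetical
StGIT patch header such as

        Author: A U Thor <author@example.com>
        Date: 2018-03-30

        Teach frotz to xyzzy

        Body of the message follows here.

comes out as an mbox-ready message:

        From: A U Thor <author@example.com>
        Date: 2018-03-30
        Subject: Teach frotz to xyzzy

        Body of the message follows here.

The hg branch does the same for '# User' lines and additionally converts the
'# Date <unixtime> <tzoffset>' pair; for example a stored offset of -7200 seconds
becomes the zone string +0200, since (0 - (-7200)) / 36 = 200 and sprintf "%+05d"
pads it to +0200.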
-prec=4
-dotest="$GIT_DIR/rebase-apply"
-sign= utf8=t keep= keepcr= skip= interactive= resolved= rebasing= abort=
-messageid= resolvemsg= resume= scissors= no_inbody_headers=
-git_apply_opt=
-committer_date_is_author_date=
-ignore_date=
-allow_rerere_autoupdate=
-gpg_sign_opt=
-threeway=
-
-if test "$(git config --bool --get am.messageid)" = true
-then
-    messageid=t
-fi
-
-if test "$(git config --bool --get am.keepcr)" = true
-then
-    keepcr=t
-fi
-
-while test $# != 0
-do
-       case "$1" in
-       -i|--interactive)
-               interactive=t ;;
-       -b|--binary)
-               gettextln >&2 "The -b/--binary option has been a no-op for a long time, and
-it will be removed. Please do not use it anymore."
-               ;;
-       -3|--3way)
-               threeway=t ;;
-       -s|--signoff)
-               sign=t ;;
-       -u|--utf8)
-               utf8=t ;; # this is now default
-       --no-utf8)
-               utf8= ;;
-       -m|--message-id)
-               messageid=t ;;
-       --no-message-id)
-               messageid=f ;;
-       -k|--keep)
-               keep=t ;;
-       --keep-non-patch)
-               keep=b ;;
-       -c|--scissors)
-               scissors=t ;;
-       --no-scissors)
-               scissors=f ;;
-       -r|--resolved|--continue)
-               resolved=t ;;
-       --skip)
-               skip=t ;;
-       --abort)
-               abort=t ;;
-       --rebasing)
-               rebasing=t threeway=t ;;
-       --resolvemsg=*)
-               resolvemsg="${1#--resolvemsg=}" ;;
-       --whitespace=*|--directory=*|--exclude=*|--include=*)
-               git_apply_opt="$git_apply_opt $(sq "$1")" ;;
-       -C*|-p*)
-               git_apply_opt="$git_apply_opt $(sq "$1")" ;;
-       --patch-format=*)
-               patch_format="${1#--patch-format=}" ;;
-       --reject|--ignore-whitespace|--ignore-space-change)
-               git_apply_opt="$git_apply_opt $1" ;;
-       --committer-date-is-author-date)
-               committer_date_is_author_date=t ;;
-       --ignore-date)
-               ignore_date=t ;;
-       --rerere-autoupdate|--no-rerere-autoupdate)
-               allow_rerere_autoupdate="$1" ;;
-       -q|--quiet)
-               GIT_QUIET=t ;;
-       --keep-cr)
-               keepcr=t ;;
-       --no-keep-cr)
-               keepcr=f ;;
-       --gpg-sign)
-               gpg_sign_opt=-S ;;
-       --gpg-sign=*)
-               gpg_sign_opt="-S${1#--gpg-sign=}" ;;
-       --)
-               shift; break ;;
-       *)
-               usage ;;
-       esac
-       shift
-done
-
-# If the dotest directory exists, but we have finished applying all the
-# patches in them, clear it out.
-if test -d "$dotest" &&
-   test -f "$dotest/last" &&
-   test -f "$dotest/next" &&
-   last=$(cat "$dotest/last") &&
-   next=$(cat "$dotest/next") &&
-   test $# != 0 &&
-   test "$next" -gt "$last"
-then
-   rm -fr "$dotest"
-fi
-
-if test -d "$dotest" && test -f "$dotest/last" && test -f "$dotest/next"
-then
-       case "$#,$skip$resolved$abort" in
-       0,*t*)
-               # Explicit resume command and we do not have a file, so
-               # we are happy.
-               : ;;
-       0,)
-               # No file input and no resume parameters; catch the
-               # user error of feeding us a patch from standard input
-               # when $dotest already exists.  This is somewhat
-               # unreliable -- stdin could be /dev/null for example
-               # and the caller did not intend to feed us a patch but
-               # wanted to continue unattended.
-               test -t 0
-               ;;
-       *)
-               false
-               ;;
-       esac ||
-       die "$(eval_gettext "previous rebase directory \$dotest still exists but mbox given.")"
-       resume=yes
-
-       case "$skip,$abort" in
-       t,t)
-               die "$(gettext "Please make up your mind. --skip or --abort?")"
-               ;;
-       t,)
-               git rerere clear
-               head_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree) &&
-               git read-tree --reset -u $head_tree $head_tree &&
-               index_tree=$(git write-tree) &&
-               git read-tree -m -u $index_tree $head_tree
-               git read-tree -m $head_tree
-               ;;
-       ,t)
-               if test -f "$dotest/rebasing"
-               then
-                       exec git rebase --abort
-               fi
-               git rerere clear
-               if safe_to_abort
-               then
-                       head_tree=$(git rev-parse --verify -q HEAD || echo $empty_tree) &&
-                       git read-tree --reset -u $head_tree $head_tree &&
-                       index_tree=$(git write-tree) &&
-                       orig_head=$(git rev-parse --verify -q ORIG_HEAD || echo $empty_tree) &&
-                       git read-tree -m -u $index_tree $orig_head
-                       if git rev-parse --verify -q ORIG_HEAD >/dev/null 2>&1
-                       then
-                               git reset ORIG_HEAD
-                       else
-                               git read-tree $empty_tree
-                               curr_branch=$(git symbolic-ref HEAD 2>/dev/null) &&
-                               git update-ref -d $curr_branch
-                       fi
-               fi
-               rm -fr "$dotest"
-               exit ;;
-       esac
-       rm -f "$dotest/dirtyindex"
-else
-       # Possible stray $dotest directory in the independent-run
-       # case; in the --rebasing case, it is up to the caller
-       # (git-rebase--am) to take care of stray directories.
-       if test -d "$dotest" && test -z "$rebasing"
-       then
-               case "$skip,$resolved,$abort" in
-               ,,t)
-                       rm -fr "$dotest"
-                       exit 0
-                       ;;
-               *)
-                       die "$(eval_gettext "Stray \$dotest directory found.
-Use \"git am --abort\" to remove it.")"
-                       ;;
-               esac
-       fi
-
-       # Make sure we are not given --skip, --continue, or --abort
-       test "$skip$resolved$abort" = "" ||
-               die "$(gettext "Resolve operation not in progress, we are not resuming.")"
-
-       # Start afresh.
-       mkdir -p "$dotest" || exit
-
-       if test -n "$prefix" && test $# != 0
-       then
-               first=t
-               for arg
-               do
-                       test -n "$first" && {
-                               set x
-                               first=
-                       }
-                       if is_absolute_path "$arg"
-                       then
-                               set "$@" "$arg"
-                       else
-                               set "$@" "$prefix$arg"
-                       fi
-               done
-               shift
-       fi
-
-       check_patch_format "$@"
-
-       split_patches "$@"
-
-       # -i can and must be given when resuming; everything
-       # else is kept
-       echo " $git_apply_opt" >"$dotest/apply-opt"
-       echo "$threeway" >"$dotest/threeway"
-       echo "$sign" >"$dotest/sign"
-       echo "$utf8" >"$dotest/utf8"
-       echo "$keep" >"$dotest/keep"
-       echo "$messageid" >"$dotest/messageid"
-       echo "$scissors" >"$dotest/scissors"
-       echo "$no_inbody_headers" >"$dotest/no_inbody_headers"
-       echo "$GIT_QUIET" >"$dotest/quiet"
-       echo 1 >"$dotest/next"
-       if test -n "$rebasing"
-       then
-               : >"$dotest/rebasing"
-       else
-               : >"$dotest/applying"
-               if test -n "$HAS_HEAD"
-               then
-                       git update-ref ORIG_HEAD HEAD
-               else
-                       git update-ref -d ORIG_HEAD >/dev/null 2>&1
-               fi
-       fi
-fi
-
-git update-index -q --refresh
-
-case "$resolved" in
-'')
-       case "$HAS_HEAD" in
-       '')
-               files=$(git ls-files) ;;
-       ?*)
-               files=$(git diff-index --cached --name-only HEAD --) ;;
-       esac || exit
-       if test "$files"
-       then
-               test -n "$HAS_HEAD" && : >"$dotest/dirtyindex"
-               die "$(eval_gettext "Dirty index: cannot apply patches (dirty: \$files)")"
-       fi
-esac
-
-# Now, decide what command line options we will give to the git
-# commands we invoke, based on the result of parsing command line
-# options and previous invocation state stored in $dotest/ files.
-
-if test "$(cat "$dotest/utf8")" = t
-then
-       utf8=-u
-else
-       utf8=-n
-fi
-keep=$(cat "$dotest/keep")
-case "$keep" in
-t)
-       keep=-k ;;
-b)
-       keep=-b ;;
-*)
-       keep= ;;
-esac
-case "$(cat "$dotest/messageid")" in
-t)
-       messageid=-m ;;
-f)
-       messageid= ;;
-esac
-case "$(cat "$dotest/scissors")" in
-t)
-       scissors=--scissors ;;
-f)
-       scissors=--no-scissors ;;
-esac
-if test "$(cat "$dotest/no_inbody_headers")" = t
-then
-       no_inbody_headers=--no-inbody-headers
-else
-       no_inbody_headers=
-fi
-if test "$(cat "$dotest/quiet")" = t
-then
-       GIT_QUIET=t
-fi
-if test "$(cat "$dotest/threeway")" = t
-then
-       threeway=t
-fi
-git_apply_opt=$(cat "$dotest/apply-opt")
-if test "$(cat "$dotest/sign")" = t
-then
-       SIGNOFF=$(git var GIT_COMMITTER_IDENT | sed -e '
-                       s/>.*/>/
-                       s/^/Signed-off-by: /'
-               )
-else
-       SIGNOFF=
-fi
-
-last=$(cat "$dotest/last")
-this=$(cat "$dotest/next")
-if test "$skip" = t
-then
-       this=$(expr "$this" + 1)
-       resume=
-fi
-
-while test "$this" -le "$last"
-do
-       msgnum=$(printf "%0${prec}d" $this)
-       next=$(expr "$this" + 1)
-       test -f "$dotest/$msgnum" || {
-               resume=
-               go_next
-               continue
-       }
-
-       # If we are not resuming, parse and extract the patch information
-       # into separate files:
-       #  - info records the authorship and title
-       #  - msg is the rest of commit log message
-       #  - patch is the patch body.
-       #
-       # When we are resuming, these files are either already prepared
-       # by the user, or the user can tell us to do so with the --continue flag.
-       case "$resume" in
-       '')
-               if test -f "$dotest/rebasing"
-               then
-                       commit=$(sed -e 's/^From \([0-9a-f]*\) .*/\1/' \
-                               -e q "$dotest/$msgnum") &&
-                       test "$(git cat-file -t "$commit")" = commit ||
-                               stop_here $this
-                       git cat-file commit "$commit" |
-                       sed -e '1,/^$/d' >"$dotest/msg-clean"
-                       echo "$commit" >"$dotest/original-commit"
-                       get_author_ident_from_commit "$commit" >"$dotest/author-script"
-                       git diff-tree --root --binary --full-index "$commit" >"$dotest/patch"
-               else
-                       git mailinfo $keep $no_inbody_headers $messageid $scissors $utf8 "$dotest/msg" "$dotest/patch" \
-                               <"$dotest/$msgnum" >"$dotest/info" ||
-                               stop_here $this
-
-                       # skip pine's internal folder data
-                       sane_grep '^Author: Mail System Internal Data$' \
-                               <"$dotest"/info >/dev/null &&
-                               go_next && continue
-
-                       test -s "$dotest/patch" || {
-                               eval_gettextln "Patch is empty.  Was it split wrong?
-If you would prefer to skip this patch, instead run \"\$cmdline --skip\".
-To restore the original branch and stop patching run \"\$cmdline --abort\"."
-                               stop_here $this
-                       }
-                       rm -f "$dotest/original-commit" "$dotest/author-script"
-                       {
-                               sed -n '/^Subject/ s/Subject: //p' "$dotest/info"
-                               echo
-                               cat "$dotest/msg"
-                       } |
-                       git stripspace > "$dotest/msg-clean"
-               fi
-               ;;
-       esac
-
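The non-rebasing branch above leans entirely on git mailinfo, which can be run by hand
to see what git am extracts from a single message (file names here are hypothetical):

        git mailinfo msg patch <0001-some-fix.patch >info
        cat info    # Author:, Email:, Date: and Subject: lines
        cat msg     # the remaining commit message body
        cat patch   # the diff that will be fed to git apply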
-       if test -f "$dotest/author-script"
-       then
-               eval $(cat "$dotest/author-script")
-       else
-               GIT_AUTHOR_NAME="$(sed -n '/^Author/ s/Author: //p' "$dotest/info")"
-               GIT_AUTHOR_EMAIL="$(sed -n '/^Email/ s/Email: //p' "$dotest/info")"
-               GIT_AUTHOR_DATE="$(sed -n '/^Date/ s/Date: //p' "$dotest/info")"
-       fi
-
-       if test -z "$GIT_AUTHOR_EMAIL"
-       then
-               gettextln "Patch does not have a valid e-mail address."
-               stop_here $this
-       fi
-
-       export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_AUTHOR_DATE
-
-       case "$resume" in
-       '')
-           if test '' != "$SIGNOFF"
-           then
-               LAST_SIGNED_OFF_BY=$(
-                   sed -ne '/^Signed-off-by: /p' \
-                   "$dotest/msg-clean" |
-                   sed -ne '$p'
-               )
-               ADD_SIGNOFF=$(
-                   test "$LAST_SIGNED_OFF_BY" = "$SIGNOFF" || {
-                   test '' = "$LAST_SIGNED_OFF_BY" && echo
-                   echo "$SIGNOFF"
-               })
-           else
-               ADD_SIGNOFF=
-           fi
-           {
-               if test -s "$dotest/msg-clean"
-               then
-                       cat "$dotest/msg-clean"
-               fi
-               if test '' != "$ADD_SIGNOFF"
-               then
-                       echo "$ADD_SIGNOFF"
-               fi
-           } >"$dotest/final-commit"
-           ;;
-       *)
-               case "$resolved$interactive" in
-               tt)
-                       # This is used only for interactive view option.
-                       git diff-index -p --cached HEAD -- >"$dotest/patch"
-                       ;;
-               esac
-       esac
-
-       resume=
-       if test "$interactive" = t
-       then
-           test -t 0 ||
-           die "$(gettext "cannot be interactive without stdin connected to a terminal.")"
-           action=again
-           while test "$action" = again
-           do
-               gettextln "Commit Body is:"
-               echo "--------------------------"
-               cat "$dotest/final-commit"
-               echo "--------------------------"
-               # TRANSLATORS: Make sure to include [y], [n], [e], [v] and [a]
-               # in your translation. The program will only accept English
-               # input at this point.
-               gettext "Apply? [y]es/[n]o/[e]dit/[v]iew patch/[a]ccept all "
-               read reply
-               case "$reply" in
-               [yY]*) action=yes ;;
-               [aA]*) action=yes interactive= ;;
-               [nN]*) action=skip ;;
-               [eE]*) git_editor "$dotest/final-commit"
-                      action=again ;;
-               [vV]*) action=again
-                      git_pager "$dotest/patch" ;;
-               *)     action=again ;;
-               esac
-           done
-       else
-           action=yes
-       fi
-
-       if test $action = skip
-       then
-               go_next
-               continue
-       fi
-
-       hook="$(git rev-parse --git-path hooks/applypatch-msg)"
-       if test -x "$hook"
-       then
-               "$hook" "$dotest/final-commit" || stop_here $this
-       fi
-
-       if test -f "$dotest/final-commit"
-       then
-               FIRSTLINE=$(sed 1q "$dotest/final-commit")
-       else
-               FIRSTLINE=""
-       fi
-
-       say "$(eval_gettext "Applying: \$FIRSTLINE")"
-
-       case "$resolved" in
-       '')
-               # When we are allowed to fall back to 3-way later, don't give
-               # false errors during the initial attempt.
-               squelch=
-               if test "$threeway" = t
-               then
-                       squelch='>/dev/null 2>&1 '
-               fi
-               eval "git apply $squelch$git_apply_opt"' --index "$dotest/patch"'
-               apply_status=$?
-               ;;
-       t)
-               # Resolved means the user did all the hard work, and
-               # we do not have to do any patch application.  Just
-               # trust what the user has in the index file and the
-               # working tree.
-               resolved=
-               git diff-index --quiet --cached HEAD -- && {
-                       gettextln "No changes - did you forget to use 'git add'?
-If there is nothing left to stage, chances are that something else
-already introduced the same changes; you might want to skip this patch."
-                       stop_here_user_resolve $this
-               }
-               unmerged=$(git ls-files -u)
-               if test -n "$unmerged"
-               then
-                       gettextln "You still have unmerged paths in your index;
-did you forget to use 'git add'?"
-                       stop_here_user_resolve $this
-               fi
-               apply_status=0
-               git rerere
-               ;;
-       esac
-
-       if test $apply_status != 0 && test "$threeway" = t
-       then
-               if (fall_back_3way)
-               then
-                   # Applying the patch to an earlier tree and merging the
-                   # result may have produced the same tree as ours.
-                   git diff-index --quiet --cached HEAD -- && {
-                       say "$(gettext "No changes -- Patch already applied.")"
-                       go_next
-                       continue
-                   }
-                   # clear apply_status -- we have successfully merged.
-                   apply_status=0
-               fi
-       fi
-       if test $apply_status != 0
-       then
-               eval_gettextln 'Patch failed at $msgnum $FIRSTLINE'
-               if test "$(git config --bool advice.amworkdir)" != false
-               then
-                       eval_gettextln 'The copy of the patch that failed is found in:
-   $dotest/patch'
-               fi
-               stop_here_user_resolve $this
-       fi
-
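As the check above shows, the hint pointing at the saved copy of the failed patch
honours the advice.amworkdir setting and can be silenced with:

        git config advice.amworkdir false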
-       hook="$(git rev-parse --git-path hooks/pre-applypatch)"
-       if test -x "$hook"
-       then
-               "$hook" || stop_here $this
-       fi
-
-       tree=$(git write-tree) &&
-       commit=$(
-               if test -n "$ignore_date"
-               then
-                       GIT_AUTHOR_DATE=
-               fi
-               parent=$(git rev-parse --verify -q HEAD) ||
-               say >&2 "$(gettext "applying to an empty history")"
-
-               if test -n "$committer_date_is_author_date"
-               then
-                       GIT_COMMITTER_DATE="$GIT_AUTHOR_DATE"
-                       export GIT_COMMITTER_DATE
-               fi &&
-               git commit-tree ${parent:+-p} $parent ${gpg_sign_opt:+"$gpg_sign_opt"} $tree  \
-                       <"$dotest/final-commit"
-       ) &&
-       git update-ref -m "$GIT_REFLOG_ACTION: $FIRSTLINE" HEAD $commit $parent ||
-       stop_here $this
-
-       if test -f "$dotest/original-commit"; then
-               echo "$(cat "$dotest/original-commit") $commit" >> "$dotest/rewritten"
-       fi
-
-       hook="$(git rev-parse --git-path hooks/post-applypatch)"
-       test -x "$hook" && "$hook"
-
-       go_next
-done
-
-if test -s "$dotest"/rewritten; then
-    git notes copy --for-rewrite=rebase < "$dotest"/rewritten
-    hook="$(git rev-parse --git-path hooks/post-rewrite)"
-    if test -x "$hook"; then
-       "$hook" rebase < "$dotest"/rewritten
-    fi
-fi
-
-# If am was called with --rebasing (from git-rebase--am), it's up to
-# the caller to take care of housekeeping.
-if ! test -f "$dotest/rebasing"
-then
-       rm -fr "$dotest"
-       git gc --auto
-fi
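The commit step near the end of the loop above is plain plumbing; stripped of the
date and GPG handling it amounts to the following sketch, assuming HEAD already
exists (the script drops -p for an empty history) and using a hypothetical msg file:

        tree=$(git write-tree) &&
        commit=$(git commit-tree -p HEAD "$tree" <msg) &&
        git update-ref -m "am: apply patch" HEAD "$commit"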
diff --git a/contrib/examples/git-checkout.sh b/contrib/examples/git-checkout.sh
deleted file mode 100755 (executable)
index 683cae7..0000000
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/bin/sh
-
-OPTIONS_KEEPDASHDASH=t
-OPTIONS_SPEC="\
-git-checkout [options] [<branch>] [<paths>...]
---
-b=          create a new branch started at <branch>
-l           create the new branch's reflog
-track       arrange that the new branch tracks the remote branch
-f           proceed even if the index or working tree is not HEAD
-m           merge local modifications into the new branch
-q,quiet     be quiet
-"
-SUBDIRECTORY_OK=Sometimes
-. git-sh-setup
-require_work_tree
-
-old_name=HEAD
-old=$(git rev-parse --verify $old_name 2>/dev/null)
-oldbranch=$(git symbolic-ref $old_name 2>/dev/null)
-new=
-new_name=
-force=
-branch=
-track=
-newbranch=
-newbranch_log=
-merge=
-quiet=
-v=-v
-LF='
-'
-
-while test $# != 0; do
-       case "$1" in
-       -b)
-               shift
-               newbranch="$1"
-               [ -z "$newbranch" ] &&
-                       die "git checkout: -b needs a branch name"
-               git show-ref --verify --quiet -- "refs/heads/$newbranch" &&
-                       die "git checkout: branch $newbranch already exists"
-               git check-ref-format "heads/$newbranch" ||
-                       die "git checkout: we do not like '$newbranch' as a branch name."
-               ;;
-       -l)
-               newbranch_log=-l
-               ;;
-       --track|--no-track)
-               track="$1"
-               ;;
-       -f)
-               force=1
-               ;;
-       -m)
-               merge=1
-               ;;
-       -q|--quiet)
-               quiet=1
-               v=
-               ;;
-       --)
-               shift
-               break
-               ;;
-       *)
-               usage
-               ;;
-       esac
-       shift
-done
-
-arg="$1"
-rev=$(git rev-parse --verify "$arg" 2>/dev/null)
-if rev=$(git rev-parse --verify "$rev^0" 2>/dev/null)
-then
-       [ -z "$rev" ] && die "unknown flag $arg"
-       new_name="$arg"
-       if git show-ref --verify --quiet -- "refs/heads/$arg"
-       then
-               rev=$(git rev-parse --verify "refs/heads/$arg^0")
-               branch="$arg"
-       fi
-       new="$rev"
-       shift
-elif rev=$(git rev-parse --verify "$rev^{tree}" 2>/dev/null)
-then
-       # checking out selected paths from a tree-ish.
-       new="$rev"
-       new_name="$rev^{tree}"
-       shift
-fi
-[ "$1" = "--" ] && shift
-
-case "$newbranch,$track" in
-,--*)
-       die "git checkout: --track and --no-track require -b"
-esac
-
-case "$force$merge" in
-11)
-       die "git checkout: -f and -m are incompatible"
-esac
-
-# The behaviour of the command with and without explicit path
-# parameters is quite different.
-#
-# Without paths, we are checking out everything in the work tree,
-# possibly switching branches.  This is the traditional behaviour.
-#
-# With paths, we are _never_ switching branch, but checking out
-# the named paths from either index (when no rev is given),
-# or the named tree-ish (when rev is given).
-
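For example (branch and path names are hypothetical), the two modes described above
correspond to:

        git checkout topic            # no paths: switch the work tree and HEAD to 'topic'
        git checkout HEAD~2 -- foo.c  # paths given: restore foo.c from that tree-ish, HEAD unchanged
        git checkout -- foo.c         # paths, no rev: restore foo.c from the index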
-if test "$#" -ge 1
-then
-       hint=
-       if test "$#" -eq 1
-       then
-               hint="
-Did you intend to checkout '$@' which cannot be resolved as a commit?"
-       fi
-       if test '' != "$newbranch$force$merge"
-       then
-               die "git checkout: updating paths is incompatible with switching branches/forcing$hint"
-       fi
-       if test '' != "$new"
-       then
-               # from a specific tree-ish; note that this is for
-               # rescuing paths and is never meant to remove what
-               # is not in the named tree-ish.
-               git ls-tree --full-name -r "$new" "$@" |
-               git update-index --index-info || exit $?
-       fi
-
-       # Make sure the request is about existing paths.
-       git ls-files --full-name --error-unmatch -- "$@" >/dev/null || exit
-       git ls-files --full-name -- "$@" |
-               (cd_to_toplevel && git checkout-index -f -u --stdin)
-
-       # Run a post-checkout hook -- the HEAD does not change so the
-       # current HEAD is passed in for both args
-       if test -x "$GIT_DIR"/hooks/post-checkout; then
-           "$GIT_DIR"/hooks/post-checkout $old $old 0
-       fi
-
-       exit $?
-else
-       # Make sure we did not fall back on $arg^{tree} codepath
-       # since we are not checking out from an arbitrary tree-ish,
-       # but switching branches.
-       if test '' != "$new"
-       then
-               git rev-parse --verify "$new^{commit}" >/dev/null 2>&1 ||
-               die "Cannot switch branch to a non-commit."
-       fi
-fi
-
-# We are switching branches and checking out trees, so
-# we *NEED* to be at the toplevel.
-cd_to_toplevel
-
-[ -z "$new" ] && new=$old && new_name="$old_name"
-
-# If we don't have an existing branch that we're switching to,
-# and we don't have a new branch name for the target we
-# are switching to, then we are detaching our HEAD from any
-# branch.  However, if "git checkout HEAD" detaches the HEAD
-# from the current branch, even though that may be logically
-# correct, it feels somewhat funny.  More importantly, we do not
-# want "git checkout" or "git checkout -f" to detach HEAD.
-
-detached=
-detach_warn=
-
-describe_detached_head () {
-       test -n "$quiet" || {
-               printf >&2 "$1 "
-               GIT_PAGER= git log >&2 -1 --pretty=oneline --abbrev-commit "$2" --
-       }
-}
-
-if test -z "$branch$newbranch" && test "$new_name" != "$old_name"
-then
-       detached="$new"
-       if test -n "$oldbranch" && test -z "$quiet"
-       then
-               detach_warn="Note: moving to \"$new_name\" which isn't a local branch
-If you want to create a new branch from this checkout, you may do so
-(now or later) by using -b with the checkout command again. Example:
-  git checkout -b <new_branch_name>"
-       fi
-elif test -z "$oldbranch" && test "$new" != "$old"
-then
-       describe_detached_head 'Previous HEAD position was' "$old"
-fi
-
-if [ "X$old" = X ]
-then
-       if test -z "$quiet"
-       then
-               echo >&2 "warning: You appear to be on a branch yet to be born."
-               echo >&2 "warning: Forcing checkout of $new_name."
-       fi
-       force=1
-fi
-
-if [ "$force" ]
-then
-    git read-tree $v --reset -u $new
-else
-    git update-index --refresh >/dev/null
-    git read-tree $v -m -u --exclude-per-directory=.gitignore $old $new || (
-       case "$merge,$v" in
-       ,*)
-               exit 1 ;;
-       1,)
-               ;; # quiet
-       *)
-               echo >&2 "Falling back to 3-way merge..." ;;
-       esac
-
-       # Match the index to the working tree, and do a three-way.
-       git diff-files --name-only | git update-index --remove --stdin &&
-       work=$(git write-tree) &&
-       git read-tree $v --reset -u $new || exit
-
-       eval GITHEAD_$new='${new_name:-${branch:-$new}}' &&
-       eval GITHEAD_$work=local &&
-       export GITHEAD_$new GITHEAD_$work &&
-       git merge-recursive $old -- $new $work
-
-       # Do not register the cleanly merged paths in the index yet.
-       # This is not a real merge before committing, but just carrying
-       # the working tree changes along.
-       unmerged=$(git ls-files -u)
-       git read-tree $v --reset $new
-       case "$unmerged" in
-       '')     ;;
-       *)
-               (
-                       z40=0000000000000000000000000000000000000000
-                       echo "$unmerged" |
-                       sed -e 's/^[0-7]* [0-9a-f]* /'"0 $z40 /"
-                       echo "$unmerged"
-               ) | git update-index --index-info
-               ;;
-       esac
-       exit 0
-    )
-    saved_err=$?
-    if test "$saved_err" = 0 && test -z "$quiet"
-    then
-       git diff-index --name-status "$new"
-    fi
-    (exit $saved_err)
-fi
-
-#
-# Switch the HEAD pointer to the new branch if we
-# checked out a branch head, and remove any potential
-# old MERGE_HEADs (subsequent commits will clearly not
-# be based on them, since we re-set the index)
-#
-if [ "$?" -eq 0 ]; then
-       if [ "$newbranch" ]; then
-               git branch $track $newbranch_log "$newbranch" "$new_name" || exit
-               branch="$newbranch"
-       fi
-       if test -n "$branch"
-       then
-               old_branch_name=$(expr "z$oldbranch" : 'zrefs/heads/\(.*\)')
-               GIT_DIR="$GIT_DIR" git symbolic-ref -m "checkout: moving from ${old_branch_name:-$old} to $branch" HEAD "refs/heads/$branch"
-               if test -n "$quiet"
-               then
-                       true    # nothing
-               elif test "refs/heads/$branch" = "$oldbranch"
-               then
-                       echo >&2 "Already on branch \"$branch\""
-               else
-                       echo >&2 "Switched to${newbranch:+ a new} branch \"$branch\""
-               fi
-       elif test -n "$detached"
-       then
-               old_branch_name=$(expr "z$oldbranch" : 'zrefs/heads/\(.*\)')
-               git update-ref --no-deref -m "checkout: moving from ${old_branch_name:-$old} to $arg" HEAD "$detached" ||
-                       die "Cannot detach HEAD"
-               if test -n "$detach_warn"
-               then
-                       echo >&2 "$detach_warn"
-               fi
-               describe_detached_head 'HEAD is now at' HEAD
-       fi
-       rm -f "$GIT_DIR/MERGE_HEAD"
-else
-       exit 1
-fi
-
-# Run a post-checkout hook
-if test -x "$GIT_DIR"/hooks/post-checkout; then
-       "$GIT_DIR"/hooks/post-checkout $old $new 1
-fi
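Both call sites above pass the previous HEAD, the new HEAD and a flag (1 for a branch
switch, 0 for a file checkout) to the hook, so a minimal post-checkout hook sketch
would be:

        #!/bin/sh
        # .git/hooks/post-checkout
        old=$1 new=$2 branch_switch=$3
        test "$branch_switch" = 1 && echo >&2 "HEAD moved from $old to $new"
        exit 0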
diff --git a/contrib/examples/git-clean.sh b/contrib/examples/git-clean.sh
deleted file mode 100755 (executable)
index 01c95e9..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005-2006 Pavel Roskin
-#
-
-OPTIONS_KEEPDASHDASH=
-OPTIONS_SPEC="\
-git-clean [options] <paths>...
-
-Clean untracked files from the working directory
-
-When optional <paths>... arguments are given, the paths
-affected are further limited to those that match them.
---
-d remove directories as well
-f override clean.requireForce and clean anyway
-n don't remove anything, just show what would be done
-q be quiet, only report errors
-x remove ignored files as well
-X remove only ignored files"
-
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-require_work_tree
-
-ignored=
-ignoredonly=
-cleandir=
-rmf="rm -f --"
-rmrf="rm -rf --"
-rm_refuse="echo Not removing"
-echo1="echo"
-
-disabled=$(git config --bool clean.requireForce)
-
-while test $# != 0
-do
-       case "$1" in
-       -d)
-               cleandir=1
-               ;;
-       -f)
-               disabled=false
-               ;;
-       -n)
-               disabled=false
-               rmf="echo Would remove"
-               rmrf="echo Would remove"
-               rm_refuse="echo Would not remove"
-               echo1=":"
-               ;;
-       -q)
-               echo1=":"
-               ;;
-       -x)
-               ignored=1
-               ;;
-       -X)
-               ignoredonly=1
-               ;;
-       --)
-               shift
-               break
-               ;;
-       *)
-               usage # should not happen
-               ;;
-       esac
-       shift
-done
-
-# requireForce used to default to false but now it defaults to true.
-# IOW, lack of explicit "clean.requireForce = false" is taken as
-# "clean.requireForce = true".
-case "$disabled" in
-"")
-       die "clean.requireForce not set and -n or -f not given; refusing to clean"
-       ;;
-"true")
-       die "clean.requireForce set and -n or -f not given; refusing to clean"
-       ;;
-esac
-
-if [ "$ignored,$ignoredonly" = "1,1" ]; then
-       die "-x and -X cannot be set together"
-fi
-
-if [ -z "$ignored" ]; then
-       excl="--exclude-per-directory=.gitignore"
-       excl_info= excludes_file=
-       if [ -f "$GIT_DIR/info/exclude" ]; then
-               excl_info="--exclude-from=$GIT_DIR/info/exclude"
-       fi
-       if cfg_excl=$(git config core.excludesfile) && test -f "$cfg_excl"
-       then
-               excludes_file="--exclude-from=$cfg_excl"
-       fi
-       if [ "$ignoredonly" ]; then
-               excl="$excl --ignored"
-       fi
-fi
-
-git ls-files --others --directory \
-       $excl ${excl_info:+"$excl_info"} ${excludes_file:+"$excludes_file"} \
-       -- "$@" |
-while read -r file; do
-       if [ -d "$file" -a ! -L "$file" ]; then
-               if [ -z "$cleandir" ]; then
-                       $rm_refuse "$file"
-                       continue
-               fi
-               $echo1 "Removing $file"
-               $rmrf "$file"
-       else
-               $echo1 "Removing $file"
-               $rmf "$file"
-       fi
-done
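The requireForce check above mirrors the built-in command; with the option spec at
the top of the script in mind, typical invocations look like:

        git config clean.requireForce false  # allow cleaning without -f or -n
        git clean -n -d                      # dry run, would also remove untracked directories
        git clean -f -x                      # really remove, including ignored files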
diff --git a/contrib/examples/git-clone.sh b/contrib/examples/git-clone.sh
deleted file mode 100755 (executable)
index 08cf246..0000000
+++ /dev/null
@@ -1,525 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005, Linus Torvalds
-# Copyright (c) 2005, Junio C Hamano
-#
-# Clone a repository into a different directory that does not yet exist.
-
-# See git-sh-setup why.
-unset CDPATH
-
-OPTIONS_SPEC="\
-git-clone [options] [--] <repo> [<dir>]
---
-n,no-checkout        don't create a checkout
-bare                 create a bare repository
-naked                create a bare repository
-l,local              to clone from a local repository
-no-hardlinks         don't use local hardlinks, always copy
-s,shared             setup as a shared repository
-template=            path to the template directory
-q,quiet              be quiet
-reference=           reference repository
-o,origin=            use <name> instead of 'origin' to track upstream
-u,upload-pack=       path to git-upload-pack on the remote
-depth=               create a shallow clone of that depth
-
-use-separate-remote  compatibility, do not use
-no-separate-remote   compatibility, do not use"
-
-die() {
-       echo >&2 "$@"
-       exit 1
-}
-
-usage() {
-       exec "$0" -h
-}
-
-eval "$(echo "$OPTIONS_SPEC" | git rev-parse --parseopt -- "$@" || echo exit $?)"
-
-get_repo_base() {
-       (
-               cd "$(/bin/pwd)" &&
-               cd "$1" || cd "$1.git" &&
-               {
-                       cd .git
-                       pwd
-               }
-       ) 2>/dev/null
-}
-
-if [ -n "$GIT_SSL_NO_VERIFY" -o \
-       "$(git config --bool http.sslVerify)" = false ]; then
-    curl_extra_args="-k"
-fi
-
-http_fetch () {
-       # $1 = Remote, $2 = Local
-       curl -nsfL $curl_extra_args "$1" >"$2"
-       curl_exit_status=$?
-       case $curl_exit_status in
-       126|127) exit ;;
-       *)       return $curl_exit_status ;;
-       esac
-}
-
-clone_dumb_http () {
-       # $1 - remote, $2 - local
-       cd "$2" &&
-       clone_tmp="$GIT_DIR/clone-tmp" &&
-       mkdir -p "$clone_tmp" || exit 1
-       if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "$(git config --bool http.noEPSV)" = true ]; then
-               curl_extra_args="${curl_extra_args} --disable-epsv"
-       fi
-       http_fetch "$1/info/refs" "$clone_tmp/refs" ||
-               die "Cannot get remote repository information.
-Perhaps git-update-server-info needs to be run there?"
-       test "z$quiet" = z && v=-v || v=
-       while read sha1 refname
-       do
-               name=$(expr "z$refname" : 'zrefs/\(.*\)') &&
-               case "$name" in
-               *^*)    continue;;
-               esac
-               case "$bare,$name" in
-               yes,* | ,heads/* | ,tags/*) ;;
-               *)      continue ;;
-               esac
-               if test -n "$use_separate_remote" &&
-                  branch_name=$(expr "z$name" : 'zheads/\(.*\)')
-               then
-                       tname="remotes/$origin/$branch_name"
-               else
-                       tname=$name
-               fi
-               git-http-fetch $v -a -w "$tname" "$sha1" "$1" || exit 1
-       done <"$clone_tmp/refs"
-       rm -fr "$clone_tmp"
-       http_fetch "$1/HEAD" "$GIT_DIR/REMOTE_HEAD" ||
-       rm -f "$GIT_DIR/REMOTE_HEAD"
-       if test -f "$GIT_DIR/REMOTE_HEAD"; then
-               head_sha1=$(cat "$GIT_DIR/REMOTE_HEAD")
-               case "$head_sha1" in
-               'ref: refs/'*)
-                       ;;
-               *)
-                       git-http-fetch $v -a "$head_sha1" "$1" ||
-                       rm -f "$GIT_DIR/REMOTE_HEAD"
-                       ;;
-               esac
-       fi
-}
-
-quiet=
-local=no
-use_local_hardlink=yes
-local_shared=no
-unset template
-no_checkout=
-upload_pack=
-bare=
-reference=
-origin=
-origin_override=
-use_separate_remote=t
-depth=
-no_progress=
-local_explicitly_asked_for=
-test -t 1 || no_progress=--no-progress
-
-while test $# != 0
-do
-       case "$1" in
-       -n|--no-checkout)
-               no_checkout=yes ;;
-       --naked|--bare)
-               bare=yes ;;
-       -l|--local)
-               local_explicitly_asked_for=yes
-               use_local_hardlink=yes
-               ;;
-       --no-hardlinks)
-               use_local_hardlink=no ;;
-       -s|--shared)
-               local_shared=yes ;;
-       --template)
-               shift; template="--template=$1" ;;
-       -q|--quiet)
-               quiet=-q ;;
-       --use-separate-remote|--no-separate-remote)
-               die "clones are always made with separate-remote layout" ;;
-       --reference)
-               shift; reference="$1" ;;
-       -o|--origin)
-               shift;
-               case "$1" in
-               '')
-                   usage ;;
-               */*)
-                   die "'$1' is not suitable for an origin name"
-               esac
-               git check-ref-format "heads/$1" ||
-                   die "'$1' is not suitable for a branch name"
-               test -z "$origin_override" ||
-                   die "Do not give more than one --origin options."
-               origin_override=yes
-               origin="$1"
-               ;;
-       -u|--upload-pack)
-               shift
-               upload_pack="--upload-pack=$1" ;;
-       --depth)
-               shift
-               depth="--depth=$1" ;;
-       --)
-               shift
-               break ;;
-       *)
-               usage ;;
-       esac
-       shift
-done
-
-repo="$1"
-test -n "$repo" ||
-    die 'you must specify a repository to clone.'
-
-# --bare implies --no-checkout and --no-separate-remote
-if test yes = "$bare"
-then
-       if test yes = "$origin_override"
-       then
-               die '--bare and --origin $origin options are incompatible.'
-       fi
-       no_checkout=yes
-       use_separate_remote=
-fi
-
-if test -z "$origin"
-then
-       origin=origin
-fi
-
-# Turn the source into an absolute path if
-# it is local
-if base=$(get_repo_base "$repo"); then
-       repo="$base"
-       if test -z "$depth"
-       then
-               local=yes
-       fi
-elif test -f "$repo"
-then
-       case "$repo" in /*) ;; *) repo="$PWD/$repo" ;; esac
-fi
-
-# Decide the directory name of the new repository
-if test -n "$2"
-then
-       dir="$2"
-       test $# = 2 || die "excess parameter to git-clone"
-else
-       # Derive one from the repository name
-       # Try using "humanish" part of source repo if user didn't specify one
-       if test -f "$repo"
-       then
-               # Cloning from a bundle
-               dir=$(echo "$repo" | sed -e 's|/*\.bundle$||' -e 's|.*/||g')
-       else
-               dir=$(echo "$repo" |
-                       sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
-       fi
-fi
-
-[ -e "$dir" ] && die "destination directory '$dir' already exists."
-[ yes = "$bare" ] && unset GIT_WORK_TREE
-[ -n "$GIT_WORK_TREE" ] && [ -e "$GIT_WORK_TREE" ] &&
-die "working tree '$GIT_WORK_TREE' already exists."
-D=
-W=
-cleanup() {
-       test -z "$D" && rm -rf "$dir"
-       test -z "$W" && test -n "$GIT_WORK_TREE" && rm -rf "$GIT_WORK_TREE"
-       cd ..
-       test -n "$D" && rm -rf "$D"
-       test -n "$W" && rm -rf "$W"
-       exit $err
-}
-trap 'err=$?; cleanup' 0
-mkdir -p "$dir" && D=$(cd "$dir" && pwd) || usage
-test -n "$GIT_WORK_TREE" && mkdir -p "$GIT_WORK_TREE" &&
-W=$(cd "$GIT_WORK_TREE" && pwd) && GIT_WORK_TREE="$W" && export GIT_WORK_TREE
-if test yes = "$bare" || test -n "$GIT_WORK_TREE"; then
-       GIT_DIR="$D"
-else
-       GIT_DIR="$D/.git"
-fi &&
-export GIT_DIR &&
-GIT_CONFIG="$GIT_DIR/config" git-init $quiet ${template+"$template"} || usage
-
-if test -n "$bare"
-then
-       GIT_CONFIG="$GIT_DIR/config" git config core.bare true
-fi
-
-if test -n "$reference"
-then
-       ref_git=
-       if test -d "$reference"
-       then
-               if test -d "$reference/.git/objects"
-               then
-                       ref_git="$reference/.git"
-               elif test -d "$reference/objects"
-               then
-                       ref_git="$reference"
-               fi
-       fi
-       if test -n "$ref_git"
-       then
-               ref_git=$(cd "$ref_git" && pwd)
-               echo "$ref_git/objects" >"$GIT_DIR/objects/info/alternates"
-               (
-                       GIT_DIR="$ref_git" git for-each-ref \
-                               --format='%(objectname) %(*objectname)'
-               ) |
-               while read a b
-               do
-                       test -z "$a" ||
-                       git update-ref "refs/reference-tmp/$a" "$a"
-                       test -z "$b" ||
-                       git update-ref "refs/reference-tmp/$b" "$b"
-               done
-       else
-               die "reference repository '$reference' is not a local directory."
-       fi
-fi
-
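The --reference handling above borrows objects through the alternates mechanism,
which is nothing more than a text file listing one object directory per line; for a
hypothetical reference repository it could be set up by hand as:

        echo /srv/git/reference.git/objects >>.git/objects/info/alternates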
-rm -f "$GIT_DIR/CLONE_HEAD"
-
-# We do local magic only when the user tells us to.
-case "$local" in
-yes)
-       ( cd "$repo/objects" ) ||
-               die "cannot chdir to local '$repo/objects'."
-
-       if test "$local_shared" = yes
-       then
-               mkdir -p "$GIT_DIR/objects/info"
-               echo "$repo/objects" >>"$GIT_DIR/objects/info/alternates"
-       else
-               cpio_quiet_flag=""
-               cpio --help 2>&1 | grep -- --quiet >/dev/null && \
-                       cpio_quiet_flag=--quiet
-               l= &&
-               if test "$use_local_hardlink" = yes
-               then
-                       # See if we can hardlink and drop "l" if not.
-                       sample_file=$(cd "$repo" && \
-                                     find objects -type f -print | sed -e 1q)
-                       # objects directory should not be empty because
-                       # we are cloning!
-                       test -f "$repo/$sample_file" ||
-                               die "fatal: cannot clone empty repository"
-                       if ln "$repo/$sample_file" "$GIT_DIR/objects/sample" 2>/dev/null
-                       then
-                               rm -f "$GIT_DIR/objects/sample"
-                               l=l
-                       elif test -n "$local_explicitly_asked_for"
-                       then
-                               echo >&2 "Warning: -l asked but cannot hardlink to $repo"
-                       fi
-               fi &&
-               cd "$repo" &&
-               # Create dirs using umask and permissions and destination
-               find objects -type d -print | (cd "$GIT_DIR" && xargs mkdir -p) &&
-               # Copy existing 0444 permissions on content
-               find objects ! -type d -print | cpio $cpio_quiet_flag -pumd$l "$GIT_DIR/" || \
-                       exit 1
-       fi
-       git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
-       ;;
-*)
-       case "$repo" in
-       rsync://*)
-               case "$depth" in
-               "") ;;
-               *) die "shallow over rsync not supported" ;;
-               esac
-               rsync $quiet -av --ignore-existing  \
-                       --exclude info "$repo/objects/" "$GIT_DIR/objects/" ||
-               exit
-               # Look at objects/info/alternates for rsync -- http will
-               # support it natively and git native ones will do it on the
-               # remote end.  Not having that file is not a crime.
-               rsync -q "$repo/objects/info/alternates" \
-                       "$GIT_DIR/TMP_ALT" 2>/dev/null ||
-                       rm -f "$GIT_DIR/TMP_ALT"
-               if test -f "$GIT_DIR/TMP_ALT"
-               then
-                   ( cd "$D" &&
-                     . git-parse-remote &&
-                     resolve_alternates "$repo" <"$GIT_DIR/TMP_ALT" ) |
-                   while read alt
-                   do
-                       case "$alt" in 'bad alternate: '*) die "$alt";; esac
-                       case "$quiet" in
-                       '')     echo >&2 "Getting alternate: $alt" ;;
-                       esac
-                       rsync $quiet -av --ignore-existing  \
-                           --exclude info "$alt" "$GIT_DIR/objects" || exit
-                   done
-                   rm -f "$GIT_DIR/TMP_ALT"
-               fi
-               git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
-               ;;
-       https://*|http://*|ftp://*)
-               case "$depth" in
-               "") ;;
-               *) die "shallow over http or ftp not supported" ;;
-               esac
-               if test -z "@@NO_CURL@@"
-               then
-                       clone_dumb_http "$repo" "$D"
-               else
-                       die "http transport not supported, rebuild Git with curl support"
-               fi
-               ;;
-       *)
-               if [ -f "$repo" ] ; then
-                       git bundle unbundle "$repo" > "$GIT_DIR/CLONE_HEAD" ||
-                       die "unbundle from '$repo' failed."
-               else
-                       case "$upload_pack" in
-                       '') git-fetch-pack --all -k $quiet $depth $no_progress "$repo";;
-                       *) git-fetch-pack --all -k \
-                               $quiet "$upload_pack" $depth $no_progress "$repo" ;;
-                       esac >"$GIT_DIR/CLONE_HEAD" ||
-                       die "fetch-pack from '$repo' failed."
-               fi
-               ;;
-       esac
-       ;;
-esac
-test -d "$GIT_DIR/refs/reference-tmp" && rm -fr "$GIT_DIR/refs/reference-tmp"
-
-if test -f "$GIT_DIR/CLONE_HEAD"
-then
-       # Read git-fetch-pack -k output and store the remote branches.
-       if [ -n "$use_separate_remote" ]
-       then
-               branch_top="remotes/$origin"
-       else
-               branch_top="heads"
-       fi
-       tag_top="tags"
-       while read sha1 name
-       do
-               case "$name" in
-               *'^{}')
-                       continue ;;
-               HEAD)
-                       destname="REMOTE_HEAD" ;;
-               refs/heads/*)
-                       destname="refs/$branch_top/${name#refs/heads/}" ;;
-               refs/tags/*)
-                       destname="refs/$tag_top/${name#refs/tags/}" ;;
-               *)
-                       continue ;;
-               esac
-               git update-ref -m "clone: from $repo" "$destname" "$sha1" ""
-       done < "$GIT_DIR/CLONE_HEAD"
-fi
-
-if test -n "$W"; then
-       cd "$W" || exit
-else
-       cd "$D" || exit
-fi
-
-if test -z "$bare"
-then
-       # a non-bare repository is always in separate-remote layout
-       remote_top="refs/remotes/$origin"
-       head_sha1=
-       test ! -r "$GIT_DIR/REMOTE_HEAD" || head_sha1=$(cat "$GIT_DIR/REMOTE_HEAD")
-       case "$head_sha1" in
-       'ref: refs/'*)
-               # Uh-oh, the remote told us (http transport done against
-               # new style repository with a symref HEAD).
-               # Ideally we should skip the guesswork but for now
-               # opt for minimum change.
-               head_sha1=$(expr "z$head_sha1" : 'zref: refs/heads/\(.*\)')
-               head_sha1=$(cat "$GIT_DIR/$remote_top/$head_sha1")
-               ;;
-       esac
-
-       # The name under $remote_top the remote HEAD seems to point at.
-       head_points_at=$(
-               (
-                       test -f "$GIT_DIR/$remote_top/master" && echo "master"
-                       cd "$GIT_DIR/$remote_top" &&
-                       find . -type f -print | sed -e 's/^\.\///'
-               ) | (
-               done=f
-               while read name
-               do
-                       test t = $done && continue
-                       branch_tip=$(cat "$GIT_DIR/$remote_top/$name")
-                       if test "$head_sha1" = "$branch_tip"
-                       then
-                               echo "$name"
-                               done=t
-                       fi
-               done
-               )
-       )
-
-       # Upstream URL
-       git config remote."$origin".url "$repo" &&
-
-       # Set up the mappings to track the remote branches.
-       git config remote."$origin".fetch \
-               "+refs/heads/*:$remote_top/*" '^$' &&
-
-       # Write out remote.$origin config, and update our "$head_points_at".
-       case "$head_points_at" in
-       ?*)
-               # Local default branch
-               git symbolic-ref HEAD "refs/heads/$head_points_at" &&
-
-               # Tracking branch for the primary branch at the remote.
-               git update-ref HEAD "$head_sha1" &&
-
-               rm -f "refs/remotes/$origin/HEAD"
-               git symbolic-ref "refs/remotes/$origin/HEAD" \
-                       "refs/remotes/$origin/$head_points_at" &&
-
-               git config branch."$head_points_at".remote "$origin" &&
-               git config branch."$head_points_at".merge "refs/heads/$head_points_at"
-               ;;
-       '')
-               if test -z "$head_sha1"
-               then
-                       # Source had nonexistent ref in HEAD
-                       echo >&2 "Warning: Remote HEAD refers to nonexistent ref, unable to checkout."
-                       no_checkout=t
-               else
-                       # Source had detached HEAD pointing nowhere
-                       git update-ref --no-deref HEAD "$head_sha1" &&
-                       rm -f "refs/remotes/$origin/HEAD"
-               fi
-               ;;
-       esac
-
-       case "$no_checkout" in
-       '')
-               test "z$quiet" = z && test "z$no_progress" = z && v=-v || v=
-               git read-tree -m -u $v HEAD HEAD
-       esac
-fi
-rm -f "$GIT_DIR/CLONE_HEAD" "$GIT_DIR/REMOTE_HEAD"
-
-trap - 0
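After a successful non-bare clone the script has recorded the separate-remote layout
shown above; expressed as the equivalent git config calls, with "$repo" standing for
the source URL and a hypothetical 'master' as the remote HEAD:

        git config remote.origin.url "$repo"
        git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*'
        git config branch.master.remote origin
        git config branch.master.merge refs/heads/master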
diff --git a/contrib/examples/git-commit.sh b/contrib/examples/git-commit.sh
deleted file mode 100755 (executable)
index 86c9cfa..0000000
+++ /dev/null
@@ -1,639 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-# Copyright (c) 2006 Junio C Hamano
-
-USAGE='[-a | --interactive] [-s] [-v] [--no-verify] [-m <message> | -F <logfile> | (-C|-c) <commit> | --amend] [-u] [-e] [--author <author>] [--template <file>] [[-i | -o] <path>...]'
-SUBDIRECTORY_OK=Yes
-OPTIONS_SPEC=
-. git-sh-setup
-require_work_tree
-
-git rev-parse --verify HEAD >/dev/null 2>&1 || initial_commit=t
-
-case "$0" in
-*status)
-       status_only=t
-       ;;
-*commit)
-       status_only=
-       ;;
-esac
-
-refuse_partial () {
-       echo >&2 "$1"
-       echo >&2 "You might have meant to say 'git commit -i paths...', perhaps?"
-       exit 1
-}
-
-TMP_INDEX=
-THIS_INDEX="${GIT_INDEX_FILE:-$GIT_DIR/index}"
-NEXT_INDEX="$GIT_DIR/next-index$$"
-rm -f "$NEXT_INDEX"
-save_index () {
-       cp -p "$THIS_INDEX" "$NEXT_INDEX"
-}
-
-run_status () {
-       # If TMP_INDEX is defined, that means we are doing
-       # "--only" partial commit, and that index file is used
-       # to build the tree for the commit.  Otherwise, if
-       # NEXT_INDEX exists, that is the index file used to
-       # make the commit.  Otherwise we are committing the index
-       # as-is, so the regular index file is what we use to compare.
-       if test '' != "$TMP_INDEX"
-       then
-               GIT_INDEX_FILE="$TMP_INDEX"
-               export GIT_INDEX_FILE
-       elif test -f "$NEXT_INDEX"
-       then
-               GIT_INDEX_FILE="$NEXT_INDEX"
-               export GIT_INDEX_FILE
-       fi
-
-       if test "$status_only" = "t" || test "$use_status_color" = "t"; then
-               color=
-       else
-               color=--nocolor
-       fi
-       git runstatus ${color} \
-               ${verbose:+--verbose} \
-               ${amend:+--amend} \
-               ${untracked_files:+--untracked}
-}
-
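The TMP_INDEX/NEXT_INDEX juggling described above relies on GIT_INDEX_FILE pointing
git at an alternate index file; a minimal sketch of the same idea, assuming the
default .git layout and hypothetical paths:

        tmp_index=$(mktemp) &&
        cp .git/index "$tmp_index" &&
        GIT_INDEX_FILE=$tmp_index git add only-this-file.c &&
        tree=$(GIT_INDEX_FILE=$tmp_index git write-tree)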
-trap '
-       test -z "$TMP_INDEX" || {
-               test -f "$TMP_INDEX" && rm -f "$TMP_INDEX"
-       }
-       rm -f "$NEXT_INDEX"
-' 0
-
-################################################################
-# Command line argument parsing and sanity checking
-
-all=
-also=
-allow_empty=f
-interactive=
-only=
-logfile=
-use_commit=
-amend=
-edit_flag=
-no_edit=
-log_given=
-log_message=
-verify=t
-quiet=
-verbose=
-signoff=
-force_author=
-only_include_assumed=
-untracked_files=
-templatefile="$(git config commit.template)"
-while test $# != 0
-do
-       case "$1" in
-       -F|--F|-f|--f|--fi|--fil|--file)
-               case "$#" in 1) usage ;; esac
-               shift
-               no_edit=t
-               log_given=t$log_given
-               logfile="$1"
-               ;;
-       -F*|-f*)
-               no_edit=t
-               log_given=t$log_given
-               logfile="${1#-[Ff]}"
-               ;;
-       --F=*|--f=*|--fi=*|--fil=*|--file=*)
-               no_edit=t
-               log_given=t$log_given
-               logfile="${1#*=}"
-               ;;
-       -a|--a|--al|--all)
-               all=t
-               ;;
-       --allo|--allow|--allow-|--allow-e|--allow-em|--allow-emp|\
-       --allow-empt|--allow-empty)
-               allow_empty=t
-               ;;
-       --au=*|--aut=*|--auth=*|--autho=*|--author=*)
-               force_author="${1#*=}"
-               ;;
-       --au|--aut|--auth|--autho|--author)
-               case "$#" in 1) usage ;; esac
-               shift
-               force_author="$1"
-               ;;
-       -e|--e|--ed|--edi|--edit)
-               edit_flag=t
-               ;;
-       -i|--i|--in|--inc|--incl|--inclu|--includ|--include)
-               also=t
-               ;;
-       --int|--inte|--inter|--intera|--interac|--interact|--interacti|\
-       --interactiv|--interactive)
-               interactive=t
-               ;;
-       -o|--o|--on|--onl|--only)
-               only=t
-               ;;
-       -m|--m|--me|--mes|--mess|--messa|--messag|--message)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=m$log_given
-               log_message="${log_message:+${log_message}
-
-}$1"
-               no_edit=t
-               ;;
-       -m*)
-               log_given=m$log_given
-               log_message="${log_message:+${log_message}
-
-}${1#-m}"
-               no_edit=t
-               ;;
-       --m=*|--me=*|--mes=*|--mess=*|--messa=*|--messag=*|--message=*)
-               log_given=m$log_given
-               log_message="${log_message:+${log_message}
-
-}${1#*=}"
-               no_edit=t
-               ;;
-       -n|--n|--no|--no-|--no-v|--no-ve|--no-ver|--no-veri|--no-verif|\
-       --no-verify)
-               verify=
-               ;;
-       --a|--am|--ame|--amen|--amend)
-               amend=t
-               use_commit=HEAD
-               ;;
-       -c)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=
-               ;;
-       --ree=*|--reed=*|--reedi=*|--reedit=*|--reedit-=*|--reedit-m=*|\
-       --reedit-me=*|--reedit-mes=*|--reedit-mess=*|--reedit-messa=*|\
-       --reedit-messag=*|--reedit-message=*)
-               log_given=t$log_given
-               use_commit="${1#*=}"
-               no_edit=
-               ;;
-       --ree|--reed|--reedi|--reedit|--reedit-|--reedit-m|--reedit-me|\
-       --reedit-mes|--reedit-mess|--reedit-messa|--reedit-messag|\
-       --reedit-message)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=
-               ;;
-       -C)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=t
-               ;;
-       --reu=*|--reus=*|--reuse=*|--reuse-=*|--reuse-m=*|--reuse-me=*|\
-       --reuse-mes=*|--reuse-mess=*|--reuse-messa=*|--reuse-messag=*|\
-       --reuse-message=*)
-               log_given=t$log_given
-               use_commit="${1#*=}"
-               no_edit=t
-               ;;
-       --reu|--reus|--reuse|--reuse-|--reuse-m|--reuse-me|--reuse-mes|\
-       --reuse-mess|--reuse-messa|--reuse-messag|--reuse-message)
-               case "$#" in 1) usage ;; esac
-               shift
-               log_given=t$log_given
-               use_commit="$1"
-               no_edit=t
-               ;;
-       -s|--s|--si|--sig|--sign|--signo|--signof|--signoff)
-               signoff=t
-               ;;
-       -t|--t|--te|--tem|--temp|--templ|--templa|--templat|--template)
-               case "$#" in 1) usage ;; esac
-               shift
-               templatefile="$1"
-               no_edit=
-               ;;
-       -q|--q|--qu|--qui|--quie|--quiet)
-               quiet=t
-               ;;
-       -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
-               verbose=t
-               ;;
-       -u|--u|--un|--unt|--untr|--untra|--untrac|--untrack|--untracke|\
-       --untracked|--untracked-|--untracked-f|--untracked-fi|--untracked-fil|\
-       --untracked-file|--untracked-files)
-               untracked_files=t
-               ;;
-       --)
-               shift
-               break
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-case "$edit_flag" in t) no_edit= ;; esac
-
-################################################################
-# Sanity check options
-
-case "$amend,$initial_commit" in
-t,t)
-       die "You do not have anything to amend." ;;
-t,)
-       if [ -f "$GIT_DIR/MERGE_HEAD" ]; then
-               die "You are in the middle of a merge -- cannot amend."
-       fi ;;
-esac
-
-case "$log_given" in
-tt*)
-       die "Only one of -c/-C/-F can be used." ;;
-*tm*|*mt*)
-       die "Option -m cannot be combined with -c/-C/-F." ;;
-esac
-
-case "$#,$also,$only,$amend" in
-*,t,t,*)
-       die "Only one of --include/--only can be used." ;;
-0,t,,* | 0,,t,)
-       die "No paths with --include/--only does not make sense." ;;
-0,,t,t)
-       only_include_assumed="# Clever... amending the last one with dirty index." ;;
-0,,,*)
-       ;;
-*,,,*)
-       only_include_assumed="# Explicit paths specified without -i or -o; assuming --only paths..."
-       also=
-       ;;
-esac
-unset only
-case "$all,$interactive,$also,$#" in
-*t,*t,*)
-       die "Cannot use -a, --interactive or -i at the same time." ;;
-t,,,[1-9]*)
-       die "Paths with -a does not make sense." ;;
-,t,,[1-9]*)
-       die "Paths with --interactive does not make sense." ;;
-,,t,0)
-       die "No paths with -i does not make sense." ;;
-esac
-
-if test ! -z "$templatefile" && test -z "$log_given"
-then
-       if test ! -f "$templatefile"
-       then
-               die "Commit template file does not exist."
-       fi
-fi
-
-################################################################
-# Prepare index to have a tree to be committed
-
-case "$all,$also" in
-t,)
-       if test ! -f "$THIS_INDEX"
-       then
-               die 'nothing to commit (use "git add file1 file2" to include for commit)'
-       fi
-       save_index &&
-       (
-               cd_to_toplevel &&
-               GIT_INDEX_FILE="$NEXT_INDEX" &&
-               export GIT_INDEX_FILE &&
-               git diff-files --name-only -z |
-               git update-index --remove -z --stdin
-       ) || exit
-       ;;
-,t)
-       save_index &&
-       git ls-files --error-unmatch -- "$@" >/dev/null || exit
-
-       git diff-files --name-only -z -- "$@"  |
-       (
-               cd_to_toplevel &&
-               GIT_INDEX_FILE="$NEXT_INDEX" &&
-               export GIT_INDEX_FILE &&
-               git update-index --remove -z --stdin
-       ) || exit
-       ;;
-,)
-       if test "$interactive" = t; then
-               git add --interactive || exit
-       fi
-       case "$#" in
-       0)
-               ;; # commit as-is
-       *)
-               if test -f "$GIT_DIR/MERGE_HEAD"
-               then
-                       refuse_partial "Cannot do a partial commit during a merge."
-               fi
-
-               TMP_INDEX="$GIT_DIR/tmp-index$$"
-               W=
-               test -z "$initial_commit" && W=--with-tree=HEAD
-               commit_only=$(git ls-files --error-unmatch $W -- "$@") || exit
-
-               # Build a temporary index and update the real index
-               # the same way.
-               if test -z "$initial_commit"
-               then
-                       GIT_INDEX_FILE="$THIS_INDEX" \
-                       git read-tree --index-output="$TMP_INDEX" -i -m HEAD
-               else
-                       rm -f "$TMP_INDEX"
-               fi || exit
-
-               printf '%s\n' "$commit_only" |
-               GIT_INDEX_FILE="$TMP_INDEX" \
-               git update-index --add --remove --stdin &&
-
-               save_index &&
-               printf '%s\n' "$commit_only" |
-               (
-                       GIT_INDEX_FILE="$NEXT_INDEX"
-                       export GIT_INDEX_FILE
-                       git update-index --add --remove --stdin
-               ) || exit
-               ;;
-       esac
-       ;;
-esac
-
-################################################################
-# If we do an as-is commit, the index file will be THIS_INDEX;
-# otherwise it will be NEXT_INDEX after we make this commit.  We leave
-# the index as-is if we abort.
-
-if test -f "$NEXT_INDEX"
-then
-       USE_INDEX="$NEXT_INDEX"
-else
-       USE_INDEX="$THIS_INDEX"
-fi
-
-case "$status_only" in
-t)
-       # This will silently fail in a read-only repository, which is
-       # what we want.
-       GIT_INDEX_FILE="$USE_INDEX" git update-index -q --unmerged --refresh
-       run_status
-       exit $?
-       ;;
-'')
-       GIT_INDEX_FILE="$USE_INDEX" git update-index -q --refresh || exit
-       ;;
-esac
-
-################################################################
-# Grab commit message, write out tree and make commit.
-
-if test t = "$verify" && test -x "$GIT_DIR"/hooks/pre-commit
-then
-    GIT_INDEX_FILE="${TMP_INDEX:-${USE_INDEX}}" "$GIT_DIR"/hooks/pre-commit \
-    || exit
-fi
-
-if test "$log_message" != ''
-then
-       printf '%s\n' "$log_message"
-elif test "$logfile" != ""
-then
-       if test "$logfile" = -
-       then
-               test -t 0 &&
-               echo >&2 "(reading log message from standard input)"
-               cat
-       else
-               cat <"$logfile"
-       fi
-elif test "$use_commit" != ""
-then
-       encoding=$(git config i18n.commitencoding || echo UTF-8)
-       git show -s --pretty=raw --encoding="$encoding" "$use_commit" |
-       sed -e '1,/^$/d' -e 's/^    //'
-elif test -f "$GIT_DIR/MERGE_MSG"
-then
-       cat "$GIT_DIR/MERGE_MSG"
-elif test -f "$GIT_DIR/SQUASH_MSG"
-then
-       cat "$GIT_DIR/SQUASH_MSG"
-elif test "$templatefile" != ""
-then
-       cat "$templatefile"
-fi | git stripspace >"$GIT_DIR"/COMMIT_EDITMSG
-
-case "$signoff" in
-t)
-       sign=$(git var GIT_COMMITTER_IDENT | sed -e '
-               s/>.*/>/
-               s/^/Signed-off-by: /
-               ')
-       blank_before_signoff=
-       tail -n 1 "$GIT_DIR"/COMMIT_EDITMSG |
-       grep 'Signed-off-by:' >/dev/null || blank_before_signoff='
-'
-       tail -n 1 "$GIT_DIR"/COMMIT_EDITMSG |
-       grep "$sign"$ >/dev/null ||
-       printf '%s%s\n' "$blank_before_signoff" "$sign" \
-               >>"$GIT_DIR"/COMMIT_EDITMSG
-       ;;
-esac
-
-if test -f "$GIT_DIR/MERGE_HEAD" && test -z "$no_edit"; then
-       echo "#"
-       echo "# It looks like you may be committing a MERGE."
-       echo "# If this is not correct, please remove the file"
-       printf '%s\n' "#        $GIT_DIR/MERGE_HEAD"
-       echo "# and try again"
-       echo "#"
-fi >>"$GIT_DIR"/COMMIT_EDITMSG
-
-# Author
-if test '' != "$use_commit"
-then
-       eval "$(get_author_ident_from_commit "$use_commit")"
-       export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_AUTHOR_DATE
-fi
-if test '' != "$force_author"
-then
-       GIT_AUTHOR_NAME=$(expr "z$force_author" : 'z\(.*[^ ]\) *<.*') &&
-       GIT_AUTHOR_EMAIL=$(expr "z$force_author" : '.*\(<.*\)') &&
-       test '' != "$GIT_AUTHOR_NAME" &&
-       test '' != "$GIT_AUTHOR_EMAIL" ||
-       die "malformed --author parameter"
-       export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL
-fi
-
-PARENTS="-p HEAD"
-if test -z "$initial_commit"
-then
-       rloga='commit'
-       if [ -f "$GIT_DIR/MERGE_HEAD" ]; then
-               rloga='commit (merge)'
-               PARENTS="-p HEAD "$(sed -e 's/^/-p /' "$GIT_DIR/MERGE_HEAD")
-       elif test -n "$amend"; then
-               rloga='commit (amend)'
-               PARENTS=$(git cat-file commit HEAD |
-                       sed -n -e '/^$/q' -e 's/^parent /-p /p')
-       fi
-       current="$(git rev-parse --verify HEAD)"
-else
-       if [ -z "$(git ls-files)" ]; then
-               echo >&2 'nothing to commit (use "git add file1 file2" to include for commit)'
-               exit 1
-       fi
-       PARENTS=""
-       rloga='commit (initial)'
-       current=''
-fi
-set_reflog_action "$rloga"
-
-if test -z "$no_edit"
-then
-       {
-               echo ""
-               echo "# Please enter the commit message for your changes."
-               echo "# (Comment lines starting with '#' will not be included)"
-               test -z "$only_include_assumed" || echo "$only_include_assumed"
-               run_status
-       } >>"$GIT_DIR"/COMMIT_EDITMSG
-else
-       # we need to check if there is anything to commit
-       run_status >/dev/null
-fi
-case "$allow_empty,$?,$PARENTS" in
-t,* | ?,0,* | ?,*,-p' '?*-p' '?*)
-       # an explicit --allow-empty, or a merge commit can record the
-       # same tree as its parent.  Otherwise having committable paths
-       # is required.
-       ;;
-*)
-       rm -f "$GIT_DIR/COMMIT_EDITMSG" "$GIT_DIR/SQUASH_MSG"
-       use_status_color=t
-       run_status
-       exit 1
-esac
-
-case "$no_edit" in
-'')
-       git var GIT_AUTHOR_IDENT > /dev/null  || die
-       git var GIT_COMMITTER_IDENT > /dev/null  || die
-       git_editor "$GIT_DIR/COMMIT_EDITMSG"
-       ;;
-esac
-
-case "$verify" in
-t)
-       if test -x "$GIT_DIR"/hooks/commit-msg
-       then
-               "$GIT_DIR"/hooks/commit-msg "$GIT_DIR"/COMMIT_EDITMSG || exit
-       fi
-esac
-
-if test -z "$no_edit"
-then
-    sed -e '
-        /^diff --git a\/.*/{
-           s///
-           q
-       }
-       /^#/d
-    ' "$GIT_DIR"/COMMIT_EDITMSG
-else
-    cat "$GIT_DIR"/COMMIT_EDITMSG
-fi |
-git stripspace >"$GIT_DIR"/COMMIT_MSG
-
-# Test whether the commit message has any content we didn't supply.
-have_commitmsg=
-grep -v -i '^Signed-off-by' "$GIT_DIR"/COMMIT_MSG |
-       git stripspace > "$GIT_DIR"/COMMIT_BAREMSG
-
-# Is the commit message totally empty?
-if test -s "$GIT_DIR"/COMMIT_BAREMSG
-then
-       if test "$templatefile" != ""
-       then
-               # Test whether this is just the unaltered template.
-               if cnt=$(sed -e '/^#/d' < "$templatefile" |
-                       git stripspace |
-                       diff "$GIT_DIR"/COMMIT_BAREMSG - |
-                       wc -l) &&
-                  test 0 -lt $cnt
-               then
-                       have_commitmsg=t
-               fi
-       else
-               # No template, so the content in the commit message must
-               # have come from the user.
-               have_commitmsg=t
-       fi
-fi
-
-rm -f "$GIT_DIR"/COMMIT_BAREMSG
-
-if test "$have_commitmsg" = "t"
-then
-       if test -z "$TMP_INDEX"
-       then
-               tree=$(GIT_INDEX_FILE="$USE_INDEX" git write-tree)
-       else
-               tree=$(GIT_INDEX_FILE="$TMP_INDEX" git write-tree) &&
-               rm -f "$TMP_INDEX"
-       fi &&
-       commit=$(git commit-tree $tree $PARENTS <"$GIT_DIR/COMMIT_MSG") &&
-       rlogm=$(sed -e 1q "$GIT_DIR"/COMMIT_MSG) &&
-       git update-ref -m "$GIT_REFLOG_ACTION: $rlogm" HEAD $commit "$current" &&
-       rm -f -- "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/MERGE_MSG" &&
-       if test -f "$NEXT_INDEX"
-       then
-               mv "$NEXT_INDEX" "$THIS_INDEX"
-       else
-               : ;# happy
-       fi
-else
-       echo >&2 "* no commit message?  aborting commit."
-       false
-fi
-ret="$?"
-rm -f "$GIT_DIR/COMMIT_MSG" "$GIT_DIR/COMMIT_EDITMSG" "$GIT_DIR/SQUASH_MSG"
-
-cd_to_toplevel
-
-git rerere
-
-if test "$ret" = 0
-then
-       git gc --auto
-       if test -x "$GIT_DIR"/hooks/post-commit
-       then
-               "$GIT_DIR"/hooks/post-commit
-       fi
-       if test -z "$quiet"
-       then
-               commit=$(git diff-tree --always --shortstat --pretty="format:%h: %s"\
-                      --abbrev --summary --root HEAD --)
-               echo "Created${initial_commit:+ initial} commit $commit"
-       fi
-fi
-
-exit "$ret"
diff --git a/contrib/examples/git-difftool.perl b/contrib/examples/git-difftool.perl
deleted file mode 100755 (executable)
index fb0fd0b..0000000
+++ /dev/null
@@ -1,481 +0,0 @@
-#!/usr/bin/perl
-# Copyright (c) 2009, 2010 David Aguilar
-# Copyright (c) 2012 Tim Henigan
-#
-# This is a wrapper around the GIT_EXTERNAL_DIFF-compatible
-# git-difftool--helper script.
-#
-# This script exports GIT_EXTERNAL_DIFF and GIT_PAGER for use by git.
-# The GIT_DIFF* variables are exported for use by git-difftool--helper.
-#
-# Any arguments that are unknown to this script are forwarded to 'git diff'.
-
-use 5.008;
-use strict;
-use warnings;
-use Git::Error qw(:try);
-use File::Basename qw(dirname);
-use File::Copy;
-use File::Find;
-use File::stat;
-use File::Path qw(mkpath rmtree);
-use File::Temp qw(tempdir);
-use Getopt::Long qw(:config pass_through);
-use Git;
-use Git::I18N;
-
-sub usage
-{
-       my $exitcode = shift;
-       print << 'USAGE';
-usage: git difftool [-t|--tool=<tool>] [--tool-help]
-                    [-x|--extcmd=<cmd>]
-                    [-g|--gui] [--no-gui]
-                    [--prompt] [-y|--no-prompt]
-                    [-d|--dir-diff]
-                    ['git diff' options]
-USAGE
-       exit($exitcode);
-}
-
-sub print_tool_help
-{
-       # See the comment at the bottom of file_diff() for the reason behind
-       # using system() followed by exit() instead of exec().
-       my $rc = system(qw(git mergetool --tool-help=diff));
-       exit($rc | ($rc >> 8));
-}
-
-sub exit_cleanup
-{
-       my ($tmpdir, $status) = @_;
-       my $errno = $!;
-       rmtree($tmpdir);
-       if ($status and $errno) {
-               my ($package, $file, $line) = caller();
-               warn "$file line $line: $errno\n";
-       }
-       exit($status | ($status >> 8));
-}
-
-sub use_wt_file
-{
-       my ($file, $sha1) = @_;
-       my $null_sha1 = '0' x 40;
-
-       if (-l $file || ! -e _) {
-               return (0, $null_sha1);
-       }
-
-       my $wt_sha1 = Git::command_oneline('hash-object', $file);
-       my $use = ($sha1 eq $null_sha1) || ($sha1 eq $wt_sha1);
-       return ($use, $wt_sha1);
-}
-
-sub changed_files
-{
-       my ($repo_path, $index, $worktree) = @_;
-       $ENV{GIT_INDEX_FILE} = $index;
-
-       my @gitargs = ('--git-dir', $repo_path, '--work-tree', $worktree);
-       my @refreshargs = (
-               @gitargs, 'update-index',
-               '--really-refresh', '-q', '--unmerged');
-       try {
-               Git::command_oneline(@refreshargs);
-       } catch Git::Error::Command with {};
-
-       my @diffargs = (@gitargs, 'diff-files', '--name-only', '-z');
-       my $line = Git::command_oneline(@diffargs);
-       my @files;
-       if (defined $line) {
-               @files = split('\0', $line);
-       } else {
-               @files = ();
-       }
-
-       delete($ENV{GIT_INDEX_FILE});
-
-       return map { $_ => 1 } @files;
-}
-
-sub setup_dir_diff
-{
-       my ($worktree, $symlinks) = @_;
-       my @gitargs = ('diff', '--raw', '--no-abbrev', '-z', @ARGV);
-       my $diffrtn = Git::command_oneline(@gitargs);
-       exit(0) unless defined($diffrtn);
-
-       # Go to the root of the worktree now that we've captured the list of
-       # changed files.  The paths returned by diff --raw are relative to the
-       # top-level of the repository, but we defer changing directories so
-       # that @ARGV can perform pathspec limiting in the current directory.
-       chdir($worktree);
-
-       # Build index info for left and right sides of the diff
-       my $submodule_mode = '160000';
-       my $symlink_mode = '120000';
-       my $null_mode = '0' x 6;
-       my $null_sha1 = '0' x 40;
-       my $lindex = '';
-       my $rindex = '';
-       my $wtindex = '';
-       my %submodule;
-       my %symlink;
-       my @files = ();
-       my %working_tree_dups = ();
-       my @rawdiff = split('\0', $diffrtn);
-
-       my $i = 0;
-       while ($i < $#rawdiff) {
-               if ($rawdiff[$i] =~ /^::/) {
-                       warn __ <<'EOF';
-Combined diff formats ('-c' and '--cc') are not supported in
-directory diff mode ('-d' and '--dir-diff').
-EOF
-                       exit(1);
-               }
-
-               my ($lmode, $rmode, $lsha1, $rsha1, $status) =
-                       split(' ', substr($rawdiff[$i], 1));
-               my $src_path = $rawdiff[$i + 1];
-               my $dst_path;
-
-               if ($status =~ /^[CR]/) {
-                       $dst_path = $rawdiff[$i + 2];
-                       $i += 3;
-               } else {
-                       $dst_path = $src_path;
-                       $i += 2;
-               }
-
-               if ($lmode eq $submodule_mode or $rmode eq $submodule_mode) {
-                       $submodule{$src_path}{left} = $lsha1;
-                       if ($lsha1 ne $rsha1) {
-                               $submodule{$dst_path}{right} = $rsha1;
-                       } else {
-                               $submodule{$dst_path}{right} = "$rsha1-dirty";
-                       }
-                       next;
-               }
-
-               if ($lmode eq $symlink_mode) {
-                       $symlink{$src_path}{left} =
-                               Git::command_oneline('show', $lsha1);
-               }
-
-               if ($rmode eq $symlink_mode) {
-                       $symlink{$dst_path}{right} =
-                               Git::command_oneline('show', $rsha1);
-               }
-
-               if ($lmode ne $null_mode and $status !~ /^C/) {
-                       $lindex .= "$lmode $lsha1\t$src_path\0";
-               }
-
-               if ($rmode ne $null_mode) {
-                       # Avoid duplicate entries
-                       if ($working_tree_dups{$dst_path}++) {
-                               next;
-                       }
-                       my ($use, $wt_sha1) =
-                               use_wt_file($dst_path, $rsha1);
-                       if ($use) {
-                               push @files, $dst_path;
-                               $wtindex .= "$rmode $wt_sha1\t$dst_path\0";
-                       } else {
-                               $rindex .= "$rmode $rsha1\t$dst_path\0";
-                       }
-               }
-       }
-
-       # Go to the root of the worktree so that the left index files
-       # are properly set up -- the index is toplevel-relative.
-       chdir($worktree);
-
-       # Setup temp directories
-       my $tmpdir = tempdir('git-difftool.XXXXX', CLEANUP => 0, TMPDIR => 1);
-       my $ldir = "$tmpdir/left";
-       my $rdir = "$tmpdir/right";
-       mkpath($ldir) or exit_cleanup($tmpdir, 1);
-       mkpath($rdir) or exit_cleanup($tmpdir, 1);
-
-       # Populate the left and right directories based on each index file
-       my ($inpipe, $ctx);
-       $ENV{GIT_INDEX_FILE} = "$tmpdir/lindex";
-       ($inpipe, $ctx) =
-               Git::command_input_pipe('update-index', '-z', '--index-info');
-       print($inpipe $lindex);
-       Git::command_close_pipe($inpipe, $ctx);
-
-       my $rc = system('git', 'checkout-index', '--all', "--prefix=$ldir/");
-       exit_cleanup($tmpdir, $rc) if $rc != 0;
-
-       $ENV{GIT_INDEX_FILE} = "$tmpdir/rindex";
-       ($inpipe, $ctx) =
-               Git::command_input_pipe('update-index', '-z', '--index-info');
-       print($inpipe $rindex);
-       Git::command_close_pipe($inpipe, $ctx);
-
-       $rc = system('git', 'checkout-index', '--all', "--prefix=$rdir/");
-       exit_cleanup($tmpdir, $rc) if $rc != 0;
-
-       $ENV{GIT_INDEX_FILE} = "$tmpdir/wtindex";
-       ($inpipe, $ctx) =
-               Git::command_input_pipe('update-index', '--info-only', '-z', '--index-info');
-       print($inpipe $wtindex);
-       Git::command_close_pipe($inpipe, $ctx);
-
-       # If $GIT_INDEX_FILE was explicitly set just for the update/checkout
-       # commands, then it should be unset before continuing.
-       delete($ENV{GIT_INDEX_FILE});
-
-       # Changes in the working tree need special treatment since they are
-       # not part of the index.
-       for my $file (@files) {
-               my $dir = dirname($file);
-               unless (-d "$rdir/$dir") {
-                       mkpath("$rdir/$dir") or
-                       exit_cleanup($tmpdir, 1);
-               }
-               if ($symlinks) {
-                       symlink("$worktree/$file", "$rdir/$file") or
-                       exit_cleanup($tmpdir, 1);
-               } else {
-                       copy($file, "$rdir/$file") or
-                       exit_cleanup($tmpdir, 1);
-
-                       my $mode = stat($file)->mode;
-                       chmod($mode, "$rdir/$file") or
-                       exit_cleanup($tmpdir, 1);
-               }
-       }
-
-       # Changes to submodules require special treatment. This loop writes a
-       # temporary file to both the left and right directories to show the
-       # change in the recorded SHA1 for the submodule.
-       for my $path (keys %submodule) {
-               my $ok = 0;
-               if (defined($submodule{$path}{left})) {
-                       $ok = write_to_file("$ldir/$path",
-                               "Subproject commit $submodule{$path}{left}");
-               }
-               if (defined($submodule{$path}{right})) {
-                       $ok = write_to_file("$rdir/$path",
-                               "Subproject commit $submodule{$path}{right}");
-               }
-               exit_cleanup($tmpdir, 1) if not $ok;
-       }
-
-       # Symbolic links require special treatment. The standard "git diff"
-       # shows only the link itself, not the contents of the link target.
-       # This loop replicates that behavior.
-       for my $path (keys %symlink) {
-               my $ok = 0;
-               if (defined($symlink{$path}{left})) {
-                       $ok = write_to_file("$ldir/$path",
-                                       $symlink{$path}{left});
-               }
-               if (defined($symlink{$path}{right})) {
-                       $ok = write_to_file("$rdir/$path",
-                                       $symlink{$path}{right});
-               }
-               exit_cleanup($tmpdir, 1) if not $ok;
-       }
-
-       return ($ldir, $rdir, $tmpdir, @files);
-}
-
-sub write_to_file
-{
-       my $path = shift;
-       my $value = shift;
-
-       # Make sure the path to the file exists
-       my $dir = dirname($path);
-       unless (-d "$dir") {
-               mkpath("$dir") or return 0;
-       }
-
-       # If the file already exists in that location, delete it.  This
-       # is required in the case of symbolic links.
-       unlink($path);
-
-       open(my $fh, '>', $path) or return 0;
-       print($fh $value);
-       close($fh);
-
-       return 1;
-}
-
-sub main
-{
-       # parse command-line options. all unrecognized options and arguments
-       # are passed through to the 'git diff' command.
-       my %opts = (
-               difftool_cmd => undef,
-               dirdiff => undef,
-               extcmd => undef,
-               gui => undef,
-               help => undef,
-               prompt => undef,
-               symlinks => $^O ne 'cygwin' &&
-                               $^O ne 'MSWin32' && $^O ne 'msys',
-               tool_help => undef,
-               trust_exit_code => undef,
-       );
-       GetOptions('g|gui!' => \$opts{gui},
-               'd|dir-diff' => \$opts{dirdiff},
-               'h' => \$opts{help},
-               'prompt!' => \$opts{prompt},
-               'y' => sub { $opts{prompt} = 0; },
-               'symlinks' => \$opts{symlinks},
-               'no-symlinks' => sub { $opts{symlinks} = 0; },
-               't|tool:s' => \$opts{difftool_cmd},
-               'tool-help' => \$opts{tool_help},
-               'trust-exit-code' => \$opts{trust_exit_code},
-               'no-trust-exit-code' => sub { $opts{trust_exit_code} = 0; },
-               'x|extcmd:s' => \$opts{extcmd});
-
-       if (defined($opts{help})) {
-               usage(0);
-       }
-       if (defined($opts{tool_help})) {
-               print_tool_help();
-       }
-       if (defined($opts{difftool_cmd})) {
-               if (length($opts{difftool_cmd}) > 0) {
-                       $ENV{GIT_DIFF_TOOL} = $opts{difftool_cmd};
-               } else {
-                       print __("No <tool> given for --tool=<tool>\n");
-                       usage(1);
-               }
-       }
-       if (defined($opts{extcmd})) {
-               if (length($opts{extcmd}) > 0) {
-                       $ENV{GIT_DIFFTOOL_EXTCMD} = $opts{extcmd};
-               } else {
-                       print __("No <cmd> given for --extcmd=<cmd>\n");
-                       usage(1);
-               }
-       }
-       if ($opts{gui}) {
-               my $guitool = Git::config('diff.guitool');
-               if (defined($guitool) && length($guitool) > 0) {
-                       $ENV{GIT_DIFF_TOOL} = $guitool;
-               }
-       }
-
-       if (!defined $opts{trust_exit_code}) {
-               $opts{trust_exit_code} = Git::config_bool('difftool.trustExitCode');
-       }
-       if ($opts{trust_exit_code}) {
-               $ENV{GIT_DIFFTOOL_TRUST_EXIT_CODE} = 'true';
-       } else {
-               $ENV{GIT_DIFFTOOL_TRUST_EXIT_CODE} = 'false';
-       }
-
-       # In directory diff mode, 'git-difftool--helper' is called once
-       # to compare the a/b directories.  In file diff mode, 'git diff'
-       # will invoke a separate instance of 'git-difftool--helper' for
-       # each file that changed.
-       if (defined($opts{dirdiff})) {
-               dir_diff($opts{extcmd}, $opts{symlinks});
-       } else {
-               file_diff($opts{prompt});
-       }
-}
-
-sub dir_diff
-{
-       my ($extcmd, $symlinks) = @_;
-       my $rc;
-       my $error = 0;
-       my $repo = Git->repository();
-       my $repo_path = $repo->repo_path();
-       my $worktree = $repo->wc_path();
-       $worktree =~ s|/$||; # Avoid double slashes in symlink targets
-       my ($a, $b, $tmpdir, @files) = setup_dir_diff($worktree, $symlinks);
-
-       if (defined($extcmd)) {
-               $rc = system($extcmd, $a, $b);
-       } else {
-               $ENV{GIT_DIFFTOOL_DIRDIFF} = 'true';
-               $rc = system('git', 'difftool--helper', $a, $b);
-       }
-       # If the diff included working copy files and those
-       # files were modified during the diff, then the changes
-       # should be copied back to the working tree.
-       # Do not copy back files when symlinks are used and the
-       # external tool did not replace the original link with a file.
-       #
-       # These hashes are loaded lazily since they aren't needed
-       # in the common case of --symlinks and the difftool updating
-       # files through the symlink.
-       my %wt_modified;
-       my %tmp_modified;
-       my $indices_loaded = 0;
-
-       for my $file (@files) {
-               next if $symlinks && -l "$b/$file";
-               next if ! -f "$b/$file";
-
-               if (!$indices_loaded) {
-                       %wt_modified = changed_files(
-                               $repo_path, "$tmpdir/wtindex", $worktree);
-                       %tmp_modified = changed_files(
-                               $repo_path, "$tmpdir/wtindex", $b);
-                       $indices_loaded = 1;
-               }
-
-               if (exists $wt_modified{$file} and exists $tmp_modified{$file}) {
-                       warn sprintf(__(
-                               "warning: Both files modified:\n" .
-                               "'%s/%s' and '%s/%s'.\n" .
-                               "warning: Working tree file has been left.\n" .
-                               "warning:\n"), $worktree, $file, $b, $file);
-                       $error = 1;
-               } elsif (exists $tmp_modified{$file}) {
-                       my $mode = stat("$b/$file")->mode;
-                       copy("$b/$file", $file) or
-                       exit_cleanup($tmpdir, 1);
-
-                       chmod($mode, $file) or
-                       exit_cleanup($tmpdir, 1);
-               }
-       }
-       if ($error) {
-               warn sprintf(__(
-                       "warning: Temporary files exist in '%s'.\n" .
-                       "warning: You may want to cleanup or recover these.\n"), $tmpdir);
-               exit(1);
-       } else {
-               exit_cleanup($tmpdir, $rc);
-       }
-}
-
-sub file_diff
-{
-       my ($prompt) = @_;
-
-       if (defined($prompt)) {
-               if ($prompt) {
-                       $ENV{GIT_DIFFTOOL_PROMPT} = 'true';
-               } else {
-                       $ENV{GIT_DIFFTOOL_NO_PROMPT} = 'true';
-               }
-       }
-
-       $ENV{GIT_PAGER} = '';
-       $ENV{GIT_EXTERNAL_DIFF} = 'git-difftool--helper';
-
-       # ActiveState Perl for Win32 does not implement the POSIX semantics of
-       # the exec* system call. It just spawns the given executable and finishes
-       # the starting program, exiting with code 0.
-       # Using system will at least catch the errors returned by git diff,
-       # allowing the caller of git difftool to handle failures better.
-       my $rc = system('git', 'diff', @ARGV);
-       exit($rc | ($rc >> 8));
-}
-
-main();
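
(The command-line surface of this removed Perl wrapper survives in the builtin "git difftool"; a minimal sketch of equivalent calls, assuming a reasonably recent git:)

    git difftool -t vimdiff        # compare changed files one at a time with the named tool
    git difftool -d                # directory diff instead of per-file invocations
    git difftool -y -x "diff -u"   # no prompting, use an arbitrary external command
    git difftool --tool-help       # list the diff tools git knows about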
diff --git a/contrib/examples/git-fetch.sh b/contrib/examples/git-fetch.sh
deleted file mode 100755 (executable)
index 57d2e56..0000000
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/bin/sh
-#
-
-USAGE='<fetch-options> <repository> <refspec>...'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-set_reflog_action "fetch $*"
-cd_to_toplevel ;# probably unnecessary...
-
-. git-parse-remote
-_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
-_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
-
-LF='
-'
-IFS="$LF"
-
-no_tags=
-tags=
-append=
-force=
-verbose=
-update_head_ok=
-exec=
-keep=
-shallow_depth=
-no_progress=
-test -t 1 || no_progress=--no-progress
-quiet=
-while test $# != 0
-do
-       case "$1" in
-       -a|--a|--ap|--app|--appe|--appen|--append)
-               append=t
-               ;;
-       --upl|--uplo|--uploa|--upload|--upload-|--upload-p|\
-       --upload-pa|--upload-pac|--upload-pack)
-               shift
-               exec="--upload-pack=$1"
-               ;;
-       --upl=*|--uplo=*|--uploa=*|--upload=*|\
-       --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
-               exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
-               shift
-               ;;
-       -f|--f|--fo|--for|--forc|--force)
-               force=t
-               ;;
-       -t|--t|--ta|--tag|--tags)
-               tags=t
-               ;;
-       -n|--n|--no|--no-|--no-t|--no-ta|--no-tag|--no-tags)
-               no_tags=t
-               ;;
-       -u|--u|--up|--upd|--upda|--updat|--update|--update-|--update-h|\
-       --update-he|--update-hea|--update-head|--update-head-|\
-       --update-head-o|--update-head-ok)
-               update_head_ok=t
-               ;;
-       -q|--q|--qu|--qui|--quie|--quiet)
-               quiet=--quiet
-               ;;
-       -v|--verbose)
-               verbose="$verbose"Yes
-               ;;
-       -k|--k|--ke|--kee|--keep)
-               keep='-k -k'
-               ;;
-       --depth=*)
-               shallow_depth="--depth=$(expr "z$1" : 'z-[^=]*=\(.*\)')"
-               ;;
-       --depth)
-               shift
-               shallow_depth="--depth=$1"
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-
-case "$#" in
-0)
-       origin=$(get_default_remote)
-       test -n "$(get_remote_url ${origin})" ||
-               die "Where do you want to fetch from today?"
-       set x $origin ; shift ;;
-esac
-
-if test -z "$exec"
-then
-       # No command line override and we have configuration for the remote.
-       exec="--upload-pack=$(get_uploadpack $1)"
-fi
-
-remote_nick="$1"
-remote=$(get_remote_url "$@")
-refs=
-rref=
-rsync_slurped_objects=
-
-if test "" = "$append"
-then
-       : >"$GIT_DIR/FETCH_HEAD"
-fi
-
-# Global that is reused later
-ls_remote_result=$(git ls-remote $exec "$remote") ||
-       die "Cannot get the repository state from $remote"
-
-append_fetch_head () {
-       flags=
-       test -n "$verbose" && flags="$flags$LF-v"
-       test -n "$force$single_force" && flags="$flags$LF-f"
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
-               git fetch--tool $flags append-fetch-head "$@"
-}
-
-# updating the current HEAD with git-fetch in a bare
-# repository is always fine.
-if test -z "$update_head_ok" && test $(is_bare_repository) = false
-then
-       orig_head=$(git rev-parse --verify HEAD 2>/dev/null)
-fi
-
-# Allow --tags/--notags from remote.$1.tagopt
-case "$tags$no_tags" in
-'')
-       case "$(git config --get "remote.$1.tagopt")" in
-       --tags)
-               tags=t ;;
-       --no-tags)
-               no_tags=t ;;
-       esac
-esac
-
-# If --tags (and later --heads or --all) is specified, then we are
-# not talking about the defaults stored in the Pull: lines of the remotes
-# files or in the branches file; we fetch just those and any refspecs
-# explicitly given.  Otherwise we do what we always did.
-
-reflist=$(get_remote_refs_for_fetch "$@")
-if test "$tags"
-then
-       taglist=$(IFS=' ' &&
-                 echo "$ls_remote_result" |
-                 git show-ref --exclude-existing=refs/tags/ |
-                 while read sha1 name
-                 do
-                       echo ".${name}:${name}"
-                 done) || exit
-       if test "$#" -gt 1
-       then
-               # remote URL plus explicit refspecs; we need to merge them.
-               reflist="$reflist$LF$taglist"
-       else
-               # No explicit refspecs; fetch tags only.
-               reflist=$taglist
-       fi
-fi
-
-fetch_all_at_once () {
-
-  eval=$(echo "$1" | git fetch--tool parse-reflist "-")
-  eval "$eval"
-
-    ( : subshell because we muck with IFS
-      IFS="    $LF"
-      (
-       if test "$remote" = . ; then
-           git show-ref $rref || echo failed "$remote"
-       elif test -f "$remote" ; then
-           test -n "$shallow_depth" &&
-               die "shallow clone with bundle is not supported"
-           git bundle unbundle "$remote" $rref ||
-           echo failed "$remote"
-       else
-               if      test -d "$remote" &&
-
-                       # The remote might be our alternate.  With
-                       # this optimization we will bypass fetch-pack
-                       # altogether, which means we cannot be doing
-                       # the shallow stuff at all.
-                       test ! -f "$GIT_DIR/shallow" &&
-                       test -z "$shallow_depth" &&
-
-                       # See if all of what we are going to fetch are
-                       # connected to our repository's tips, in which
-                       # case we do not have to do any fetch.
-                       theirs=$(echo "$ls_remote_result" | \
-                               git fetch--tool -s pick-rref "$rref" "-") &&
-
-                       # This will barf when $theirs reach an object that
-                       # we do not have in our repository.  Otherwise,
-                       # we already have everything the fetch would bring in.
-                       git rev-list --objects $theirs --not --all \
-                               >/dev/null 2>/dev/null
-               then
-                       echo "$ls_remote_result" | \
-                               git fetch--tool pick-rref "$rref" "-"
-               else
-                       flags=
-                       case $verbose in
-                       YesYes*)
-                           flags="-v"
-                           ;;
-                       esac
-                       git-fetch-pack --thin $exec $keep $shallow_depth \
-                               $quiet $no_progress $flags "$remote" $rref ||
-                       echo failed "$remote"
-               fi
-       fi
-      ) |
-      (
-       flags=
-       test -n "$verbose" && flags="$flags -v"
-       test -n "$force" && flags="$flags -f"
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
-               git fetch--tool $flags native-store \
-                       "$remote" "$remote_nick" "$refs"
-      )
-    ) || exit
-
-}
-
-fetch_per_ref () {
-  reflist="$1"
-  refs=
-  rref=
-
-  for ref in $reflist
-  do
-      refs="$refs$LF$ref"
-
-      # These are relative paths from $GIT_DIR, typically starting at refs/,
-      # but may be HEAD.
-      if expr "z$ref" : 'z\.' >/dev/null
-      then
-         not_for_merge=t
-         ref=$(expr "z$ref" : 'z\.\(.*\)')
-      else
-         not_for_merge=
-      fi
-      if expr "z$ref" : 'z+' >/dev/null
-      then
-         single_force=t
-         ref=$(expr "z$ref" : 'z+\(.*\)')
-      else
-         single_force=
-      fi
-      remote_name=$(expr "z$ref" : 'z\([^:]*\):')
-      local_name=$(expr "z$ref" : 'z[^:]*:\(.*\)')
-
-      rref="$rref$LF$remote_name"
-
-      # There are transports that can fetch only one head at a time...
-      case "$remote" in
-      http://* | https://* | ftp://*)
-         test -n "$shallow_depth" &&
-               die "shallow clone with http not supported"
-         proto=$(expr "$remote" : '\([^:]*\):')
-         if [ -n "$GIT_SSL_NO_VERIFY" ]; then
-             curl_extra_args="-k"
-         fi
-         if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "$(git config --bool http.noEPSV)" = true ]; then
-             noepsv_opt="--disable-epsv"
-         fi
-
-         # Find $remote_name from ls-remote output.
-         head=$(echo "$ls_remote_result" | \
-               git fetch--tool -s pick-rref "$remote_name" "-")
-         expr "z$head" : "z$_x40\$" >/dev/null ||
-               die "No such ref $remote_name at $remote"
-         echo >&2 "Fetching $remote_name from $remote using $proto"
-         case "$quiet" in '') v=-v ;; *) v= ;; esac
-         git-http-fetch $v -a "$head" "$remote" || exit
-         ;;
-      rsync://*)
-         test -n "$shallow_depth" &&
-               die "shallow clone with rsync not supported"
-         TMP_HEAD="$GIT_DIR/TMP_HEAD"
-         rsync -L -q "$remote/$remote_name" "$TMP_HEAD" || exit 1
-         head=$(git rev-parse --verify TMP_HEAD)
-         rm -f "$TMP_HEAD"
-         case "$quiet" in '') v=-v ;; *) v= ;; esac
-         test "$rsync_slurped_objects" || {
-             rsync -a $v --ignore-existing --exclude info \
-                 "$remote/objects/" "$GIT_OBJECT_DIRECTORY/" || exit
-
-             # Look at objects/info/alternates for rsync -- http will
-             # support it natively and git native ones will do it on
-             # the remote end.  Not having that file is not a crime.
-             rsync -q "$remote/objects/info/alternates" \
-                 "$GIT_DIR/TMP_ALT" 2>/dev/null ||
-                 rm -f "$GIT_DIR/TMP_ALT"
-             if test -f "$GIT_DIR/TMP_ALT"
-             then
-                 resolve_alternates "$remote" <"$GIT_DIR/TMP_ALT" |
-                 while read alt
-                 do
-                     case "$alt" in 'bad alternate: '*) die "$alt";; esac
-                     echo >&2 "Getting alternate: $alt"
-                     rsync -av --ignore-existing --exclude info \
-                     "$alt" "$GIT_OBJECT_DIRECTORY/" || exit
-                 done
-                 rm -f "$GIT_DIR/TMP_ALT"
-             fi
-             rsync_slurped_objects=t
-         }
-         ;;
-      esac
-
-      append_fetch_head "$head" "$remote" \
-         "$remote_name" "$remote_nick" "$local_name" "$not_for_merge" || exit
-
-  done
-
-}
-
-fetch_main () {
-       case "$remote" in
-       http://* | https://* | ftp://* | rsync://* )
-               fetch_per_ref "$@"
-               ;;
-       *)
-               fetch_all_at_once "$@"
-               ;;
-       esac
-}
-
-fetch_main "$reflist" || exit
-
-# automated tag following
-case "$no_tags$tags" in
-'')
-       case "$reflist" in
-       *:refs/*)
-               # effective only when we are following remote branch
-               # using local tracking branch.
-               taglist=$(IFS=' ' &&
-               echo "$ls_remote_result" |
-               git show-ref --exclude-existing=refs/tags/ |
-               while read sha1 name
-               do
-                       git cat-file -t "$sha1" >/dev/null 2>&1 || continue
-                       echo >&2 "Auto-following $name"
-                       echo ".${name}:${name}"
-               done)
-       esac
-       case "$taglist" in
-       '') ;;
-       ?*)
-               # do not deepen a shallow tree when following tags
-               shallow_depth=
-               fetch_main "$taglist" || exit ;;
-       esac
-esac
-
-# If the original head was empty (i.e. no "master" yet), or
-# if we were told not to worry, we do not have to check.
-case "$orig_head" in
-'')
-       ;;
-?*)
-       curr_head=$(git rev-parse --verify HEAD 2>/dev/null)
-       if test "$curr_head" != "$orig_head"
-       then
-           git update-ref \
-                       -m "$GIT_REFLOG_ACTION: Undoing incorrectly fetched HEAD." \
-                       HEAD "$orig_head"
-               die "Cannot fetch into the current branch."
-       fi
-       ;;
-esac
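
(The refspec handling and tag auto-following scripted above are done natively by the builtin "git fetch"; roughly, and without claiming identical behavior:)

    git fetch origin                      # fetch configured refspecs, auto-follow tags
    git fetch --tags origin               # additionally fetch all tags
    git fetch --no-tags origin            # suppress tag auto-following
    git fetch --depth=1 origin master     # shallow fetch of a single branch
    git fetch --upload-pack=<path> <repository>   # run a specific upload-pack on the remote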
diff --git a/contrib/examples/git-gc.sh b/contrib/examples/git-gc.sh
deleted file mode 100755 (executable)
index 1597e9f..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2006, Shawn O. Pearce
-#
-# Cleanup unreachable files and optimize the repository.
-
-USAGE='[--prune]'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-
-no_prune=:
-while test $# != 0
-do
-       case "$1" in
-       --prune)
-               no_prune=
-               ;;
-       --)
-               usage
-               ;;
-       esac
-       shift
-done
-
-case "$(git config --get gc.packrefs)" in
-notbare|"")
-       test $(is_bare_repository) = true || pack_refs=true;;
-*)
-       pack_refs=$(git config --bool --get gc.packrefs)
-esac
-
-test "true" != "$pack_refs" ||
-git pack-refs --prune &&
-git reflog expire --all &&
-git-repack -a -d -l &&
-$no_prune git prune &&
-git rerere gc || exit
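
(The chain above -- pack-refs, reflog expire, repack, prune, rerere gc -- is what the builtin "git gc" performs; an approximate sketch:)

    git gc                          # routine housekeeping, pruning with the default grace period
    git gc --prune=now              # roughly the old --prune: drop unreachable loose objects immediately
    git gc --auto                   # run only if heuristics say the repository needs it
    git config gc.packRefs false    # opt out of packing refs during gc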
diff --git a/contrib/examples/git-log.sh b/contrib/examples/git-log.sh
deleted file mode 100755 (executable)
index c2ea71c..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-#
-
-USAGE='[--max-count=<n>] [<since>..<limit>] [--pretty=<format>] [git-rev-list options]'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-revs=$(git-rev-parse --revs-only --no-flags --default HEAD "$@") || exit
-[ "$revs" ] || {
-       die "No HEAD ref"
-}
-git-rev-list --pretty $(git-rev-parse --default HEAD "$@") |
-LESS=-S ${PAGER:-less}
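
(The builtin "git log" covers the same ground as this thin wrapper; for instance:)

    git log --max-count=10 --pretty=medium     # the last ten commits in the default format
    git log --pretty=oneline <since>..<limit>  # restrict to a range, one line per commit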
diff --git a/contrib/examples/git-ls-remote.sh b/contrib/examples/git-ls-remote.sh
deleted file mode 100755 (executable)
index 2aa89a7..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/sh
-#
-
-usage () {
-    echo >&2 "usage: $0 [--heads] [--tags] [-u|--upload-pack <upload-pack>]"
-    echo >&2 "          <repository> <refs>..."
-    exit 1;
-}
-
-die () {
-    echo >&2 "$*"
-    exit 1
-}
-
-exec=
-while test $# != 0
-do
-  case "$1" in
-  -h|--h|--he|--hea|--head|--heads)
-  heads=heads; shift ;;
-  -t|--t|--ta|--tag|--tags)
-  tags=tags; shift ;;
-  -u|--u|--up|--upl|--uploa|--upload|--upload-|--upload-p|--upload-pa|\
-  --upload-pac|--upload-pack)
-       shift
-       exec="--upload-pack=$1"
-       shift;;
-  -u=*|--u=*|--up=*|--upl=*|--uplo=*|--uploa=*|--upload=*|\
-  --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
-       exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
-       shift;;
-  --)
-  shift; break ;;
-  -*)
-  usage ;;
-  *)
-  break ;;
-  esac
-done
-
-case "$#" in 0) usage ;; esac
-
-case ",$heads,$tags," in
-,,,) heads=heads tags=tags other=other ;;
-esac
-
-. git-parse-remote
-peek_repo="$(get_remote_url "$@")"
-shift
-
-tmp=.ls-remote-$$
-trap "rm -fr $tmp-*" 0 1 2 3 15
-tmpdir=$tmp-d
-
-case "$peek_repo" in
-http://* | https://* | ftp://* )
-       if [ -n "$GIT_SSL_NO_VERIFY" -o \
-               "$(git config --bool http.sslVerify)" = false ]; then
-               curl_extra_args="-k"
-       fi
-       if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "$(git config --bool http.noEPSV)" = true ]; then
-               curl_extra_args="${curl_extra_args} --disable-epsv"
-       fi
-       curl -nsf $curl_extra_args --header "Pragma: no-cache" "$peek_repo/info/refs" ||
-               echo "failed    slurping"
-       ;;
-
-rsync://* )
-       mkdir $tmpdir &&
-       rsync -rlq "$peek_repo/HEAD" $tmpdir &&
-       rsync -rq "$peek_repo/refs" $tmpdir || {
-               echo "failed    slurping"
-               exit
-       }
-       head=$(cat "$tmpdir/HEAD") &&
-       case "$head" in
-       ref:' '*)
-               head=$(expr "z$head" : 'zref: \(.*\)') &&
-               head=$(cat "$tmpdir/$head") || exit
-       esac &&
-       echo "$head     HEAD"
-       (cd $tmpdir && find refs -type f) |
-       while read path
-       do
-               tr -d '\012' <"$tmpdir/$path"
-               echo "  $path"
-       done &&
-       rm -fr $tmpdir
-       ;;
-
-* )
-       if test -f "$peek_repo" ; then
-               git bundle list-heads "$peek_repo" ||
-               echo "failed    slurping"
-       else
-               git-peek-remote $exec "$peek_repo" ||
-               echo "failed    slurping"
-       fi
-       ;;
-esac |
-sort -t '      ' -k 2 |
-while read sha1 path
-do
-       case "$sha1" in
-       failed)
-               exit 1 ;;
-       esac
-       case "$path" in
-       refs/heads/*)
-               group=heads ;;
-       refs/tags/*)
-               group=tags ;;
-       *)
-               group=other ;;
-       esac
-       case ",$heads,$tags,$other," in
-       *,$group,*)
-               ;;
-       *)
-               continue;;
-       esac
-       case "$#" in
-       0)
-               match=yes ;;
-       *)
-               match=no
-               for pat
-               do
-                       case "/$path" in
-                       */$pat )
-                               match=yes
-                               break ;;
-                       esac
-               done
-       esac
-       case "$match" in
-       no)
-               continue ;;
-       esac
-       echo "$sha1     $path"
-done
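
(The filtering done by this removed script is built into "git ls-remote"; a short sketch, without claiming identical output formatting:)

    git ls-remote --heads origin                  # list only refs/heads/*
    git ls-remote --tags origin                   # list only refs/tags/*
    git ls-remote origin master                   # show just the refs matching "master"
    git ls-remote --upload-pack=<path> <repository>   # override upload-pack, as the old -u did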
diff --git a/contrib/examples/git-merge-ours.sh b/contrib/examples/git-merge-ours.sh
deleted file mode 100755 (executable)
index 29dba4b..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano
-#
-# Pretend we resolved the heads, but declare our tree trumps everybody else.
-#
-
-# We need to exit with 2 if the index does not match our HEAD tree,
-# because the current index is what we will be committing as the
-# merge result.
-
-git diff-index --quiet --cached HEAD -- || exit 2
-
-exit 0
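
(This two-command script lives on as the builtin "ours" merge strategy; for example:)

    git merge -s ours topic    # record a merge with "topic" while keeping our tree unchanged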
diff --git a/contrib/examples/git-merge.sh b/contrib/examples/git-merge.sh
deleted file mode 100755 (executable)
index 932e78d..0000000
+++ /dev/null
@@ -1,620 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano
-#
-
-OPTIONS_KEEPDASHDASH=
-OPTIONS_SPEC="\
-git merge [options] <remote>...
-git merge [options] <msg> HEAD <remote>
---
-stat                 show a diffstat at the end of the merge
-n                    don't show a diffstat at the end of the merge
-summary              (synonym to --stat)
-log                  add list of one-line log to merge commit message
-squash               create a single commit instead of doing a merge
-commit               perform a commit if the merge succeeds (default)
-ff                   allow fast-forward (default)
-ff-only              abort if fast-forward is not possible
-rerere-autoupdate    update index with any reused conflict resolution
-s,strategy=          merge strategy to use
-X=                   option for selected merge strategy
-m,message=           message to be used for the merge commit (if any)
-"
-
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-require_work_tree
-cd_to_toplevel
-
-test -z "$(git ls-files -u)" ||
-       die "Merge is not possible because you have unmerged files."
-
-! test -e "$GIT_DIR/MERGE_HEAD" ||
-       die 'You have not concluded your merge (MERGE_HEAD exists).'
-
-LF='
-'
-
-all_strategies='recur recursive octopus resolve stupid ours subtree'
-all_strategies="$all_strategies recursive-ours recursive-theirs"
-not_strategies='base file index tree'
-default_twohead_strategies='recursive'
-default_octopus_strategies='octopus'
-no_fast_forward_strategies='subtree ours'
-no_trivial_strategies='recursive recur subtree ours recursive-ours recursive-theirs'
-use_strategies=
-xopt=
-
-allow_fast_forward=t
-fast_forward_only=
-allow_trivial_merge=t
-squash= no_commit= log_arg= rr_arg=
-
-dropsave() {
-       rm -f -- "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/MERGE_MSG" \
-                "$GIT_DIR/MERGE_STASH" "$GIT_DIR/MERGE_MODE" || exit 1
-}
-
-savestate() {
-       # Stash away any local modifications.
-       git stash create >"$GIT_DIR/MERGE_STASH"
-}
-
-restorestate() {
-        if test -f "$GIT_DIR/MERGE_STASH"
-       then
-               git reset --hard $head >/dev/null
-               git stash apply $(cat "$GIT_DIR/MERGE_STASH")
-               git update-index --refresh >/dev/null
-       fi
-}
-
-finish_up_to_date () {
-       case "$squash" in
-       t)
-               echo "$1 (nothing to squash)" ;;
-       '')
-               echo "$1" ;;
-       esac
-       dropsave
-}
-
-squash_message () {
-       echo Squashed commit of the following:
-       echo
-       git log --no-merges --pretty=medium ^"$head" $remoteheads
-}
-
-finish () {
-       if test '' = "$2"
-       then
-               rlogm="$GIT_REFLOG_ACTION"
-       else
-               echo "$2"
-               rlogm="$GIT_REFLOG_ACTION: $2"
-       fi
-       case "$squash" in
-       t)
-               echo "Squash commit -- not updating HEAD"
-               squash_message >"$GIT_DIR/SQUASH_MSG"
-               ;;
-       '')
-               case "$merge_msg" in
-               '')
-                       echo "No merge message -- not updating HEAD"
-                       ;;
-               *)
-                       git update-ref -m "$rlogm" HEAD "$1" "$head" || exit 1
-                       git gc --auto
-                       ;;
-               esac
-               ;;
-       esac
-       case "$1" in
-       '')
-               ;;
-       ?*)
-               if test "$show_diffstat" = t
-               then
-                       # We want color (if set), but no pager
-                       GIT_PAGER='' git diff --stat --summary -M "$head" "$1"
-               fi
-               ;;
-       esac
-
-       # Run a post-merge hook
-        if test -x "$GIT_DIR"/hooks/post-merge
-        then
-           case "$squash" in
-           t)
-                "$GIT_DIR"/hooks/post-merge 1
-               ;;
-           '')
-                "$GIT_DIR"/hooks/post-merge 0
-               ;;
-           esac
-        fi
-}
-
-merge_name () {
-       remote="$1"
-       rh=$(git rev-parse --verify "$remote^0" 2>/dev/null) || return
-       if truname=$(expr "$remote" : '\(.*\)~[0-9]*$') &&
-               git show-ref -q --verify "refs/heads/$truname" 2>/dev/null
-       then
-               echo "$rh               branch '$truname' (early part) of ."
-               return
-       fi
-       if found_ref=$(git rev-parse --symbolic-full-name --verify \
-                                                       "$remote" 2>/dev/null)
-       then
-               expanded=$(git check-ref-format --branch "$remote") ||
-                       exit
-               if test "${found_ref#refs/heads/}" != "$found_ref"
-               then
-                       echo "$rh               branch '$expanded' of ."
-                       return
-               elif test "${found_ref#refs/remotes/}" != "$found_ref"
-               then
-                       echo "$rh               remote branch '$expanded' of ."
-                       return
-               fi
-       fi
-       if test "$remote" = "FETCH_HEAD" && test -r "$GIT_DIR/FETCH_HEAD"
-       then
-               sed -e 's/      not-for-merge   /               /' -e 1q \
-                       "$GIT_DIR/FETCH_HEAD"
-               return
-       fi
-       echo "$rh               commit '$remote'"
-}
-
-parse_config () {
-       while test $# != 0; do
-               case "$1" in
-               -n|--no-stat|--no-summary)
-                       show_diffstat=false ;;
-               --stat|--summary)
-                       show_diffstat=t ;;
-               --log|--no-log)
-                       log_arg=$1 ;;
-               --squash)
-                       test "$allow_fast_forward" = t ||
-                               die "You cannot combine --squash with --no-ff."
-                       squash=t no_commit=t ;;
-               --no-squash)
-                       squash= no_commit= ;;
-               --commit)
-                       no_commit= ;;
-               --no-commit)
-                       no_commit=t ;;
-               --ff)
-                       allow_fast_forward=t ;;
-               --no-ff)
-                       test "$squash" != t ||
-                               die "You cannot combine --squash with --no-ff."
-                       test "$fast_forward_only" != t ||
-                               die "You cannot combine --ff-only with --no-ff."
-                       allow_fast_forward=f ;;
-               --ff-only)
-                       test "$allow_fast_forward" != f ||
-                               die "You cannot combine --ff-only with --no-ff."
-                       fast_forward_only=t ;;
-               --rerere-autoupdate|--no-rerere-autoupdate)
-                       rr_arg=$1 ;;
-               -s|--strategy)
-                       shift
-                       case " $all_strategies " in
-                       *" $1 "*)
-                               use_strategies="$use_strategies$1 "
-                               ;;
-                       *)
-                               case " $not_strategies " in
-                               *" $1 "*)
-                                       false
-                               esac &&
-                               type "git-merge-$1" >/dev/null 2>&1 ||
-                                       die "available strategies are: $all_strategies"
-                               use_strategies="$use_strategies$1 "
-                               ;;
-                       esac
-                       ;;
-               -X)
-                       shift
-                       xopt="${xopt:+$xopt }$(git rev-parse --sq-quote "--$1")"
-                       ;;
-               -m|--message)
-                       shift
-                       merge_msg="$1"
-                       have_message=t
-                       ;;
-               --)
-                       shift
-                       break ;;
-               *)      usage ;;
-               esac
-               shift
-       done
-       args_left=$#
-}
-
-test $# != 0 || usage
-
-have_message=
-
-if branch=$(git-symbolic-ref -q HEAD)
-then
-       mergeopts=$(git config "branch.${branch#refs/heads/}.mergeoptions")
-       if test -n "$mergeopts"
-       then
-               parse_config $mergeopts --
-       fi
-fi
-
-parse_config "$@"
-while test $args_left -lt $#; do shift; done
-
-if test -z "$show_diffstat"; then
-    test "$(git config --bool merge.diffstat)" = false && show_diffstat=false
-    test "$(git config --bool merge.stat)" = false && show_diffstat=false
-    test -z "$show_diffstat" && show_diffstat=t
-fi
-
-# This could be traditional "merge <msg> HEAD <commit>..."  and the
-# way we can tell it is to see if the second token is HEAD, but some
-# people might have misused the interface and used a commit-ish that
-# is the same as HEAD there instead.  Traditional format never would
-# have "-m" so it is an additional safety measure to check for it.
-
-if test -z "$have_message" &&
-       second_token=$(git rev-parse --verify "$2^0" 2>/dev/null) &&
-       head_commit=$(git rev-parse --verify "HEAD" 2>/dev/null) &&
-       test "$second_token" = "$head_commit"
-then
-       merge_msg="$1"
-       shift
-       head_arg="$1"
-       shift
-elif ! git rev-parse --verify HEAD >/dev/null 2>&1
-then
-       # If the merged head is a valid one there is no reason to
-       # forbid "git merge" into a branch yet to be born.  We do
-       # the same for "git pull".
-       if test 1 -ne $#
-       then
-               echo >&2 "Can merge only exactly one commit into empty head"
-               exit 1
-       fi
-
-       test "$squash" != t ||
-               die "Squash commit into empty head not supported yet"
-       test "$allow_fast_forward" = t ||
-               die "Non-fast-forward into an empty head does not make sense"
-       rh=$(git rev-parse --verify "$1^0") ||
-               die "$1 - not something we can merge"
-
-       git update-ref -m "initial pull" HEAD "$rh" "" &&
-       git read-tree --reset -u HEAD
-       exit
-
-else
-       # We are invoked directly as the first-class UI.
-       head_arg=HEAD
-
-       # All the rest are the commits being merged; prepare
-       # the standard merge summary message to be appended to
-       # the given message.  If remote is invalid we will die
-       # later in the common codepath so we discard the error
-       # in this loop.
-       merge_msg="$(
-               for remote
-               do
-                       merge_name "$remote"
-               done |
-               if test "$have_message" = t
-               then
-                       git fmt-merge-msg -m "$merge_msg" $log_arg
-               else
-                       git fmt-merge-msg $log_arg
-               fi
-       )"
-fi
-head=$(git rev-parse --verify "$head_arg"^0) || usage
-
-# All the rest are remote heads
-test "$#" = 0 && usage ;# we need at least one remote head.
-set_reflog_action "merge $*"
-
-remoteheads=
-for remote
-do
-       remotehead=$(git rev-parse --verify "$remote"^0 2>/dev/null) ||
-           die "$remote - not something we can merge"
-       remoteheads="${remoteheads}$remotehead "
-       eval GITHEAD_$remotehead='"$remote"'
-       export GITHEAD_$remotehead
-done
-set x $remoteheads ; shift
-
-case "$use_strategies" in
-'')
-       case "$#" in
-       1)
-               var="$(git config --get pull.twohead)"
-               if test -n "$var"
-               then
-                       use_strategies="$var"
-               else
-                       use_strategies="$default_twohead_strategies"
-               fi ;;
-       *)
-               var="$(git config --get pull.octopus)"
-               if test -n "$var"
-               then
-                       use_strategies="$var"
-               else
-                       use_strategies="$default_octopus_strategies"
-               fi ;;
-       esac
-       ;;
-esac
-
-for s in $use_strategies
-do
-       for ss in $no_fast_forward_strategies
-       do
-               case " $s " in
-               *" $ss "*)
-                       allow_fast_forward=f
-                       break
-                       ;;
-               esac
-       done
-       for ss in $no_trivial_strategies
-       do
-               case " $s " in
-               *" $ss "*)
-                       allow_trivial_merge=f
-                       break
-                       ;;
-               esac
-       done
-done
-
-case "$#" in
-1)
-       common=$(git merge-base --all $head "$@")
-       ;;
-*)
-       common=$(git merge-base --all --octopus $head "$@")
-       ;;
-esac
-echo "$head" >"$GIT_DIR/ORIG_HEAD"
-
-case "$allow_fast_forward,$#,$common,$no_commit" in
-?,*,'',*)
-       # No common ancestors found. We need a real merge.
-       ;;
-?,1,"$1",*)
-       # If head can reach all the merge then we are up to date.
-       # but first the most common case of merging one remote.
-       finish_up_to_date "Already up to date."
-       exit 0
-       ;;
-t,1,"$head",*)
-       # Again the most common case of merging one remote.
-       echo "Updating $(git rev-parse --short $head)..$(git rev-parse --short $1)"
-       git update-index --refresh 2>/dev/null
-       msg="Fast-forward"
-       if test -n "$have_message"
-       then
-               msg="$msg (no commit created; -m option ignored)"
-       fi
-       new_head=$(git rev-parse --verify "$1^0") &&
-       git read-tree -v -m -u --exclude-per-directory=.gitignore $head "$new_head" &&
-       finish "$new_head" "$msg" || exit
-       dropsave
-       exit 0
-       ;;
-?,1,?*"$LF"?*,*)
-       # We are not doing octopus and not fast-forward.  Need a
-       # real merge.
-       ;;
-?,1,*,)
-       # We are not doing octopus, not fast-forward, and have only
-       # one common.
-       git update-index --refresh 2>/dev/null
-       case "$allow_trivial_merge,$fast_forward_only" in
-       t,)
-               # See if it is really trivial.
-               git var GIT_COMMITTER_IDENT >/dev/null || exit
-               echo "Trying really trivial in-index merge..."
-               if git read-tree --trivial -m -u -v $common $head "$1" &&
-                  result_tree=$(git write-tree)
-               then
-                       echo "Wonderful."
-                       result_commit=$(
-                               printf '%s\n' "$merge_msg" |
-                               git commit-tree $result_tree -p HEAD -p "$1"
-                       ) || exit
-                       finish "$result_commit" "In-index merge"
-                       dropsave
-                       exit 0
-               fi
-               echo "Nope."
-       esac
-       ;;
-*)
-       # An octopus.  If we can reach all the remote we are up to date.
-       up_to_date=t
-       for remote
-       do
-               common_one=$(git merge-base --all $head $remote)
-               if test "$common_one" != "$remote"
-               then
-                       up_to_date=f
-                       break
-               fi
-       done
-       if test "$up_to_date" = t
-       then
-               finish_up_to_date "Already up to date. Yeeah!"
-               exit 0
-       fi
-       ;;
-esac
-
-if test "$fast_forward_only" = t
-then
-       die "Not possible to fast-forward, aborting."
-fi
-
-# We are going to make a new commit.
-git var GIT_COMMITTER_IDENT >/dev/null || exit
-
-# At this point, we need a real merge.  No matter what strategy
-# we use, it would operate on the index, possibly affecting the
-# working tree, and when resolved cleanly, have the desired tree
-# in the index -- this means that the index must be in sync with
-# the $head commit.  The strategies are responsible to ensure this.
-
-case "$use_strategies" in
-?*' '?*)
-    # Stash away the local changes so that we can try more than one.
-    savestate
-    single_strategy=no
-    ;;
-*)
-    rm -f "$GIT_DIR/MERGE_STASH"
-    single_strategy=yes
-    ;;
-esac
-
-result_tree= best_cnt=-1 best_strategy= wt_strategy=
-merge_was_ok=
-for strategy in $use_strategies
-do
-    test "$wt_strategy" = '' || {
-       echo "Rewinding the tree to pristine..."
-       restorestate
-    }
-    case "$single_strategy" in
-    no)
-       echo "Trying merge strategy $strategy..."
-       ;;
-    esac
-
-    # Remember which strategy left the state in the working tree
-    wt_strategy=$strategy
-
-    eval 'git-merge-$strategy '"$xopt"' $common -- "$head_arg" "$@"'
-    exit=$?
-    if test "$no_commit" = t && test "$exit" = 0
-    then
-        merge_was_ok=t
-       exit=1 ;# pretend it left conflicts.
-    fi
-
-    test "$exit" = 0 || {
-
-       # The backend exits with 1 when conflicts are left to be resolved,
-       # with 2 when it does not handle the given merge at all.
-
-       if test "$exit" -eq 1
-       then
-           cnt=$({
-               git diff-files --name-only
-               git ls-files --unmerged
-           } | wc -l)
-           if test $best_cnt -le 0 || test $cnt -le $best_cnt
-           then
-               best_strategy=$strategy
-               best_cnt=$cnt
-           fi
-       fi
-       continue
-    }
-
-    # Automerge succeeded.
-    result_tree=$(git write-tree) && break
-done
-
-# If we have a resulting tree, that means the strategy module
-# auto resolved the merge cleanly.
-if test '' != "$result_tree"
-then
-    if test "$allow_fast_forward" = "t"
-    then
-       parents=$(git merge-base --independent "$head" "$@")
-    else
-       parents=$(git rev-parse "$head" "$@")
-    fi
-    parents=$(echo "$parents" | sed -e 's/^/-p /')
-    result_commit=$(printf '%s\n' "$merge_msg" | git commit-tree $result_tree $parents) || exit
-    finish "$result_commit" "Merge made by $wt_strategy."
-    dropsave
-    exit 0
-fi
-
-# Pick the result from the best strategy and have the user fix it up.
-case "$best_strategy" in
-'')
-       restorestate
-       case "$use_strategies" in
-       ?*' '?*)
-               echo >&2 "No merge strategy handled the merge."
-               ;;
-       *)
-               echo >&2 "Merge with strategy $use_strategies failed."
-               ;;
-       esac
-       exit 2
-       ;;
-"$wt_strategy")
-       # We already have its result in the working tree.
-       ;;
-*)
-       echo "Rewinding the tree to pristine..."
-       restorestate
-       echo "Using the $best_strategy to prepare resolving by hand."
-       git-merge-$best_strategy $common -- "$head_arg" "$@"
-       ;;
-esac
-
-if test "$squash" = t
-then
-       finish
-else
-       for remote
-       do
-               echo $remote
-       done >"$GIT_DIR/MERGE_HEAD"
-       printf '%s\n' "$merge_msg" >"$GIT_DIR/MERGE_MSG" ||
-               die "Could not write to $GIT_DIR/MERGE_MSG"
-       if test "$allow_fast_forward" != t
-       then
-               printf "%s" no-ff
-       else
-               :
-       fi >"$GIT_DIR/MERGE_MODE" ||
-               die "Could not write to $GIT_DIR/MERGE_MODE"
-fi
-
-if test "$merge_was_ok" = t
-then
-       echo >&2 \
-       "Automatic merge went well; stopped before committing as requested"
-       exit 0
-else
-       {
-           echo '
-Conflicts:
-'
-               git ls-files --unmerged |
-               sed -e 's/^[^   ]*      /       /' |
-               uniq
-       } >>"$GIT_DIR/MERGE_MSG"
-       git rerere $rr_arg
-       die "Automatic merge failed; fix conflicts and then commit the result."
-fi
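
The script above is the retired shell front end for merging: it builds the summary with git fmt-merge-msg, runs each strategy backend as git-merge-<strategy>, and records the result through update-ref, MERGE_HEAD and MERGE_MSG. For orientation only, a minimal sketch of the same operations through the builtin command that superseded it; the branch name "topic" and the message are placeholders:

    # refuse anything but a fast-forward (the --ff-only handling above)
    git merge --ff-only topic

    # force a real merge commit with a given message (--no-ff / -m above)
    git merge --no-ff -m "Merge branch 'topic'" topic

    # choose a strategy and a strategy option, as parse_config does for -s/-X
    git merge -s recursive -X ours topic
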
diff --git a/contrib/examples/git-notes.sh b/contrib/examples/git-notes.sh
deleted file mode 100755 (executable)
index e642e47..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/sh
-
-USAGE="(edit [-F <file> | -m <msg>] | show) [commit]"
-. git-sh-setup
-
-test -z "$1" && usage
-ACTION="$1"; shift
-
-test -z "$GIT_NOTES_REF" && GIT_NOTES_REF="$(git config core.notesref)"
-test -z "$GIT_NOTES_REF" && GIT_NOTES_REF="refs/notes/commits"
-
-MESSAGE=
-while test $# != 0
-do
-       case "$1" in
-       -m)
-               test "$ACTION" = "edit" || usage
-               shift
-               if test "$#" = "0"; then
-                       die "error: option -m needs an argument"
-               else
-                       if [ -z "$MESSAGE" ]; then
-                               MESSAGE="$1"
-                       else
-                               MESSAGE="$MESSAGE
-
-$1"
-                       fi
-                       shift
-               fi
-               ;;
-       -F)
-               test "$ACTION" = "edit" || usage
-               shift
-               if test "$#" = "0"; then
-                       die "error: option -F needs an argument"
-               else
-                       if [ -z "$MESSAGE" ]; then
-                               MESSAGE="$(cat "$1")"
-                       else
-                               MESSAGE="$MESSAGE
-
-$(cat "$1")"
-                       fi
-                       shift
-               fi
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-done
-
-COMMIT=$(git rev-parse --verify --default HEAD "$@") ||
-die "Invalid commit: $@"
-
-case "$ACTION" in
-edit)
-       if [ "${GIT_NOTES_REF#refs/notes/}" = "$GIT_NOTES_REF" ]; then
-               die "Refusing to edit notes in $GIT_NOTES_REF (outside of refs/notes/)"
-       fi
-
-       MSG_FILE="$GIT_DIR/new-notes-$COMMIT"
-       GIT_INDEX_FILE="$MSG_FILE.idx"
-       export GIT_INDEX_FILE
-
-       trap '
-               test -f "$MSG_FILE" && rm "$MSG_FILE"
-               test -f "$GIT_INDEX_FILE" && rm "$GIT_INDEX_FILE"
-       ' 0
-
-       CURRENT_HEAD=$(git show-ref "$GIT_NOTES_REF" | cut -f 1 -d ' ')
-       if [ -z "$CURRENT_HEAD" ]; then
-               PARENT=
-       else
-               PARENT="-p $CURRENT_HEAD"
-               git read-tree "$GIT_NOTES_REF" || die "Could not read index"
-       fi
-
-       if [ -z "$MESSAGE" ]; then
-               GIT_NOTES_REF= git log -1 $COMMIT | sed "s/^/#/" > "$MSG_FILE"
-               if [ ! -z "$CURRENT_HEAD" ]; then
-                       git cat-file blob :$COMMIT >> "$MSG_FILE" 2> /dev/null
-               fi
-               core_editor="$(git config core.editor)"
-               ${GIT_EDITOR:-${core_editor:-${VISUAL:-${EDITOR:-vi}}}} "$MSG_FILE"
-       else
-               echo "$MESSAGE" > "$MSG_FILE"
-       fi
-
-       grep -v ^# < "$MSG_FILE" | git stripspace > "$MSG_FILE".processed
-       mv "$MSG_FILE".processed "$MSG_FILE"
-       if [ -s "$MSG_FILE" ]; then
-               BLOB=$(git hash-object -w "$MSG_FILE") ||
-                       die "Could not write into object database"
-               git update-index --add --cacheinfo 0644 $BLOB $COMMIT ||
-                       die "Could not write index"
-       else
-               test -z "$CURRENT_HEAD" &&
-                       die "Will not initialise with empty tree"
-               git update-index --force-remove $COMMIT ||
-                       die "Could not update index"
-       fi
-
-       TREE=$(git write-tree) || die "Could not write tree"
-       NEW_HEAD=$(echo Annotate $COMMIT | git commit-tree $TREE $PARENT) ||
-               die "Could not annotate"
-       git update-ref -m "Annotate $COMMIT" \
-               "$GIT_NOTES_REF" $NEW_HEAD $CURRENT_HEAD
-;;
-show)
-       git rev-parse -q --verify "$GIT_NOTES_REF":$COMMIT > /dev/null ||
-               die "No note for commit $COMMIT."
-       git show "$GIT_NOTES_REF":$COMMIT
-;;
-*)
-       usage
-esac
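
The note editor above hashes the message as a blob, stages it in a temporary index keyed by the annotated commit, and commits the resulting tree onto refs/notes/commits. A minimal sketch of the equivalent builtin calls; HEAD stands in for any commit:

    # add a note to a commit (stored under refs/notes/commits by default)
    git notes add -m "annotated by hand" HEAD

    # open the note in the editor, or print it as the 'show' action above does
    git notes edit HEAD
    git notes show HEAD
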
diff --git a/contrib/examples/git-pull.sh b/contrib/examples/git-pull.sh
deleted file mode 100755 (executable)
index 6b3a03f..0000000
+++ /dev/null
@@ -1,381 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano
-#
-# Fetch one or more remote refs and merge it/them into the current HEAD.
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=Yes
-OPTIONS_SPEC="\
-git pull [options] [<repository> [<refspec>...]]
-
-Fetch one or more remote refs and integrate it/them with the current HEAD.
---
-v,verbose                  be more verbose
-q,quiet                    be more quiet
-progress                   force progress reporting
-
-  Options related to merging
-r,rebase?false|true|preserve incorporate changes by rebasing rather than merging
-n!                         do not show a diffstat at the end of the merge
-stat                       show a diffstat at the end of the merge
-summary                    (synonym to --stat)
-log?n                      add (at most <n>) entries from shortlog to merge commit message
-squash                     create a single commit instead of doing a merge
-commit                     perform a commit if the merge succeeds (default)
-e,edit                       edit message before committing
-ff                         allow fast-forward
-ff-only!                   abort if fast-forward is not possible
-verify-signatures          verify that the named commit has a valid GPG signature
-s,strategy=strategy        merge strategy to use
-X,strategy-option=option   option for selected merge strategy
-S,gpg-sign?key-id          GPG sign commit
-
-  Options related to fetching
-all                        fetch from all remotes
-a,append                   append to .git/FETCH_HEAD instead of overwriting
-upload-pack=path           path to upload pack on remote end
-f,force                    force overwrite of local branch
-t,tags                     fetch all tags and associated objects
-p,prune                    prune remote-tracking branches no longer on remote
-recurse-submodules?on-demand control recursive fetching of submodules
-dry-run                    dry run
-k,keep                     keep downloaded pack
-depth=depth                deepen history of shallow clone
-unshallow                  convert to a complete repository
-update-shallow             accept refs that update .git/shallow
-refmap=refmap              specify fetch refmap
-"
-test $# -gt 0 && args="$*"
-. git-sh-setup
-. git-sh-i18n
-set_reflog_action "pull${args+ $args}"
-require_work_tree_exists
-cd_to_toplevel
-
-
-die_conflict () {
-    git diff-index --cached --name-status -r --ignore-submodules HEAD --
-    if [ $(git config --bool --get advice.resolveConflict || echo true) = "true" ]; then
-       die "$(gettext "Pull is not possible because you have unmerged files.
-Please, fix them up in the work tree, and then use 'git add/rm <file>'
-as appropriate to mark resolution and make a commit.")"
-    else
-       die "$(gettext "Pull is not possible because you have unmerged files.")"
-    fi
-}
-
-die_merge () {
-    if [ $(git config --bool --get advice.resolveConflict || echo true) = "true" ]; then
-       die "$(gettext "You have not concluded your merge (MERGE_HEAD exists).
-Please, commit your changes before merging.")"
-    else
-       die "$(gettext "You have not concluded your merge (MERGE_HEAD exists).")"
-    fi
-}
-
-test -z "$(git ls-files -u)" || die_conflict
-test -f "$GIT_DIR/MERGE_HEAD" && die_merge
-
-bool_or_string_config () {
-       git config --bool "$1" 2>/dev/null || git config "$1"
-}
-
-strategy_args= diffstat= no_commit= squash= no_ff= ff_only=
-log_arg= verbosity= progress= recurse_submodules= verify_signatures=
-merge_args= edit= rebase_args= all= append= upload_pack= force= tags= prune=
-keep= depth= unshallow= update_shallow= refmap=
-curr_branch=$(git symbolic-ref -q HEAD)
-curr_branch_short="${curr_branch#refs/heads/}"
-rebase=$(bool_or_string_config branch.$curr_branch_short.rebase)
-if test -z "$rebase"
-then
-       rebase=$(bool_or_string_config pull.rebase)
-fi
-
-# Set up default fast-forward options via `pull.ff`
-pull_ff=$(bool_or_string_config pull.ff)
-case "$pull_ff" in
-true)
-       no_ff=--ff
-       ;;
-false)
-       no_ff=--no-ff
-       ;;
-only)
-       ff_only=--ff-only
-       ;;
-esac
-
-
-dry_run=
-while :
-do
-       case "$1" in
-       -q|--quiet)
-               verbosity="$verbosity -q" ;;
-       -v|--verbose)
-               verbosity="$verbosity -v" ;;
-       --progress)
-               progress=--progress ;;
-       --no-progress)
-               progress=--no-progress ;;
-       -n|--no-stat|--no-summary)
-               diffstat=--no-stat ;;
-       --stat|--summary)
-               diffstat=--stat ;;
-       --log|--log=*|--no-log)
-               log_arg="$1" ;;
-       --no-commit)
-               no_commit=--no-commit ;;
-       --commit)
-               no_commit=--commit ;;
-       -e|--edit)
-               edit=--edit ;;
-       --no-edit)
-               edit=--no-edit ;;
-       --squash)
-               squash=--squash ;;
-       --no-squash)
-               squash=--no-squash ;;
-       --ff)
-               no_ff=--ff ;;
-       --no-ff)
-               no_ff=--no-ff ;;
-       --ff-only)
-               ff_only=--ff-only ;;
-       -s*|--strategy=*)
-               strategy_args="$strategy_args $1"
-               ;;
-       -X*|--strategy-option=*)
-               merge_args="$merge_args $(git rev-parse --sq-quote "$1")"
-               ;;
-       -r*|--rebase=*)
-               rebase="${1#*=}"
-               ;;
-       --rebase)
-               rebase=true
-               ;;
-       --no-rebase)
-               rebase=false
-               ;;
-       --recurse-submodules)
-               recurse_submodules=--recurse-submodules
-               ;;
-       --recurse-submodules=*)
-               recurse_submodules="$1"
-               ;;
-       --no-recurse-submodules)
-               recurse_submodules=--no-recurse-submodules
-               ;;
-       --verify-signatures)
-               verify_signatures=--verify-signatures
-               ;;
-       --no-verify-signatures)
-               verify_signatures=--no-verify-signatures
-               ;;
-       --gpg-sign|-S)
-               gpg_sign_args=-S
-               ;;
-       --gpg-sign=*)
-               gpg_sign_args=$(git rev-parse --sq-quote "-S${1#--gpg-sign=}")
-               ;;
-       -S*)
-               gpg_sign_args=$(git rev-parse --sq-quote "$1")
-               ;;
-       --dry-run)
-               dry_run=--dry-run
-               ;;
-       --all|--no-all)
-               all=$1 ;;
-       -a|--append|--no-append)
-               append=$1 ;;
-       --upload-pack=*|--no-upload-pack)
-               upload_pack=$1 ;;
-       -f|--force|--no-force)
-               force="$force $1" ;;
-       -t|--tags|--no-tags)
-               tags=$1 ;;
-       -p|--prune|--no-prune)
-               prune=$1 ;;
-       -k|--keep|--no-keep)
-               keep=$1 ;;
-       --depth=*|--no-depth)
-               depth=$1 ;;
-       --unshallow|--no-unshallow)
-               unshallow=$1 ;;
-       --update-shallow|--no-update-shallow)
-               update_shallow=$1 ;;
-       --refmap=*|--no-refmap)
-               refmap=$1 ;;
-       -h|--help-all)
-               usage
-               ;;
-       --)
-               shift
-               break
-               ;;
-       *)
-               usage
-               ;;
-       esac
-       shift
-done
-
-case "$rebase" in
-preserve)
-       rebase=true
-       rebase_args=--preserve-merges
-       ;;
-true|false|'')
-       ;;
-*)
-       echo "Invalid value for --rebase, should be true, false, or preserve"
-       usage
-       exit 1
-       ;;
-esac
-
-error_on_no_merge_candidates () {
-       exec >&2
-
-       if test true = "$rebase"
-       then
-               op_type=rebase
-               op_prep=against
-       else
-               op_type=merge
-               op_prep=with
-       fi
-
-       upstream=$(git config "branch.$curr_branch_short.merge")
-       remote=$(git config "branch.$curr_branch_short.remote")
-
-       if [ $# -gt 1 ]; then
-               if [ "$rebase" = true ]; then
-                       printf "There is no candidate for rebasing against "
-               else
-                       printf "There are no candidates for merging "
-               fi
-               echo "among the refs that you just fetched."
-               echo "Generally this means that you provided a wildcard refspec which had no"
-               echo "matches on the remote end."
-       elif [ $# -gt 0 ] && [ "$1" != "$remote" ]; then
-               echo "You asked to pull from the remote '$1', but did not specify"
-               echo "a branch. Because this is not the default configured remote"
-               echo "for your current branch, you must specify a branch on the command line."
-       elif [ -z "$curr_branch" -o -z "$upstream" ]; then
-               . git-parse-remote
-               error_on_missing_default_upstream "pull" $op_type $op_prep \
-                       "git pull <remote> <branch>"
-       else
-               echo "Your configuration specifies to $op_type $op_prep the ref '${upstream#refs/heads/}'"
-               echo "from the remote, but no such ref was fetched."
-       fi
-       exit 1
-}
-
-test true = "$rebase" && {
-       if ! git rev-parse -q --verify HEAD >/dev/null
-       then
-               # On an unborn branch
-               if test -f "$(git rev-parse --git-path index)"
-               then
-                       die "$(gettext "updating an unborn branch with changes added to the index")"
-               fi
-       else
-               require_clean_work_tree "pull with rebase" "Please commit or stash them."
-       fi
-       oldremoteref= &&
-       test -n "$curr_branch" &&
-       . git-parse-remote &&
-       remoteref="$(get_remote_merge_branch "$@" 2>/dev/null)" &&
-       oldremoteref=$(git merge-base --fork-point "$remoteref" $curr_branch 2>/dev/null)
-}
-orig_head=$(git rev-parse -q --verify HEAD)
-git fetch $verbosity $progress $dry_run $recurse_submodules $all $append \
-${upload_pack:+"$upload_pack"} $force $tags $prune $keep $depth $unshallow $update_shallow \
-$refmap --update-head-ok "$@" || exit 1
-test -z "$dry_run" || exit 0
-
-curr_head=$(git rev-parse -q --verify HEAD)
-if test -n "$orig_head" && test "$curr_head" != "$orig_head"
-then
-       # The fetch involved updating the current branch.
-
-       # The working tree and the index file is still based on the
-       # $orig_head commit, but we are merging into $curr_head.
-       # First update the working tree to match $curr_head.
-
-       eval_gettextln "Warning: fetch updated the current branch head.
-Warning: fast-forwarding your working tree from
-Warning: commit \$orig_head." >&2
-       git update-index -q --refresh
-       git read-tree -u -m "$orig_head" "$curr_head" ||
-               die "$(eval_gettext "Cannot fast-forward your working tree.
-After making sure that you saved anything precious from
-$ git diff \$orig_head
-output, run
-$ git reset --hard
-to recover.")"
-
-fi
-
-merge_head=$(sed -e '/ not-for-merge   /d' \
-       -e 's/  .*//' "$GIT_DIR"/FETCH_HEAD | \
-       tr '\012' ' ')
-
-case "$merge_head" in
-'')
-       error_on_no_merge_candidates "$@"
-       ;;
-?*' '?*)
-       if test -z "$orig_head"
-       then
-               die "$(gettext "Cannot merge multiple branches into empty head")"
-       fi
-       if test true = "$rebase"
-       then
-               die "$(gettext "Cannot rebase onto multiple branches")"
-       fi
-       ;;
-esac
-
-# Pulling into unborn branch: a shorthand for branching off
-# FETCH_HEAD, for lazy typers.
-if test -z "$orig_head"
-then
-       # Two-way merge: we claim the index is based on an empty tree,
-       # and try to fast-forward to HEAD.  This ensures we will not
-       # lose index/worktree changes that the user already made on
-       # the unborn branch.
-       empty_tree=4b825dc642cb6eb9a060e54bf8d69288fbee4904
-       git read-tree -m -u $empty_tree $merge_head &&
-       git update-ref -m "initial pull" HEAD $merge_head "$curr_head"
-       exit
-fi
-
-if test true = "$rebase"
-then
-       o=$(git show-branch --merge-base $curr_branch $merge_head $oldremoteref)
-       if test "$oldremoteref" = "$o"
-       then
-               unset oldremoteref
-       fi
-fi
-
-case "$rebase" in
-true)
-       eval="git-rebase $diffstat $strategy_args $merge_args $rebase_args $verbosity"
-       eval="$eval $gpg_sign_args"
-       eval="$eval --onto $merge_head ${oldremoteref:-$merge_head}"
-       ;;
-*)
-       eval="git-merge $diffstat $no_commit $verify_signatures $edit $squash $no_ff $ff_only"
-       eval="$eval $log_arg $strategy_args $merge_args $verbosity $progress"
-       eval="$eval $gpg_sign_args"
-       eval="$eval FETCH_HEAD"
-       ;;
-esac
-eval "exec $eval"
diff --git a/contrib/examples/git-remote.perl b/contrib/examples/git-remote.perl
deleted file mode 100755 (executable)
index d42df7b..0000000
+++ /dev/null
@@ -1,474 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use Git;
-my $git = Git->repository();
-
-sub add_remote_config {
-       my ($hash, $name, $what, $value) = @_;
-       if ($what eq 'url') {
-               # Having more than one is Ok -- it is used for push.
-               if (! exists $hash->{'URL'}) {
-                       $hash->{$name}{'URL'} = $value;
-               }
-       }
-       elsif ($what eq 'fetch') {
-               $hash->{$name}{'FETCH'} ||= [];
-               push @{$hash->{$name}{'FETCH'}}, $value;
-       }
-       elsif ($what eq 'push') {
-               $hash->{$name}{'PUSH'} ||= [];
-               push @{$hash->{$name}{'PUSH'}}, $value;
-       }
-       if (!exists $hash->{$name}{'SOURCE'}) {
-               $hash->{$name}{'SOURCE'} = 'config';
-       }
-}
-
-sub add_remote_remotes {
-       my ($hash, $file, $name) = @_;
-
-       if (exists $hash->{$name}) {
-               $hash->{$name}{'WARNING'} = 'ignored due to config';
-               return;
-       }
-
-       my $fh;
-       if (!open($fh, '<', $file)) {
-               print STDERR "Warning: cannot open $file\n";
-               return;
-       }
-       my $it = { 'SOURCE' => 'remotes' };
-       $hash->{$name} = $it;
-       while (<$fh>) {
-               chomp;
-               if (/^URL:\s*(.*)$/) {
-                       # Having more than one is Ok -- it is used for push.
-                       if (! exists $it->{'URL'}) {
-                               $it->{'URL'} = $1;
-                       }
-               }
-               elsif (/^Push:\s*(.*)$/) {
-                       $it->{'PUSH'} ||= [];
-                       push @{$it->{'PUSH'}}, $1;
-               }
-               elsif (/^Pull:\s*(.*)$/) {
-                       $it->{'FETCH'} ||= [];
-                       push @{$it->{'FETCH'}}, $1;
-               }
-               elsif (/^\#/) {
-                       ; # ignore
-               }
-               else {
-                       print STDERR "Warning: funny line in $file: $_\n";
-               }
-       }
-       close($fh);
-}
-
-sub list_remote {
-       my ($git) = @_;
-       my %seen = ();
-       my @remotes = eval {
-               $git->command(qw(config --get-regexp), '^remote\.');
-       };
-       for (@remotes) {
-               if (/^remote\.(\S+?)\.([^.\s]+)\s+(.*)$/) {
-                       add_remote_config(\%seen, $1, $2, $3);
-               }
-       }
-
-       my $dir = $git->repo_path() . "/remotes";
-       if (opendir(my $dh, $dir)) {
-               local $_;
-               while ($_ = readdir($dh)) {
-                       chomp;
-                       next if (! -f "$dir/$_" || ! -r _);
-                       add_remote_remotes(\%seen, "$dir/$_", $_);
-               }
-       }
-
-       return \%seen;
-}
-
-sub add_branch_config {
-       my ($hash, $name, $what, $value) = @_;
-       if ($what eq 'remote') {
-               if (exists $hash->{$name}{'REMOTE'}) {
-                       print STDERR "Warning: more than one branch.$name.remote\n";
-               }
-               $hash->{$name}{'REMOTE'} = $value;
-       }
-       elsif ($what eq 'merge') {
-               $hash->{$name}{'MERGE'} ||= [];
-               push @{$hash->{$name}{'MERGE'}}, $value;
-       }
-}
-
-sub list_branch {
-       my ($git) = @_;
-       my %seen = ();
-       my @branches = eval {
-               $git->command(qw(config --get-regexp), '^branch\.');
-       };
-       for (@branches) {
-               if (/^branch\.([^.]*)\.(\S*)\s+(.*)$/) {
-                       add_branch_config(\%seen, $1, $2, $3);
-               }
-       }
-
-       return \%seen;
-}
-
-my $remote = list_remote($git);
-my $branch = list_branch($git);
-
-sub update_ls_remote {
-       my ($harder, $info) = @_;
-
-       return if (($harder == 0) ||
-                  (($harder == 1) && exists $info->{'LS_REMOTE'}));
-
-       my @ref = map { s|refs/heads/||; $_; } keys %{$git->remote_refs($info->{'URL'}, [ 'heads' ])};
-       $info->{'LS_REMOTE'} = \@ref;
-}
-
-sub list_wildcard_mapping {
-       my ($forced, $ours, $ls) = @_;
-       my %refs;
-       for (@$ls) {
-               $refs{$_} = 01; # bit #0 to say "they have"
-       }
-       for ($git->command('for-each-ref', "refs/remotes/$ours")) {
-               chomp;
-               next unless (s|^[0-9a-f]{40}\s[a-z]+\srefs/remotes/$ours/||);
-               next if ($_ eq 'HEAD');
-               $refs{$_} ||= 0;
-               $refs{$_} |= 02; # bit #1 to say "we have"
-       }
-       my (@new, @stale, @tracked);
-       for (sort keys %refs) {
-               my $have = $refs{$_};
-               if ($have == 1) {
-                       push @new, $_;
-               }
-               elsif ($have == 2) {
-                       push @stale, $_;
-               }
-               elsif ($have == 3) {
-                       push @tracked, $_;
-               }
-       }
-       return \@new, \@stale, \@tracked;
-}
-
-sub list_mapping {
-       my ($name, $info) = @_;
-       my $fetch = $info->{'FETCH'};
-       my $ls = $info->{'LS_REMOTE'};
-       my (@new, @stale, @tracked);
-
-       for (@$fetch) {
-               next unless (/(\+)?([^:]+):(.*)/);
-               my ($forced, $theirs, $ours) = ($1, $2, $3);
-               if ($theirs eq 'refs/heads/*' &&
-                   $ours =~ /^refs\/remotes\/(.*)\/\*$/) {
-                       # wildcard mapping
-                       my ($w_new, $w_stale, $w_tracked)
-                               = list_wildcard_mapping($forced, $1, $ls);
-                       push @new, @$w_new;
-                       push @stale, @$w_stale;
-                       push @tracked, @$w_tracked;
-               }
-               elsif ($theirs =~ /\*/ || $ours =~ /\*/) {
-                       print STDERR "Warning: unrecognized mapping in remotes.$name.fetch: $_\n";
-               }
-               elsif ($theirs =~ s|^refs/heads/||) {
-                       if (!grep { $_ eq $theirs } @$ls) {
-                               push @stale, $theirs;
-                       }
-                       elsif ($ours ne '') {
-                               push @tracked, $theirs;
-                       }
-               }
-       }
-       return \@new, \@stale, \@tracked;
-}
-
-sub show_mapping {
-       my ($name, $info) = @_;
-       my ($new, $stale, $tracked) = list_mapping($name, $info);
-       if (@$new) {
-               print "  New remote branches (next fetch will store in remotes/$name)\n";
-               print "    @$new\n";
-       }
-       if (@$stale) {
-               print "  Stale tracking branches in remotes/$name (use 'git remote prune')\n";
-               print "    @$stale\n";
-       }
-       if (@$tracked) {
-               print "  Tracked remote branches\n";
-               print "    @$tracked\n";
-       }
-}
-
-sub prune_remote {
-       my ($name, $ls_remote) = @_;
-       if (!exists $remote->{$name}) {
-               print STDERR "No such remote $name\n";
-               return 1;
-       }
-       my $info = $remote->{$name};
-       update_ls_remote($ls_remote, $info);
-
-       my ($new, $stale, $tracked) = list_mapping($name, $info);
-       my $prefix = "refs/remotes/$name";
-       foreach my $to_prune (@$stale) {
-               my @v = $git->command(qw(rev-parse --verify), "$prefix/$to_prune");
-               $git->command(qw(update-ref -d), "$prefix/$to_prune", $v[0]);
-       }
-       return 0;
-}
-
-sub show_remote {
-       my ($name, $ls_remote) = @_;
-       if (!exists $remote->{$name}) {
-               print STDERR "No such remote $name\n";
-               return 1;
-       }
-       my $info = $remote->{$name};
-       update_ls_remote($ls_remote, $info);
-
-       print "* remote $name\n";
-       print "  URL: $info->{'URL'}\n";
-       for my $branchname (sort keys %$branch) {
-               next unless (defined $branch->{$branchname}{'REMOTE'} &&
-                            $branch->{$branchname}{'REMOTE'} eq $name);
-               my @merged = map {
-                       s|^refs/heads/||;
-                       $_;
-               } split(' ',"@{$branch->{$branchname}{'MERGE'}}");
-               next unless (@merged);
-               print "  Remote branch(es) merged with 'git pull' while on branch $branchname\n";
-               print "    @merged\n";
-       }
-       if ($info->{'LS_REMOTE'}) {
-               show_mapping($name, $info);
-       }
-       if ($info->{'PUSH'}) {
-               my @pushed = map {
-                       s|^refs/heads/||;
-                       s|^\+refs/heads/|+|;
-                       s|:refs/heads/|:|;
-                       $_;
-               } @{$info->{'PUSH'}};
-               print "  Local branch(es) pushed with 'git push'\n";
-               print "    @pushed\n";
-       }
-       return 0;
-}
-
-sub add_remote {
-       my ($name, $url, $opts) = @_;
-       if (exists $remote->{$name}) {
-               print STDERR "remote $name already exists.\n";
-               exit(1);
-       }
-       $git->command('config', "remote.$name.url", $url);
-       my $track = $opts->{'track'} || ["*"];
-
-       for (@$track) {
-               $git->command('config', '--add', "remote.$name.fetch",
-                               $opts->{'mirror'} ?
-                               "+refs/$_:refs/$_" :
-                               "+refs/heads/$_:refs/remotes/$name/$_");
-       }
-       if ($opts->{'fetch'}) {
-               $git->command('fetch', $name);
-       }
-       if (exists $opts->{'master'}) {
-               $git->command('symbolic-ref', "refs/remotes/$name/HEAD",
-                             "refs/remotes/$name/$opts->{'master'}");
-       }
-}
-
-sub update_remote {
-       my ($name) = @_;
-       my @remotes;
-
-        my $conf = $git->config("remotes." . $name);
-       if (defined($conf)) {
-               @remotes = split(' ', $conf);
-       } elsif ($name eq 'default') {
-               @remotes = ();
-               for (sort keys %$remote) {
-                       my $do_fetch = $git->config_bool("remote." . $_ .
-                                                   ".skipDefaultUpdate");
-                       unless ($do_fetch) {
-                               push @remotes, $_;
-                       }
-               }
-       } else {
-               print STDERR "Remote group $name does not exist.\n";
-               exit(1);
-       }
-       for (@remotes) {
-               print "Updating $_\n";
-               $git->command('fetch', "$_");
-       }
-}
-
-sub rm_remote {
-       my ($name) = @_;
-       if (!exists $remote->{$name}) {
-               print STDERR "No such remote $name\n";
-               return 1;
-       }
-
-       $git->command('config', '--remove-section', "remote.$name");
-
-       eval {
-           my @trackers = $git->command('config', '--get-regexp',
-                       'branch.*.remote', $name);
-               for (@trackers) {
-                       /^branch\.(.*)?\.remote/;
-                       $git->config('--unset', "branch.$1.remote");
-                       $git->config('--unset', "branch.$1.merge");
-               }
-       };
-
-       my @refs = $git->command('for-each-ref',
-               '--format=%(refname) %(objectname)', "refs/remotes/$name");
-       for (@refs) {
-               my ($ref, $object) = split;
-               $git->command(qw(update-ref -d), $ref, $object);
-       }
-       return 0;
-}
-
-sub add_usage {
-       print STDERR "usage: git remote add [-f] [-t track]* [-m master] <name> <url>\n";
-       exit(1);
-}
-
-my $VERBOSE = 0;
-@ARGV = grep {
-       if ($_ eq '-v' or $_ eq '--verbose') {
-               $VERBOSE=1;
-               0
-       } else {
-               1
-       }
-} @ARGV;
-
-if (!@ARGV) {
-       for (sort keys %$remote) {
-               print "$_";
-               print "\t$remote->{$_}->{URL}" if $VERBOSE;
-               print "\n";
-       }
-}
-elsif ($ARGV[0] eq 'show') {
-       my $ls_remote = 1;
-       my $i;
-       for ($i = 1; $i < @ARGV; $i++) {
-               if ($ARGV[$i] eq '-n') {
-                       $ls_remote = 0;
-               }
-               else {
-                       last;
-               }
-       }
-       if ($i >= @ARGV) {
-               print STDERR "usage: git remote show <remote>\n";
-               exit(1);
-       }
-       my $status = 0;
-       for (; $i < @ARGV; $i++) {
-               $status |= show_remote($ARGV[$i], $ls_remote);
-       }
-       exit($status);
-}
-elsif ($ARGV[0] eq 'update') {
-       if (@ARGV <= 1) {
-               update_remote("default");
-               exit(1);
-       }
-       for (my $i = 1; $i < @ARGV; $i++) {
-               update_remote($ARGV[$i]);
-       }
-}
-elsif ($ARGV[0] eq 'prune') {
-       my $ls_remote = 1;
-       my $i;
-       for ($i = 1; $i < @ARGV; $i++) {
-               if ($ARGV[$i] eq '-n') {
-                       $ls_remote = 0;
-               }
-               else {
-                       last;
-               }
-       }
-       if ($i >= @ARGV) {
-               print STDERR "usage: git remote prune <remote>\n";
-               exit(1);
-       }
-       my $status = 0;
-       for (; $i < @ARGV; $i++) {
-               $status |= prune_remote($ARGV[$i], $ls_remote);
-       }
-        exit($status);
-}
-elsif ($ARGV[0] eq 'add') {
-       my %opts = ();
-       while (1 < @ARGV && $ARGV[1] =~ /^-/) {
-               my $opt = $ARGV[1];
-               shift @ARGV;
-               if ($opt eq '-f' || $opt eq '--fetch') {
-                       $opts{'fetch'} = 1;
-                       next;
-               }
-               if ($opt eq '-t' || $opt eq '--track') {
-                       if (@ARGV < 1) {
-                               add_usage();
-                       }
-                       $opts{'track'} ||= [];
-                       push @{$opts{'track'}}, $ARGV[1];
-                       shift @ARGV;
-                       next;
-               }
-               if ($opt eq '-m' || $opt eq '--master') {
-                       if ((@ARGV < 1) || exists $opts{'master'}) {
-                               add_usage();
-                       }
-                       $opts{'master'} = $ARGV[1];
-                       shift @ARGV;
-                       next;
-               }
-               if ($opt eq '--mirror') {
-                       $opts{'mirror'} = 1;
-                       next;
-               }
-               add_usage();
-       }
-       if (@ARGV != 3) {
-               add_usage();
-       }
-       add_remote($ARGV[1], $ARGV[2], \%opts);
-}
-elsif ($ARGV[0] eq 'rm') {
-       if (@ARGV <= 1) {
-               print STDERR "usage: git remote rm <remote>\n";
-               exit(1);
-       }
-       exit(rm_remote($ARGV[1]));
-}
-else {
-       print STDERR "usage: git remote\n";
-       print STDERR "       git remote add <name> <url>\n";
-       print STDERR "       git remote rm <name>\n";
-       print STDERR "       git remote show <name>\n";
-       print STDERR "       git remote prune <name>\n";
-       print STDERR "       git remote update [group]\n";
-       exit(1);
-}
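
The Perl example gathers remote.* and branch.* configuration plus the legacy $GIT_DIR/remotes files to implement add, show, prune, update and rm. The builtin covers the same subcommands; a short usage sketch with placeholder names and URL:

    git remote add -f -t master -m master origin https://example.com/repo.git
    git remote show -n origin     # -n skips contacting the remote, as in the script
    git remote prune origin
    git remote update             # fetch the default group of remotes
    git remote rm origin
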
diff --git a/contrib/examples/git-repack.sh b/contrib/examples/git-repack.sh
deleted file mode 100755 (executable)
index 672af93..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-#
-
-OPTIONS_KEEPDASHDASH=
-OPTIONS_SPEC="\
-git repack [options]
---
-a               pack everything in a single pack
-A               same as -a, and turn unreachable objects loose
-d               remove redundant packs, and run git-prune-packed
-f               pass --no-reuse-delta to git-pack-objects
-F               pass --no-reuse-object to git-pack-objects
-n               do not run git-update-server-info
-q,quiet         be quiet
-l               pass --local to git-pack-objects
-unpack-unreachable=  with -A, do not loosen objects older than this
- Packing constraints
-window=         size of the window used for delta compression
-window-memory=  same as the above, but limit memory size instead of entries count
-depth=          limits the maximum delta depth
-max-pack-size=  maximum size of each packfile
-"
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-no_update_info= all_into_one= remove_redundant= unpack_unreachable=
-local= no_reuse= extra=
-while test $# != 0
-do
-       case "$1" in
-       -n)     no_update_info=t ;;
-       -a)     all_into_one=t ;;
-       -A)     all_into_one=t
-               unpack_unreachable=--unpack-unreachable ;;
-       --unpack-unreachable)
-               unpack_unreachable="--unpack-unreachable=$2"; shift ;;
-       -d)     remove_redundant=t ;;
-       -q)     GIT_QUIET=t ;;
-       -f)     no_reuse=--no-reuse-delta ;;
-       -F)     no_reuse=--no-reuse-object ;;
-       -l)     local=--local ;;
-       --max-pack-size|--window|--window-memory|--depth)
-               extra="$extra $1=$2"; shift ;;
-       --) shift; break;;
-       *)      usage ;;
-       esac
-       shift
-done
-
-case "$(git config --bool repack.usedeltabaseoffset || echo true)" in
-true)
-       extra="$extra --delta-base-offset" ;;
-esac
-
-PACKDIR="$GIT_OBJECT_DIRECTORY/pack"
-PACKTMP="$PACKDIR/.tmp-$$-pack"
-rm -f "$PACKTMP"-*
-trap 'rm -f "$PACKTMP"-*' 0 1 2 3 15
-
-# There will be more repacking strategies to come...
-case ",$all_into_one," in
-,,)
-       args='--unpacked --incremental'
-       ;;
-,t,)
-       args= existing=
-       if [ -d "$PACKDIR" ]; then
-               for e in $(cd "$PACKDIR" && find . -type f -name '*.pack' \
-                       | sed -e 's/^\.\///' -e 's/\.pack$//')
-               do
-                       if [ -e "$PACKDIR/$e.keep" ]; then
-                               : keep
-                       else
-                               existing="$existing $e"
-                       fi
-               done
-               if test -n "$existing" && test -n "$unpack_unreachable" && \
-                       test -n "$remove_redundant"
-               then
-                       # This may have arbitrary user arguments, so we
-                       # have to protect it against whitespace splitting
-                       # when it gets run as "pack-objects $args" later.
-                       # Fortunately, we know it's an approxidate, so we
-                       # can just use dots instead.
-                       args="$args $(echo "$unpack_unreachable" | tr ' ' .)"
-               fi
-       fi
-       ;;
-esac
-
-mkdir -p "$PACKDIR" || exit
-
-args="$args $local ${GIT_QUIET:+-q} $no_reuse$extra"
-names=$(git pack-objects --keep-true-parents --honor-pack-keep --non-empty --all --reflog $args </dev/null "$PACKTMP") ||
-       exit 1
-if [ -z "$names" ]; then
-       say Nothing new to pack.
-fi
-
-# Ok we have prepared all new packfiles.
-
-# First see if there are packs of the same name and, if so,
-# whether we can move them out of the way (this can happen if we
-# repacked immediately after packing fully).
-rollback=
-failed=
-for name in $names
-do
-       for sfx in pack idx
-       do
-               file=pack-$name.$sfx
-               test -f "$PACKDIR/$file" || continue
-               rm -f "$PACKDIR/old-$file" &&
-               mv "$PACKDIR/$file" "$PACKDIR/old-$file" || {
-                       failed=t
-                       break
-               }
-               rollback="$rollback $file"
-       done
-       test -z "$failed" || break
-done
-
-# If renaming failed for any of them, roll the ones we have
-# already renamed back to their original names.
-if test -n "$failed"
-then
-       rollback_failure=
-       for file in $rollback
-       do
-               mv "$PACKDIR/old-$file" "$PACKDIR/$file" ||
-               rollback_failure="$rollback_failure $file"
-       done
-       if test -n "$rollback_failure"
-       then
-               echo >&2 "WARNING: Some packs in use have been renamed by"
-               echo >&2 "WARNING: prefixing old- to their name, in order to"
-               echo >&2 "WARNING: replace them with the new version of the"
-               echo >&2 "WARNING: file.  But the operation failed, and"
-               echo >&2 "WARNING: attempt to rename them back to their"
-               echo >&2 "WARNING: original names also failed."
-               echo >&2 "WARNING: Please rename them in $PACKDIR manually:"
-               for file in $rollback_failure
-               do
-                       echo >&2 "WARNING:   old-$file -> $file"
-               done
-       fi
-       exit 1
-fi
-
-# Now the ones with the same name are out of the way...
-fullbases=
-for name in $names
-do
-       fullbases="$fullbases pack-$name"
-       chmod a-w "$PACKTMP-$name.pack"
-       chmod a-w "$PACKTMP-$name.idx"
-       mv -f "$PACKTMP-$name.pack" "$PACKDIR/pack-$name.pack" &&
-       mv -f "$PACKTMP-$name.idx"  "$PACKDIR/pack-$name.idx" ||
-       exit
-done
-
-# Remove the "old-" files
-for name in $names
-do
-       rm -f "$PACKDIR/old-pack-$name.idx"
-       rm -f "$PACKDIR/old-pack-$name.pack"
-done
-
-# End of pack replacement.
-
-if test "$remove_redundant" = t
-then
-       # We know $existing are all redundant.
-       if [ -n "$existing" ]
-       then
-               ( cd "$PACKDIR" &&
-                 for e in $existing
-                 do
-                       case " $fullbases " in
-                       *" $e "*) ;;
-                       *)      rm -f "$e.pack" "$e.idx" "$e.keep" ;;
-                       esac
-                 done
-               )
-       fi
-       git prune-packed ${GIT_QUIET:+-q}
-fi
-
-case "$no_update_info" in
-t) : ;;
-*) git update-server-info ;;
-esac
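
git-repack.sh is a wrapper that runs git pack-objects and then swaps the resulting packfiles into objects/pack, rolling back on rename failure. The builtin accepts the same options as the spec at the top of the script; a brief sketch:

    # pack everything into a single pack, drop redundant packs, recompute deltas
    git repack -a -d -f --window=250 --depth=50

    # as above, but turn unreachable objects loose instead of dropping them (-A)
    git repack -A -d

    # the delta-base-offset default the script consults
    git config repack.useDeltaBaseOffset true
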
diff --git a/contrib/examples/git-rerere.perl b/contrib/examples/git-rerere.perl
deleted file mode 100755 (executable)
index 4f69209..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/perl
-#
-# REuse REcorded REsolve.  This tool records a conflicted automerge
-# result and its hand resolution, and helps to resolve future
-# automerge that results in the same conflict.
-#
-# To enable this feature, create a directory 'rr-cache' under your
-# .git/ directory.
-
-use Digest;
-use File::Path;
-use File::Copy;
-
-my $git_dir = $::ENV{GIT_DIR} || ".git";
-my $rr_dir = "$git_dir/rr-cache";
-my $merge_rr = "$git_dir/rr-cache/MERGE_RR";
-
-my %merge_rr = ();
-
-sub read_rr {
-       if (!-f $merge_rr) {
-               %merge_rr = ();
-               return;
-       }
-       my $in;
-       local $/ = "\0";
-       open $in, "<$merge_rr" or die "$!: $merge_rr";
-       while (<$in>) {
-               chomp;
-               my ($name, $path) = /^([0-9a-f]{40})\t(.*)$/s;
-               $merge_rr{$path} = $name;
-       }
-       close $in;
-}
-
-sub write_rr {
-       my $out;
-       open $out, ">$merge_rr" or die "$!: $merge_rr";
-       for my $path (sort keys %merge_rr) {
-               my $name = $merge_rr{$path};
-               print $out "$name\t$path\0";
-       }
-       close $out;
-}
-
-sub compute_conflict_name {
-       my ($path) = @_;
-       my @side = ();
-       my $in;
-       open $in, "<$path"  or die "$!: $path";
-
-       my $sha1 = Digest->new("SHA-1");
-       my $hunk = 0;
-       while (<$in>) {
-               if (/^<<<<<<< .*/) {
-                       $hunk++;
-                       @side = ([], undef);
-               }
-               elsif (/^=======$/) {
-                       $side[1] = [];
-               }
-               elsif (/^>>>>>>> .*/) {
-                       my ($one, $two);
-                       $one = join('', @{$side[0]});
-                       $two = join('', @{$side[1]});
-                       if ($two le $one) {
-                               ($one, $two) = ($two, $one);
-                       }
-                       $sha1->add($one);
-                       $sha1->add("\0");
-                       $sha1->add($two);
-                       $sha1->add("\0");
-                       @side = ();
-               }
-               elsif (@side == 0) {
-                       next;
-               }
-               elsif (defined $side[1]) {
-                       push @{$side[1]}, $_;
-               }
-               else {
-                       push @{$side[0]}, $_;
-               }
-       }
-       close $in;
-       return ($sha1->hexdigest, $hunk);
-}
-
-sub record_preimage {
-       my ($path, $name) = @_;
-       my @side = ();
-       my ($in, $out);
-       open $in, "<$path"  or die "$!: $path";
-       open $out, ">$name" or die "$!: $name";
-
-       while (<$in>) {
-               if (/^<<<<<<< .*/) {
-                       @side = ([], undef);
-               }
-               elsif (/^=======$/) {
-                       $side[1] = [];
-               }
-               elsif (/^>>>>>>> .*/) {
-                       my ($one, $two);
-                       $one = join('', @{$side[0]});
-                       $two = join('', @{$side[1]});
-                       if ($two le $one) {
-                               ($one, $two) = ($two, $one);
-                       }
-                       print $out "<<<<<<<\n";
-                       print $out $one;
-                       print $out "=======\n";
-                       print $out $two;
-                       print $out ">>>>>>>\n";
-                       @side = ();
-               }
-               elsif (@side == 0) {
-                       print $out $_;
-               }
-               elsif (defined $side[1]) {
-                       push @{$side[1]}, $_;
-               }
-               else {
-                       push @{$side[0]}, $_;
-               }
-       }
-       close $out;
-       close $in;
-}
-
-sub find_conflict {
-       my $in;
-       local $/ = "\0";
-       my $pid = open($in, '-|');
-       die "$!" unless defined $pid;
-       if (!$pid) {
-               exec(qw(git ls-files -z -u)) or die "$!: ls-files";
-       }
-       my %path = ();
-       my @path = ();
-       while (<$in>) {
-               chomp;
-               my ($mode, $sha1, $stage, $path) =
-                   /^([0-7]+) ([0-9a-f]{40}) ([123])\t(.*)$/s;
-               $path{$path} |= (1 << $stage);
-       }
-       close $in;
-       while (my ($path, $status) = each %path) {
-               if ($status == 14) { push @path, $path; }
-       }
-       return @path;
-}
-
-sub merge {
-       my ($name, $path) = @_;
-       record_preimage($path, "$rr_dir/$name/thisimage");
-       unless (system('git', 'merge-file', map { "$rr_dir/$name/${_}image" }
-                      qw(this pre post))) {
-               my $in;
-               open $in, "<$rr_dir/$name/thisimage" or
-                   die "$!: $name/thisimage";
-               my $out;
-               open $out, ">$path" or die "$!: $path";
-               while (<$in>) { print $out $_; }
-               close $in;
-               close $out;
-               return 1;
-       }
-       return 0;
-}
-
-sub garbage_collect_rerere {
-       # We should allow specifying these from the command line and
-       # that is why the caller gives @ARGV to us, but I am lazy.
-
-       my $cutoff_noresolve = 15; # two weeks
-       my $cutoff_resolve = 60; # two months
-       my @to_remove;
-       while (<$rr_dir/*/preimage>) {
-               my ($dir) = /^(.*)\/preimage$/;
-               my $cutoff = ((-f "$dir/postimage")
-                             ? $cutoff_resolve
-                             : $cutoff_noresolve);
-               my $age = -M "$_";
-               if ($cutoff <= $age) {
-                       push @to_remove, $dir;
-               }
-       }
-       if (@to_remove) {
-               rmtree(\@to_remove);
-       }
-}
-
--d "$rr_dir" || exit(0);
-
-read_rr();
-
-if (@ARGV) {
-       my $arg = shift @ARGV;
-       if ($arg eq 'clear') {
-               for my $path (keys %merge_rr) {
-                       my $name = $merge_rr{$path};
-                       if (-d "$rr_dir/$name" &&
-                           ! -f "$rr_dir/$name/postimage") {
-                               rmtree(["$rr_dir/$name"]);
-                       }
-               }
-               unlink $merge_rr;
-       }
-       elsif ($arg eq 'status') {
-               for my $path (keys %merge_rr) {
-                       print $path, "\n";
-               }
-       }
-       elsif ($arg eq 'diff') {
-               for my $path (keys %merge_rr) {
-                       my $name = $merge_rr{$path};
-                       system('diff', ((@ARGV == 0) ? ('-u') : @ARGV),
-                               '-L', "a/$path", '-L', "b/$path",
-                               "$rr_dir/$name/preimage", $path);
-               }
-       }
-       elsif ($arg eq 'gc') {
-               garbage_collect_rerere(@ARGV);
-       }
-       else {
-               die "$0 unknown command: $arg\n";
-       }
-       exit 0;
-}
-
-my %conflict = map { $_ => 1 } find_conflict();
-
-# MERGE_RR records paths with conflicts immediately after merge
-# failed.  Some of the conflicted paths might have been hand resolved
-# in the working tree since then, but the initial run would catch all
-# and register their preimages.
-
-for my $path (keys %conflict) {
-       # This path has conflict.  If it is not recorded yet,
-       # record the pre-image.
-       if (!exists $merge_rr{$path}) {
-               my ($name, $hunk) = compute_conflict_name($path);
-               next unless ($hunk);
-               $merge_rr{$path} = $name;
-               if (! -d "$rr_dir/$name") {
-                       mkpath("$rr_dir/$name", 0, 0777);
-                       print STDERR "Recorded preimage for '$path'\n";
-                       record_preimage($path, "$rr_dir/$name/preimage");
-               }
-       }
-}
-
-# Now some of the paths that had conflicts earlier might have been
-# hand resolved.  Others may be similar to a conflict already that
-# was resolved before.
-
-for my $path (keys %merge_rr) {
-       my $name = $merge_rr{$path};
-
-       # We could resolve this automatically if we have images.
-       if (-f "$rr_dir/$name/preimage" &&
-           -f "$rr_dir/$name/postimage") {
-               if (merge($name, $path)) {
-                       print STDERR "Resolved '$path' using previous resolution.\n";
-                       # Then we do not have to worry about this path
-                       # anymore.
-                       delete $merge_rr{$path};
-                       next;
-               }
-       }
-
-       # Let's see if we have resolved it.
-       (undef, my $hunk) = compute_conflict_name($path);
-       next if ($hunk);
-
-       print STDERR "Recorded resolution for '$path'.\n";
-       copy($path, "$rr_dir/$name/postimage");
-       # And we do not have to worry about this path anymore.
-       delete $merge_rr{$path};
-}
-
-# Write out the rest.
-write_rr();
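The script above is driven entirely by its first argument: with none it records preimages for newly conflicted paths and replays any recorded resolution, while clear, status, diff and gc manage the rr-cache. A minimal sketch of a session, assuming the script is installed on PATH as git-rerere (the branch name is hypothetical):

        mkdir -p .git/rr-cache     # enable the feature, as the header comment above requires
        git merge topic            # suppose this merge stops with conflicts
        git-rerere                 # record preimages, or reuse an earlier recorded resolution
        git-rerere status          # list the conflicted paths tracked in MERGE_RR
        git-rerere diff            # show how each working-tree file differs from its preimage
        git-rerere gc              # drop stale entries, as garbage_collect_rerere above does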
diff --git a/contrib/examples/git-reset.sh b/contrib/examples/git-reset.sh
deleted file mode 100755 (executable)
index cb1bbf3..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005, 2006 Linus Torvalds and Junio C Hamano
-#
-USAGE='[--mixed | --soft | --hard]  [<commit-ish>] [ [--] <paths>...]'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-set_reflog_action "reset $*"
-require_work_tree
-
-update= reset_type=--mixed
-unset rev
-
-while test $# != 0
-do
-       case "$1" in
-       --mixed | --soft | --hard)
-               reset_type="$1"
-               ;;
-       --)
-               break
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               rev=$(git rev-parse --verify "$1") || exit
-               shift
-               break
-               ;;
-       esac
-       shift
-done
-
-: ${rev=HEAD}
-rev=$(git rev-parse --verify $rev^0) || exit
-
-# Skip -- in "git reset HEAD -- foo" and "git reset -- foo".
-case "$1" in --) shift ;; esac
-
-# git reset --mixed tree [--] paths... can be used to
-# load chosen paths from the tree into the index without
-# affecting the working tree or HEAD.
-if test $# != 0
-then
-       test "$reset_type" = "--mixed" ||
-               die "Cannot do partial $reset_type reset."
-
-       git diff-index --cached $rev -- "$@" |
-       sed -e 's/^:\([0-7][0-7]*\) [0-7][0-7]* \([0-9a-f][0-9a-f]*\) [0-9a-f][0-9a-f]* [A-Z]   \(.*\)$/\1 \2   \3/' |
-       git update-index --add --remove --index-info || exit
-       git update-index --refresh
-       exit
-fi
-
-cd_to_toplevel
-
-if test "$reset_type" = "--hard"
-then
-       update=-u
-fi
-
-# Soft reset does not touch the index file or the working tree
-# at all, but requires them in a good order.  Other resets reset
-# the index file to the tree object we are switching to.
-if test "$reset_type" = "--soft"
-then
-       if test -f "$GIT_DIR/MERGE_HEAD" ||
-          test "" != "$(git ls-files --unmerged)"
-       then
-               die "Cannot do a soft reset in the middle of a merge."
-       fi
-else
-       git read-tree -v --reset $update "$rev" || exit
-fi
-
-# Any resets update HEAD to the head being switched to.
-if orig=$(git rev-parse --verify HEAD 2>/dev/null)
-then
-       echo "$orig" >"$GIT_DIR/ORIG_HEAD"
-else
-       rm -f "$GIT_DIR/ORIG_HEAD"
-fi
-git update-ref -m "$GIT_REFLOG_ACTION" HEAD "$rev"
-update_ref_status=$?
-
-case "$reset_type" in
---hard )
-       test $update_ref_status = 0 && {
-               printf "HEAD is now at "
-               GIT_PAGER= git log --max-count=1 --pretty=oneline \
-                       --abbrev-commit HEAD
-       }
-       ;;
---soft )
-       ;; # Nothing else to do
---mixed )
-       # Report what has not been updated.
-       git update-index --refresh
-       ;;
-esac
-
-rm -f "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/rr-cache/MERGE_RR" \
-       "$GIT_DIR/SQUASH_MSG" "$GIT_DIR/MERGE_MSG"
-
-exit $update_ref_status
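The partial-reset branch above does what its comment says: given paths, the chosen entries are read from the named tree into the index while HEAD and the working tree are left alone (--mixed is the script's default reset type). A small usage sketch with a hypothetical commit and path:

        git reset HEAD~2 -- Documentation/git-reset.txt    # index entry now taken from HEAD~2
        git diff --cached -- Documentation/git-reset.txt   # that entry now differs from HEAD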
diff --git a/contrib/examples/git-resolve.sh b/contrib/examples/git-resolve.sh
deleted file mode 100755 (executable)
index 3099dc8..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-#
-# Resolve two trees.
-#
-
-echo 'WARNING: This command is DEPRECATED and will be removed very soon.' >&2
-echo 'WARNING: Please use git-merge or git-pull instead.' >&2
-sleep 2
-
-USAGE='<head> <remote> <merge-message>'
-. git-sh-setup
-
-dropheads() {
-       rm -f -- "$GIT_DIR/MERGE_HEAD" \
-               "$GIT_DIR/LAST_MERGE" || exit 1
-}
-
-head=$(git rev-parse --verify "$1"^0) &&
-merge=$(git rev-parse --verify "$2"^0) &&
-merge_name="$2" &&
-merge_msg="$3" || usage
-
-#
-# The remote name is just used for the message,
-# but we do want it.
-#
-if [ -z "$head" -o -z "$merge" -o -z "$merge_msg" ]; then
-       usage
-fi
-
-dropheads
-echo $head > "$GIT_DIR"/ORIG_HEAD
-echo $merge > "$GIT_DIR"/LAST_MERGE
-
-common=$(git merge-base $head $merge)
-if [ -z "$common" ]; then
-       die "Unable to find common commit between" $merge $head
-fi
-
-case "$common" in
-"$merge")
-       echo "Already up to date. Yeeah!"
-       dropheads
-       exit 0
-       ;;
-"$head")
-       echo "Updating $(git rev-parse --short $head)..$(git rev-parse --short $merge)"
-       git read-tree -u -m $head $merge || exit 1
-       git update-ref -m "resolve $merge_name: Fast-forward" \
-               HEAD "$merge" "$head"
-       git diff-tree -p $head $merge | git apply --stat
-       dropheads
-       exit 0
-       ;;
-esac
-
-# We are going to make a new commit.
-git var GIT_COMMITTER_IDENT >/dev/null || exit
-
-# Find an optimum merge base if there are more than one candidates.
-LF='
-'
-common=$(git merge-base -a $head $merge)
-case "$common" in
-?*"$LF"?*)
-       echo "Trying to find the optimum merge base."
-       G=.tmp-index$$
-       best=
-       best_cnt=-1
-       for c in $common
-       do
-               rm -f $G
-               GIT_INDEX_FILE=$G git read-tree -m $c $head $merge \
-                       2>/dev/null || continue
-               # Count the paths that are unmerged.
-               cnt=$(GIT_INDEX_FILE=$G git ls-files --unmerged | wc -l)
-               if test $best_cnt -le 0 || test $cnt -le $best_cnt
-               then
-                       best=$c
-                       best_cnt=$cnt
-                       if test "$best_cnt" -eq 0
-                       then
-                               # Cannot do any better than all trivial merge.
-                               break
-                       fi
-               fi
-       done
-       rm -f $G
-       common="$best"
-esac
-
-echo "Trying to merge $merge into $head using $common."
-git update-index --refresh 2>/dev/null
-git read-tree -u -m $common $head $merge || exit 1
-result_tree=$(git write-tree  2> /dev/null)
-if [ $? -ne 0 ]; then
-       echo "Simple merge failed, trying Automatic merge"
-       git-merge-index -o git-merge-one-file -a
-       if [ $? -ne 0 ]; then
-               echo $merge > "$GIT_DIR"/MERGE_HEAD
-               die "Automatic merge failed, fix up by hand"
-       fi
-       result_tree=$(git write-tree) || exit 1
-fi
-result_commit=$(echo "$merge_msg" | git commit-tree $result_tree -p $head -p $merge)
-echo "Committed merge $result_commit"
-git update-ref -m "resolve $merge_name: In-index merge" \
-       HEAD "$result_commit" "$head"
-git diff-tree -p $head $result_commit | git apply --stat
-dropheads
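The "optimum merge base" loop above only runs when the two heads share more than one common ancestor; the candidate set it scores can be listed directly (branch name hypothetical):

        git merge-base --all HEAD topic    # one candidate base per line, the set the for-loop scores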
diff --git a/contrib/examples/git-revert.sh b/contrib/examples/git-revert.sh
deleted file mode 100755 (executable)
index 197838d..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2005 Linus Torvalds
-# Copyright (c) 2005 Junio C Hamano
-#
-
-case "$0" in
-*-revert* )
-       test -t 0 && edit=-e
-       replay=
-       me=revert
-       USAGE='[--edit | --no-edit] [-n] <commit-ish>' ;;
-*-cherry-pick* )
-       replay=t
-       edit=
-       me=cherry-pick
-       USAGE='[--edit] [-n] [-r] [-x] <commit-ish>'  ;;
-* )
-       echo >&2 "What are you talking about?"
-       exit 1 ;;
-esac
-
-SUBDIRECTORY_OK=Yes ;# we will cd up
-. git-sh-setup
-require_work_tree
-cd_to_toplevel
-
-no_commit=
-xopt=
-while case "$#" in 0) break ;; esac
-do
-       case "$1" in
-       -n|--n|--no|--no-|--no-c|--no-co|--no-com|--no-comm|\
-           --no-commi|--no-commit)
-               no_commit=t
-               ;;
-       -e|--e|--ed|--edi|--edit)
-               edit=-e
-               ;;
-       --n|--no|--no-|--no-e|--no-ed|--no-edi|--no-edit)
-               edit=
-               ;;
-       -r)
-               : no-op ;;
-       -x|--i-really-want-to-expose-my-private-commit-object-name)
-               replay=
-               ;;
-       -X?*)
-               xopt="$xopt$(git rev-parse --sq-quote "--${1#-X}")"
-               ;;
-       --strategy-option=*)
-               xopt="$xopt$(git rev-parse --sq-quote "--${1#--strategy-option=}")"
-               ;;
-       -X|--strategy-option)
-               shift
-               xopt="$xopt$(git rev-parse --sq-quote "--$1")"
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-
-set_reflog_action "$me"
-
-test "$me,$replay" = "revert,t" && usage
-
-case "$no_commit" in
-t)
-       # We do not intend to commit immediately.  We just want to
-       # merge the differences in.
-       head=$(git-write-tree) ||
-               die "Your index file is unmerged."
-       ;;
-*)
-       head=$(git-rev-parse --verify HEAD) ||
-               die "You do not have a valid HEAD"
-       files=$(git-diff-index --cached --name-only $head) || exit
-       if [ "$files" ]; then
-               die "Dirty index: cannot $me (dirty: $files)"
-       fi
-       ;;
-esac
-
-rev=$(git-rev-parse --verify "$@") &&
-commit=$(git-rev-parse --verify "$rev^0") ||
-       die "Not a single commit $@"
-prev=$(git-rev-parse --verify "$commit^1" 2>/dev/null) ||
-       die "Cannot run $me a root commit"
-git-rev-parse --verify "$commit^2" >/dev/null 2>&1 &&
-       die "Cannot run $me a multi-parent commit."
-
-encoding=$(git config i18n.commitencoding || echo UTF-8)
-
-# "commit" is an existing commit.  We would want to apply
-# the difference it introduces since its first parent "prev"
-# on top of the current HEAD if we are cherry-pick.  Or the
-# reverse of it if we are revert.
-
-case "$me" in
-revert)
-       git show -s --pretty=oneline --encoding="$encoding" $commit |
-       sed -e '
-               s/^[^ ]* /Revert "/
-               s/$/"/
-       '
-       echo
-       echo "This reverts commit $commit."
-       test "$rev" = "$commit" ||
-       echo "(original 'git revert' arguments: $@)"
-       base=$commit next=$prev
-       ;;
-
-cherry-pick)
-       pick_author_script='
-       /^author /{
-               s/'\''/'\''\\'\'\''/g
-               h
-               s/^author \([^<]*\) <[^>]*> .*$/\1/
-               s/'\''/'\''\'\'\''/g
-               s/.*/GIT_AUTHOR_NAME='\''&'\''/p
-
-               g
-               s/^author [^<]* <\([^>]*\)> .*$/\1/
-               s/'\''/'\''\'\'\''/g
-               s/.*/GIT_AUTHOR_EMAIL='\''&'\''/p
-
-               g
-               s/^author [^<]* <[^>]*> \(.*\)$/\1/
-               s/'\''/'\''\'\'\''/g
-               s/.*/GIT_AUTHOR_DATE='\''&'\''/p
-
-               q
-       }'
-
-       logmsg=$(git show -s --pretty=raw --encoding="$encoding" "$commit")
-       set_author_env=$(echo "$logmsg" |
-       LANG=C LC_ALL=C sed -ne "$pick_author_script")
-       eval "$set_author_env"
-       export GIT_AUTHOR_NAME
-       export GIT_AUTHOR_EMAIL
-       export GIT_AUTHOR_DATE
-
-       echo "$logmsg" |
-       sed -e '1,/^$/d' -e 's/^    //'
-       case "$replay" in
-       '')
-               echo "(cherry picked from commit $commit)"
-               test "$rev" = "$commit" ||
-               echo "(original 'git cherry-pick' arguments: $@)"
-               ;;
-       esac
-       base=$prev next=$commit
-       ;;
-
-esac >.msg
-
-eval GITHEAD_$head=HEAD
-eval GITHEAD_$next='$(git show -s \
-       --pretty=oneline --encoding="$encoding" "$commit" |
-       sed -e "s/^[^ ]* //")'
-export GITHEAD_$head GITHEAD_$next
-
-# This three way merge is an interesting one.  We are at
-# $head, and would want to apply the change between $commit
-# and $prev on top of us (when reverting), or the change between
-# $prev and $commit on top of us (when cherry-picking or replaying).
-
-eval "git merge-recursive $xopt $base -- $head $next" &&
-result=$(git-write-tree 2>/dev/null) || {
-       mv -f .msg "$GIT_DIR/MERGE_MSG"
-       {
-           echo '
-Conflicts:
-'
-               git ls-files --unmerged |
-               sed -e 's/^[^   ]*      /       /' |
-               uniq
-       } >>"$GIT_DIR/MERGE_MSG"
-       echo >&2 "Automatic $me failed.  After resolving the conflicts,"
-       echo >&2 "mark the corrected paths with 'git-add <paths>'"
-       echo >&2 "and commit the result."
-       case "$me" in
-       cherry-pick)
-               echo >&2 "You may choose to use the following when making"
-               echo >&2 "the commit:"
-               echo >&2 "$set_author_env"
-       esac
-       exit 1
-}
-
-# If we are cherry-pick, and if the merge did not result in
-# hand-editing, we will hit this commit and inherit the original
-# author date and name.
-# If we are revert, or if our cherry-pick results in a hand merge,
-# we had better say that the current user is responsible for that.
-
-case "$no_commit" in
-'')
-       git-commit -n -F .msg $edit
-       rm -f .msg
-       ;;
-esac
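The comment above describes the core trick: a revert is a three-way merge that uses the commit being reverted as the merge base and its parent as the other side (cherry-pick swaps them). A minimal sketch for reverting HEAD^ with a clean index and working tree, and HEAD^ not being a merge, using the same plumbing the script drives:

        git merge-recursive HEAD^ -- HEAD HEAD^^   # base = commit being reverted, other side = its parent
        git write-tree                             # succeeds only if no unmerged entries remain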
diff --git a/contrib/examples/git-svnimport.perl b/contrib/examples/git-svnimport.perl
deleted file mode 100755 (executable)
index c414f0d..0000000
+++ /dev/null
@@ -1,976 +0,0 @@
-#!/usr/bin/perl
-
-# This tool is copyright (c) 2005, Matthias Urlichs.
-# It is released under the Gnu Public License, version 2.
-#
-# The basic idea is to pull and analyze SVN changes.
-#
-# Checking out the files is done by a single long-running SVN connection.
-#
-# The head revision is on branch "origin" by default.
-# You can change that with the '-o' option.
-
-use strict;
-use warnings;
-use Getopt::Std;
-use File::Copy;
-use File::Spec;
-use File::Temp qw(tempfile);
-use File::Path qw(mkpath);
-use File::Basename qw(basename dirname);
-use Time::Local;
-use IO::Pipe;
-use POSIX qw(strftime dup2);
-use IPC::Open2;
-use SVN::Core;
-use SVN::Ra;
-
-die "Need SVN:Core 1.2.1 or better" if $SVN::Core::VERSION lt "1.2.1";
-
-$SIG{'PIPE'}="IGNORE";
-$ENV{'TZ'}="UTC";
-
-our($opt_h,$opt_o,$opt_v,$opt_u,$opt_C,$opt_i,$opt_m,$opt_M,$opt_t,$opt_T,
-    $opt_b,$opt_r,$opt_I,$opt_A,$opt_s,$opt_l,$opt_d,$opt_D,$opt_S,$opt_F,
-    $opt_P,$opt_R);
-
-sub usage() {
-       print STDERR <<END;
-usage: ${\basename $0}     # fetch/update GIT from SVN
-       [-o branch-for-HEAD] [-h] [-v] [-l max_rev] [-R repack_each_revs]
-       [-C GIT_repository] [-t tagname] [-T trunkname] [-b branchname]
-       [-d|-D] [-i] [-u] [-r] [-I ignorefilename] [-s start_chg]
-       [-m] [-M regex] [-A author_file] [-S] [-F] [-P project_name] [SVN_URL]
-END
-       exit(1);
-}
-
-getopts("A:b:C:dDFhiI:l:mM:o:rs:t:T:SP:R:uv") or usage();
-usage if $opt_h;
-
-my $tag_name = $opt_t || "tags";
-my $trunk_name = defined $opt_T ? $opt_T : "trunk";
-my $branch_name = $opt_b || "branches";
-my $project_name = $opt_P || "";
-$project_name = "/" . $project_name if ($project_name);
-my $repack_after = $opt_R || 1000;
-my $root_pool = SVN::Pool->new_default;
-
-@ARGV == 1 or @ARGV == 2 or usage();
-
-$opt_o ||= "origin";
-$opt_s ||= 1;
-my $git_tree = $opt_C;
-$git_tree ||= ".";
-
-my $svn_url = $ARGV[0];
-my $svn_dir = $ARGV[1];
-
-our @mergerx = ();
-if ($opt_m) {
-       my $branch_esc = quotemeta ($branch_name);
-       my $trunk_esc  = quotemeta ($trunk_name);
-       @mergerx =
-       (
-               qr!\b(?:merg(?:ed?|ing))\b.*?\b((?:(?<=$branch_esc/)[\w\.\-]+)|(?:$trunk_esc))\b!i,
-               qr!\b(?:from|of)\W+((?:(?<=$branch_esc/)[\w\.\-]+)|(?:$trunk_esc))\b!i,
-               qr!\b(?:from|of)\W+(?:the )?([\w\.\-]+)[-\s]branch\b!i
-       );
-}
-if ($opt_M) {
-       unshift (@mergerx, qr/$opt_M/);
-}
-
-# Absolutize filename now, since we will have chdir'ed by the time we
-# get around to opening it.
-$opt_A = File::Spec->rel2abs($opt_A) if $opt_A;
-
-our %users = ();
-our $users_file = undef;
-sub read_users($) {
-       $users_file = File::Spec->rel2abs(@_);
-       die "Cannot open $users_file\n" unless -f $users_file;
-       open(my $authors,$users_file);
-       while(<$authors>) {
-               chomp;
-               next unless /^(\S+?)\s*=\s*(.+?)\s*<(.+)>\s*$/;
-               (my $user,my $name,my $email) = ($1,$2,$3);
-               $users{$user} = [$name,$email];
-       }
-       close($authors);
-}
-
-select(STDERR); $|=1; select(STDOUT);
-
-
-package SVNconn;
-# Basic SVN connection.
-# We're only interested in connecting and downloading, so ...
-
-use File::Spec;
-use File::Temp qw(tempfile);
-use POSIX qw(strftime dup2);
-use Fcntl qw(SEEK_SET);
-
-sub new {
-       my($what,$repo) = @_;
-       $what=ref($what) if ref($what);
-
-       my $self = {};
-       $self->{'buffer'} = "";
-       bless($self,$what);
-
-       $repo =~ s#/+$##;
-       $self->{'fullrep'} = $repo;
-       $self->conn();
-
-       return $self;
-}
-
-sub conn {
-       my $self = shift;
-       my $repo = $self->{'fullrep'};
-       my $auth = SVN::Core::auth_open ([SVN::Client::get_simple_provider,
-                         SVN::Client::get_ssl_server_trust_file_provider,
-                         SVN::Client::get_username_provider]);
-       my $s = SVN::Ra->new(url => $repo, auth => $auth, pool => $root_pool);
-       die "SVN connection to $repo: $!\n" unless defined $s;
-       $self->{'svn'} = $s;
-       $self->{'repo'} = $repo;
-       $self->{'maxrev'} = $s->get_latest_revnum();
-}
-
-sub file {
-       my($self,$path,$rev) = @_;
-
-       my ($fh, $name) = tempfile('gitsvn.XXXXXX',
-                   DIR => File::Spec->tmpdir(), UNLINK => 1);
-
-       print "... $rev $path ...\n" if $opt_v;
-       my (undef, $properties);
-       $path =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       eval { (undef, $properties)
-                  = $self->{'svn'}->get_file($path,$rev,$fh); };
-       if($@) {
-               return undef if $@ =~ /Attempted to get checksum/;
-               die $@;
-       }
-       my $mode;
-       if (exists $properties->{'svn:executable'}) {
-               $mode = '100755';
-       } elsif (exists $properties->{'svn:special'}) {
-               my ($special_content, $filesize);
-               $filesize = tell $fh;
-               seek $fh, 0, SEEK_SET;
-               read $fh, $special_content, $filesize;
-               if ($special_content =~ s/^link //) {
-                       $mode = '120000';
-                       seek $fh, 0, SEEK_SET;
-                       truncate $fh, 0;
-                       print $fh $special_content;
-               } else {
-                       die "unexpected svn:special file encountered";
-               }
-       } else {
-               $mode = '100644';
-       }
-       close ($fh);
-
-       return ($name, $mode);
-}
-
-sub ignore {
-       my($self,$path,$rev) = @_;
-
-       print "... $rev $path ...\n" if $opt_v;
-       $path =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       my (undef,undef,$properties)
-           = $self->{'svn'}->get_dir($path,$rev,undef);
-       if (exists $properties->{'svn:ignore'}) {
-               my ($fh, $name) = tempfile('gitsvn.XXXXXX',
-                                          DIR => File::Spec->tmpdir(),
-                                          UNLINK => 1);
-               print $fh $properties->{'svn:ignore'};
-               close($fh);
-               return $name;
-       } else {
-               return undef;
-       }
-}
-
-sub dir_list {
-       my($self,$path,$rev) = @_;
-       $path =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       my ($dirents,undef,$properties)
-           = $self->{'svn'}->get_dir($path,$rev,undef);
-       return $dirents;
-}
-
-package main;
-use URI;
-
-our $svn = $svn_url;
-$svn .= "/$svn_dir" if defined $svn_dir;
-my $svn2 = SVNconn->new($svn);
-$svn = SVNconn->new($svn);
-
-my $lwp_ua;
-if($opt_d or $opt_D) {
-       $svn_url = URI->new($svn_url)->canonical;
-       if($opt_D) {
-               $svn_dir =~ s#/*$#/#;
-       } else {
-               $svn_dir = "";
-       }
-       if ($svn_url->scheme eq "http") {
-               use LWP::UserAgent;
-               $lwp_ua = LWP::UserAgent->new(keep_alive => 1, requests_redirectable => []);
-       } else {
-               print STDERR "Warning: not HTTP; turning off direct file access\n";
-               $opt_d=0;
-       }
-}
-
-sub pdate($) {
-       my($d) = @_;
-       $d =~ m#(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)#
-               or die "Unparseable date: $d\n";
-       my $y=$1; $y-=1900 if $y>1900;
-       return timegm($6||0,$5,$4,$3,$2-1,$y);
-}
-
-sub getwd() {
-       my $pwd = `pwd`;
-       chomp $pwd;
-       return $pwd;
-}
-
-
-sub get_headref($$) {
-    my $name    = shift;
-    my $git_dir = shift;
-    my $sha;
-
-    if (open(C,"$git_dir/refs/heads/$name")) {
-       chomp($sha = <C>);
-       close(C);
-       length($sha) == 40
-           or die "Cannot get head id for $name ($sha): $!\n";
-    }
-    return $sha;
-}
-
-
--d $git_tree
-       or mkdir($git_tree,0777)
-       or die "Could not create $git_tree: $!";
-chdir($git_tree);
-
-my $orig_branch = "";
-my $forward_master = 0;
-my %branches;
-
-my $git_dir = $ENV{"GIT_DIR"} || ".git";
-$git_dir = getwd()."/".$git_dir unless $git_dir =~ m#^/#;
-$ENV{"GIT_DIR"} = $git_dir;
-my $orig_git_index;
-$orig_git_index = $ENV{GIT_INDEX_FILE} if exists $ENV{GIT_INDEX_FILE};
-my ($git_ih, $git_index) = tempfile('gitXXXXXX', SUFFIX => '.idx',
-                                   DIR => File::Spec->tmpdir());
-close ($git_ih);
-$ENV{GIT_INDEX_FILE} = $git_index;
-my $maxnum = 0;
-my $last_rev = "";
-my $last_branch;
-my $current_rev = $opt_s || 1;
-unless(-d $git_dir) {
-       system("git init");
-       die "Cannot init the GIT db at $git_tree: $?\n" if $?;
-       system("git read-tree --empty");
-       die "Cannot init an empty tree: $?\n" if $?;
-
-       $last_branch = $opt_o;
-       $orig_branch = "";
-} else {
-       -f "$git_dir/refs/heads/$opt_o"
-               or die "Branch '$opt_o' does not exist.\n".
-                      "Either use the correct '-o branch' option,\n".
-                      "or import to a new repository.\n";
-
-       -f "$git_dir/svn2git"
-               or die "'$git_dir/svn2git' does not exist.\n".
-                      "You need that file for incremental imports.\n";
-       open(F, "git symbolic-ref HEAD |") or
-               die "Cannot run git-symbolic-ref: $!\n";
-       chomp ($last_branch = <F>);
-       $last_branch = basename($last_branch);
-       close(F);
-       unless($last_branch) {
-               warn "Cannot read the last branch name: $! -- assuming 'master'\n";
-               $last_branch = "master";
-       }
-       $orig_branch = $last_branch;
-       $last_rev = get_headref($orig_branch, $git_dir);
-       if (-f "$git_dir/SVN2GIT_HEAD") {
-               die <<EOM;
-SVN2GIT_HEAD exists.
-Make sure your working directory corresponds to HEAD and remove SVN2GIT_HEAD.
-You may need to run
-
-    git-read-tree -m -u SVN2GIT_HEAD HEAD
-EOM
-       }
-       system('cp', "$git_dir/HEAD", "$git_dir/SVN2GIT_HEAD");
-
-       $forward_master =
-           $opt_o ne 'master' && -f "$git_dir/refs/heads/master" &&
-           system('cmp', '-s', "$git_dir/refs/heads/master",
-                               "$git_dir/refs/heads/$opt_o") == 0;
-
-       # populate index
-       system('git', 'read-tree', $last_rev);
-       die "read-tree failed: $?\n" if $?;
-
-       # Get the last import timestamps
-       open my $B,"<", "$git_dir/svn2git";
-       while(<$B>) {
-               chomp;
-               my($num,$branch,$ref) = split;
-               $branches{$branch}{$num} = $ref;
-               $branches{$branch}{"LAST"} = $ref;
-               $current_rev = $num+1 if $current_rev <= $num;
-       }
-       close($B);
-}
--d $git_dir
-       or die "Could not create git subdir ($git_dir).\n";
-
-my $default_authors = "$git_dir/svn-authors";
-if ($opt_A) {
-       read_users($opt_A);
-       copy($opt_A,$default_authors) or die "Copy failed: $!";
-} else {
-       read_users($default_authors) if -f $default_authors;
-}
-
-open BRANCHES,">>", "$git_dir/svn2git";
-
-sub node_kind($$) {
-       my ($svnpath, $revision) = @_;
-       $svnpath =~ s#^/*##;
-       my $subpool = SVN::Pool::new_default_sub;
-       my $kind = $svn->{'svn'}->check_path($svnpath,$revision);
-       return $kind;
-}
-
-sub get_file($$$) {
-       my($svnpath,$rev,$path) = @_;
-
-       # now get it
-       my ($name,$mode);
-       if($opt_d) {
-               my($req,$res);
-
-               # /svn/!svn/bc/2/django/trunk/django-docs/build.py
-               my $url=$svn_url->clone();
-               $url->path($url->path."/!svn/bc/$rev/$svn_dir$svnpath");
-               print "... $path...\n" if $opt_v;
-               $req = HTTP::Request->new(GET => $url);
-               $res = $lwp_ua->request($req);
-               if ($res->is_success) {
-                       my $fh;
-                       ($fh, $name) = tempfile('gitsvn.XXXXXX',
-                       DIR => File::Spec->tmpdir(), UNLINK => 1);
-                       print $fh $res->content;
-                       close($fh) or die "Could not write $name: $!\n";
-               } else {
-                       return undef if $res->code == 301; # directory?
-                       die $res->status_line." at $url\n";
-               }
-               $mode = '0644'; # can't obtain mode via direct http request?
-       } else {
-               ($name,$mode) = $svn->file("$svnpath",$rev);
-               return undef unless defined $name;
-       }
-
-       my $pid = open(my $F, '-|');
-       die $! unless defined $pid;
-       if (!$pid) {
-           exec("git", "hash-object", "-w", $name)
-               or die "Cannot create object: $!\n";
-       }
-       my $sha = <$F>;
-       chomp $sha;
-       close $F;
-       unlink $name;
-       return [$mode, $sha, $path];
-}
-
-sub get_ignore($$$$$) {
-       my($new,$old,$rev,$path,$svnpath) = @_;
-
-       return unless $opt_I;
-       my $name = $svn->ignore("$svnpath",$rev);
-       if ($path eq '/') {
-               $path = $opt_I;
-       } else {
-               $path = File::Spec->catfile($path,$opt_I);
-       }
-       if (defined $name) {
-               my $pid = open(my $F, '-|');
-               die $! unless defined $pid;
-               if (!$pid) {
-                       exec("git", "hash-object", "-w", $name)
-                           or die "Cannot create object: $!\n";
-               }
-               my $sha = <$F>;
-               chomp $sha;
-               close $F;
-               unlink $name;
-               push(@$new,['0644',$sha,$path]);
-       } elsif (defined $old) {
-               push(@$old,$path);
-       }
-}
-
-sub project_path($$)
-{
-       my ($path, $project) = @_;
-
-       $path = "/".$path unless ($path =~ m#^\/#) ;
-       return $1 if ($path =~ m#^$project\/(.*)$#);
-
-       $path =~ s#\.#\\\.#g;
-       $path =~ s#\+#\\\+#g;
-       return "/" if ($project =~ m#^$path.*$#);
-
-       return undef;
-}
-
-sub split_path($$) {
-       my($rev,$path) = @_;
-       my $branch;
-
-       if($path =~ s#^/\Q$tag_name\E/([^/]+)/?##) {
-               $branch = "/$1";
-       } elsif($path =~ s#^/\Q$trunk_name\E/?##) {
-               $branch = "/";
-       } elsif($path =~ s#^/\Q$branch_name\E/([^/]+)/?##) {
-               $branch = $1;
-       } else {
-               my %no_error = (
-                       "/" => 1,
-                       "/$tag_name" => 1,
-                       "/$branch_name" => 1
-               );
-               print STDERR "$rev: Unrecognized path: $path\n" unless (defined $no_error{$path});
-               return ()
-       }
-       if ($path eq "") {
-               $path = "/";
-       } elsif ($project_name) {
-               $path = project_path($path, $project_name);
-       }
-       return ($branch,$path);
-}
-
-sub branch_rev($$) {
-
-       my ($srcbranch,$uptorev) = @_;
-
-       my $bbranches = $branches{$srcbranch};
-       my @revs = reverse sort { ($a eq 'LAST' ? 0 : $a) <=> ($b eq 'LAST' ? 0 : $b) } keys %$bbranches;
-       my $therev;
-       foreach my $arev(@revs) {
-               next if  ($arev eq 'LAST');
-               if ($arev <= $uptorev) {
-                       $therev = $arev;
-                       last;
-               }
-       }
-       return $therev;
-}
-
-sub expand_svndir($$$);
-
-sub expand_svndir($$$)
-{
-       my ($svnpath, $rev, $path) = @_;
-       my @list;
-       get_ignore(\@list, undef, $rev, $path, $svnpath);
-       my $dirents = $svn->dir_list($svnpath, $rev);
-       foreach my $p(keys %$dirents) {
-               my $kind = node_kind($svnpath.'/'.$p, $rev);
-               if ($kind eq $SVN::Node::file) {
-                       my $f = get_file($svnpath.'/'.$p, $rev, $path.'/'.$p);
-                       push(@list, $f) if $f;
-               } elsif ($kind eq $SVN::Node::dir) {
-                       push(@list,
-                            expand_svndir($svnpath.'/'.$p, $rev, $path.'/'.$p));
-               }
-       }
-       return @list;
-}
-
-sub copy_path($$$$$$$$) {
-       # Somebody copied a whole subdirectory.
-       # We need to find the index entries from the old version which the
-       # SVN log entry points to, and add them to the new place.
-
-       my($newrev,$newbranch,$path,$oldpath,$rev,$node_kind,$new,$parents) = @_;
-
-       my($srcbranch,$srcpath) = split_path($rev,$oldpath);
-       unless(defined $srcbranch && defined $srcpath) {
-               print "Path not found when copying from $oldpath @ $rev.\n".
-                       "Will try to copy from original SVN location...\n"
-                       if $opt_v;
-               push (@$new, expand_svndir($oldpath, $rev, $path));
-               return;
-       }
-       my $therev = branch_rev($srcbranch, $rev);
-       my $gitrev = $branches{$srcbranch}{$therev};
-       unless($gitrev) {
-               print STDERR "$newrev:$newbranch: could not find $oldpath \@ $rev\n";
-               return;
-       }
-       if ($srcbranch ne $newbranch) {
-               push(@$parents, $branches{$srcbranch}{'LAST'});
-       }
-       print "$newrev:$newbranch:$path: copying from $srcbranch:$srcpath @ $rev\n" if $opt_v;
-       if ($node_kind eq $SVN::Node::dir) {
-               $srcpath =~ s#/*$#/#;
-       }
-
-       my $pid = open my $f,'-|';
-       die $! unless defined $pid;
-       if (!$pid) {
-               exec("git","ls-tree","-r","-z",$gitrev,$srcpath)
-                       or die $!;
-       }
-       local $/ = "\0";
-       while(<$f>) {
-               chomp;
-               my($m,$p) = split(/\t/,$_,2);
-               my($mode,$type,$sha1) = split(/ /,$m);
-               next if $type ne "blob";
-               if ($node_kind eq $SVN::Node::dir) {
-                       $p = $path . substr($p,length($srcpath)-1);
-               } else {
-                       $p = $path;
-               }
-               push(@$new,[$mode,$sha1,$p]);
-       }
-       close($f) or
-               print STDERR "$newrev:$newbranch: could not list files in $oldpath \@ $rev\n";
-}
-
-sub commit {
-       my($branch, $changed_paths, $revision, $author, $date, $message) = @_;
-       my($committer_name,$committer_email,$dest);
-       my($author_name,$author_email);
-       my(@old,@new,@parents);
-
-       if (not defined $author or $author eq "") {
-               $committer_name = $committer_email = "unknown";
-       } elsif (defined $users_file) {
-               die "User $author is not listed in $users_file\n"
-                   unless exists $users{$author};
-               ($committer_name,$committer_email) = @{$users{$author}};
-       } elsif ($author =~ /^(.*?)\s+<(.*)>$/) {
-               ($committer_name, $committer_email) = ($1, $2);
-       } else {
-               $author =~ s/^<(.*)>$/$1/;
-               $committer_name = $committer_email = $author;
-       }
-
-       if ($opt_F && $message =~ /From:\s+(.*?)\s+<(.*)>\s*\n/) {
-               ($author_name, $author_email) = ($1, $2);
-               print "Author from From: $1 <$2>\n" if ($opt_v);;
-       } elsif ($opt_S && $message =~ /Signed-off-by:\s+(.*?)\s+<(.*)>\s*\n/) {
-               ($author_name, $author_email) = ($1, $2);
-               print "Author from Signed-off-by: $1 <$2>\n" if ($opt_v);;
-       } else {
-               $author_name = $committer_name;
-               $author_email = $committer_email;
-       }
-
-       $date = pdate($date);
-
-       my $tag;
-       my $parent;
-       if($branch eq "/") { # trunk
-               $parent = $opt_o;
-       } elsif($branch =~ m#^/(.+)#) { # tag
-               $tag = 1;
-               $parent = $1;
-       } else { # "normal" branch
-               # nothing to do
-               $parent = $branch;
-       }
-       $dest = $parent;
-
-       my $prev = $changed_paths->{"/"};
-       if($prev and $prev->[0] eq "A") {
-               delete $changed_paths->{"/"};
-               my $oldpath = $prev->[1];
-               my $rev;
-               if(defined $oldpath) {
-                       my $p;
-                       ($parent,$p) = split_path($revision,$oldpath);
-                       if(defined $parent) {
-                               if($parent eq "/") {
-                                       $parent = $opt_o;
-                               } else {
-                                       $parent =~ s#^/##; # if it's a tag
-                               }
-                       }
-               } else {
-                       $parent = undef;
-               }
-       }
-
-       my $rev;
-       if($revision > $opt_s and defined $parent) {
-               open(H,'-|',"git","rev-parse","--verify",$parent);
-               $rev = <H>;
-               close(H) or do {
-                       print STDERR "$revision: cannot find commit '$parent'!\n";
-                       return;
-               };
-               chop $rev;
-               if(length($rev) != 40) {
-                       print STDERR "$revision: cannot find commit '$parent'!\n";
-                       return;
-               }
-               $rev = $branches{($parent eq $opt_o) ? "/" : $parent}{"LAST"};
-               if($revision != $opt_s and not $rev) {
-                       print STDERR "$revision: do not know ancestor for '$parent'!\n";
-                       return;
-               }
-       } else {
-               $rev = undef;
-       }
-
-#      if($prev and $prev->[0] eq "A") {
-#              if(not $tag) {
-#                      unless(open(H,"> $git_dir/refs/heads/$branch")) {
-#                              print STDERR "$revision: Could not create branch $branch: $!\n";
-#                              $state=11;
-#                              next;
-#                      }
-#                      print H "$rev\n"
-#                              or die "Could not write branch $branch: $!";
-#                      close(H)
-#                              or die "Could not write branch $branch: $!";
-#              }
-#      }
-       if(not defined $rev) {
-               unlink($git_index);
-       } elsif ($rev ne $last_rev) {
-               print "Switching from $last_rev to $rev ($branch)\n" if $opt_v;
-               system("git", "read-tree", $rev);
-               die "read-tree failed for $rev: $?\n" if $?;
-               $last_rev = $rev;
-       }
-
-       push (@parents, $rev) if defined $rev;
-
-       my $cid;
-       if($tag and not %$changed_paths) {
-               $cid = $rev;
-       } else {
-               my @paths = sort keys %$changed_paths;
-               foreach my $path(@paths) {
-                       my $action = $changed_paths->{$path};
-
-                       if ($action->[0] eq "R") {
-                               # refer to a file/tree in an earlier commit
-                               push(@old,$path); # remove any old stuff
-                       }
-                       if(($action->[0] eq "A") || ($action->[0] eq "R")) {
-                               my $node_kind = node_kind($action->[3], $revision);
-                               if ($node_kind eq $SVN::Node::file) {
-                                       my $f = get_file($action->[3],
-                                                        $revision, $path);
-                                       if ($f) {
-                                               push(@new,$f) if $f;
-                                       } else {
-                                               my $opath = $action->[3];
-                                               print STDERR "$revision: $branch: could not fetch '$opath'\n";
-                                       }
-                               } elsif ($node_kind eq $SVN::Node::dir) {
-                                       if($action->[1]) {
-                                               copy_path($revision, $branch,
-                                                         $path, $action->[1],
-                                                         $action->[2], $node_kind,
-                                                         \@new, \@parents);
-                                       } else {
-                                               get_ignore(\@new, \@old, $revision,
-                                                          $path, $action->[3]);
-                                       }
-                               }
-                       } elsif ($action->[0] eq "D") {
-                               push(@old,$path);
-                       } elsif ($action->[0] eq "M") {
-                               my $node_kind = node_kind($action->[3], $revision);
-                               if ($node_kind eq $SVN::Node::file) {
-                                       my $f = get_file($action->[3],
-                                                        $revision, $path);
-                                       push(@new,$f) if $f;
-                               } elsif ($node_kind eq $SVN::Node::dir) {
-                                       get_ignore(\@new, \@old, $revision,
-                                                  $path, $action->[3]);
-                               }
-                       } else {
-                               die "$revision: unknown action '".$action->[0]."' for $path\n";
-                       }
-               }
-
-               while(@old) {
-                       my @o1;
-                       if(@old > 55) {
-                               @o1 = splice(@old,0,50);
-                       } else {
-                               @o1 = @old;
-                               @old = ();
-                       }
-                       my $pid = open my $F, "-|";
-                       die "$!" unless defined $pid;
-                       if (!$pid) {
-                               exec("git", "ls-files", "-z", @o1) or die $!;
-                       }
-                       @o1 = ();
-                       local $/ = "\0";
-                       while(<$F>) {
-                               chomp;
-                               push(@o1,$_);
-                       }
-                       close($F);
-
-                       while(@o1) {
-                               my @o2;
-                               if(@o1 > 55) {
-                                       @o2 = splice(@o1,0,50);
-                               } else {
-                                       @o2 = @o1;
-                                       @o1 = ();
-                               }
-                               system("git","update-index","--force-remove","--",@o2);
-                               die "Cannot remove files: $?\n" if $?;
-                       }
-               }
-               while(@new) {
-                       my @n2;
-                       if(@new > 12) {
-                               @n2 = splice(@new,0,10);
-                       } else {
-                               @n2 = @new;
-                               @new = ();
-                       }
-                       system("git","update-index","--add",
-                               (map { ('--cacheinfo', @$_) } @n2));
-                       die "Cannot add files: $?\n" if $?;
-               }
-
-               my $pid = open(C,"-|");
-               die "Cannot fork: $!" unless defined $pid;
-               unless($pid) {
-                       exec("git","write-tree");
-                       die "Cannot exec git-write-tree: $!\n";
-               }
-               chomp(my $tree = <C>);
-               length($tree) == 40
-                       or die "Cannot get tree id ($tree): $!\n";
-               close(C)
-                       or die "Error running git-write-tree: $?\n";
-               print "Tree ID $tree\n" if $opt_v;
-
-               my $pr = IO::Pipe->new() or die "Cannot open pipe: $!\n";
-               my $pw = IO::Pipe->new() or die "Cannot open pipe: $!\n";
-               $pid = fork();
-               die "Fork: $!\n" unless defined $pid;
-               unless($pid) {
-                       $pr->writer();
-                       $pw->reader();
-                       open(OUT,">&STDOUT");
-                       dup2($pw->fileno(),0);
-                       dup2($pr->fileno(),1);
-                       $pr->close();
-                       $pw->close();
-
-                       my @par = ();
-
-                       # loose detection of merges
-                       # based on the commit msg
-                       foreach my $rx (@mergerx) {
-                               if ($message =~ $rx) {
-                                       my $mparent = $1;
-                                       if ($mparent eq 'HEAD') { $mparent = $opt_o };
-                                       if ( -e "$git_dir/refs/heads/$mparent") {
-                                               $mparent = get_headref($mparent, $git_dir);
-                                               push (@parents, $mparent);
-                                               print OUT "Merge parent branch: $mparent\n" if $opt_v;
-                                       }
-                               }
-                       }
-                       my %seen_parents = ();
-                       my @unique_parents = grep { ! $seen_parents{$_} ++ } @parents;
-                       foreach my $bparent (@unique_parents) {
-                               push @par, '-p', $bparent;
-                               print OUT "Merge parent branch: $bparent\n" if $opt_v;
-                       }
-
-                       exec("env",
-                               "GIT_AUTHOR_NAME=$author_name",
-                               "GIT_AUTHOR_EMAIL=$author_email",
-                               "GIT_AUTHOR_DATE=".strftime("+0000 %Y-%m-%d %H:%M:%S",gmtime($date)),
-                               "GIT_COMMITTER_NAME=$committer_name",
-                               "GIT_COMMITTER_EMAIL=$committer_email",
-                               "GIT_COMMITTER_DATE=".strftime("+0000 %Y-%m-%d %H:%M:%S",gmtime($date)),
-                               "git", "commit-tree", $tree,@par);
-                       die "Cannot exec git-commit-tree: $!\n";
-               }
-               $pw->writer();
-               $pr->reader();
-
-               $message =~ s/[\s\n]+\z//;
-               $message = "r$revision: $message" if $opt_r;
-
-               print $pw "$message\n"
-                       or die "Error writing to git-commit-tree: $!\n";
-               $pw->close();
-
-               print "Committed change $revision:$branch ".strftime("%Y-%m-%d %H:%M:%S",gmtime($date)).")\n" if $opt_v;
-               chomp($cid = <$pr>);
-               length($cid) == 40
-                       or die "Cannot get commit id ($cid): $!\n";
-               print "Commit ID $cid\n" if $opt_v;
-               $pr->close();
-
-               waitpid($pid,0);
-               die "Error running git-commit-tree: $?\n" if $?;
-       }
-
-       if (not defined $cid) {
-               $cid = $branches{"/"}{"LAST"};
-       }
-
-       if(not defined $dest) {
-               print "... no known parent\n" if $opt_v;
-       } elsif(not $tag) {
-               print "Writing to refs/heads/$dest\n" if $opt_v;
-               open(C,">$git_dir/refs/heads/$dest") and
-               print C ("$cid\n") and
-               close(C)
-                       or die "Cannot write branch $dest for update: $!\n";
-       }
-
-       if ($tag) {
-               $last_rev = "-" if %$changed_paths;
-               # the tag was 'complex', i.e. did not refer to a "real" revision
-
-               $dest =~ tr/_/\./ if $opt_u;
-
-               system('git', 'tag', '-f', $dest, $cid) == 0
-                       or die "Cannot create tag $dest: $!\n";
-
-               print "Created tag '$dest' on '$branch'\n" if $opt_v;
-       }
-       $branches{$branch}{"LAST"} = $cid;
-       $branches{$branch}{$revision} = $cid;
-       $last_rev = $cid;
-       print BRANCHES "$revision $branch $cid\n";
-       print "DONE: $revision $dest $cid\n" if $opt_v;
-}
-
-sub commit_all {
-       # Recursive use of the SVN connection does not work
-       local $svn = $svn2;
-
-       my ($changed_paths, $revision, $author, $date, $message) = @_;
-       my %p;
-       while(my($path,$action) = each %$changed_paths) {
-               $p{$path} = [ $action->action,$action->copyfrom_path, $action->copyfrom_rev, $path ];
-       }
-       $changed_paths = \%p;
-
-       my %done;
-       my @col;
-       my $pref;
-       my $branch;
-
-       while(my($path,$action) = each %$changed_paths) {
-               ($branch,$path) = split_path($revision,$path);
-               next if not defined $branch;
-               next if not defined $path;
-               $done{$branch}{$path} = $action;
-       }
-       while(($branch,$changed_paths) = each %done) {
-               commit($branch, $changed_paths, $revision, $author, $date, $message);
-       }
-}
-
-$opt_l = $svn->{'maxrev'} if not defined $opt_l or $opt_l > $svn->{'maxrev'};
-
-if ($opt_l < $current_rev) {
-    print "Up to date: no new revisions to fetch!\n" if $opt_v;
-    unlink("$git_dir/SVN2GIT_HEAD");
-    exit;
-}
-
-print "Processing from $current_rev to $opt_l ...\n" if $opt_v;
-
-my $from_rev;
-my $to_rev = $current_rev - 1;
-
-my $subpool = SVN::Pool::new_default_sub;
-while ($to_rev < $opt_l) {
-       $subpool->clear;
-       $from_rev = $to_rev + 1;
-       $to_rev = $from_rev + $repack_after;
-       $to_rev = $opt_l if $opt_l < $to_rev;
-       print "Fetching from $from_rev to $to_rev ...\n" if $opt_v;
-       $svn->{'svn'}->get_log("",$from_rev,$to_rev,0,1,1,\&commit_all);
-       my $pid = fork();
-       die "Fork: $!\n" unless defined $pid;
-       unless($pid) {
-               exec("git", "repack", "-d")
-                       or die "Cannot repack: $!\n";
-       }
-       waitpid($pid, 0);
-}
-
-
-unlink($git_index);
-
-if (defined $orig_git_index) {
-       $ENV{GIT_INDEX_FILE} = $orig_git_index;
-} else {
-       delete $ENV{GIT_INDEX_FILE};
-}
-
-# Now switch back to the branch we were in before all of this happened
-if($orig_branch) {
-       print "DONE\n" if $opt_v and (not defined $opt_l or $opt_l > 0);
-       system("cp","$git_dir/refs/heads/$opt_o","$git_dir/refs/heads/master")
-               if $forward_master;
-       unless ($opt_i) {
-               system('git', 'read-tree', '-m', '-u', 'SVN2GIT_HEAD', 'HEAD');
-               die "read-tree failed: $?\n" if $?;
-       }
-} else {
-       $orig_branch = "master";
-       print "DONE; creating $orig_branch branch\n" if $opt_v and (not defined $opt_l or $opt_l > 0);
-       system("cp","$git_dir/refs/heads/$opt_o","$git_dir/refs/heads/master")
-               unless -f "$git_dir/refs/heads/master";
-       system('git', 'update-ref', 'HEAD', "$orig_branch");
-       unless ($opt_i) {
-               system('git checkout');
-               die "checkout failed: $?\n" if $?;
-       }
-}
-unlink("$git_dir/SVN2GIT_HEAD");
-close(BRANCHES);
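The commit() routine above never runs 'git commit'; each SVN revision is assembled from plumbing: read-tree loads the parent, update-index applies the changed paths, write-tree and commit-tree build the commit with the author taken from SVN, and the branch ref is updated by hand. A condensed sketch of that sequence, with every variable hypothetical:

        git read-tree "$parent_commit"                # temporary index starts from the parent's tree
        git update-index --add --cacheinfo 100644 "$blob_sha" "imported/file"
        git update-index --force-remove -- "deleted/file"
        tree=$(git write-tree)
        new_commit=$(echo "$svn_log_message" |
                GIT_AUTHOR_NAME="$author_name" GIT_AUTHOR_EMAIL="$author_email" \
                GIT_AUTHOR_DATE="$svn_date" \
                git commit-tree "$tree" -p "$parent_commit")
        git update-ref "refs/heads/$branch" "$new_commit"   # the script writes refs/heads/<dest> itself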
diff --git a/contrib/examples/git-svnimport.txt b/contrib/examples/git-svnimport.txt
deleted file mode 100644 (file)
index 3f0a9c3..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-git-svnimport(1)
-================
-v0.1, July 2005
-
-NAME
-----
-git-svnimport - Import a SVN repository into git
-
-
-SYNOPSIS
---------
-[verse]
-'git-svnimport' [ -o <branch-for-HEAD> ] [ -h ] [ -v ] [ -d | -D ]
-               [ -C <GIT_repository> ] [ -i ] [ -u ] [-l limit_rev]
-               [ -b branch_subdir ] [ -T trunk_subdir ] [ -t tag_subdir ]
-               [ -s start_chg ] [ -m ] [ -r ] [ -M regex ]
-               [ -I <ignorefile_name> ] [ -A <author_file> ]
-               [ -R <repack_each_revs>] [ -P <path_from_trunk> ]
-               <SVN_repository_URL> [ <path> ]
-
-
-DESCRIPTION
------------
-Imports a SVN repository into git. It will either create a new
-repository, or incrementally import into an existing one.
-
-SVN access is done by the SVN::Perl module.
-
-git-svnimport assumes that SVN repositories are organized into one
-"trunk" directory where the main development happens, "branches/FOO"
-directories for branches, and "/tags/FOO" directories for tags.
-Other subdirectories are ignored.
-
-git-svnimport creates a file ".git/svn2git", which is required for
-incremental SVN imports.
-
-OPTIONS
--------
--C <target-dir>::
-        The GIT repository to import to.  If the directory doesn't
-        exist, it will be created.  Default is the current directory.
-
--s <start_rev>::
-        Start importing at this SVN change number. The  default is 1.
-+
-When importing incrementally, you might need to edit the .git/svn2git file.
-
--i::
-       Import-only: don't perform a checkout after importing.  This option
-       ensures the working directory and index remain untouched and will
-       not create them if they do not exist.
-
--T <trunk_subdir>::
-       Name the SVN trunk. Default "trunk".
-
--t <tag_subdir>::
-       Name the SVN subdirectory for tags. Default "tags".
-
--b <branch_subdir>::
-       Name the SVN subdirectory for branches. Default "branches".
-
--o <branch-for-HEAD>::
-       The 'trunk' branch from SVN is imported to the 'origin' branch within
-       the git repository. Use this option if you want to import into a
-       different branch.
-
--r::
-       Prepend 'rX: ' to commit messages, where X is the imported
-       subversion revision.
-
--u::
-       Replace underscores in tag names with periods.
-
--I <ignorefile_name>::
-       Import the svn:ignore directory property to files with this
-       name in each directory. (The Subversion and GIT ignore
-       syntaxes are similar enough that using the Subversion patterns
-       directly with "-I .gitignore" will almost always just work.)
-
--A <author_file>::
-       Read a file with lines of the form
-+
-------
-       username = User's Full Name <email@addr.es>
-
-------
-+
-and use "User's Full Name <email@addr.es>" as the GIT
-author and committer for Subversion commits made by
-"username". If encountering a commit made by a user not in the
-list, abort.
-+
-For convenience, this data is saved to $GIT_DIR/svn-authors
-each time the -A option is provided, and read from that same
-file each time git-svnimport is run with an existing GIT
-repository without -A.
-
--m::
-       Attempt to detect merges based on the commit message. This option
-       will enable default regexes that try to capture the name source
-       will enable default regexes that try to capture the source
-
--M <regex>::
-       Attempt to detect merges based on the commit message with a custom
-       regex. It can be used with -m to also see the default regexes.
-       You must escape forward slashes.
-
--l <max_rev>::
-       Specify a maximum revision number to pull.
-+
-Formerly, this option controlled how many revisions to pull,
-due to SVN memory leaks. (These have been worked around.)
-
--R <repack_each_revs>::
-       Specify how often the git repository should be repacked.
-+
-The default value is 1000. git-svnimport will do imports in chunks of 1000
-revisions, and after each chunk the git repository will be repacked. To disable
-this behavior, specify a value larger than the number of revisions to import.
-
--P <path_from_trunk>::
-       Partial import of the SVN tree.
-+
-By default, the whole tree on the SVN trunk (/trunk) is imported.
-'-P my/proj' will import starting only from '/trunk/my/proj'.
-This option is useful when you want to import one project from a
-svn repo which hosts multiple projects under the same trunk.
-
--v::
-       Verbosity: let 'svnimport' report what it is doing.
-
--d::
-       Use direct HTTP requests if possible. The "<path>" argument is used
-       only for retrieving the SVN logs; the path to the contents is
-       included in the SVN log.
-
--D::
-       Use direct HTTP requests if possible. The "<path>" argument is used
-       for retrieving the logs, as well as for the contents.
-+
-There's no safe way to automatically find out which of these options to
-use, so you need to try both. Usually, the one that's wrong will die
-with a 40x error pretty quickly.
-
-<SVN_repository_URL>::
-       The URL of the SVN module you want to import. For local
-       repositories, use "file:///absolute/path".
-+
-If you're using the "-d" or "-D" option, this is the URL of the SVN
-repository itself; it usually ends in "/svn".
-
-<path>::
-       The path to the module you want to check out.
-
--h::
-       Print a short usage message and exit.
-
-OUTPUT
-------
-If '-v' is specified, the script reports what it is doing.
-
-Otherwise, success is indicated the Unix way, i.e. by simply exiting with
-a zero exit status.
-
-Author
-------
-Written by Matthias Urlichs <smurf@smurf.noris.de>, with help from
-various participants of the git-list <git@vger.kernel.org>.
-
-Based on a cvs2git script by the same author.
-
-Documentation
---------------
-Documentation by Matthias Urlichs <smurf@smurf.noris.de>.
-
-GIT
----
-Part of the linkgit:git[7] suite
diff --git a/contrib/examples/git-tag.sh b/contrib/examples/git-tag.sh
deleted file mode 100755 (executable)
index 1bd8f3c..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/bin/sh
-# Copyright (c) 2005 Linus Torvalds
-
-USAGE='[-n [<num>]] -l [<pattern>] | [-a | -s | -u <key-id>] [-f | -d | -v] [-m <msg>] <tagname> [<head>]'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-message_given=
-annotate=
-signed=
-force=
-message=
-username=
-list=
-verify=
-LINES=0
-while test $# != 0
-do
-    case "$1" in
-    -a)
-       annotate=1
-       shift
-       ;;
-    -s)
-       annotate=1
-       signed=1
-       shift
-       ;;
-    -f)
-       force=1
-       shift
-       ;;
-    -n)
-        case "$#,$2" in
-       1,* | *,-*)
-               LINES=1         # no argument
-               ;;
-       *)      shift
-               LINES=$(expr "$1" : '\([0-9]*\)')
-               [ -z "$LINES" ] && LINES=1 # 1 line is default when -n is used
-               ;;
-       esac
-       shift
-       ;;
-    -l)
-       list=1
-       shift
-       case $# in
-       0)      PATTERN=
-               ;;
-       *)
-               PATTERN="$1"    # select tags by shell pattern, not re
-               shift
-               ;;
-       esac
-       git rev-parse --symbolic --tags | sort |
-           while read TAG
-           do
-               case "$TAG" in
-               *$PATTERN*) ;;
-               *)          continue ;;
-               esac
-               [ "$LINES" -le 0 ] && { echo "$TAG"; continue ;}
-               OBJTYPE=$(git cat-file -t "$TAG")
-               case $OBJTYPE in
-               tag)
-                       ANNOTATION=$(git cat-file tag "$TAG" |
-                               sed -e '1,/^$/d' |
-                               sed -n -e "
-                                       /^-----BEGIN PGP SIGNATURE-----\$/q
-                                       2,\$s/^/    /
-                                       p
-                                       ${LINES}q
-                               ")
-                       printf "%-15s %s\n" "$TAG" "$ANNOTATION"
-                       ;;
-               *)      echo "$TAG"
-                       ;;
-               esac
-           done
-       ;;
-    -m)
-       annotate=1
-       shift
-       message="$1"
-       if test "$#" = "0"; then
-           die "error: option -m needs an argument"
-       else
-           message="$1"
-           message_given=1
-           shift
-       fi
-       ;;
-    -F)
-       annotate=1
-       shift
-       if test "$#" = "0"; then
-           die "error: option -F needs an argument"
-       else
-           message="$(cat "$1")"
-           message_given=1
-           shift
-       fi
-       ;;
-    -u)
-       annotate=1
-       signed=1
-       shift
-       if test "$#" = "0"; then
-           die "error: option -u needs an argument"
-       else
-           username="$1"
-           shift
-       fi
-       ;;
-    -d)
-       shift
-       had_error=0
-       for tag
-       do
-               cur=$(git show-ref --verify --hash -- "refs/tags/$tag") || {
-                       echo >&2 "Seriously, what tag are you talking about?"
-                       had_error=1
-                       continue
-               }
-               git update-ref -m 'tag: delete' -d "refs/tags/$tag" "$cur" || {
-                       had_error=1
-                       continue
-               }
-               echo "Deleted tag $tag."
-       done
-       exit $had_error
-       ;;
-    -v)
-       shift
-       tag_name="$1"
-       tag=$(git show-ref --verify --hash -- "refs/tags/$tag_name") ||
-               die "Seriously, what tag are you talking about?"
-       git-verify-tag -v "$tag"
-       exit $?
-       ;;
-    -*)
-        usage
-       ;;
-    *)
-       break
-       ;;
-    esac
-done
-
-[ -n "$list" ] && exit 0
-
-name="$1"
-[ "$name" ] || usage
-prev=0000000000000000000000000000000000000000
-if git show-ref --verify --quiet -- "refs/tags/$name"
-then
-    test -n "$force" || die "tag '$name' already exists"
-    prev=$(git rev-parse "refs/tags/$name")
-fi
-shift
-git check-ref-format "tags/$name" ||
-       die "we do not like '$name' as a tag name."
-
-object=$(git rev-parse --verify --default HEAD "$@") || exit 1
-type=$(git cat-file -t $object) || exit 1
-tagger=$(git var GIT_COMMITTER_IDENT) || exit 1
-
-test -n "$username" ||
-       username=$(git config user.signingkey) ||
-       username=$(expr "z$tagger" : 'z\(.*>\)')
-
-trap 'rm -f "$GIT_DIR"/TAG_TMP* "$GIT_DIR"/TAG_FINALMSG "$GIT_DIR"/TAG_EDITMSG' 0
-
-if [ "$annotate" ]; then
-    if [ -z "$message_given" ]; then
-        ( echo "#"
-          echo "# Write a tag message"
-          echo "#" ) > "$GIT_DIR"/TAG_EDITMSG
-        git_editor "$GIT_DIR"/TAG_EDITMSG || exit
-    else
-        printf '%s\n' "$message" >"$GIT_DIR"/TAG_EDITMSG
-    fi
-
-    grep -v '^#' <"$GIT_DIR"/TAG_EDITMSG |
-    git stripspace >"$GIT_DIR"/TAG_FINALMSG
-
-    [ -s "$GIT_DIR"/TAG_FINALMSG -o -n "$message_given" ] || {
-       echo >&2 "No tag message?"
-       exit 1
-    }
-
-    ( printf 'object %s\ntype %s\ntag %s\ntagger %s\n\n' \
-       "$object" "$type" "$name" "$tagger";
-      cat "$GIT_DIR"/TAG_FINALMSG ) >"$GIT_DIR"/TAG_TMP
-    rm -f "$GIT_DIR"/TAG_TMP.asc "$GIT_DIR"/TAG_FINALMSG
-    if [ "$signed" ]; then
-       gpg -bsa -u "$username" "$GIT_DIR"/TAG_TMP &&
-       cat "$GIT_DIR"/TAG_TMP.asc >>"$GIT_DIR"/TAG_TMP ||
-       die "failed to sign the tag with GPG."
-    fi
-    object=$(git-mktag < "$GIT_DIR"/TAG_TMP)
-fi
-
-git update-ref "refs/tags/$name" "$object" "$prev"
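
The removed script spells out the raw annotated-tag object layout: an
object/type/tag/tagger header block, a blank line, the message, and (for -s
or -u) a detached PGP signature appended before the buffer is piped to
git-mktag. A rough, self-contained C sketch of that header layout follows;
every value in it is a placeholder, not something taken from this patch:

    #include <stdio.h>

    /* Illustrative only: assemble a tag object body the way the removed
     * script did with printf + cat before piping it to git-mktag. */
    static int format_tag(char *out, size_t outsz,
                          const char *object, const char *type,
                          const char *tag, const char *tagger,
                          const char *message)
    {
            return snprintf(out, outsz,
                            "object %s\ntype %s\ntag %s\ntagger %s\n\n%s",
                            object, type, tag, tagger, message);
    }

    int main(void)
    {
            char buf[4096];

            format_tag(buf, sizeof(buf),
                       "1234567890123456789012345678901234567890", /* placeholder id */
                       "commit", "v1.0",
                       "A U Thor <author@example.com> 1522437728 +0000",
                       "Tag message\n");
            fputs(buf, stdout);
            return 0;
    }

git-mktag validates exactly this layout before writing the object, and the
script then points refs/tags/<name> at it with git update-ref.
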
diff --git a/contrib/examples/git-verify-tag.sh b/contrib/examples/git-verify-tag.sh
deleted file mode 100755 (executable)
index 0902a5c..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/sh
-
-USAGE='<tag>'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-verbose=
-while test $# != 0
-do
-       case "$1" in
-       -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
-               verbose=t ;;
-       *)
-               break ;;
-       esac
-       shift
-done
-
-if [ "$#" != "1" ]
-then
-       usage
-fi
-
-type="$(git cat-file -t "$1" 2>/dev/null)" ||
-       die "$1: no such object."
-
-test "$type" = tag ||
-       die "$1: cannot verify a non-tag object of type $type."
-
-case "$verbose" in
-t)
-       git cat-file -p "$1" |
-       sed -n -e '/^-----BEGIN PGP SIGNATURE-----/q' -e p
-       ;;
-esac
-
-trap 'rm -f "$GIT_DIR/.tmp-vtag"' 0
-
-git cat-file tag "$1" >"$GIT_DIR/.tmp-vtag" || exit 1
-sed -n -e '
-       /^-----BEGIN PGP SIGNATURE-----$/q
-       p
-' <"$GIT_DIR/.tmp-vtag" |
-gpg --verify "$GIT_DIR/.tmp-vtag" - || exit 1
-rm -f "$GIT_DIR/.tmp-vtag"
diff --git a/contrib/examples/git-whatchanged.sh b/contrib/examples/git-whatchanged.sh
deleted file mode 100755 (executable)
index 2edbdc6..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-USAGE='[-p] [--max-count=<n>] [<since>..<limit>] [--pretty=<format>] [-m] [git-diff-tree options] [git-rev-list options]'
-SUBDIRECTORY_OK='Yes'
-. git-sh-setup
-
-diff_tree_flags=$(git-rev-parse --sq --no-revs --flags "$@") || exit
-case "$0" in
-*whatchanged)
-       count=
-       test -z "$diff_tree_flags" &&
-               diff_tree_flags=$(git config --get whatchanged.difftree)
-       diff_tree_default_flags='-c -M --abbrev' ;;
-*show)
-       count=-n1
-       test -z "$diff_tree_flags" &&
-               diff_tree_flags=$(git config --get show.difftree)
-       diff_tree_default_flags='--cc --always' ;;
-esac
-test -z "$diff_tree_flags" &&
-       diff_tree_flags="$diff_tree_default_flags"
-
-rev_list_args=$(git-rev-parse --sq --default HEAD --revs-only "$@") &&
-diff_tree_args=$(git-rev-parse --sq --no-revs --no-flags "$@") &&
-
-eval "git-rev-list $count $rev_list_args" |
-eval "git-diff-tree --stdin --pretty -r $diff_tree_flags $diff_tree_args" |
-LESS="$LESS -S" ${PAGER:-less}
index 6a2cdebdb78c5d9d5a507635f748bda7b2be676d..7ba78c4dff6811d4b60b01ec66882ebcd468df14 100755 (executable)
@@ -17,7 +17,7 @@
 # ln -sf /usr/share/git-core/contrib/hooks/pre-auto-gc-battery \
 #      hooks/pre-auto-gc
 
-if test -x /sbin/on_ac_power && /sbin/on_ac_power
+if test -x /sbin/on_ac_power && (/sbin/on_ac_power;test $? -ne 1)
 then
        exit 0
 elif test "$(cat /sys/class/power_supply/AC/online 2>/dev/null)" = 1
index dec085a235f4477c9079dc643c76db166f487761..d3f39a862ac9f6ec39bd08b1c3b48ea787bba04b 100755 (executable)
@@ -297,7 +297,7 @@ find_latest_squash () {
        main=
        sub=
        git log --grep="^git-subtree-dir: $dir/*\$" \
-               --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD |
+               --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD |
        while read a b junk
        do
                debug "$a $b $junk"
@@ -341,7 +341,7 @@ find_existing_splits () {
        main=
        sub=
        git log --grep="^git-subtree-dir: $dir/*\$" \
-               --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs |
+               --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs |
        while read a b junk
        do
                case "$a" in
@@ -382,7 +382,7 @@ copy_commit () {
        # We're going to set some environment vars here, so
        # do it in a subshell to get rid of them safely later
        debug copy_commit "{$1}" "{$2}" "{$3}"
-       git log -1 --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" |
+       git log -1 --no-show-signature --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" |
        (
                read GIT_AUTHOR_NAME
                read GIT_AUTHOR_EMAIL
@@ -462,8 +462,8 @@ squash_msg () {
                oldsub_short=$(git rev-parse --short "$oldsub")
                echo "Squashed '$dir/' changes from $oldsub_short..$newsub_short"
                echo
-               git log --pretty=tformat:'%h %s' "$oldsub..$newsub"
-               git log --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub"
+               git log --no-show-signature --pretty=tformat:'%h %s' "$oldsub..$newsub"
+               git log --no-show-signature --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub"
        else
                echo "Squashed '$dir/' content from commit $newsub_short"
        fi
@@ -475,7 +475,7 @@ squash_msg () {
 
 toptree_for_commit () {
        commit="$1"
-       git log -1 --pretty=format:'%T' "$commit" -- || exit $?
+       git rev-parse --verify "$commit^{tree}" || exit $?
 }
 
 subtree_for_commit () {
index 60d76cdddfc75f0c77d5798ac0b1adc6b0e93899..352deda69dcfd009ff025615dff07dd0a5d015d1 100644 (file)
@@ -28,7 +28,7 @@ as a subdirectory of your application.
 
 Subtrees are not to be confused with submodules, which are meant for
 the same task. Unlike submodules, subtrees do not need any special
-constructions (like .gitmodule files or gitlinks) be present in
+constructions (like .gitmodules files or gitlinks) be present in
 your repository, and do not force end-users of your
 repository to do anything special or to understand how subtrees
 work. A subtree is just a subdirectory that can be
index b976eb968c8289414c415e7beefa91c0816d5158..c480097a2a0cb3d780bdeff252da912c1b8e63b7 100644 (file)
--- a/convert.c
+++ b/convert.c
@@ -898,7 +898,7 @@ static int ident_to_git(const char *path, const char *src, size_t len,
 static int ident_to_worktree(const char *path, const char *src, size_t len,
                              struct strbuf *buf, int ident)
 {
-       unsigned char sha1[20];
+       struct object_id oid;
        char *to_free = NULL, *dollar, *spc;
        int cnt;
 
@@ -912,9 +912,9 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
        /* are we "faking" in place editing ? */
        if (src == buf->buf)
                to_free = strbuf_detach(buf, NULL);
-       hash_sha1_file(src, len, "blob", sha1);
+       hash_object_file(src, len, "blob", &oid);
 
-       strbuf_grow(buf, len + cnt * 43);
+       strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3));
        for (;;) {
                /* step 1: run to the next '$' */
                dollar = memchr(src, '$', len);
@@ -969,7 +969,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
 
                /* step 4: substitute */
                strbuf_addstr(buf, "Id: ");
-               strbuf_add(buf, sha1_to_hex(sha1), 40);
+               strbuf_addstr(buf, oid_to_hex(&oid));
                strbuf_addstr(buf, " $");
        }
        strbuf_add(buf, src, len);
@@ -1510,7 +1510,7 @@ struct ident_filter {
        struct stream_filter filter;
        struct strbuf left;
        int state;
-       char ident[45]; /* ": x40 $" */
+       char ident[GIT_MAX_HEXSZ + 5]; /* ": x40 $" */
 };
 
 static int is_foreign_ident(const char *str)
@@ -1635,12 +1635,12 @@ static struct stream_filter_vtbl ident_vtbl = {
        ident_free_fn,
 };
 
-static struct stream_filter *ident_filter(const unsigned char *sha1)
+static struct stream_filter *ident_filter(const struct object_id *oid)
 {
        struct ident_filter *ident = xmalloc(sizeof(*ident));
 
        xsnprintf(ident->ident, sizeof(ident->ident),
-                 ": %s $", sha1_to_hex(sha1));
+                 ": %s $", oid_to_hex(oid));
        strbuf_init(&ident->left, 0);
        ident->filter.vtbl = &ident_vtbl;
        ident->state = 0;
@@ -1655,7 +1655,7 @@ static struct stream_filter *ident_filter(const unsigned char *sha1)
  * Note that you would be crazy to set CRLF, smuge/clean or ident to a
  * large binary blob you would want us not to slurp into the memory!
  */
-struct stream_filter *get_stream_filter(const char *path, const unsigned char *sha1)
+struct stream_filter *get_stream_filter(const char *path, const struct object_id *oid)
 {
        struct conv_attrs ca;
        struct stream_filter *filter = NULL;
@@ -1668,7 +1668,7 @@ struct stream_filter *get_stream_filter(const char *path, const unsigned char *s
                return NULL;
 
        if (ca.ident)
-               filter = ident_filter(sha1);
+               filter = ident_filter(oid);
 
        if (output_eol(ca.crlf_action) == EOL_CRLF)
                filter = cascade_filter(filter, lf_to_crlf_filter());
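
The convert.c hunks above drop the bare "unsigned char sha1[20]" and the
hard-coded lengths 40, 43 and 45 in favour of "struct object_id",
the_hash_algo->hexsz and GIT_MAX_HEXSZ, so the ident filter no longer bakes
in SHA-1-sized output. A minimal sketch of the sizing idea; the
hash_algo_desc struct here is a made-up stand-in for git's hash-algorithm
descriptor, not part of the patch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-in: only the size fields matter for this sketch. */
    struct hash_algo_desc {
            const char *name;
            size_t rawsz;   /* binary digest length */
            size_t hexsz;   /* hex digest length */
    };

    static const struct hash_algo_desc sha1_desc = { "sha1", 20, 40 };

    /* Size the "Id: <hex> $" expansion from the descriptor rather than
     * from a literal 40, mirroring the change above. */
    static char *make_ident(const struct hash_algo_desc *algo, const char *hex)
    {
            size_t len = strlen("Id: ") + algo->hexsz + strlen(" $") + 1;
            char *buf = malloc(len);

            if (buf)
                    snprintf(buf, len, "Id: %s $", hex);
            return buf;
    }

    int main(void)
    {
            char *id = make_ident(&sha1_desc,
                                  "0123456789abcdef0123456789abcdef01234567");

            if (id) {
                    puts(id);
                    free(id);
            }
            return 0;
    }
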
index 65ab3e516745775b52f36a370ba7880a34c8b2a5..2e9b4f49cc0acc697bf0304306b96e0b50e30aab 100644 (file)
--- a/convert.h
+++ b/convert.h
@@ -93,7 +93,7 @@ extern int would_convert_to_git_filter_fd(const char *path);
 
 struct stream_filter; /* opaque */
 
-extern struct stream_filter *get_stream_filter(const char *path, const unsigned char *);
+extern struct stream_filter *get_stream_filter(const char *path, const struct object_id *);
 extern void free_stream_filter(struct stream_filter *);
 extern int is_null_stream_filter(struct stream_filter *);
 
index 9747f47b18bf2e622f11f41889faaa4b1845ac8d..62be651b03b55ee4d478706e51ea8606b10739f0 100644 (file)
@@ -5,6 +5,7 @@
 #include "run-command.h"
 #include "url.h"
 #include "prompt.h"
+#include "sigchain.h"
 
 void credential_init(struct credential *c)
 {
@@ -227,8 +228,10 @@ static int run_credential_helper(struct credential *c,
                return -1;
 
        fp = xfdopen(helper.in, "w");
+       sigchain_push(SIGPIPE, SIG_IGN);
        credential_write(c, fp);
        fclose(fp);
+       sigchain_pop(SIGPIPE);
 
        if (want_output) {
                int r;
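
The credential.c hunk ignores SIGPIPE around credential_write() so that a
helper which exits before reading all of its input shows up as a failed
write (EPIPE) instead of killing the calling process. A self-contained
sketch of the same pattern using plain POSIX sigaction() in place of git's
sigchain_push()/sigchain_pop():

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            int fd[2];
            struct sigaction ignore, saved;

            if (pipe(fd) < 0)
                    return 1;
            close(fd[0]);   /* simulate a helper that exited early */

            memset(&ignore, 0, sizeof(ignore));
            ignore.sa_handler = SIG_IGN;
            sigemptyset(&ignore.sa_mask);
            sigaction(SIGPIPE, &ignore, &saved);    /* "push" SIG_IGN */

            if (write(fd[1], "protocol=https\n", 15) < 0 && errno == EPIPE)
                    fprintf(stderr, "helper went away: %s\n", strerror(errno));

            sigaction(SIGPIPE, &saved, NULL);       /* "pop" the old handler */
            close(fd[1]);
            return 0;
    }
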
index 2adae04073816a781d01d85433d8d8922baafd7f..5eda7fb6af673ae82989dab871aaac74ec9ff629 100644 (file)
@@ -11,7 +11,7 @@
 #include "progress.h"
 #include "csum-file.h"
 
-static void flush(struct sha1file *f, const void *buf, unsigned int count)
+static void flush(struct hashfile *f, const void *buf, unsigned int count)
 {
        if (0 <= f->check_fd && count)  {
                unsigned char check_buffer[8192];
@@ -42,28 +42,28 @@ static void flush(struct sha1file *f, const void *buf, unsigned int count)
        }
 }
 
-void sha1flush(struct sha1file *f)
+void hashflush(struct hashfile *f)
 {
        unsigned offset = f->offset;
 
        if (offset) {
-               git_SHA1_Update(&f->ctx, f->buffer, offset);
+               the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
                flush(f, f->buffer, offset);
                f->offset = 0;
        }
 }
 
-int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
+int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags)
 {
        int fd;
 
-       sha1flush(f);
-       git_SHA1_Final(f->buffer, &f->ctx);
+       hashflush(f);
+       the_hash_algo->final_fn(f->buffer, &f->ctx);
        if (result)
                hashcpy(result, f->buffer);
        if (flags & (CSUM_CLOSE | CSUM_FSYNC)) {
                /* write checksum and close fd */
-               flush(f, f->buffer, 20);
+               flush(f, f->buffer, the_hash_algo->rawsz);
                if (flags & CSUM_FSYNC)
                        fsync_or_die(f->fd, f->name);
                if (close(f->fd))
@@ -86,7 +86,7 @@ int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
        return fd;
 }
 
-void sha1write(struct sha1file *f, const void *buf, unsigned int count)
+void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
 {
        while (count) {
                unsigned offset = f->offset;
@@ -110,7 +110,7 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count)
                buf = (char *) buf + nr;
                left -= nr;
                if (!left) {
-                       git_SHA1_Update(&f->ctx, data, offset);
+                       the_hash_algo->update_fn(&f->ctx, data, offset);
                        flush(f, data, offset);
                        offset = 0;
                }
@@ -118,15 +118,15 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count)
        }
 }
 
-struct sha1file *sha1fd(int fd, const char *name)
+struct hashfile *hashfd(int fd, const char *name)
 {
-       return sha1fd_throughput(fd, name, NULL);
+       return hashfd_throughput(fd, name, NULL);
 }
 
-struct sha1file *sha1fd_check(const char *name)
+struct hashfile *hashfd_check(const char *name)
 {
        int sink, check;
-       struct sha1file *f;
+       struct hashfile *f;
 
        sink = open("/dev/null", O_WRONLY);
        if (sink < 0)
@@ -134,14 +134,14 @@ struct sha1file *sha1fd_check(const char *name)
        check = open(name, O_RDONLY);
        if (check < 0)
                die_errno("unable to open '%s'", name);
-       f = sha1fd(sink, name);
+       f = hashfd(sink, name);
        f->check_fd = check;
        return f;
 }
 
-struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp)
+struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
 {
-       struct sha1file *f = xmalloc(sizeof(*f));
+       struct hashfile *f = xmalloc(sizeof(*f));
        f->fd = fd;
        f->check_fd = -1;
        f->offset = 0;
@@ -149,18 +149,18 @@ struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp
        f->tp = tp;
        f->name = name;
        f->do_crc = 0;
-       git_SHA1_Init(&f->ctx);
+       the_hash_algo->init_fn(&f->ctx);
        return f;
 }
 
-void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
 {
-       sha1flush(f);
+       hashflush(f);
        checkpoint->offset = f->total;
        checkpoint->ctx = f->ctx;
 }
 
-int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
 {
        off_t offset = checkpoint->offset;
 
@@ -169,17 +169,17 @@ int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint
                return -1;
        f->total = offset;
        f->ctx = checkpoint->ctx;
-       f->offset = 0; /* sha1flush() was called in checkpoint */
+       f->offset = 0; /* hashflush() was called in checkpoint */
        return 0;
 }
 
-void crc32_begin(struct sha1file *f)
+void crc32_begin(struct hashfile *f)
 {
        f->crc32 = crc32(0, NULL, 0);
        f->do_crc = 1;
 }
 
-uint32_t crc32_end(struct sha1file *f)
+uint32_t crc32_end(struct hashfile *f)
 {
        f->do_crc = 0;
        return f->crc32;
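
Beyond the sha1file -> hashfile rename, the csum-file.c hunks stop calling
git_SHA1_Init/Update/Final directly and dispatch through the_hash_algo's
init_fn/update_fn/final_fn pointers, so the hashing backend is selected
through one descriptor. A toy sketch of that function-table shape; the
"hash" below is a trivial stand-in chosen only to keep the example
self-contained:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct { uint32_t state; } toy_ctx;

    static void toy_init(toy_ctx *ctx) { ctx->state = 0; }

    static void toy_update(toy_ctx *ctx, const void *buf, size_t len)
    {
            const unsigned char *p = buf;

            while (len--)
                    ctx->state = ctx->state * 31 + *p++;
    }

    static void toy_final(unsigned char *out, toy_ctx *ctx)
    {
            memcpy(out, &ctx->state, sizeof(ctx->state));
    }

    struct hash_algo_vtbl {
            void (*init_fn)(toy_ctx *);
            void (*update_fn)(toy_ctx *, const void *, size_t);
            void (*final_fn)(unsigned char *, toy_ctx *);
            size_t rawsz;
    };

    static const struct hash_algo_vtbl toy_algo = {
            toy_init, toy_update, toy_final, sizeof(uint32_t)
    };
    static const struct hash_algo_vtbl *the_algo = &toy_algo;

    int main(void)
    {
            toy_ctx ctx;
            unsigned char digest[sizeof(uint32_t)];
            size_t i;

            the_algo->init_fn(&ctx);
            the_algo->update_fn(&ctx, "hello", 5);
            the_algo->final_fn(digest, &ctx);

            for (i = 0; i < the_algo->rawsz; i++)
                    printf("%02x", digest[i]);
            putchar('\n');
            return 0;
    }
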
index 7530927d774562f82636aa773481ae93600c1756..992e5c014122d8fed3ee782d400e61de78e55271 100644 (file)
@@ -4,11 +4,11 @@
 struct progress;
 
 /* A SHA1-protected file */
-struct sha1file {
+struct hashfile {
        int fd;
        int check_fd;
        unsigned int offset;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
        off_t total;
        struct progress *tp;
        const char *name;
@@ -18,36 +18,36 @@ struct sha1file {
 };
 
 /* Checkpoint */
-struct sha1file_checkpoint {
+struct hashfile_checkpoint {
        off_t offset;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
 };
 
-extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *);
-extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *);
+extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
+extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
 
-/* sha1close flags */
+/* hashclose flags */
 #define CSUM_CLOSE     1
 #define CSUM_FSYNC     2
 
-extern struct sha1file *sha1fd(int fd, const char *name);
-extern struct sha1file *sha1fd_check(const char *name);
-extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
-extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern void sha1write(struct sha1file *, const void *, unsigned int);
-extern void sha1flush(struct sha1file *f);
-extern void crc32_begin(struct sha1file *);
-extern uint32_t crc32_end(struct sha1file *);
+extern struct hashfile *hashfd(int fd, const char *name);
+extern struct hashfile *hashfd_check(const char *name);
+extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+extern int hashclose(struct hashfile *, unsigned char *, unsigned int);
+extern void hashwrite(struct hashfile *, const void *, unsigned int);
+extern void hashflush(struct hashfile *f);
+extern void crc32_begin(struct hashfile *);
+extern uint32_t crc32_end(struct hashfile *);
 
-static inline void sha1write_u8(struct sha1file *f, uint8_t data)
+static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
 {
-       sha1write(f, &data, sizeof(data));
+       hashwrite(f, &data, sizeof(data));
 }
 
-static inline void sha1write_be32(struct sha1file *f, uint32_t data)
+static inline void hashwrite_be32(struct hashfile *f, uint32_t data)
 {
        data = htonl(data);
-       sha1write(f, &data, sizeof(data));
+       hashwrite(f, &data, sizeof(data));
 }
 
 #endif
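
The renamed API keeps its semantics: hashwrite() buffers data, a full
buffer is fed to the hash update function before it is written out, and
hashclose() can append the final digest as a trailer, which is how pack and
index files carry their trailing checksum. A reduced imitation of that
write/flush/close cycle; the one-byte XOR "digest" and the demo.out file
name are placeholders for this sketch only:

    #include <stdio.h>
    #include <string.h>

    struct tinyfile {
            FILE *fp;
            unsigned char digest;           /* stands in for git_hash_ctx */
            unsigned char buffer[8192];
            size_t offset;
    };

    static void tiny_flush(struct tinyfile *f)
    {
            size_t i;

            for (i = 0; i < f->offset; i++)
                    f->digest ^= f->buffer[i];      /* "update" the digest */
            fwrite(f->buffer, 1, f->offset, f->fp);
            f->offset = 0;
    }

    static void tiny_write(struct tinyfile *f, const void *buf, size_t count)
    {
            const unsigned char *p = buf;

            while (count) {
                    size_t n = sizeof(f->buffer) - f->offset;

                    if (n > count)
                            n = count;
                    memcpy(f->buffer + f->offset, p, n);
                    f->offset += n;
                    p += n;
                    count -= n;
                    if (f->offset == sizeof(f->buffer))
                            tiny_flush(f);
            }
    }

    static void tiny_close(struct tinyfile *f)
    {
            tiny_flush(f);
            fwrite(&f->digest, 1, 1, f->fp);        /* trailing checksum */
            fclose(f->fp);
    }

    int main(void)
    {
            struct tinyfile f = { NULL, 0, { 0 }, 0 };

            f.fp = fopen("demo.out", "wb");
            if (!f.fp)
                    return 1;
            tiny_write(&f, "some payload", 12);
            tiny_close(&f);
            return 0;
    }
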
index 72dfeaf6e296323db7baf9e4545e412c01878417..fe833ea7de7685968915c93950bb54768dd04586 100644 (file)
--- a/daemon.c
+++ b/daemon.c
@@ -9,7 +9,12 @@
 #define initgroups(x, y) (0) /* nothing */
 #endif
 
-static int log_syslog;
+static enum log_destination {
+       LOG_DESTINATION_UNSET = -1,
+       LOG_DESTINATION_NONE = 0,
+       LOG_DESTINATION_STDERR = 1,
+       LOG_DESTINATION_SYSLOG = 2,
+} log_destination = LOG_DESTINATION_UNSET;
 static int verbose;
 static int reuseaddr;
 static int informative_errors;
@@ -25,6 +30,7 @@ static const char daemon_usage[] =
 "           [--access-hook=<path>]\n"
 "           [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>]\n"
 "                      [--detach] [--user=<user> [--group=<group>]]\n"
+"           [--log-destination=(stderr|syslog|none)]\n"
 "           [<directory>...]";
 
 /* List of acceptable pathname prefixes */
@@ -74,11 +80,14 @@ static const char *get_ip_address(struct hostinfo *hi)
 
 static void logreport(int priority, const char *err, va_list params)
 {
-       if (log_syslog) {
+       switch (log_destination) {
+       case LOG_DESTINATION_SYSLOG: {
                char buf[1024];
                vsnprintf(buf, sizeof(buf), err, params);
                syslog(priority, "%s", buf);
-       } else {
+               break;
+       }
+       case LOG_DESTINATION_STDERR:
                /*
                 * Since stderr is set to buffered mode, the
                 * logging of different processes will not overlap
@@ -88,6 +97,11 @@ static void logreport(int priority, const char *err, va_list params)
                vfprintf(stderr, err, params);
                fputc('\n', stderr);
                fflush(stderr);
+               break;
+       case LOG_DESTINATION_NONE:
+               break;
+       case LOG_DESTINATION_UNSET:
+               BUG("log destination not initialized correctly");
        }
 }
 
@@ -1286,7 +1300,6 @@ int cmd_main(int argc, const char **argv)
                }
                if (!strcmp(arg, "--inetd")) {
                        inetd_mode = 1;
-                       log_syslog = 1;
                        continue;
                }
                if (!strcmp(arg, "--verbose")) {
@@ -1294,9 +1307,22 @@ int cmd_main(int argc, const char **argv)
                        continue;
                }
                if (!strcmp(arg, "--syslog")) {
-                       log_syslog = 1;
+                       log_destination = LOG_DESTINATION_SYSLOG;
                        continue;
                }
+               if (skip_prefix(arg, "--log-destination=", &v)) {
+                       if (!strcmp(v, "syslog")) {
+                               log_destination = LOG_DESTINATION_SYSLOG;
+                               continue;
+                       } else if (!strcmp(v, "stderr")) {
+                               log_destination = LOG_DESTINATION_STDERR;
+                               continue;
+                       } else if (!strcmp(v, "none")) {
+                               log_destination = LOG_DESTINATION_NONE;
+                               continue;
+                       } else
+                               die("unknown log destination '%s'", v);
+               }
                if (!strcmp(arg, "--export-all")) {
                        export_all_trees = 1;
                        continue;
@@ -1353,7 +1379,6 @@ int cmd_main(int argc, const char **argv)
                }
                if (!strcmp(arg, "--detach")) {
                        detach = 1;
-                       log_syslog = 1;
                        continue;
                }
                if (skip_prefix(arg, "--user=", &v)) {
@@ -1399,7 +1424,14 @@ int cmd_main(int argc, const char **argv)
                usage(daemon_usage);
        }
 
-       if (log_syslog) {
+       if (log_destination == LOG_DESTINATION_UNSET) {
+               if (inetd_mode || detach)
+                       log_destination = LOG_DESTINATION_SYSLOG;
+               else
+                       log_destination = LOG_DESTINATION_STDERR;
+       }
+
+       if (log_destination == LOG_DESTINATION_SYSLOG) {
                openlog("git-daemon", LOG_PID, LOG_DAEMON);
                set_die_routine(daemon_die);
        } else
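
The daemon.c change replaces the boolean log_syslog with an explicit
log_destination enum, adds --log-destination=(stderr|syslog|none), and only
defaults to syslog when running under --inetd or --detach. A stripped-down
sketch of that parse-then-default flow:

    #include <stdio.h>
    #include <string.h>

    enum log_destination {
            LOG_DESTINATION_UNSET = -1,
            LOG_DESTINATION_NONE = 0,
            LOG_DESTINATION_STDERR = 1,
            LOG_DESTINATION_SYSLOG = 2,
    };

    static int parse_log_destination(const char *v, enum log_destination *out)
    {
            if (!strcmp(v, "syslog"))
                    *out = LOG_DESTINATION_SYSLOG;
            else if (!strcmp(v, "stderr"))
                    *out = LOG_DESTINATION_STDERR;
            else if (!strcmp(v, "none"))
                    *out = LOG_DESTINATION_NONE;
            else
                    return -1;
            return 0;
    }

    int main(int argc, char **argv)
    {
            enum log_destination dest = LOG_DESTINATION_UNSET;
            int inetd_mode = 0, detach = 0;

            if (argc > 1 && parse_log_destination(argv[1], &dest))
                    fprintf(stderr, "unknown log destination '%s'\n", argv[1]);

            /* Explicit defaulting replaces the old implicit flag: daemons
             * started with --inetd or --detach used to force syslog. */
            if (dest == LOG_DESTINATION_UNSET)
                    dest = (inetd_mode || detach) ? LOG_DESTINATION_SYSLOG
                                                  : LOG_DESTINATION_STDERR;

            printf("logging to destination %d\n", (int)dest);
            return 0;
    }

Because the destination is now an explicit setting, an inetd-launched
daemon can ask for --log-destination=stderr, which the old boolean had no
way to express.
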
index 8104603a3b36f2fd73761db34efde44d5938f68f..104f954a25700a4eee95507e7ebb7734e94b67c6 100644 (file)
@@ -92,6 +92,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
        int diff_unmerged_stage = revs->max_count;
        unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
                              ? CE_MATCH_RACY_IS_DIRTY : 0);
+       uint64_t start = getnanotime();
 
        diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
 
@@ -246,6 +247,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
        }
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
+       trace_performance_since(start, "diff-files");
        return 0;
 }
 
@@ -302,7 +304,7 @@ static int get_stat_data(const struct cache_entry *ce,
 }
 
 static void show_new_file(struct rev_info *revs,
-                         const struct cache_entry *new,
+                         const struct cache_entry *new_file,
                          int cached, int match_missing)
 {
        const struct object_id *oid;
@@ -313,16 +315,16 @@ static void show_new_file(struct rev_info *revs,
         * New file in the index: it might actually be different in
         * the working tree.
         */
-       if (get_stat_data(new, &oid, &mode, cached, match_missing,
+       if (get_stat_data(new_file, &oid, &mode, cached, match_missing,
            &dirty_submodule, &revs->diffopt) < 0)
                return;
 
-       diff_index_show_file(revs, "+", new, oid, !is_null_oid(oid), mode, dirty_submodule);
+       diff_index_show_file(revs, "+", new_file, oid, !is_null_oid(oid), mode, dirty_submodule);
 }
 
 static int show_modified(struct rev_info *revs,
-                        const struct cache_entry *old,
-                        const struct cache_entry *new,
+                        const struct cache_entry *old_entry,
+                        const struct cache_entry *new_entry,
                         int report_missing,
                         int cached, int match_missing)
 {
@@ -330,47 +332,47 @@ static int show_modified(struct rev_info *revs,
        const struct object_id *oid;
        unsigned dirty_submodule = 0;
 
-       if (get_stat_data(new, &oid, &mode, cached, match_missing,
+       if (get_stat_data(new_entry, &oid, &mode, cached, match_missing,
                          &dirty_submodule, &revs->diffopt) < 0) {
                if (report_missing)
-                       diff_index_show_file(revs, "-", old,
-                                            &old->oid, 1, old->ce_mode,
+                       diff_index_show_file(revs, "-", old_entry,
+                                            &old_entry->oid, 1, old_entry->ce_mode,
                                             0);
                return -1;
        }
 
        if (revs->combine_merges && !cached &&
-           (oidcmp(oid, &old->oid) || oidcmp(&old->oid, &new->oid))) {
+           (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) {
                struct combine_diff_path *p;
-               int pathlen = ce_namelen(new);
+               int pathlen = ce_namelen(new_entry);
 
                p = xmalloc(combine_diff_path_size(2, pathlen));
                p->path = (char *) &p->parent[2];
                p->next = NULL;
-               memcpy(p->path, new->name, pathlen);
+               memcpy(p->path, new_entry->name, pathlen);
                p->path[pathlen] = 0;
                p->mode = mode;
                oidclr(&p->oid);
                memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
                p->parent[0].status = DIFF_STATUS_MODIFIED;
-               p->parent[0].mode = new->ce_mode;
-               oidcpy(&p->parent[0].oid, &new->oid);
+               p->parent[0].mode = new_entry->ce_mode;
+               oidcpy(&p->parent[0].oid, &new_entry->oid);
                p->parent[1].status = DIFF_STATUS_MODIFIED;
-               p->parent[1].mode = old->ce_mode;
-               oidcpy(&p->parent[1].oid, &old->oid);
+               p->parent[1].mode = old_entry->ce_mode;
+               oidcpy(&p->parent[1].oid, &old_entry->oid);
                show_combined_diff(p, 2, revs->dense_combined_merges, revs);
                free(p);
                return 0;
        }
 
-       oldmode = old->ce_mode;
-       if (mode == oldmode && !oidcmp(oid, &old->oid) && !dirty_submodule &&
+       oldmode = old_entry->ce_mode;
+       if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule &&
            !revs->diffopt.flags.find_copies_harder)
                return 0;
 
        diff_change(&revs->diffopt, oldmode, mode,
-                   &old->oid, oid, 1, !is_null_oid(oid),
-                   old->name, 0, dirty_submodule);
+                   &old_entry->oid, oid, 1, !is_null_oid(oid),
+                   old_entry->name, 0, dirty_submodule);
        return 0;
 }
 
@@ -512,6 +514,7 @@ static int diff_cache(struct rev_info *revs,
 int run_diff_index(struct rev_info *revs, int cached)
 {
        struct object_array_entry *ent;
+       uint64_t start = getnanotime();
 
        ent = revs->pending.objects;
        if (diff_cache(revs, &ent->item->oid, ent->name, cached))
@@ -521,6 +524,7 @@ int run_diff_index(struct rev_info *revs, int cached)
        diffcore_fix_diff_index(&revs->diffopt);
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
+       trace_performance_since(start, "diff-index");
        return 0;
 }
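
run_diff_files() and run_diff_index() now take a getnanotime() timestamp up
front and report the elapsed time through trace_performance_since(), the
mechanism behind GIT_TRACE_PERFORMANCE output. A self-contained
approximation of that pattern using POSIX clock_gettime(CLOCK_MONOTONIC);
the busy loop merely stands in for the real diff work:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t nanotime(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
            uint64_t start = nanotime();
            volatile unsigned long sum = 0;
            unsigned long i;

            for (i = 0; i < 10000000ul; i++)    /* stand-in for run_diff_files() */
                    sum += i;

            fprintf(stderr, "performance: %.9f s: %s\n",
                    (nanotime() - start) / 1e9, "diff-files");
            return 0;
    }
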
 
diff --git a/diff.c b/diff.c
index 21c3838b25be63e632aa3af19eef27dd2eddd976..1289df4b1f9f395010e475073c2c5e5ce43976a7 100644 (file)
--- a/diff.c
+++ b/diff.c
@@ -1504,7 +1504,7 @@ struct diff_words_style_elem {
 
 struct diff_words_style {
        enum diff_words_type type;
-       struct diff_words_style_elem new, old, ctx;
+       struct diff_words_style_elem new_word, old_word, ctx;
        const char *newline;
 };
 
@@ -1655,12 +1655,12 @@ static void fn_out_diff_words_aux(void *priv, char *line, unsigned long len)
        }
        if (minus_begin != minus_end) {
                fn_out_diff_words_write_helper(diff_words->opt,
-                               &style->old, style->newline,
+                               &style->old_word, style->newline,
                                minus_end - minus_begin, minus_begin);
        }
        if (plus_begin != plus_end) {
                fn_out_diff_words_write_helper(diff_words->opt,
-                               &style->new, style->newline,
+                               &style->new_word, style->newline,
                                plus_end - plus_begin, plus_begin);
        }
 
@@ -1758,7 +1758,7 @@ static void diff_words_show(struct diff_words_data *diff_words)
                emit_diff_symbol(diff_words->opt, DIFF_SYMBOL_WORD_DIFF,
                                 line_prefix, strlen(line_prefix), 0);
                fn_out_diff_words_write_helper(diff_words->opt,
-                       &style->old, style->newline,
+                       &style->old_word, style->newline,
                        diff_words->minus.text.size,
                        diff_words->minus.text.ptr);
                diff_words->minus.text.size = 0;
@@ -1883,8 +1883,8 @@ static void init_diff_words_data(struct emit_callback *ecbdata,
        }
        if (want_color(o->use_color)) {
                struct diff_words_style *st = ecbdata->diff_words->style;
-               st->old.color = diff_get_color_opt(o, DIFF_FILE_OLD);
-               st->new.color = diff_get_color_opt(o, DIFF_FILE_NEW);
+               st->old_word.color = diff_get_color_opt(o, DIFF_FILE_OLD);
+               st->new_word.color = diff_get_color_opt(o, DIFF_FILE_NEW);
                st->ctx.color = diff_get_color_opt(o, DIFF_CONTEXT);
        }
 }
@@ -2045,11 +2045,10 @@ static void fn_out_consume(void *priv, char *line, unsigned long len)
        }
 }
 
-static char *pprint_rename(const char *a, const char *b)
+static void pprint_rename(struct strbuf *name, const char *a, const char *b)
 {
-       const char *old = a;
-       const char *new = b;
-       struct strbuf name = STRBUF_INIT;
+       const char *old_name = a;
+       const char *new_name = b;
        int pfx_length, sfx_length;
        int pfx_adjust_for_slash;
        int len_a = strlen(a);
@@ -2059,24 +2058,24 @@ static char *pprint_rename(const char *a, const char *b)
        int qlen_b = quote_c_style(b, NULL, NULL, 0);
 
        if (qlen_a || qlen_b) {
-               quote_c_style(a, &name, NULL, 0);
-               strbuf_addstr(&name, " => ");
-               quote_c_style(b, &name, NULL, 0);
-               return strbuf_detach(&name, NULL);
+               quote_c_style(a, name, NULL, 0);
+               strbuf_addstr(name, " => ");
+               quote_c_style(b, name, NULL, 0);
+               return;
        }
 
        /* Find common prefix */
        pfx_length = 0;
-       while (*old && *new && *old == *new) {
-               if (*old == '/')
-                       pfx_length = old - a + 1;
-               old++;
-               new++;
+       while (*old_name && *new_name && *old_name == *new_name) {
+               if (*old_name == '/')
+                       pfx_length = old_name - a + 1;
+               old_name++;
+               new_name++;
        }
 
        /* Find common suffix */
-       old = a + len_a;
-       new = b + len_b;
+       old_name = a + len_a;
+       new_name = b + len_b;
        sfx_length = 0;
        /*
         * If there is a common prefix, it must end in a slash.  In
@@ -2087,13 +2086,13 @@ static char *pprint_rename(const char *a, const char *b)
         * underrun the input strings.
         */
        pfx_adjust_for_slash = (pfx_length ? 1 : 0);
-       while (a + pfx_length - pfx_adjust_for_slash <= old &&
-              b + pfx_length - pfx_adjust_for_slash <= new &&
-              *old == *new) {
-               if (*old == '/')
-                       sfx_length = len_a - (old - a);
-               old--;
-               new--;
+       while (a + pfx_length - pfx_adjust_for_slash <= old_name &&
+              b + pfx_length - pfx_adjust_for_slash <= new_name &&
+              *old_name == *new_name) {
+               if (*old_name == '/')
+                       sfx_length = len_a - (old_name - a);
+               old_name--;
+               new_name--;
        }
 
        /*
@@ -2109,19 +2108,18 @@ static char *pprint_rename(const char *a, const char *b)
        if (b_midlen < 0)
                b_midlen = 0;
 
-       strbuf_grow(&name, pfx_length + a_midlen + b_midlen + sfx_length + 7);
+       strbuf_grow(name, pfx_length + a_midlen + b_midlen + sfx_length + 7);
        if (pfx_length + sfx_length) {
-               strbuf_add(&name, a, pfx_length);
-               strbuf_addch(&name, '{');
+               strbuf_add(name, a, pfx_length);
+               strbuf_addch(name, '{');
        }
-       strbuf_add(&name, a + pfx_length, a_midlen);
-       strbuf_addstr(&name, " => ");
-       strbuf_add(&name, b + pfx_length, b_midlen);
+       strbuf_add(name, a + pfx_length, a_midlen);
+       strbuf_addstr(name, " => ");
+       strbuf_add(name, b + pfx_length, b_midlen);
        if (pfx_length + sfx_length) {
-               strbuf_addch(&name, '}');
-               strbuf_add(&name, a + len_a - sfx_length, sfx_length);
+               strbuf_addch(name, '}');
+               strbuf_add(name, a + len_a - sfx_length, sfx_length);
        }
-       return strbuf_detach(&name, NULL);
 }
 
 struct diffstat_t {
@@ -2131,6 +2129,7 @@ struct diffstat_t {
                char *from_name;
                char *name;
                char *print_name;
+               const char *comments;
                unsigned is_unmerged:1;
                unsigned is_binary:1;
                unsigned is_renamed:1;
@@ -2197,23 +2196,20 @@ static void show_graph(struct strbuf *out, char ch, int cnt,
 
 static void fill_print_name(struct diffstat_file *file)
 {
-       char *pname;
+       struct strbuf pname = STRBUF_INIT;
 
        if (file->print_name)
                return;
 
-       if (!file->is_renamed) {
-               struct strbuf buf = STRBUF_INIT;
-               if (quote_c_style(file->name, &buf, NULL, 0)) {
-                       pname = strbuf_detach(&buf, NULL);
-               } else {
-                       pname = file->name;
-                       strbuf_release(&buf);
-               }
-       } else {
-               pname = pprint_rename(file->from_name, file->name);
-       }
-       file->print_name = pname;
+       if (file->is_renamed)
+               pprint_rename(&pname, file->from_name, file->name);
+       else
+               quote_c_style(file->name, &pname, NULL, 0);
+
+       if (file->comments)
+               strbuf_addf(&pname, " (%s)", file->comments);
+
+       file->print_name = strbuf_detach(&pname, NULL);
 }
 
 static void print_stat_summary_inserts_deletes(struct diff_options *options,
@@ -2594,14 +2590,14 @@ struct dirstat_dir {
 static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
                unsigned long changed, const char *base, int baselen)
 {
-       unsigned long this_dir = 0;
+       unsigned long sum_changes = 0;
        unsigned int sources = 0;
        const char *line_prefix = diff_line_prefix(opt);
 
        while (dir->nr) {
                struct dirstat_file *f = dir->files;
                int namelen = strlen(f->name);
-               unsigned long this;
+               unsigned long changes;
                char *slash;
 
                if (namelen < baselen)
@@ -2611,15 +2607,15 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
                slash = strchr(f->name + baselen, '/');
                if (slash) {
                        int newbaselen = slash + 1 - f->name;
-                       this = gather_dirstat(opt, dir, changed, f->name, newbaselen);
+                       changes = gather_dirstat(opt, dir, changed, f->name, newbaselen);
                        sources++;
                } else {
-                       this = f->changed;
+                       changes = f->changed;
                        dir->files++;
                        dir->nr--;
                        sources += 2;
                }
-               this_dir += this;
+               sum_changes += changes;
        }
 
        /*
@@ -2629,8 +2625,8 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
         *    under this directory (sources == 1).
         */
        if (baselen && sources != 1) {
-               if (this_dir) {
-                       int permille = this_dir * 1000 / changed;
+               if (sum_changes) {
+                       int permille = sum_changes * 1000 / changed;
                        if (permille >= dir->permille) {
                                fprintf(opt->file, "%s%4d.%01d%% %.*s\n", line_prefix,
                                        permille / 10, permille % 10, baselen, base);
@@ -2639,7 +2635,7 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
                        }
                }
        }
-       return this_dir;
+       return sum_changes;
 }
 
 static int dirstat_compare(const void *_a, const void *_b)
@@ -2797,8 +2793,7 @@ static void free_diffstat_info(struct diffstat_t *diffstat)
        int i;
        for (i = 0; i < diffstat->nr; i++) {
                struct diffstat_file *f = diffstat->files[i];
-               if (f->name != f->print_name)
-                       free(f->print_name);
+               free(f->print_name);
                free(f->name);
                free(f->from_name);
                free(f);
@@ -3248,6 +3243,32 @@ static void builtin_diff(const char *name_a,
        return;
 }
 
+static char *get_compact_summary(const struct diff_filepair *p, int is_renamed)
+{
+       if (!is_renamed) {
+               if (p->status == DIFF_STATUS_ADDED) {
+                       if (S_ISLNK(p->two->mode))
+                               return "new +l";
+                       else if ((p->two->mode & 0777) == 0755)
+                               return "new +x";
+                       else
+                               return "new";
+               } else if (p->status == DIFF_STATUS_DELETED)
+                       return "gone";
+       }
+       if (S_ISLNK(p->one->mode) && !S_ISLNK(p->two->mode))
+               return "mode -l";
+       else if (!S_ISLNK(p->one->mode) && S_ISLNK(p->two->mode))
+               return "mode +l";
+       else if ((p->one->mode & 0777) == 0644 &&
+                (p->two->mode & 0777) == 0755)
+               return "mode +x";
+       else if ((p->one->mode & 0777) == 0755 &&
+                (p->two->mode & 0777) == 0644)
+               return "mode -x";
+       return NULL;
+}
+
 static void builtin_diffstat(const char *name_a, const char *name_b,
                             struct diff_filespec *one,
                             struct diff_filespec *two,
@@ -3267,6 +3288,8 @@ static void builtin_diffstat(const char *name_a, const char *name_b,
 
        data = diffstat_add(diffstat, name_a, name_b);
        data->is_interesting = p->status != DIFF_STATUS_UNKNOWN;
+       if (o->flags.stat_with_summary)
+               data->comments = get_compact_summary(p, data->is_renamed);
 
        if (!one || !two) {
                data->is_unmerged = 1;
@@ -3615,7 +3638,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
        else {
                enum object_type type;
                if (size_only || (flags & CHECK_BINARY)) {
-                       type = sha1_object_info(s->oid.hash, &s->size);
+                       type = oid_object_info(&s->oid, &s->size);
                        if (type < 0)
                                die("unable to read %s",
                                    oid_to_hex(&s->oid));
@@ -3626,7 +3649,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
                                return 0;
                        }
                }
-               s->data = read_sha1_file(s->oid.hash, &type, &s->size);
+               s->data = read_object_file(&s->oid, &type, &s->size);
                if (!s->data)
                        die("unable to read %s", oid_to_hex(&s->oid));
                s->should_free = 1;
@@ -3660,15 +3683,15 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp,
                           int mode)
 {
        struct strbuf buf = STRBUF_INIT;
-       struct strbuf template = STRBUF_INIT;
+       struct strbuf tempfile = STRBUF_INIT;
        char *path_dup = xstrdup(path);
        const char *base = basename(path_dup);
 
        /* Generate "XXXXXX_basename.ext" */
-       strbuf_addstr(&template, "XXXXXX_");
-       strbuf_addstr(&template, base);
+       strbuf_addstr(&tempfile, "XXXXXX_");
+       strbuf_addstr(&tempfile, base);
 
-       temp->tempfile = mks_tempfile_ts(template.buf, strlen(base) + 1);
+       temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
        if (!temp->tempfile)
                die_errno("unable to create temp-file");
        if (convert_to_working_tree(path,
@@ -3683,7 +3706,7 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp,
        oid_to_hex_r(temp->hex, oid);
        xsnprintf(temp->mode, sizeof(temp->mode), "%06o", mode);
        strbuf_release(&buf);
-       strbuf_release(&template);
+       strbuf_release(&tempfile);
        free(path_dup);
 }
 
@@ -3811,7 +3834,7 @@ static int similarity_index(struct diff_filepair *p)
 static const char *diff_abbrev_oid(const struct object_id *oid, int abbrev)
 {
        if (startup_info->have_repository)
-               return find_unique_abbrev(oid->hash, abbrev);
+               return find_unique_abbrev(oid, abbrev);
        else {
                char *hex = oid_to_hex(oid);
                if (abbrev < 0)
@@ -4553,6 +4576,11 @@ int diff_opt_parse(struct diff_options *options,
        else if (starts_with(arg, "--stat"))
                /* --stat, --stat-width, --stat-name-width, or --stat-count */
                return stat_opt(options, av);
+       else if (!strcmp(arg, "--compact-summary")) {
+                options->flags.stat_with_summary = 1;
+                options->output_format |= DIFF_FORMAT_DIFFSTAT;
+       } else if (!strcmp(arg, "--no-compact-summary"))
+                options->flags.stat_with_summary = 0;
 
        /* renames options */
        else if (starts_with(arg, "-B") ||
@@ -5241,10 +5269,12 @@ static void show_rename_copy(struct diff_options *opt, const char *renamecopy,
                struct diff_filepair *p)
 {
        struct strbuf sb = STRBUF_INIT;
-       char *names = pprint_rename(p->one->path, p->two->path);
+       struct strbuf names = STRBUF_INIT;
+
+       pprint_rename(&names, p->one->path, p->two->path);
        strbuf_addf(&sb, " %s %s (%d%%)\n",
-                       renamecopy, names, similarity_index(p));
-       free(names);
+                   renamecopy, names.buf, similarity_index(p));
+       strbuf_release(&names);
        emit_diff_symbol(opt, DIFF_SYMBOL_SUMMARY,
                                 sb.buf, sb.len, 0);
        show_mode_change(opt, p, 0);
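
Among the diff.c changes, pprint_rename() now appends into a caller-supplied
strbuf instead of returning a detached allocation; that lets
fill_print_name() tack the new --compact-summary comments onto the same
buffer and lets free_diffstat_info() free print_name unconditionally. A
generic sketch of that "append into the caller's buffer" shape, with a
hand-rolled growable buffer standing in for strbuf and placeholder path
names:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { char *ptr; size_t len, alloc; };

    static void buf_add(struct buf *b, const char *s)
    {
            size_t n = strlen(s);

            if (b->len + n + 1 > b->alloc) {
                    char *tmp;

                    b->alloc = (b->len + n + 1) * 2;
                    tmp = realloc(b->ptr, b->alloc);
                    if (!tmp)
                            exit(1);
                    b->ptr = tmp;
            }
            memcpy(b->ptr + b->len, s, n + 1);
            b->len += n;
    }

    /* Caller owns "name"; it can keep appending and frees it exactly once. */
    static void print_rename(struct buf *name, const char *a, const char *b)
    {
            buf_add(name, a);
            buf_add(name, " => ");
            buf_add(name, b);
    }

    int main(void)
    {
            struct buf name = { NULL, 0, 0 };

            print_rename(&name, "old/path.c", "new/path.c");
            buf_add(&name, " (mode +x)");   /* e.g. a compact-summary comment */
            puts(name.ptr);
            free(name.ptr);
            return 0;
    }
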
diff --git a/diff.h b/diff.h
index 6bd278aac11a4349ce76602113e45e31ae1229de..d29560f822ca0ee4738f769e2feb3939851d7ff1 100644 (file)
--- a/diff.h
+++ b/diff.h
@@ -93,6 +93,7 @@ struct diff_flags {
        unsigned dirstat_by_line:1;
        unsigned funccontext:1;
        unsigned default_follow_renames:1;
+       unsigned stat_with_summary:1;
 };
 
 static inline void diff_flags_or(struct diff_flags *a,
index ebe70fb068519be86437a11b980d804be0534c75..c83d45a0470d412c29f8c392e84bb010150dbb2a 100644 (file)
@@ -48,16 +48,16 @@ struct spanhash_top {
 
 static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
 {
-       struct spanhash_top *new;
+       struct spanhash_top *new_spanhash;
        int i;
        int osz = 1 << orig->alloc_log2;
        int sz = osz << 1;
 
-       new = xmalloc(st_add(sizeof(*orig),
+       new_spanhash = xmalloc(st_add(sizeof(*orig),
                             st_mult(sizeof(struct spanhash), sz)));
-       new->alloc_log2 = orig->alloc_log2 + 1;
-       new->free = INITIAL_FREE(new->alloc_log2);
-       memset(new->data, 0, sizeof(struct spanhash) * sz);
+       new_spanhash->alloc_log2 = orig->alloc_log2 + 1;
+       new_spanhash->free = INITIAL_FREE(new_spanhash->alloc_log2);
+       memset(new_spanhash->data, 0, sizeof(struct spanhash) * sz);
        for (i = 0; i < osz; i++) {
                struct spanhash *o = &(orig->data[i]);
                int bucket;
@@ -65,11 +65,11 @@ static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
                        continue;
                bucket = o->hashval & (sz - 1);
                while (1) {
-                       struct spanhash *h = &(new->data[bucket++]);
+                       struct spanhash *h = &(new_spanhash->data[bucket++]);
                        if (!h->cnt) {
                                h->hashval = o->hashval;
                                h->cnt = o->cnt;
-                               new->free--;
+                               new_spanhash->free--;
                                break;
                        }
                        if (sz <= bucket)
@@ -77,7 +77,7 @@ static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
                }
        }
        free(orig);
-       return new;
+       return new_spanhash;
 }
 
 static struct spanhash_top *add_spanhash(struct spanhash_top *top,
index 888a4b0189c00e3dcf99a3684afe88d62f7921b2..0b7e4989a87214faa22e4f8ec75a719d3fd857ae 100644 (file)
@@ -260,8 +260,8 @@ static unsigned int hash_filespec(struct diff_filespec *filespec)
        if (!filespec->oid_valid) {
                if (diff_populate_filespec(filespec, 0))
                        return 0;
-               hash_sha1_file(filespec->data, filespec->size, "blob",
-                              filespec->oid.hash);
+               hash_object_file(filespec->data, filespec->size, "blob",
+                                &filespec->oid);
        }
        return sha1hash(filespec->oid.hash);
 }
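
hash_filespec() above fills in the filespec's object id on demand (hashing
the file content with hash_object_file() when no id is known yet) so rename
detection can bucket old and new paths by content hash before any expensive
comparison. A toy illustration of that bucketing idea; FNV-1a stands in for
the object hash, and because it is so weak the sketch still compares the
contents after the hashes match:

    #include <stdio.h>
    #include <string.h>

    static unsigned int content_hash(const void *buf, size_t len)
    {
            const unsigned char *p = buf;
            unsigned int h = 2166136261u;

            while (len--) {
                    h ^= *p++;
                    h *= 16777619u;
            }
            return h;
    }

    int main(void)
    {
            const char *deleted = "line one\nline two\n"; /* old path's content */
            const char *added   = "line one\nline two\n"; /* new path's content */

            if (content_hash(deleted, strlen(deleted)) ==
                content_hash(added, strlen(added)) &&
                !strcmp(deleted, added))
                    puts("exact rename candidate");
            return 0;
    }
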
diff --git a/dir.c b/dir.c
index ce6e50d2a2399122ee5c12b18a0ebb8a07dc3f75..63a917be45db99c278cc86012ff74718043dc63d 100644 (file)
--- a/dir.c
+++ b/dir.c
@@ -231,12 +231,10 @@ int within_depth(const char *name, int namelen,
  *     1 along with { data, size } of the (possibly augmented) buffer
  *       when successful.
  *
- * Optionally updates the given sha1_stat with the given OID (when valid).
+ * Optionally updates the given oid_stat with the given OID (when valid).
  */
-static int do_read_blob(const struct object_id *oid,
-                       struct sha1_stat *sha1_stat,
-                       size_t *size_out,
-                       char **data_out)
+static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
+                       size_t *size_out, char **data_out)
 {
        enum object_type type;
        unsigned long sz;
@@ -245,15 +243,15 @@ static int do_read_blob(const struct object_id *oid,
        *size_out = 0;
        *data_out = NULL;
 
-       data = read_sha1_file(oid->hash, &type, &sz);
+       data = read_object_file(oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return -1;
        }
 
-       if (sha1_stat) {
-               memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat));
-               hashcpy(sha1_stat->sha1, oid->hash);
+       if (oid_stat) {
+               memset(&oid_stat->stat, 0, sizeof(oid_stat->stat));
+               oidcpy(&oid_stat->oid, oid);
        }
 
        if (sz == 0) {
@@ -654,9 +652,8 @@ void add_exclude(const char *string, const char *base,
 
 static int read_skip_worktree_file_from_index(const struct index_state *istate,
                                              const char *path,
-                                             size_t *size_out,
-                                             char **data_out,
-                                             struct sha1_stat *sha1_stat)
+                                             size_t *size_out, char **data_out,
+                                             struct oid_stat *oid_stat)
 {
        int pos, len;
 
@@ -667,7 +664,7 @@ static int read_skip_worktree_file_from_index(const struct index_state *istate,
        if (!ce_skip_worktree(istate->cache[pos]))
                return -1;
 
-       return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out);
+       return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out);
 }
 
 /*
@@ -774,7 +771,16 @@ static void invalidate_directory(struct untracked_cache *uc,
                                 struct untracked_cache_dir *dir)
 {
        int i;
-       uc->dir_invalidated++;
+
+       /*
+        * Invalidation increment here is just roughly correct. If
+        * untracked_nr or any of dirs[].recurse is non-zero, we
+        * should increment dir_invalidated too. But that's more
+        * expensive to do.
+        */
+       if (dir->valid)
+               uc->dir_invalidated++;
+
        dir->valid = 0;
        dir->untracked_nr = 0;
        for (i = 0; i < dir->dirs_nr; i++)
@@ -795,9 +801,8 @@ static int add_excludes_from_buffer(char *buf, size_t size,
  * ss_valid is non-zero, "ss" must contain good value as input.
  */
 static int add_excludes(const char *fname, const char *base, int baselen,
-                       struct exclude_list *el,
-                       struct index_state *istate,
-                       struct sha1_stat *sha1_stat)
+                       struct exclude_list *el, struct index_state *istate,
+                       struct oid_stat *oid_stat)
 {
        struct stat st;
        int r;
@@ -815,16 +820,16 @@ static int add_excludes(const char *fname, const char *base, int baselen,
                        return -1;
                r = read_skip_worktree_file_from_index(istate, fname,
                                                       &size, &buf,
-                                                      sha1_stat);
+                                                      oid_stat);
                if (r != 1)
                        return r;
        } else {
                size = xsize_t(st.st_size);
                if (size == 0) {
-                       if (sha1_stat) {
-                               fill_stat_data(&sha1_stat->stat, &st);
-                               hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN);
-                               sha1_stat->valid = 1;
+                       if (oid_stat) {
+                               fill_stat_data(&oid_stat->stat, &st);
+                               oidcpy(&oid_stat->oid, &empty_blob_oid);
+                               oid_stat->valid = 1;
                        }
                        close(fd);
                        return 0;
@@ -837,22 +842,23 @@ static int add_excludes(const char *fname, const char *base, int baselen,
                }
                buf[size++] = '\n';
                close(fd);
-               if (sha1_stat) {
+               if (oid_stat) {
                        int pos;
-                       if (sha1_stat->valid &&
-                           !match_stat_data_racy(istate, &sha1_stat->stat, &st))
+                       if (oid_stat->valid &&
+                           !match_stat_data_racy(istate, &oid_stat->stat, &st))
                                ; /* no content change, ss->sha1 still good */
                        else if (istate &&
                                 (pos = index_name_pos(istate, fname, strlen(fname))) >= 0 &&
                                 !ce_stage(istate->cache[pos]) &&
                                 ce_uptodate(istate->cache[pos]) &&
                                 !would_convert_to_git(istate, fname))
-                               hashcpy(sha1_stat->sha1,
-                                       istate->cache[pos]->oid.hash);
+                               oidcpy(&oid_stat->oid,
+                                      &istate->cache[pos]->oid);
                        else
-                               hash_sha1_file(buf, size, "blob", sha1_stat->sha1);
-                       fill_stat_data(&sha1_stat->stat, &st);
-                       sha1_stat->valid = 1;
+                               hash_object_file(buf, size, "blob",
+                                                &oid_stat->oid);
+                       fill_stat_data(&oid_stat->stat, &st);
+                       oid_stat->valid = 1;
                }
        }
 
@@ -930,7 +936,7 @@ struct exclude_list *add_exclude_list(struct dir_struct *dir,
  * Used to set up core.excludesfile and .git/info/exclude lists.
  */
 static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
-                                    struct sha1_stat *sha1_stat)
+                                    struct oid_stat *oid_stat)
 {
        struct exclude_list *el;
        /*
@@ -941,7 +947,7 @@ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
        if (!dir->untracked)
                dir->unmanaged_exclude_files++;
        el = add_exclude_list(dir, EXC_FILE, fname);
-       if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0)
+       if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
                die("cannot use %s as an exclude file", fname);
 }
 
@@ -1180,7 +1186,7 @@ static void prep_exclude(struct dir_struct *dir,
 
        while (current < baselen) {
                const char *cp;
-               struct sha1_stat sha1_stat;
+               struct oid_stat oid_stat;
 
                stk = xcalloc(1, sizeof(*stk));
                if (current < 0) {
@@ -1223,8 +1229,8 @@ static void prep_exclude(struct dir_struct *dir,
                }
 
                /* Try to read per-directory file */
-               hashclr(sha1_stat.sha1);
-               sha1_stat.valid = 0;
+               oidclr(&oid_stat.oid);
+               oid_stat.valid = 0;
                if (dir->exclude_per_dir &&
                    /*
                     * If we know that no files have been added in
@@ -1252,7 +1258,7 @@ static void prep_exclude(struct dir_struct *dir,
                        strbuf_addstr(&sb, dir->exclude_per_dir);
                        el->src = strbuf_detach(&sb, NULL);
                        add_excludes(el->src, el->src, stk->baselen, el, istate,
-                                    untracked ? &sha1_stat : NULL);
+                                    untracked ? &oid_stat : NULL);
                }
                /*
                 * NEEDSWORK: when untracked cache is enabled, prep_exclude()
@@ -1269,9 +1275,9 @@ static void prep_exclude(struct dir_struct *dir,
                 * order, though, if you do that.
                 */
                if (untracked &&
-                   hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) {
+                   hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
                        invalidate_gitignore(dir->untracked, untracked);
-                       hashcpy(untracked->exclude_sha1, sha1_stat.sha1);
+                       hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
                }
                dir->exclude_stack = stk;
                current = stk->baselen;
@@ -1773,7 +1779,7 @@ static enum path_treatment treat_path(struct dir_struct *dir,
        if (!de)
                return treat_path_fast(dir, untracked, cdir, istate, path,
                                       baselen, pathspec);
-       if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git"))
+       if (is_dot_or_dotdot(de->d_name) || !fspathcmp(de->d_name, ".git"))
                return path_none;
        strbuf_setlen(path, baselen);
        strbuf_addstr(path, de->d_name);
@@ -1809,24 +1815,19 @@ static int valid_cached_dir(struct dir_struct *dir,
         */
        refresh_fsmonitor(istate);
        if (!(dir->untracked->use_fsmonitor && untracked->valid)) {
-               if (stat(path->len ? path->buf : ".", &st)) {
-                       invalidate_directory(dir->untracked, untracked);
+               if (lstat(path->len ? path->buf : ".", &st)) {
                        memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
                        return 0;
                }
                if (!untracked->valid ||
                        match_stat_data_racy(istate, &untracked->stat_data, &st)) {
-                       if (untracked->valid)
-                               invalidate_directory(dir->untracked, untracked);
                        fill_stat_data(&untracked->stat_data, &st);
                        return 0;
                }
        }
 
-       if (untracked->check_only != !!check_only) {
-               invalidate_directory(dir->untracked, untracked);
+       if (untracked->check_only != !!check_only)
                return 0;
-       }
 
        /*
         * prep_exclude will be called eventually on this directory,
@@ -1853,13 +1854,20 @@ static int open_cached_dir(struct cached_dir *cdir,
                           struct strbuf *path,
                           int check_only)
 {
+       const char *c_path;
+
        memset(cdir, 0, sizeof(*cdir));
        cdir->untracked = untracked;
        if (valid_cached_dir(dir, untracked, istate, path, check_only))
                return 0;
-       cdir->fdir = opendir(path->len ? path->buf : ".");
-       if (dir->untracked)
+       c_path = path->len ? path->buf : ".";
+       cdir->fdir = opendir(c_path);
+       if (!cdir->fdir)
+               warning_errno(_("could not open directory '%s'"), c_path);
+       if (dir->untracked) {
+               invalidate_directory(dir->untracked, untracked);
                dir->untracked->dir_opened++;
+       }
        if (!cdir->fdir)
                return -1;
        return 0;
@@ -2164,8 +2172,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
                                                      const struct pathspec *pathspec)
 {
        struct untracked_cache_dir *root;
+       static int untracked_cache_disabled = -1;
 
-       if (!dir->untracked || getenv("GIT_DISABLE_UNTRACKED_CACHE"))
+       if (!dir->untracked)
+               return NULL;
+       if (untracked_cache_disabled < 0)
+               untracked_cache_disabled = git_env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0);
+       if (untracked_cache_disabled)
                return NULL;
 
        /*
@@ -2228,13 +2241,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
 
        /* Validate $GIT_DIR/info/exclude and core.excludesfile */
        root = dir->untracked->root;
-       if (hashcmp(dir->ss_info_exclude.sha1,
-                   dir->untracked->ss_info_exclude.sha1)) {
+       if (oidcmp(&dir->ss_info_exclude.oid,
+                  &dir->untracked->ss_info_exclude.oid)) {
                invalidate_gitignore(dir->untracked, root);
                dir->untracked->ss_info_exclude = dir->ss_info_exclude;
        }
-       if (hashcmp(dir->ss_excludes_file.sha1,
-                   dir->untracked->ss_excludes_file.sha1)) {
+       if (oidcmp(&dir->ss_excludes_file.oid,
+                  &dir->untracked->ss_excludes_file.oid)) {
                invalidate_gitignore(dir->untracked, root);
                dir->untracked->ss_excludes_file = dir->ss_excludes_file;
        }
@@ -2248,6 +2261,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                   const char *path, int len, const struct pathspec *pathspec)
 {
        struct untracked_cache_dir *untracked;
+       uint64_t start = getnanotime();
 
        if (has_symlink_leading_path(path, len))
                return dir->nr;
@@ -2286,8 +2300,14 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                dir->nr = i;
        }
 
+       trace_performance_since(start, "read directory %.*s", len, path);
        if (dir->untracked) {
+               static int force_untracked_cache = -1;
                static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
+
+               if (force_untracked_cache < 0)
+                       force_untracked_cache =
+                               git_env_bool("GIT_FORCE_UNTRACKED_CACHE", 0);
                trace_printf_key(&trace_untracked_stats,
                                 "node creation: %u\n"
                                 "gitignore invalidation: %u\n"
@@ -2297,7 +2317,8 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                                 dir->untracked->gitignore_invalidated,
                                 dir->untracked->dir_invalidated,
                                 dir->untracked->dir_opened);
-               if (dir->untracked == istate->untracked &&
+               if (force_untracked_cache &&
+                       dir->untracked == istate->untracked &&
                    (dir->untracked->dir_opened ||
                     dir->untracked->gitignore_invalidated ||
                     dir->untracked->dir_invalidated))
@@ -2638,8 +2659,8 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra
        FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
        stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
        stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
-       hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
-       hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
+       hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
+       hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
        ouc->dir_flags = htonl(untracked->dir_flags);
 
        varint_len = encode_varint(untracked->ident.len, varbuf);
@@ -2816,13 +2837,12 @@ static void read_sha1(size_t pos, void *cb)
        rd->data += 20;
 }
 
-static void load_sha1_stat(struct sha1_stat *sha1_stat,
-                          const unsigned char *data,
-                          const unsigned char *sha1)
+static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
+                         const unsigned char *sha1)
 {
-       stat_data_from_disk(&sha1_stat->stat, data);
-       hashcpy(sha1_stat->sha1, sha1);
-       sha1_stat->valid = 1;
+       stat_data_from_disk(&oid_stat->stat, data);
+       hashcpy(oid_stat->oid.hash, sha1);
+       oid_stat->valid = 1;
 }
 
 struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
@@ -2850,12 +2870,12 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
        uc = xcalloc(1, sizeof(*uc));
        strbuf_init(&uc->ident, ident_len);
        strbuf_add(&uc->ident, ident, ident_len);
-       load_sha1_stat(&uc->ss_info_exclude,
-                      next + ouc_offset(info_exclude_stat),
-                      next + ouc_offset(info_exclude_sha1));
-       load_sha1_stat(&uc->ss_excludes_file,
-                      next + ouc_offset(excludes_file_stat),
-                      next + ouc_offset(excludes_file_sha1));
+       load_oid_stat(&uc->ss_info_exclude,
+                     next + ouc_offset(info_exclude_stat),
+                     next + ouc_offset(info_exclude_sha1));
+       load_oid_stat(&uc->ss_excludes_file,
+                     next + ouc_offset(excludes_file_stat),
+                     next + ouc_offset(excludes_file_sha1));
        uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
        exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
        uc->exclude_per_dir = xstrdup(exclude_per_dir);
@@ -2968,10 +2988,12 @@ static int invalidate_one_component(struct untracked_cache *uc,
 }
 
 void untracked_cache_invalidate_path(struct index_state *istate,
-                                    const char *path)
+                                    const char *path, int safe_path)
 {
        if (!istate->untracked || !istate->untracked->root)
                return;
+       if (!safe_path && !verify_path(path))
+               return;
        invalidate_one_component(istate->untracked, istate->untracked->root,
                                 path, strlen(path));
 }
@@ -2979,13 +3001,13 @@ void untracked_cache_invalidate_path(struct index_state *istate,
 void untracked_cache_remove_from_index(struct index_state *istate,
                                       const char *path)
 {
-       untracked_cache_invalidate_path(istate, path);
+       untracked_cache_invalidate_path(istate, path, 1);
 }
 
 void untracked_cache_add_to_index(struct index_state *istate,
                                  const char *path)
 {
-       untracked_cache_invalidate_path(istate, path);
+       untracked_cache_invalidate_path(istate, path, 1);
 }
 
 /* Update gitfile and core.worktree setting to connect work tree and git dir */
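
The validate_untracked_cache() and read_directory() hunks above look up GIT_DISABLE_UNTRACKED_CACHE and GIT_FORCE_UNTRACKED_CACHE through git_env_bool() and cache the answer in a function-local static, so the environment is parsed once per process instead of on every call. A rough standalone approximation of that pattern (env_bool() and untracked_cache_disabled() below are simplified stand-ins, not git's helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* Loose stand-in for git_env_bool(): unset/empty -> default,
 * "0"/"false"/"no" -> 0, anything else -> 1.  The parsing rules here
 * are a simplification, not git_config_bool() semantics. */
static int env_bool(const char *name, int def)
{
	const char *v = getenv(name);

	if (!v || !*v)
		return def;
	if (!strcmp(v, "0") || !strcasecmp(v, "false") || !strcasecmp(v, "no"))
		return 0;
	return 1;
}

static int untracked_cache_disabled(void)
{
	static int cached = -1;	/* -1 = not looked up yet */

	if (cached < 0)
		cached = env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0);
	return cached;
}

int main(void)
{
	printf("untracked cache disabled: %d\n", untracked_cache_disabled());
	return 0;
}
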
diff --git a/dir.h b/dir.h
index 11a047ba486b81f624fb021418f06cbbd65da676..b0758b82a20017dd3ce29c54454678f026718078 100644 (file)
--- a/dir.h
+++ b/dir.h
@@ -74,9 +74,9 @@ struct exclude_list_group {
        struct exclude_list *el;
 };
 
-struct sha1_stat {
+struct oid_stat {
        struct stat_data stat;
-       unsigned char sha1[20];
+       struct object_id oid;
        int valid;
 };
 
@@ -124,8 +124,8 @@ struct untracked_cache_dir {
 };
 
 struct untracked_cache {
-       struct sha1_stat ss_info_exclude;
-       struct sha1_stat ss_excludes_file;
+       struct oid_stat ss_info_exclude;
+       struct oid_stat ss_excludes_file;
        const char *exclude_per_dir;
        struct strbuf ident;
        /*
@@ -195,8 +195,8 @@ struct dir_struct {
 
        /* Enable untracked file cache if set */
        struct untracked_cache *untracked;
-       struct sha1_stat ss_info_exclude;
-       struct sha1_stat ss_excludes_file;
+       struct oid_stat ss_info_exclude;
+       struct oid_stat ss_excludes_file;
        unsigned unmanaged_exclude_files;
 };
 
@@ -350,7 +350,7 @@ static inline int dir_path_match(const struct dir_entry *ent,
 int cmp_dir_entry(const void *p1, const void *p2);
 int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in);
 
-void untracked_cache_invalidate_path(struct index_state *, const char *);
+void untracked_cache_invalidate_path(struct index_state *, const char *, int safe_path);
 void untracked_cache_remove_from_index(struct index_state *, const char *);
 void untracked_cache_add_to_index(struct index_state *, const char *);
 
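
The dir.h change above replaces the bare 20-byte array in struct sha1_stat with struct object_id, so callers go through oidcpy()/oidcmp() instead of hashcpy()/hashcmp(). A small self-contained illustration of why the typed struct is preferable, using simplified stand-ins for git's types (RAWSZ and the *_sketch names below are not git's definitions):

#include <string.h>

#define RAWSZ 20	/* SHA-1 size; git's object_id is sized for the largest supported hash */

struct object_id {
	unsigned char hash[RAWSZ];
};

/* Typed helpers: the compiler rejects passing an arbitrary
 * unsigned char * where an object id is expected. */
static void oidcpy_sketch(struct object_id *dst, const struct object_id *src)
{
	memcpy(dst->hash, src->hash, RAWSZ);
}

static int oidcmp_sketch(const struct object_id *a, const struct object_id *b)
{
	return memcmp(a->hash, b->hash, RAWSZ);
}

struct oid_stat_sketch {
	/* stat data omitted for brevity */
	struct object_id oid;
	int valid;
};

int main(void)
{
	struct oid_stat_sketch a = { { { 0 } }, 0 }, b = a;

	oidcpy_sketch(&b.oid, &a.oid);
	return oidcmp_sketch(&a.oid, &b.oid) != 0;
}
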
diff --git a/entry.c b/entry.c
index 30211447ac839899d2c232af232c95312856816c..2101201a111785f449a65e785e9ed5b57b7aa196 100644 (file)
--- a/entry.c
+++ b/entry.c
@@ -85,12 +85,12 @@ static int create_file(const char *path, unsigned int mode)
 static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
 {
        enum object_type type;
-       void *new = read_sha1_file(ce->oid.hash, &type, size);
+       void *blob_data = read_object_file(&ce->oid, &type, size);
 
-       if (new) {
+       if (blob_data) {
                if (type == OBJ_BLOB)
-                       return new;
-               free(new);
+                       return blob_data;
+               free(blob_data);
        }
        return NULL;
 }
@@ -256,7 +256,7 @@ static int write_entry(struct cache_entry *ce,
        unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
        struct delayed_checkout *dco = state->delayed_checkout;
        int fd, ret, fstat_done = 0;
-       char *new;
+       char *new_blob;
        struct strbuf buf = STRBUF_INIT;
        unsigned long size;
        ssize_t wrote;
@@ -266,7 +266,7 @@ static int write_entry(struct cache_entry *ce,
 
        if (ce_mode_s_ifmt == S_IFREG) {
                struct stream_filter *filter = get_stream_filter(ce->name,
-                                                                ce->oid.hash);
+                                                                &ce->oid);
                if (filter &&
                    !streaming_write_entry(ce, path, filter,
                                           state, to_tempfile,
@@ -276,8 +276,8 @@ static int write_entry(struct cache_entry *ce,
 
        switch (ce_mode_s_ifmt) {
        case S_IFLNK:
-               new = read_blob_entry(ce, &size);
-               if (!new)
+               new_blob = read_blob_entry(ce, &size);
+               if (!new_blob)
                        return error("unable to read sha1 file of %s (%s)",
                                     path, oid_to_hex(&ce->oid));
 
@@ -288,8 +288,8 @@ static int write_entry(struct cache_entry *ce,
                if (!has_symlinks || to_tempfile)
                        goto write_file_entry;
 
-               ret = symlink(new, path);
-               free(new);
+               ret = symlink(new_blob, path);
+               free(new_blob);
                if (ret)
                        return error_errno("unable to create symlink %s", path);
                break;
@@ -300,11 +300,11 @@ static int write_entry(struct cache_entry *ce,
                 * bother reading it at all.
                 */
                if (dco && dco->state == CE_RETRY) {
-                       new = NULL;
+                       new_blob = NULL;
                        size = 0;
                } else {
-                       new = read_blob_entry(ce, &size);
-                       if (!new)
+                       new_blob = read_blob_entry(ce, &size);
+                       if (!new_blob)
                                return error("unable to read sha1 file of %s (%s)",
                                             path, oid_to_hex(&ce->oid));
                }
@@ -313,18 +313,18 @@ static int write_entry(struct cache_entry *ce,
                 * Convert from git internal format to working tree format
                 */
                if (dco && dco->state != CE_NO_DELAY) {
-                       ret = async_convert_to_working_tree(ce->name, new,
+                       ret = async_convert_to_working_tree(ce->name, new_blob,
                                                            size, &buf, dco);
                        if (ret && string_list_has_string(&dco->paths, ce->name)) {
-                               free(new);
+                               free(new_blob);
                                goto delayed;
                        }
                } else
-                       ret = convert_to_working_tree(ce->name, new, size, &buf);
+                       ret = convert_to_working_tree(ce->name, new_blob, size, &buf);
 
                if (ret) {
-                       free(new);
-                       new = strbuf_detach(&buf, &newsize);
+                       free(new_blob);
+                       new_blob = strbuf_detach(&buf, &newsize);
                        size = newsize;
                }
                /*
@@ -336,15 +336,15 @@ static int write_entry(struct cache_entry *ce,
        write_file_entry:
                fd = open_output_fd(path, ce, to_tempfile);
                if (fd < 0) {
-                       free(new);
+                       free(new_blob);
                        return error_errno("unable to create file %s", path);
                }
 
-               wrote = write_in_full(fd, new, size);
+               wrote = write_in_full(fd, new_blob, size);
                if (!to_tempfile)
                        fstat_done = fstat_output(fd, state, &st);
                close(fd);
-               free(new);
+               free(new_blob);
                if (wrote < 0)
                        return error("unable to write file %s", path);
                break;

diff --git a/environment.c b/environment.c
index 93c9fbb0ba29479f33ccab7808b11eb60cd373ec..39b3d906c89048cd094f352fa2ea81d873deb31a 100644 (file)
--- a/environment.c
+++ b/environment.c
@@ -102,7 +102,7 @@ int ignore_untracked_cache_config;
 /* This is set by setup_git_dir_gently() and/or git_default_config() */
 char *git_work_tree_cfg;
 
-static char *namespace;
+static char *git_namespace;
 
 static const char *super_prefix;
 
@@ -185,8 +185,8 @@ void setup_git_env(const char *git_dir)
        free(git_replace_ref_base);
        git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
                                                          : "refs/replace/");
-       free(namespace);
-       namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
+       free(git_namespace);
+       git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
        shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
        if (shallow_file)
                set_alternate_shallow_file(shallow_file, 0);
@@ -220,9 +220,9 @@ const char *get_git_common_dir(void)
 
 const char *get_git_namespace(void)
 {
-       if (!namespace)
+       if (!git_namespace)
                BUG("git environment hasn't been setup");
-       return namespace;
+       return git_namespace;
 }
 
 const char *strip_namespace(const char *namespaced_ref)
@@ -276,7 +276,7 @@ char *get_object_directory(void)
        return the_repository->objects->objectdir;
 }
 
-int odb_mkstemp(struct strbuf *template, const char *pattern)
+int odb_mkstemp(struct strbuf *temp_filename, const char *pattern)
 {
        int fd;
        /*
@@ -284,16 +284,16 @@ int odb_mkstemp(struct strbuf *template, const char *pattern)
         * restrictive except to remove write permission.
         */
        int mode = 0444;
-       git_path_buf(template, "objects/%s", pattern);
-       fd = git_mkstemp_mode(template->buf, mode);
+       git_path_buf(temp_filename, "objects/%s", pattern);
+       fd = git_mkstemp_mode(temp_filename->buf, mode);
        if (0 <= fd)
                return fd;
 
        /* slow path */
-       /* some mkstemp implementations erase template on failure */
-       git_path_buf(template, "objects/%s", pattern);
-       safe_create_leading_directories(template->buf);
-       return xmkstemp_mode(template->buf, mode);
+       /* some mkstemp implementations erase temp_filename on failure */
+       git_path_buf(temp_filename, "objects/%s", pattern);
+       safe_create_leading_directories(temp_filename->buf);
+       return xmkstemp_mode(temp_filename->buf, mode);
 }
 
 int odb_pack_keep(const char *name)
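
odb_mkstemp() above keeps its two-step shape: try the fast path first, and only on failure rebuild the template (some mkstemp implementations clobber it), create the missing leading directories, and retry. A standalone sketch of that fallback under simplified assumptions (mkstemp_with_leading_dirs() and create_leading_dirs() are hypothetical helpers, not git's git_mkstemp_mode()/safe_create_leading_directories()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Create every missing directory leading up to the final path component.
 * Very loose stand-in for git's safe_create_leading_directories(). */
static int create_leading_dirs(char *path)
{
	char *slash = path;

	while ((slash = strchr(slash + 1, '/')) != NULL) {
		*slash = '\0';
		if (mkdir(path, 0777) < 0 && errno != EEXIST) {
			*slash = '/';
			return -1;
		}
		*slash = '/';
	}
	return 0;
}

/* Hypothetical helper mirroring odb_mkstemp()'s fast path / slow path;
 * 'template_buf' must end in "XXXXXX" as mkstemp requires and is assumed
 * to be shorter than 4096 bytes. */
static int mkstemp_with_leading_dirs(char *template_buf)
{
	char saved[4096];
	int fd;

	/* some mkstemp implementations erase the template on failure,
	 * so keep a copy for the retry */
	snprintf(saved, sizeof(saved), "%s", template_buf);

	fd = mkstemp(template_buf);
	if (fd >= 0)
		return fd;

	strcpy(template_buf, saved);
	if (create_leading_dirs(template_buf) < 0)
		return -1;
	return mkstemp(template_buf);
}

int main(void)
{
	char path[] = "tmp-odb/pack/tmp_pack_XXXXXX";
	int fd = mkstemp_with_leading_dirs(path);

	if (fd < 0) {
		perror("mkstemp_with_leading_dirs");
		return 1;
	}
	printf("created %s\n", path);
	close(fd);
	unlink(path);
	return 0;
}
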
diff --git a/fast-import.c b/fast-import.c
index 37a44752a8178732af10da28a14b61d10b883395..fc4e885bcc2de6f56a5b559eaacdcf68b8220e8e 100644 (file)
--- a/fast-import.c
+++ b/fast-import.c
@@ -170,6 +170,7 @@ Format of STDIN stream:
 #include "run-command.h"
 #include "packfile.h"
 #include "object-store.h"
+#include "mem-pool.h"
 
 #define PACK_ID_BITS 16
 #define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
@@ -211,13 +212,6 @@ struct last_object {
        unsigned no_swap : 1;
 };
 
-struct mem_pool {
-       struct mem_pool *next_pool;
-       char *next_free;
-       char *end;
-       uintmax_t space[FLEX_ARRAY]; /* more */
-};
-
 struct atom_str {
        struct atom_str *next_atom;
        unsigned short str_len;
@@ -306,9 +300,7 @@ static int global_argc;
 static const char **global_argv;
 
 /* Memory pools */
-static size_t mem_pool_alloc = 2*1024*1024 - sizeof(struct mem_pool);
-static size_t total_allocd;
-static struct mem_pool *mem_pool;
+static struct mem_pool fi_mem_pool =  {0, 2*1024*1024 - sizeof(struct mp_block), 0 };
 
 /* Atom management */
 static unsigned int atom_table_sz = 4451;
@@ -318,7 +310,7 @@ static struct atom_str **atom_table;
 /* The .pack file being generated */
 static struct pack_idx_option pack_idx_opts;
 static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
 static struct packed_git *pack_data;
 static struct packed_git **all_packs;
 static off_t pack_size;
@@ -326,6 +318,7 @@ static off_t pack_size;
 /* Table of objects we've written. */
 static unsigned int object_entry_alloc = 5000;
 static struct object_entry_pool *blocks;
+static size_t total_allocd;
 static struct object_entry *object_table[1 << 16];
 static struct mark_set *marks;
 static const char *export_marks_file;
@@ -636,49 +629,10 @@ static unsigned int hc_str(const char *s, size_t len)
        return r;
 }
 
-static void *pool_alloc(size_t len)
-{
-       struct mem_pool *p;
-       void *r;
-
-       /* round up to a 'uintmax_t' alignment */
-       if (len & (sizeof(uintmax_t) - 1))
-               len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
-
-       for (p = mem_pool; p; p = p->next_pool)
-               if ((p->end - p->next_free >= len))
-                       break;
-
-       if (!p) {
-               if (len >= (mem_pool_alloc/2)) {
-                       total_allocd += len;
-                       return xmalloc(len);
-               }
-               total_allocd += sizeof(struct mem_pool) + mem_pool_alloc;
-               p = xmalloc(st_add(sizeof(struct mem_pool), mem_pool_alloc));
-               p->next_pool = mem_pool;
-               p->next_free = (char *) p->space;
-               p->end = p->next_free + mem_pool_alloc;
-               mem_pool = p;
-       }
-
-       r = p->next_free;
-       p->next_free += len;
-       return r;
-}
-
-static void *pool_calloc(size_t count, size_t size)
-{
-       size_t len = count * size;
-       void *r = pool_alloc(len);
-       memset(r, 0, len);
-       return r;
-}
-
 static char *pool_strdup(const char *s)
 {
        size_t len = strlen(s) + 1;
-       char *r = pool_alloc(len);
+       char *r = mem_pool_alloc(&fi_mem_pool, len);
        memcpy(r, s, len);
        return r;
 }
@@ -687,7 +641,7 @@ static void insert_mark(uintmax_t idnum, struct object_entry *oe)
 {
        struct mark_set *s = marks;
        while ((idnum >> s->shift) >= 1024) {
-               s = pool_calloc(1, sizeof(struct mark_set));
+               s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
                s->shift = marks->shift + 10;
                s->data.sets[0] = marks;
                marks = s;
@@ -696,7 +650,7 @@ static void insert_mark(uintmax_t idnum, struct object_entry *oe)
                uintmax_t i = idnum >> s->shift;
                idnum -= i << s->shift;
                if (!s->data.sets[i]) {
-                       s->data.sets[i] = pool_calloc(1, sizeof(struct mark_set));
+                       s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
                        s->data.sets[i]->shift = s->shift - 10;
                }
                s = s->data.sets[i];
@@ -734,7 +688,7 @@ static struct atom_str *to_atom(const char *s, unsigned short len)
                if (c->str_len == len && !strncmp(s, c->str_dat, len))
                        return c;
 
-       c = pool_alloc(sizeof(struct atom_str) + len + 1);
+       c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
        c->str_len = len;
        memcpy(c->str_dat, s, len);
        c->str_dat[len] = 0;
@@ -765,7 +719,7 @@ static struct branch *new_branch(const char *name)
        if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
                die("Branch name doesn't conform to GIT standards: %s", name);
 
-       b = pool_calloc(1, sizeof(struct branch));
+       b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
        b->name = pool_strdup(name);
        b->table_next_branch = branch_table[hc];
        b->branch_tree.versions[0].mode = S_IFDIR;
@@ -801,7 +755,7 @@ static struct tree_content *new_tree_content(unsigned int cnt)
                        avail_tree_table[hc] = f->next_avail;
        } else {
                cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
-               f = pool_alloc(sizeof(*t) + sizeof(t->entries[0]) * cnt);
+               f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
                f->entry_capacity = cnt;
        }
 
@@ -907,12 +861,12 @@ static void start_packfile(void)
 
        p->pack_fd = pack_fd;
        p->do_not_close = 1;
-       pack_file = sha1fd(pack_fd, p->pack_name);
+       pack_file = hashfd(pack_fd, p->pack_name);
 
        hdr.hdr_signature = htonl(PACK_SIGNATURE);
        hdr.hdr_version = htonl(2);
        hdr.hdr_entries = 0;
-       sha1write(pack_file, &hdr, sizeof(hdr));
+       hashwrite(pack_file, &hdr, sizeof(hdr));
 
        pack_data = p;
        pack_size = sizeof(hdr);
@@ -1018,7 +972,7 @@ static void end_packfile(void)
                struct tag *t;
 
                close_pack_windows(pack_data);
-               sha1close(pack_file, cur_pack_oid.hash, 0);
+               hashclose(pack_file, cur_pack_oid.hash, 0);
                fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
                                    pack_data->pack_name, object_count,
                                    cur_pack_oid.hash, pack_size);
@@ -1094,15 +1048,15 @@ static int store_object(
        unsigned char hdr[96];
        struct object_id oid;
        unsigned long hdrlen, deltalen;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        git_zstream s;
 
        hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
-                          typename(type), (unsigned long)dat->len) + 1;
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, hdrlen);
-       git_SHA1_Update(&c, dat->buf, dat->len);
-       git_SHA1_Final(oid.hash, &c);
+                          type_name(type), (unsigned long)dat->len) + 1;
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
+       the_hash_algo->update_fn(&c, dat->buf, dat->len);
+       the_hash_algo->final_fn(oid.hash, &c);
        if (oidout)
                oidcpy(oidout, &oid);
 
@@ -1121,11 +1075,13 @@ static int store_object(
                return 1;
        }
 
-       if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+       if (last && last->data.buf && last->depth < max_depth
+               && dat->len > the_hash_algo->rawsz) {
+
                delta_count_attempts_by_type[type]++;
                delta = diff_delta(last->data.buf, last->data.len,
                        dat->buf, dat->len,
-                       &deltalen, dat->len - 20);
+                       &deltalen, dat->len - the_hash_algo->rawsz);
        } else
                delta = NULL;
 
@@ -1183,23 +1139,23 @@ static int store_object(
 
                hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
                                                      OBJ_OFS_DELTA, deltalen);
-               sha1write(pack_file, hdr, hdrlen);
+               hashwrite(pack_file, hdr, hdrlen);
                pack_size += hdrlen;
 
                hdr[pos] = ofs & 127;
                while (ofs >>= 7)
                        hdr[--pos] = 128 | (--ofs & 127);
-               sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+               hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
                pack_size += sizeof(hdr) - pos;
        } else {
                e->depth = 0;
                hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
                                                      type, dat->len);
-               sha1write(pack_file, hdr, hdrlen);
+               hashwrite(pack_file, hdr, hdrlen);
                pack_size += hdrlen;
        }
 
-       sha1write(pack_file, out, s.total_out);
+       hashwrite(pack_file, out, s.total_out);
        pack_size += s.total_out;
 
        e->idx.crc32 = crc32_end(pack_file);
@@ -1218,9 +1174,9 @@ static int store_object(
        return 0;
 }
 
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
 {
-       if (sha1file_truncate(pack_file, checkpoint))
+       if (hashfile_truncate(pack_file, checkpoint))
                die_errno("cannot truncate pack to skip duplicate");
        pack_size = checkpoint->offset;
 }
@@ -1234,9 +1190,9 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
        struct object_id oid;
        unsigned long hdrlen;
        off_t offset;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        git_zstream s;
-       struct sha1file_checkpoint checkpoint;
+       struct hashfile_checkpoint checkpoint;
        int status = Z_OK;
 
        /* Determine if we should auto-checkpoint. */
@@ -1244,13 +1200,13 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                || (pack_size + 60 + len) < pack_size)
                cycle_packfile();
 
-       sha1file_checkpoint(pack_file, &checkpoint);
+       hashfile_checkpoint(pack_file, &checkpoint);
        offset = checkpoint.offset;
 
        hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, out_buf, hdrlen);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, out_buf, hdrlen);
 
        crc32_begin(pack_file);
 
@@ -1268,7 +1224,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                        if (!n && feof(stdin))
                                die("EOF in data (%" PRIuMAX " bytes remaining)", len);
 
-                       git_SHA1_Update(&c, in_buf, n);
+                       the_hash_algo->update_fn(&c, in_buf, n);
                        s.next_in = in_buf;
                        s.avail_in = n;
                        len -= n;
@@ -1278,7 +1234,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
 
                if (!s.avail_out || status == Z_STREAM_END) {
                        size_t n = s.next_out - out_buf;
-                       sha1write(pack_file, out_buf, n);
+                       hashwrite(pack_file, out_buf, n);
                        pack_size += n;
                        s.next_out = out_buf;
                        s.avail_out = out_sz;
@@ -1294,7 +1250,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                }
        }
        git_deflate_end(&s);
-       git_SHA1_Final(oid.hash, &c);
+       the_hash_algo->final_fn(oid.hash, &c);
 
        if (oidout)
                oidcpy(oidout, &oid);
@@ -1354,25 +1310,25 @@ static void *gfi_unpack_entry(
 {
        enum object_type type;
        struct packed_git *p = all_packs[oe->pack_id];
-       if (p == pack_data && p->pack_size < (pack_size + 20)) {
+       if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
                /* The object is stored in the packfile we are writing to
                 * and we have modified it since the last time we scanned
                 * back to read a previously written object.  If an old
-                * window covered [p->pack_size, p->pack_size + 20) its
+                * window covered [p->pack_size, p->pack_size + rawsz) its
                 * data is stale and is not valid.  Closing all windows
                 * and updating the packfile length ensures we can read
                 * the newly written data.
                 */
                close_pack_windows(p);
-               sha1flush(pack_file);
+               hashflush(pack_file);
 
-               /* We have to offer 20 bytes additional on the end of
+               /* We have to offer rawsz bytes additional on the end of
                 * the packfile as the core unpacker code assumes the
                 * footer is present at the file end and must promise
-                * at least 20 bytes within any window it maps.  But
+                * at least rawsz bytes within any window it maps.  But
                 * we don't actually create the footer here.
                 */
-               p->pack_size = pack_size + 20;
+               p->pack_size = pack_size + the_hash_algo->rawsz;
        }
        return unpack_entry(p, oe->idx.offset, &type, sizep);
 }
@@ -1414,7 +1370,7 @@ static void load_tree(struct tree_entry *root)
                        die("Can't load tree %s", oid_to_hex(oid));
        } else {
                enum object_type type;
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf || type != OBJ_TREE)
                        die("Can't load tree %s", oid_to_hex(oid));
        }
@@ -1915,7 +1871,7 @@ static void read_marks(void)
                        die("corrupt mark line: %s", line);
                e = find_object(&oid);
                if (!e) {
-                       enum object_type type = sha1_object_info(oid.hash, NULL);
+                       enum object_type type = oid_object_info(&oid, NULL);
                        if (type < 0)
                                die("object not found: %s", oid_to_hex(&oid));
                        e = insert_object(&oid);
@@ -2208,7 +2164,7 @@ static void construct_path_with_fanout(const char *hex_sha1,
                unsigned char fanout, char *path)
 {
        unsigned int i = 0, j = 0;
-       if (fanout >= 20)
+       if (fanout >= the_hash_algo->rawsz)
                die("Too large fanout (%u)", fanout);
        while (fanout) {
                path[i++] = hex_sha1[j++];
@@ -2216,8 +2172,8 @@ static void construct_path_with_fanout(const char *hex_sha1,
                path[i++] = '/';
                fanout--;
        }
-       memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
-       path[i + GIT_SHA1_HEXSZ - j] = '\0';
+       memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+       path[i + the_hash_algo->hexsz - j] = '\0';
 }
 
 static uintmax_t do_change_note_fanout(
@@ -2425,7 +2381,7 @@ static void file_change_m(const char *p, struct branch *b)
                else if (oe) {
                        if (oe->type != OBJ_COMMIT)
                                die("Not a commit (actually a %s): %s",
-                                       typename(oe->type), command_buf.buf);
+                                       type_name(oe->type), command_buf.buf);
                }
                /*
                 * Accept the sha1 without checking; it expected to be in
@@ -2445,14 +2401,14 @@ static void file_change_m(const char *p, struct branch *b)
                enum object_type expected = S_ISDIR(mode) ?
                                                OBJ_TREE: OBJ_BLOB;
                enum object_type type = oe ? oe->type :
-                                       sha1_object_info(oid.hash, NULL);
+                                       oid_object_info(&oid, NULL);
                if (type < 0)
                        die("%s not found: %s",
                                        S_ISDIR(mode) ?  "Tree" : "Blob",
                                        command_buf.buf);
                if (type != expected)
                        die("Not a %s (actually a %s): %s",
-                               typename(expected), typename(type),
+                               type_name(expected), type_name(type),
                                command_buf.buf);
        }
 
@@ -2585,8 +2541,9 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
                oidcpy(&commit_oid, &commit_oe->idx.oid);
        } else if (!get_oid(p, &commit_oid)) {
                unsigned long size;
-               char *buf = read_object_with_reference(commit_oid.hash,
-                       commit_type, &size, commit_oid.hash);
+               char *buf = read_object_with_reference(&commit_oid,
+                                                      commit_type, &size,
+                                                      &commit_oid);
                if (!buf || size < 46)
                        die("Not a valid commit: %s", p);
                free(buf);
@@ -2603,14 +2560,14 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
        } else if (oe) {
                if (oe->type != OBJ_BLOB)
                        die("Not a blob (actually a %s): %s",
-                               typename(oe->type), command_buf.buf);
+                               type_name(oe->type), command_buf.buf);
        } else if (!is_null_oid(&oid)) {
-               enum object_type type = sha1_object_info(oid.hash, NULL);
+               enum object_type type = oid_object_info(&oid, NULL);
                if (type < 0)
                        die("Blob not found: %s", command_buf.buf);
                if (type != OBJ_BLOB)
                        die("Not a blob (actually a %s): %s",
-                           typename(type), command_buf.buf);
+                           type_name(type), command_buf.buf);
        }
 
        construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
@@ -2655,9 +2612,8 @@ static void parse_from_existing(struct branch *b)
                unsigned long size;
                char *buf;
 
-               buf = read_object_with_reference(b->oid.hash,
-                                                commit_type, &size,
-                                                b->oid.hash);
+               buf = read_object_with_reference(&b->oid, commit_type, &size,
+                                                &b->oid);
                parse_from_commit(b, buf, size);
                free(buf);
        }
@@ -2734,8 +2690,9 @@ static struct hash_list *parse_merge(unsigned int *count)
                        oidcpy(&n->oid, &oe->idx.oid);
                } else if (!get_oid(from, &n->oid)) {
                        unsigned long size;
-                       char *buf = read_object_with_reference(n->oid.hash,
-                               commit_type, &size, n->oid.hash);
+                       char *buf = read_object_with_reference(&n->oid,
+                                                              commit_type,
+                                                              &size, &n->oid);
                        if (!buf || size < 46)
                                die("Not a valid commit: %s", from);
                        free(buf);
@@ -2864,7 +2821,7 @@ static void parse_new_tag(const char *arg)
        enum object_type type;
        const char *v;
 
-       t = pool_alloc(sizeof(struct tag));
+       t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
        memset(t, 0, sizeof(struct tag));
        t->name = pool_strdup(arg);
        if (last_tag)
@@ -2892,7 +2849,7 @@ static void parse_new_tag(const char *arg)
        } else if (!get_oid(from, &oid)) {
                struct object_entry *oe = find_object(&oid);
                if (!oe) {
-                       type = sha1_object_info(oid.hash, NULL);
+                       type = oid_object_info(&oid, NULL);
                        if (type < 0)
                                die("Not a valid object: %s", from);
                } else
@@ -2918,7 +2875,7 @@ static void parse_new_tag(const char *arg)
                    "object %s\n"
                    "type %s\n"
                    "tag %s\n",
-                   oid_to_hex(&oid), typename(type), t->name);
+                   oid_to_hex(&oid), type_name(type), t->name);
        if (tagger)
                strbuf_addf(&new_data,
                            "tagger %s\n", tagger);
@@ -2968,7 +2925,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
        char *buf;
 
        if (!oe || oe->pack_id == MAX_PACK_ID) {
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
        } else {
                type = oe->type;
                buf = gfi_unpack_entry(oe, &size);
@@ -2989,10 +2946,10 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
                die("Can't read object %s", oid_to_hex(oid));
        if (type != OBJ_BLOB)
                die("Object %s is a %s but a blob was expected.",
-                   oid_to_hex(oid), typename(type));
+                   oid_to_hex(oid), type_name(type));
        strbuf_reset(&line);
        strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid),
-                                               typename(type), size);
+                                               type_name(type), size);
        cat_blob_write(line.buf, line.len);
        strbuf_release(&line);
        cat_blob_write(buf, size);
@@ -3007,7 +2964,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
 
 static void parse_get_mark(const char *p)
 {
-       struct object_entry *oe = oe;
+       struct object_entry *oe;
        char output[GIT_MAX_HEXSZ + 2];
 
        /* get-mark SP <object> LF */
@@ -3024,7 +2981,7 @@ static void parse_get_mark(const char *p)
 
 static void parse_cat_blob(const char *p)
 {
-       struct object_entry *oe = oe;
+       struct object_entry *oe;
        struct object_id oid;
 
        /* cat-blob SP <object> LF */
@@ -3050,7 +3007,7 @@ static struct object_entry *dereference(struct object_entry *oe,
        unsigned long size;
        char *buf = NULL;
        if (!oe) {
-               enum object_type type = sha1_object_info(oid->hash, NULL);
+               enum object_type type = oid_object_info(oid, NULL);
                if (type < 0)
                        die("object not found: %s", oid_to_hex(oid));
                /* cache it! */
@@ -3073,7 +3030,7 @@ static struct object_entry *dereference(struct object_entry *oe,
                buf = gfi_unpack_entry(oe, &size);
        } else {
                enum object_type unused;
-               buf = read_sha1_file(oid->hash, &unused, &size);
+               buf = read_object_file(oid, &unused, &size);
        }
        if (!buf)
                die("Can't load object %s", oid_to_hex(oid));
@@ -3463,12 +3420,12 @@ int cmd_main(int argc, const char **argv)
        atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
        branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
        avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
-       marks = pool_calloc(1, sizeof(struct mark_set));
+       marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
 
        global_argc = argc;
        global_argv = argv;
 
-       rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
+       rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
        for (i = 0; i < (cmd_save - 1); i++)
                rc_free[i].next = &rc_free[i + 1];
        rc_free[cmd_save - 1].next = NULL;
@@ -3542,8 +3499,8 @@ int cmd_main(int argc, const char **argv)
                fprintf(stderr, "Total branches:  %10lu (%10lu loads     )\n", branch_count, branch_load_count);
                fprintf(stderr, "      marks:     %10" PRIuMAX " (%10" PRIuMAX " unique    )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
                fprintf(stderr, "      atoms:     %10u\n", atom_cnt);
-               fprintf(stderr, "Memory total:    %10" PRIuMAX " KiB\n", (total_allocd + alloc_count*sizeof(struct object_entry))/1024);
-               fprintf(stderr, "       pools:    %10lu KiB\n", (unsigned long)(total_allocd/1024));
+               fprintf(stderr, "Memory total:    %10" PRIuMAX " KiB\n", (total_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+               fprintf(stderr, "       pools:    %10lu KiB\n", (unsigned long)((total_allocd + fi_mem_pool.pool_alloc) /1024));
                fprintf(stderr, "     objects:    %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
                fprintf(stderr, "---------------------------------------------------------------------\n");
                pack_report();
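
The pool_alloc()/pool_calloc() helpers removed above now sit behind the shared mem-pool API, but the allocation scheme itself is unchanged: round the request up to uintmax_t alignment, carve it out of a large block, and fall back to a plain malloc for oversized requests. A self-contained sketch of that scheme (pool_block and the *_sketch names are illustrative, not the mem-pool.h interface):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE (2 * 1024 * 1024)

struct pool_block {
	struct pool_block *next;
	char *next_free;
	char *end;
	uintmax_t space[];	/* payload, uintmax_t-aligned */
};

static struct pool_block *pool_head;

/* Carve 'len' bytes out of an existing block, start a new block when no
 * block has room, and bypass the pool entirely for very large requests.
 * Mirrors the pool_alloc() removed from fast-import.c above. */
static void *pool_alloc_sketch(size_t len)
{
	struct pool_block *p;
	void *r;

	/* round up to a uintmax_t alignment */
	if (len & (sizeof(uintmax_t) - 1))
		len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));

	for (p = pool_head; p; p = p->next)
		if ((size_t)(p->end - p->next_free) >= len)
			break;

	if (!p) {
		if (len >= BLOCK_SIZE / 2)
			return malloc(len);	/* oversized: not pooled */
		p = malloc(sizeof(*p) + BLOCK_SIZE);
		if (!p)
			return NULL;
		p->next = pool_head;
		p->next_free = (char *)p->space;
		p->end = p->next_free + BLOCK_SIZE;
		pool_head = p;
	}

	r = p->next_free;
	p->next_free += len;
	return r;
}

static void *pool_calloc_sketch(size_t count, size_t size)
{
	size_t len = count * size;
	void *r = pool_alloc_sketch(len);

	if (r)
		memset(r, 0, len);
	return r;
}

int main(void)
{
	char *s = pool_alloc_sketch(16);
	int *v = pool_calloc_sketch(4, sizeof(int));

	if (!s || !v)
		return 1;
	memcpy(s, "pooled", 7);
	return v[0];	/* 0: calloc'd pool memory is zeroed */
}
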
diff --git a/fetch-pack.c b/fetch-pack.c
index eac5928a27bd63f0b27176f3a50af283661c3dfa..216d1368beffbba7364b9d8c014ddf144595c8c1 100644 (file)
--- a/fetch-pack.c
+++ b/fetch-pack.c
@@ -262,8 +262,8 @@ static enum ack_type get_ack(int fd, struct object_id *result_oid)
        char *line = packet_read_line(fd, &len);
        const char *arg;
 
-       if (!len)
-               die(_("git fetch-pack: expected ACK/NAK, got EOF"));
+       if (!line)
+               die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
        if (!strcmp(line, "NAK"))
                return NAK;
        if (skip_prefix(line, "ACK ", &arg)) {
@@ -305,9 +305,9 @@ static void insert_one_alternate_object(struct object *obj)
 #define PIPESAFE_FLUSH 32
 #define LARGE_FLUSH 16384
 
-static int next_flush(struct fetch_pack_args *args, int count)
+static int next_flush(int stateless_rpc, int count)
 {
-       if (args->stateless_rpc) {
+       if (stateless_rpc) {
                if (count < LARGE_FLUSH)
                        count <<= 1;
                else
@@ -470,7 +470,7 @@ static int find_common(struct fetch_pack_args *args,
                        send_request(args, fd[1], &req_buf);
                        strbuf_setlen(&req_buf, state_len);
                        flushes++;
-                       flush_at = next_flush(args, count);
+                       flush_at = next_flush(args->stateless_rpc, count);
 
                        /*
                         * We keep one window "ahead" of the other side, and
@@ -712,6 +712,28 @@ static void mark_alternate_complete(struct object *obj)
        mark_complete(&obj->oid);
 }
 
+struct loose_object_iter {
+       struct oidset *loose_object_set;
+       struct ref *refs;
+};
+
+/*
+ *  If the number of refs is not larger than the number of loose objects,
+ *  this function stops inserting.
+ */
+static int add_loose_objects_to_set(const struct object_id *oid,
+                                   const char *path,
+                                   void *data)
+{
+       struct loose_object_iter *iter = data;
+       oidset_insert(iter->loose_object_set, oid);
+       if (iter->refs == NULL)
+               return 1;
+
+       iter->refs = iter->refs->next;
+       return 0;
+}
+
 static int everything_local(struct fetch_pack_args *args,
                            struct ref **refs,
                            struct ref **sought, int nr_sought)
@@ -720,16 +742,31 @@ static int everything_local(struct fetch_pack_args *args,
        int retval;
        int old_save_commit_buffer = save_commit_buffer;
        timestamp_t cutoff = 0;
+       struct oidset loose_oid_set = OIDSET_INIT;
+       int use_oidset = 0;
+       struct loose_object_iter iter = {&loose_oid_set, *refs};
+
+       /* Enumerate all loose objects or know refs are not so many. */
+       use_oidset = !for_each_loose_object(add_loose_objects_to_set,
+                                           &iter, 0);
 
        save_commit_buffer = 0;
 
        for (ref = *refs; ref; ref = ref->next) {
                struct object *o;
+               unsigned int flags = OBJECT_INFO_QUICK;
 
-               if (!has_object_file_with_flags(&ref->old_oid,
-                                               OBJECT_INFO_QUICK))
-                       continue;
+               if (use_oidset &&
+                   !oidset_contains(&loose_oid_set, &ref->old_oid)) {
+                       /*
+                        * I know this does not exist in the loose form,
+                        * so check if it exists in a non-loose form.
+                        */
+                       flags |= OBJECT_INFO_IGNORE_LOOSE;
+               }
 
+               if (!has_object_file_with_flags(&ref->old_oid, flags))
+                       continue;
                o = parse_object(&ref->old_oid);
                if (!o)
                        continue;
@@ -745,6 +782,8 @@ static int everything_local(struct fetch_pack_args *args,
                }
        }
 
+       oidset_clear(&loose_oid_set);
+
        if (!args->no_dependents) {
                if (!args->deepen) {
                        for_each_ref(mark_complete_oid, NULL);
@@ -887,8 +926,17 @@ static int get_pack(struct fetch_pack_args *args,
            ? fetch_fsck_objects
            : transfer_fsck_objects >= 0
            ? transfer_fsck_objects
-           : 0)
-               argv_array_push(&cmd.args, "--strict");
+           : 0) {
+               if (args->from_promisor)
+                       /*
+                        * We cannot use --strict in index-pack because it
+                        * checks both broken objects and links, but we only
+                        * want to check for broken objects.
+                        */
+                       argv_array_push(&cmd.args, "--fsck-objects");
+               else
+                       argv_array_push(&cmd.args, "--strict");
+       }
 
        cmd.in = demux.out;
        cmd.git_cmd = 1;
@@ -1032,6 +1080,328 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
        return ref;
 }
 
+static void add_shallow_requests(struct strbuf *req_buf,
+                                const struct fetch_pack_args *args)
+{
+       if (is_repository_shallow())
+               write_shallow_commits(req_buf, 1, NULL);
+       if (args->depth > 0)
+               packet_buf_write(req_buf, "deepen %d", args->depth);
+       if (args->deepen_since) {
+               timestamp_t max_age = approxidate(args->deepen_since);
+               packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
+       }
+       if (args->deepen_not) {
+               int i;
+               for (i = 0; i < args->deepen_not->nr; i++) {
+                       struct string_list_item *s = args->deepen_not->items + i;
+                       packet_buf_write(req_buf, "deepen-not %s", s->string);
+               }
+       }
+}
+
+static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+{
+       for ( ; wants ; wants = wants->next) {
+               const struct object_id *remote = &wants->old_oid;
+               const char *remote_hex;
+               struct object *o;
+
+               /*
+                * If that object is complete (i.e. it is an ancestor of a
+                * local ref), we tell them we have it but do not have to
+                * tell them about its ancestors, which they already know
+                * about.
+                *
+                * We use lookup_object here because we are only
+                * interested in the case we *know* the object is
+                * reachable and we have already scanned it.
+                */
+               if (((o = lookup_object(remote->hash)) != NULL) &&
+                   (o->flags & COMPLETE)) {
+                       continue;
+               }
+
+               remote_hex = oid_to_hex(remote);
+               packet_buf_write(req_buf, "want %s\n", remote_hex);
+       }
+}
+
+static void add_common(struct strbuf *req_buf, struct oidset *common)
+{
+       struct oidset_iter iter;
+       const struct object_id *oid;
+       oidset_iter_init(common, &iter);
+
+       while ((oid = oidset_iter_next(&iter))) {
+               packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+       }
+}
+
+static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+{
+       int ret = 0;
+       int haves_added = 0;
+       const struct object_id *oid;
+
+       while ((oid = get_rev())) {
+               packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+               if (++haves_added >= *haves_to_send)
+                       break;
+       }
+
+       *in_vain += haves_added;
+       if (!haves_added || *in_vain >= MAX_IN_VAIN) {
+               /* Send Done */
+               packet_buf_write(req_buf, "done\n");
+               ret = 1;
+       }
+
+       /* Increase haves to send on next round */
+       *haves_to_send = next_flush(1, *haves_to_send);
+
+       return ret;
+}
+
+static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+                             const struct ref *wants, struct oidset *common,
+                             int *haves_to_send, int *in_vain)
+{
+       int ret = 0;
+       struct strbuf req_buf = STRBUF_INIT;
+
+       if (server_supports_v2("fetch", 1))
+               packet_buf_write(&req_buf, "command=fetch");
+       if (server_supports_v2("agent", 0))
+               packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
+
+       packet_buf_delim(&req_buf);
+       if (args->use_thin_pack)
+               packet_buf_write(&req_buf, "thin-pack");
+       if (args->no_progress)
+               packet_buf_write(&req_buf, "no-progress");
+       if (args->include_tag)
+               packet_buf_write(&req_buf, "include-tag");
+       if (prefer_ofs_delta)
+               packet_buf_write(&req_buf, "ofs-delta");
+
+       /* Add shallow-info and deepen request */
+       if (server_supports_feature("fetch", "shallow", 0))
+               add_shallow_requests(&req_buf, args);
+       else if (is_repository_shallow() || args->deepen)
+               die(_("Server does not support shallow requests"));
+
+       /* add wants */
+       add_wants(wants, &req_buf);
+
+       /* Add all of the common commits we've found in previous rounds */
+       add_common(&req_buf, common);
+
+       /* Add initial haves */
+       ret = add_haves(&req_buf, haves_to_send, in_vain);
+
+       /* Send request */
+       packet_buf_flush(&req_buf);
+       write_or_die(fd_out, req_buf.buf, req_buf.len);
+
+       strbuf_release(&req_buf);
+       return ret;
+}
+
+/*
+ * Processes a section header in a server's response and checks if it matches
+ * `section`.  If the value of `peek` is 1, the header line will be peeked (and
+ * not consumed); if 0, the line will be consumed and the function will die if
+ * the section header doesn't match what was expected.
+ */
+static int process_section_header(struct packet_reader *reader,
+                                 const char *section, int peek)
+{
+       int ret;
+
+       if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
+               die("error reading section header '%s'", section);
+
+       ret = !strcmp(reader->line, section);
+
+       if (!peek) {
+               if (!ret)
+                       die("expected '%s', received '%s'",
+                           section, reader->line);
+               packet_reader_read(reader);
+       }
+
+       return ret;
+}
+
+static int process_acks(struct packet_reader *reader, struct oidset *common)
+{
+       /* whether the server sent "ready" and/or at least one ACK */
+       int received_ready = 0;
+       int received_ack = 0;
+
+       process_section_header(reader, "acknowledgments", 0);
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+               const char *arg;
+
+               if (!strcmp(reader->line, "NAK"))
+                       continue;
+
+               if (skip_prefix(reader->line, "ACK ", &arg)) {
+                       struct object_id oid;
+                       if (!get_oid_hex(arg, &oid)) {
+                               struct commit *commit;
+                               oidset_insert(common, &oid);
+                               commit = lookup_commit(&oid);
+                               mark_common(commit, 0, 1);
+                       }
+                       continue;
+               }
+
+               if (!strcmp(reader->line, "ready")) {
+                       clear_prio_queue(&rev_list);
+                       received_ready = 1;
+                       continue;
+               }
+
+               die("unexpected acknowledgment line: '%s'", reader->line);
+       }
+
+       if (reader->status != PACKET_READ_FLUSH &&
+           reader->status != PACKET_READ_DELIM)
+               die("error processing acks: %d", reader->status);
+
+       /* return 0 if no common commits, 1 if there are common commits, or 2 if ready */
+       return received_ready ? 2 : (received_ack ? 1 : 0);
+}
+
+static void receive_shallow_info(struct fetch_pack_args *args,
+                                struct packet_reader *reader)
+{
+       process_section_header(reader, "shallow-info", 0);
+       while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+               const char *arg;
+               struct object_id oid;
+
+               if (skip_prefix(reader->line, "shallow ", &arg)) {
+                       if (get_oid_hex(arg, &oid))
+                               die(_("invalid shallow line: %s"), reader->line);
+                       register_shallow(&oid);
+                       continue;
+               }
+               if (skip_prefix(reader->line, "unshallow ", &arg)) {
+                       if (get_oid_hex(arg, &oid))
+                               die(_("invalid unshallow line: %s"), reader->line);
+                       if (!lookup_object(oid.hash))
+                               die(_("object not found: %s"), reader->line);
+                       /* make sure that it is parsed as shallow */
+                       if (!parse_object(&oid))
+                               die(_("error in object: %s"), reader->line);
+                       if (unregister_shallow(&oid))
+                               die(_("no shallow found: %s"), reader->line);
+                       continue;
+               }
+               die(_("expected shallow/unshallow, got %s"), reader->line);
+       }
+
+       if (reader->status != PACKET_READ_FLUSH &&
+           reader->status != PACKET_READ_DELIM)
+               die("error processing shallow info: %d", reader->status);
+
+       setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
+       args->deepen = 1;
+}
+
+enum fetch_state {
+       FETCH_CHECK_LOCAL = 0,
+       FETCH_SEND_REQUEST,
+       FETCH_PROCESS_ACKS,
+       FETCH_GET_PACK,
+       FETCH_DONE,
+};
+
+static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
+                                   int fd[2],
+                                   const struct ref *orig_ref,
+                                   struct ref **sought, int nr_sought,
+                                   char **pack_lockfile)
+{
+       struct ref *ref = copy_ref_list(orig_ref);
+       enum fetch_state state = FETCH_CHECK_LOCAL;
+       struct oidset common = OIDSET_INIT;
+       struct packet_reader reader;
+       int in_vain = 0;
+       int haves_to_send = INITIAL_FLUSH;
+       packet_reader_init(&reader, fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE);
+
+       while (state != FETCH_DONE) {
+               switch (state) {
+               case FETCH_CHECK_LOCAL:
+                       sort_ref_list(&ref, ref_compare_name);
+                       QSORT(sought, nr_sought, cmp_ref_by_name);
+
+                       /* v2 supports these by default */
+                       allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+                       use_sideband = 2;
+                       if (args->depth > 0 || args->deepen_since || args->deepen_not)
+                               args->deepen = 1;
+
+                       if (marked)
+                               for_each_ref(clear_marks, NULL);
+                       marked = 1;
+
+                       for_each_ref(rev_list_insert_ref_oid, NULL);
+                       for_each_cached_alternate(insert_one_alternate_object);
+
+                       /* Filter 'ref' by 'sought' and those that aren't local */
+                       if (everything_local(args, &ref, sought, nr_sought))
+                               state = FETCH_DONE;
+                       else
+                               state = FETCH_SEND_REQUEST;
+                       break;
+               case FETCH_SEND_REQUEST:
+                       if (send_fetch_request(fd[1], args, ref, &common,
+                                              &haves_to_send, &in_vain))
+                               state = FETCH_GET_PACK;
+                       else
+                               state = FETCH_PROCESS_ACKS;
+                       break;
+               case FETCH_PROCESS_ACKS:
+                       /* Process ACKs/NAKs */
+                       switch (process_acks(&reader, &common)) {
+                       case 2:
+                               state = FETCH_GET_PACK;
+                               break;
+                       case 1:
+                               in_vain = 0;
+                               /* fallthrough */
+                       default:
+                               state = FETCH_SEND_REQUEST;
+                               break;
+                       }
+                       break;
+               case FETCH_GET_PACK:
+                       /* Check for shallow-info section */
+                       if (process_section_header(&reader, "shallow-info", 1))
+                               receive_shallow_info(args, &reader);
+
+                       /* get the pack */
+                       process_section_header(&reader, "packfile", 0);
+                       if (get_pack(args, fd, pack_lockfile))
+                               die(_("git fetch-pack: fetch failed."));
+
+                       state = FETCH_DONE;
+                       break;
+               case FETCH_DONE:
+                       continue;
+               }
+       }
+
+       oidset_clear(&common);
+       return ref;
+}
+
 static void fetch_pack_config(void)
 {
        git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
@@ -1177,7 +1547,8 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
                       const char *dest,
                       struct ref **sought, int nr_sought,
                       struct oid_array *shallow,
-                      char **pack_lockfile)
+                      char **pack_lockfile,
+                      enum protocol_version version)
 {
        struct ref *ref_cpy;
        struct shallow_info si;
@@ -1191,8 +1562,12 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
                die(_("no matching remote head"));
        }
        prepare_shallow_info(&si, shallow);
-       ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
-                               &si, pack_lockfile);
+       if (version == protocol_v2)
+               ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+                                          pack_lockfile);
+       else
+               ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
+                                       &si, pack_lockfile);
        reprepare_packed_git(the_repository);
        update_shallow(args, sought, nr_sought, &si);
        clear_shallow_info(&si);
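
A note on the negotiation loop added above: in the v2 code path, add_haves() sends at most haves_to_send "have" lines per round, accumulates unproductive haves in in_vain, and once nothing is left to send or in_vain reaches MAX_IN_VAIN it writes "done" and tells the caller to move on to FETCH_GET_PACK; the batch size then grows each round via next_flush(). INITIAL_FLUSH, MAX_IN_VAIN and next_flush() are defined elsewhere in fetch-pack.c and are not shown in this excerpt, so the standalone C sketch below only models the batching schedule with assumed constants and an assumed doubling policy, not git's actual tuning.

#include <stdio.h>

/*
 * Hypothetical stand-ins: the real INITIAL_FLUSH, MAX_IN_VAIN and
 * next_flush() live elsewhere in fetch-pack.c and are not part of
 * this excerpt, so these numbers are illustrative only.
 */
#define INITIAL_FLUSH 16
#define MAX_IN_VAIN 256

static int next_flush(int count)
{
	/* assumed growth policy: double the batch, capped at 1024 */
	return count < 1024 ? count * 2 : count;
}

int main(void)
{
	int haves_to_send = INITIAL_FLUSH, in_vain = 0, round = 1;

	/* model a negotiation in which no "have" is ever acknowledged */
	while (in_vain < MAX_IN_VAIN) {
		printf("round %d: send up to %d haves (in_vain=%d)\n",
		       round++, haves_to_send, in_vain);
		in_vain += haves_to_send;
		haves_to_send = next_flush(haves_to_send);
	}
	printf("in_vain=%d >= %d, send \"done\"\n", in_vain, MAX_IN_VAIN);
	return 0;
}

Compiled on its own, the sketch prints one line per round, showing how the "have" batch ramps up until the "done" cutoff is reached.
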
index 3e224a18226ec6219b09387704baf178821c4c23..6afa08b48bb9f7cc2db1b2fb4644c576fd8d8f3e 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "string-list.h"
 #include "run-command.h"
+#include "protocol.h"
 #include "list-objects-filter-options.h"
 
 struct oid_array;
@@ -53,7 +54,8 @@ struct ref *fetch_pack(struct fetch_pack_args *args,
                       struct ref **sought,
                       int nr_sought,
                       struct oid_array *shallow,
-                      char **pack_lockfile);
+                      char **pack_lockfile,
+                      enum protocol_version version);
 
 /*
  * Print an appropriate error message for each sought ref that wasn't
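
For context on the header change just shown: fetch_pack() now takes an enum protocol_version (hence the new #include "protocol.h"), and fetch-pack.c dispatches to do_fetch_pack_v2() when the caller reports protocol_v2. The caller that discovers the version from the ref advertisement (the transport layer) is not part of this excerpt, so the self-contained C sketch below only illustrates the dispatch shape with simplified stand-in types and functions, not git's actual structures.

#include <stdio.h>

/* Simplified stand-ins; only the dispatch shape is shown here. */
enum protocol_version { protocol_v0, protocol_v1, protocol_v2 };

static void do_fetch_v0_or_v1(void)
{
	printf("negotiate with the v0/v1 code path\n");
}

static void do_fetch_v2(void)
{
	printf("negotiate with the v2 state machine\n");
}

/*
 * Mirrors the new dispatch in fetch_pack(): the caller passes the
 * protocol version it discovered while reading the ref advertisement.
 */
static void fetch(enum protocol_version version)
{
	if (version == protocol_v2)
		do_fetch_v2();
	else
		do_fetch_v0_or_v1();
}

int main(void)
{
	fetch(protocol_v2);
	fetch(protocol_v0);
	return 0;
}

The apparent design choice is to keep version detection in the caller and let fetch_pack() remain the single entry point for both protocol generations.
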
diff --git a/fsck.c b/fsck.c
index 032699e9ac2622c7d089523a456db0fa83310e14..9218c2a643b83e68b9f59479bcc1be833ae8be96 100644 (file)
--- a/fsck.c
+++ b/fsck.c
@@ -811,7 +811,7 @@ static int fsck_tag_buffer(struct tag *tag, const char *data,
                enum object_type type;
 
                buffer = to_free =
-                       read_sha1_file(tag->object.oid.hash, &type, &size);
+                       read_object_file(&tag->object.oid, &type, &size);
                if (!buffer)
                        return report(options, &tag->object,
                                FSCK_MSG_MISSING_TAG_OBJECT,
@@ -821,7 +821,7 @@ static int fsck_tag_buffer(struct tag *tag, const char *data,
                        ret = report(options, &tag->object,
                                FSCK_MSG_TAG_OBJECT_NOT_TAG,
                                "expected tag got %s",
-                           typename(type));
+                           type_name(type));
                        goto done;
                }
        }
index 0af7c4edba37fd9f6a8cee83cd2715c3fa08b488..6d7bcd5d0ed8f2d3f5abdea2f26c6be72909b657 100644 (file)
@@ -130,7 +130,7 @@ static void fsmonitor_refresh_callback(struct index_state *istate, const char *n
         * as it could be a new untracked file.
         */
        trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name);
-       untracked_cache_invalidate_path(istate, name);
+       untracked_cache_invalidate_path(istate, name, 0);
 }
 
 void refresh_fsmonitor(struct index_state *istate)
index cd3cc0ccf228c9d601621685042d51368a71c707..65f37436369cd934f2bdb3b627396c5f72a7cb03 100644 (file)
@@ -65,7 +65,7 @@ static inline void mark_fsmonitor_invalid(struct index_state *istate, struct cac
 {
        if (core_fsmonitor) {
                ce->ce_flags &= ~CE_FSMONITOR_VALID;
-               untracked_cache_invalidate_path(istate, ce->name);
+               untracked_cache_invalidate_path(istate, ce->name, 1);
                trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name);
        }
 }
index 964c3a75420db4751cf11125b68b6904112632f1..cb48fc3e8e1e046a1076826bddc04ae298a9a9e5 100755 (executable)
@@ -677,7 +677,7 @@ sub add_untracked_cmd {
 sub run_git_apply {
        my $cmd = shift;
        my $fh;
-       open $fh, '| git ' . $cmd . " --recount --allow-overlap";
+       open $fh, '| git ' . $cmd . " --allow-overlap";
        print $fh @_;
        return close $fh;
 }
@@ -705,6 +705,14 @@ sub parse_diff {
        }
        my (@hunk) = { TEXT => [], DISPLAY => [], TYPE => 'header' };
 
+       if (@colored && @colored != @diff) {
+               print STDERR
+                 "fatal: mismatched output from interactive.diffFilter\n",
+                 "hint: Your filter must maintain a one-to-one correspondence\n",
+                 "hint: between its input and output lines.\n";
+               exit 1;
+       }
+
        for (my $i = 0; $i < @diff; $i++) {
                if ($diff[$i] =~ /^@@ /) {
                        push @hunk, { TEXT => [], DISPLAY => [],
@@ -751,6 +759,15 @@ sub parse_hunk_header {
        return ($o_ofs, $o_cnt, $n_ofs, $n_cnt);
 }
 
+sub format_hunk_header {
+       my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) = @_;
+       return ("@@ -$o_ofs" .
+               (($o_cnt != 1) ? ",$o_cnt" : '') .
+               " +$n_ofs" .
+               (($n_cnt != 1) ? ",$n_cnt" : '') .
+               " @@\n");
+}
+
 sub split_hunk {
        my ($text, $display) = @_;
        my @split = ();
@@ -784,6 +801,11 @@ sub split_hunk {
                while (++$i < @$text) {
                        my $line = $text->[$i];
                        my $display = $display->[$i];
+                       if ($line =~ /^\\/) {
+                               push @{$this->{TEXT}}, $line;
+                               push @{$this->{DISPLAY}}, $display;
+                               next;
+                       }
                        if ($line =~ /^ /) {
                                if ($this->{ADDDEL} &&
                                    !defined $next_hunk_start) {
@@ -838,11 +860,7 @@ sub split_hunk {
                my $o_cnt = $hunk->{OCNT};
                my $n_cnt = $hunk->{NCNT};
 
-               my $head = ("@@ -$o_ofs" .
-                           (($o_cnt != 1) ? ",$o_cnt" : '') .
-                           " +$n_ofs" .
-                           (($n_cnt != 1) ? ",$n_cnt" : '') .
-                           " @@\n");
+               my $head = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
                my $display_head = $head;
                unshift @{$hunk->{TEXT}}, $head;
                if ($diff_use_color) {
@@ -886,6 +904,9 @@ sub merge_hunk {
                        $n_cnt++;
                        push @line, $line;
                        next;
+               } elsif ($line =~ /^\\/) {
+                       push @line, $line;
+                       next;
                }
 
                last if ($o1_ofs <= $ofs);
@@ -904,6 +925,9 @@ sub merge_hunk {
                        $n_cnt++;
                        push @line, $line;
                        next;
+               } elsif ($line =~ /^\\/) {
+                       push @line, $line;
+                       next;
                }
                $ofs++;
                $o_cnt++;
@@ -912,11 +936,7 @@ sub merge_hunk {
                }
                push @line, $line;
        }
-       my $head = ("@@ -$o0_ofs" .
-                   (($o_cnt != 1) ? ",$o_cnt" : '') .
-                   " +$n0_ofs" .
-                   (($n_cnt != 1) ? ",$n_cnt" : '') .
-                   " @@\n");
+       my $head = format_hunk_header($o0_ofs, $o_cnt, $n0_ofs, $n_cnt);
        @{$prev->{TEXT}} = ($head, @line);
 }
 
@@ -925,14 +945,35 @@ sub coalesce_overlapping_hunks {
        my @out = ();
 
        my ($last_o_ctx, $last_was_dirty);
+       my $ofs_delta = 0;
 
-       for (grep { $_->{USE} } @in) {
+       for (@in) {
                if ($_->{TYPE} ne 'hunk') {
                        push @out, $_;
                        next;
                }
                my $text = $_->{TEXT};
-               my ($o_ofs) = parse_hunk_header($text->[0]);
+               my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) =
+                                               parse_hunk_header($text->[0]);
+               unless ($_->{USE}) {
+                       $ofs_delta += $o_cnt - $n_cnt;
+                       # If this hunk has been edited then subtract
+                       # the delta that is due to the edit.
+                       if ($_->{OFS_DELTA}) {
+                               $ofs_delta -= $_->{OFS_DELTA};
+                       }
+                       next;
+               }
+               if ($ofs_delta) {
+                       $n_ofs += $ofs_delta;
+                       $_->{TEXT}->[0] = format_hunk_header($o_ofs, $o_cnt,
+                                                            $n_ofs, $n_cnt);
+               }
+               # If this hunk was edited then adjust the offset delta
+               # to reflect the edit.
+               if ($_->{OFS_DELTA}) {
+                       $ofs_delta += $_->{OFS_DELTA};
+               }
                if (defined $last_o_ctx &&
                    $o_ofs <= $last_o_ctx &&
                    !$_->{DIRTY} &&
@@ -980,6 +1021,171 @@ sub color_diff {
        } @_;
 }
 
+sub label_hunk_lines {
+       local $_;
+       my $hunk = shift;
+       my $i = 0;
+       my $labels = [ map { /^[-+]/ ? ++$i : 0 } @{$hunk->{TEXT}} ];
+       if ($i > 1) {
+               @{$hunk}{qw(LABELS MAX_LABEL)} = ($labels, $i);
+               return 1;
+       }
+       return 0;
+}
+
+sub select_hunk_lines {
+       my ($hunk, $selected) = @_;
+       my ($text, $labels) = @{$hunk}{qw(TEXT LABELS)};
+       my ($i, $o_cnt, $n_cnt) = (0, 0, 0);
+       my ($push_eol, @newtext);
+       # Lines with this mode will become context lines if they are
+       # not selected
+       my $context_mode = $patch_mode_flavour{IS_REVERSE} ? '+' : '-';
+       for $i (1..$#{$text}) {
+               my $mode = substr($text->[$i], 0, 1);
+               if ($mode eq '\\') {
+                       push @newtext, $text->[$i] if ($push_eol);
+                       undef $push_eol;
+               } elsif ($labels->[$i] and $selected->[$labels->[$i]]) {
+                       push @newtext, $text->[$i];
+                       if ($mode eq '+') {
+                               $n_cnt++;
+                       } else {
+                               $o_cnt++;
+                       }
+                       $push_eol = 1;
+               } elsif ($mode eq ' ' or $mode eq $context_mode) {
+                       push @newtext, ' ' . substr($text->[$i], 1);
+                       $o_cnt++; $n_cnt++;
+                       $push_eol = 1;
+               } else {
+                       undef $push_eol;
+               }
+       }
+       my ($o_ofs, $orig_o_cnt, $n_ofs, $orig_n_cnt) =
+                                       parse_hunk_header($text->[0]);
+       unshift @newtext, format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
+       my $newhunk = {
+               TEXT => \@newtext,
+               DISPLAY => [ color_diff(@newtext) ],
+               OFS_DELTA => $orig_o_cnt - $orig_n_cnt - $o_cnt + $n_cnt,
+               TYPE => $hunk->{TYPE},
+               USE => 1,
+       };
+       # If this hunk has previously been edited, add the offset delta
+       # of the old hunk to get the real delta from the original
+       # hunk.
+       if ($hunk->{OFS_DELTA}) {
+               $newhunk->{OFS_DELTA} += $hunk->{OFS_DELTA};
+       }
+       return $newhunk;
+}
+
+sub check_hunk_label {
+       my ($max_label, $label) = ($_[0]->{MAX_LABEL}, $_[1]);
+       if ($label < 1 or $label > $max_label) {
+               error_msg sprintf(__("invalid hunk line '%d'\n"), $label);
+               return 0;
+       }
+       return 1;
+}
+
+sub split_hunk_selection {
+       local $_;
+       my @fields = @_;
+       my @ret;
+       for (@fields) {
+               while ($_ ne '') {
+                       if (/^[0-9]-$/) {
+                               push @ret, $_;
+                               last;
+                       } elsif (/^([0-9](?:-[0-9])?)(.*)/) {
+                               push @ret, $1;
+                               $_ = $2;
+                       } else {
+                               error_msg sprintf
+                                   __("invalid hunk line '%s'\n"),
+                                   substr($_, 0, 1);
+                               return ();
+                       }
+               }
+       }
+       return @ret;
+}
+
+sub parse_hunk_selection {
+       local $_;
+       my ($hunk, $line) = @_;
+       my ($max_label, $invert) = ($hunk->{MAX_LABEL}, undef);
+       my @selected = (0) x ($max_label + 1);
+       my @fields = split(/[,\s]+/, $line);
+       if ($fields[0] =~ /^-(.*)/) {
+               $invert = 1;
+               if ($1 ne '') {
+                       $fields[0] = $1;
+               } else {
+                       shift @fields;
+                       unless (@fields) {
+                               error_msg __("no lines to invert\n");
+                               return undef;
+                       }
+               }
+       }
+       if ($max_label < 10) {
+               @fields = split_hunk_selection(@fields) or return undef;
+       }
+       for (@fields) {
+               if (my ($lo, $hi) = /^([0-9]+)-([0-9]*)$/) {
+                       if ($hi eq '') {
+                               $hi = $max_label;
+                       }
+                       check_hunk_label($hunk, $lo) or return undef;
+                       check_hunk_label($hunk, $hi) or return undef;
+                       if ($hi < $lo) {
+                               ($lo, $hi) = ($hi, $lo);
+                       }
+                       @selected[$lo..$hi] = (1) x (1 + $hi - $lo);
+               } elsif (/^([0-9]+)$/) {
+                       check_hunk_label($hunk, $1) or return undef;
+                       $selected[$1] = 1;
+               } else {
+                       error_msg sprintf(__("invalid hunk line '%s'\n"), $_);
+                       return undef;
+               }
+       }
+       if ($invert) {
+               @selected = map { !$_ } @selected;
+       }
+       return \@selected;
+}
+
+sub display_hunk_lines {
+       my ($display, $labels, $max_label) =
+                               @{$_[0]}{qw(DISPLAY LABELS MAX_LABEL)};
+       my $width = int(log($max_label) / log(10)) + 1;
+       my $padding = ' ' x ($width + 1);
+       for my $i (0..$#{$display}) {
+               if ($labels->[$i]) {
+                       printf '%*d %s', $width, $labels->[$i], $display->[$i];
+               } else {
+                       print $padding . $display->[$i];
+               }
+       }
+}
+
+sub select_lines_loop {
+       my $hunk = shift;
+       display_hunk_lines($hunk);
+       my $selection = undef;
+       until (defined $selection) {
+               print colored $prompt_color, __("select lines? ");
+               my $text = <STDIN>;
+               defined $text and $text =~ /\S/ or return undef;
+               $selection = parse_hunk_selection($hunk, $text);
+       }
+       return select_hunk_lines($hunk, $selection);
+}
+
 my %edit_hunk_manually_modes = (
        stage => N__(
 "If the patch applies cleanly, the edited hunk will immediately be
@@ -1004,6 +1210,30 @@ sub color_diff {
 marked for applying."),
 );
 
+sub recount_edited_hunk {
+       local $_;
+       my ($oldtext, $newtext) = @_;
+       my ($o_cnt, $n_cnt) = (0, 0);
+       for (@{$newtext}[1..$#{$newtext}]) {
+               my $mode = substr($_, 0, 1);
+               if ($mode eq '-') {
+                       $o_cnt++;
+               } elsif ($mode eq '+') {
+                       $n_cnt++;
+               } elsif ($mode eq ' ') {
+                       $o_cnt++;
+                       $n_cnt++;
+               }
+       }
+       my ($o_ofs, undef, $n_ofs, undef) =
+                                       parse_hunk_header($newtext->[0]);
+       $newtext->[0] = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
+       my (undef, $orig_o_cnt, undef, $orig_n_cnt) =
+                                       parse_hunk_header($oldtext->[0]);
+       # Return the change in the number of lines inserted by this hunk
+       return $orig_o_cnt - $orig_n_cnt - $o_cnt + $n_cnt;
+}
+
 sub edit_hunk_manually {
        my ($oldtext) = @_;
 
@@ -1102,25 +1332,32 @@ sub prompt_yesno {
 }
 
 sub edit_hunk_loop {
-       my ($head, $hunk, $ix) = @_;
-       my $text = $hunk->[$ix]->{TEXT};
+       my ($head, $hunks, $ix) = @_;
+       my $hunk = $hunks->[$ix];
+       my $text = $hunk->{TEXT};
 
        while (1) {
-               $text = edit_hunk_manually($text);
-               if (!defined $text) {
+               my $newtext = edit_hunk_manually($text);
+               if (!defined $newtext) {
                        return undef;
                }
                my $newhunk = {
-                       TEXT => $text,
-                       TYPE => $hunk->[$ix]->{TYPE},
+                       TEXT => $newtext,
+                       TYPE => $hunk->{TYPE},
                        USE => 1,
                        DIRTY => 1,
                };
+               $newhunk->{OFS_DELTA} = recount_edited_hunk($text, $newtext);
+               # If this hunk has already been edited then add the
+               # offset delta of the previous edit to get the real
+               # delta from the original unedited hunk.
+               $hunk->{OFS_DELTA} and
+                               $newhunk->{OFS_DELTA} += $hunk->{OFS_DELTA};
                if (diff_applies($head,
-                                @{$hunk}[0..$ix-1],
+                                @{$hunks}[0..$ix-1],
                                 $newhunk,
-                                @{$hunk}[$ix+1..$#{$hunk}])) {
-                       $newhunk->{DISPLAY} = [color_diff(@{$text})];
+                                @{$hunks}[$ix+1..$#{$hunks}])) {
+                       $newhunk->{DISPLAY} = [color_diff(@{$newtext})];
                        return $newhunk;
                }
                else {
@@ -1184,13 +1421,20 @@ sub edit_hunk_loop {
 );
 
 sub help_patch_cmd {
-       print colored $help_color, __($help_patch_modes{$patch_mode}), "\n", __ <<EOF ;
+       local $_;
+       my $other = $_[0] . ",?";
+       print colored $help_color, __($help_patch_modes{$patch_mode}), "\n",
+               map { "$_\n" } grep {
+                       my $c = quotemeta(substr($_, 0, 1));
+                       $other =~ /,$c/
+               } split "\n", __ <<EOF ;
 g - select a hunk to go to
 / - search for a hunk matching the given regex
 j - leave this hunk undecided, see next undecided hunk
 J - leave this hunk undecided, see next hunk
 k - leave this hunk undecided, see previous undecided hunk
 K - leave this hunk undecided, see previous hunk
+l - select hunk lines to use
 s - split the current hunk into smaller hunks
 e - manually edit the current hunk
 ? - print help
@@ -1302,39 +1546,39 @@ sub display_hunks {
 
 my %patch_update_prompt_modes = (
        stage => {
-               mode => N__("Stage mode change [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Stage deletion [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Stage this hunk [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Stage mode change [y,n,q,a,d%s,?]? "),
+               deletion => N__("Stage deletion [y,n,q,a,d%s,?]? "),
+               hunk => N__("Stage this hunk [y,n,q,a,d%s,?]? "),
        },
        stash => {
-               mode => N__("Stash mode change [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Stash deletion [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Stash this hunk [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Stash mode change [y,n,q,a,d%s,?]? "),
+               deletion => N__("Stash deletion [y,n,q,a,d%s,?]? "),
+               hunk => N__("Stash this hunk [y,n,q,a,d%s,?]? "),
        },
        reset_head => {
-               mode => N__("Unstage mode change [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Unstage deletion [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Unstage this hunk [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Unstage mode change [y,n,q,a,d%s,?]? "),
+               deletion => N__("Unstage deletion [y,n,q,a,d%s,?]? "),
+               hunk => N__("Unstage this hunk [y,n,q,a,d%s,?]? "),
        },
        reset_nothead => {
-               mode => N__("Apply mode change to index [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Apply deletion to index [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Apply this hunk to index [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Apply mode change to index [y,n,q,a,d%s,?]? "),
+               deletion => N__("Apply deletion to index [y,n,q,a,d%s,?]? "),
+               hunk => N__("Apply this hunk to index [y,n,q,a,d%s,?]? "),
        },
        checkout_index => {
-               mode => N__("Discard mode change from worktree [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Discard deletion from worktree [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Discard this hunk from worktree [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Discard mode change from worktree [y,n,q,a,d%s,?]? "),
+               deletion => N__("Discard deletion from worktree [y,n,q,a,d%s,?]? "),
+               hunk => N__("Discard this hunk from worktree [y,n,q,a,d%s,?]? "),
        },
        checkout_head => {
-               mode => N__("Discard mode change from index and worktree [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Discard deletion from index and worktree [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Discard mode change from index and worktree [y,n,q,a,d%s,?]? "),
+               deletion => N__("Discard deletion from index and worktree [y,n,q,a,d%s,?]? "),
+               hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d%s,?]? "),
        },
        checkout_nothead => {
-               mode => N__("Apply mode change to index and worktree [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Apply deletion to index and worktree [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Apply mode change to index and worktree [y,n,q,a,d%s,?]? "),
+               deletion => N__("Apply deletion to index and worktree [y,n,q,a,d%s,?]? "),
+               hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d%s,?]? "),
        },
 );
 
@@ -1390,7 +1634,7 @@ sub patch_update_file {
                        $other .= ',J';
                }
                if ($num > 1) {
-                       $other .= ',g';
+                       $other .= ',g,/';
                }
                for ($i = 0; $i < $num; $i++) {
                        if (!defined $hunk[$i]{USE}) {
@@ -1407,6 +1651,9 @@ sub patch_update_file {
                if ($hunk[$ix]{TYPE} eq 'hunk') {
                        $other .= ',e';
                }
+               if (label_hunk_lines($hunk[$ix])) {
+                       $other .= ',l';
+               }
                for (@{$hunk[$ix]{DISPLAY}}) {
                        print;
                }
@@ -1431,8 +1678,12 @@ sub patch_update_file {
                                }
                                next;
                        }
-                       elsif ($other =~ /g/ && $line =~ /^g(.*)/) {
+                       elsif ($line =~ /^g(.*)/) {
                                my $response = $1;
+                               unless ($other =~ /g/) {
+                                       error_msg __("No other hunks to goto\n");
+                                       next;
+                               }
                                my $no = $ix > 10 ? $ix - 10 : 0;
                                while ($response eq '') {
                                        $no = display_hunks(\@hunk, $no);
@@ -1478,6 +1729,10 @@ sub patch_update_file {
                        }
                        elsif ($line =~ m|^/(.*)|) {
                                my $regex = $1;
+                               unless ($other =~ m|/|) {
+                                       error_msg __("No other hunks to search\n");
+                                       next;
+                               }
                                if ($1 eq "") {
                                        print colored $prompt_color, __("search for regex? ");
                                        $regex = <STDIN>;
@@ -1546,7 +1801,23 @@ sub patch_update_file {
                                        next;
                                }
                        }
-                       elsif ($other =~ /s/ && $line =~ /^s/) {
+                       elsif ($line =~ /^l/) {
+                               unless ($other =~ /l/) {
+                                       error_msg __("Cannot select line by line\n");
+                                       next;
+                               }
+                               my $newhunk = select_lines_loop($hunk[$ix]);
+                               if ($newhunk) {
+                                       splice @hunk, $ix, 1, $newhunk;
+                               } else {
+                                       next;
+                               }
+                       }
+                       elsif ($line =~ /^s/) {
+                               unless ($other =~ /s/) {
+                                       error_msg __("Sorry, cannot split this hunk\n");
+                                       next;
+                               }
                                my @split = split_hunk($hunk[$ix]{TEXT}, $hunk[$ix]{DISPLAY});
                                if (1 < @split) {
                                        print colored $header_color, sprintf(
@@ -1558,7 +1829,11 @@ sub patch_update_file {
                                $num = scalar @hunk;
                                next;
                        }
-                       elsif ($other =~ /e/ && $line =~ /^e/) {
+                       elsif ($line =~ /^e/) {
+                               unless ($other =~ /e/) {
+                                       error_msg __("Sorry, cannot edit this hunk\n");
+                                       next;
+                               }
                                my $newhunk = edit_hunk_loop($head, \@hunk, $ix);
                                if (defined $newhunk) {
                                        splice @hunk, $ix, 1, $newhunk;
index 68b2ad531ea6f9cf1127a0e5986c523fd86b0967..07e383257b4985f7400f167d683a5fb692237d93 100644 (file)
@@ -826,8 +826,8 @@ extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
 extern int xdup(int fd);
 extern FILE *xfopen(const char *path, const char *mode);
 extern FILE *xfdopen(int fd, const char *mode);
-extern int xmkstemp(char *template);
-extern int xmkstemp_mode(char *template, int mode);
+extern int xmkstemp(char *temp_filename);
+extern int xmkstemp_mode(char *temp_filename, int mode);
 extern char *xgetcwd(void);
 extern FILE *fopen_for_writing(const char *path);
 extern FILE *fopen_or_warn(const char *path, const char *mode);
index 2d8df831722913093a46e9325796b85f67a97073..b31613cb8aa8decd3f808d5d29e047243fa1eac6 100755 (executable)
@@ -601,7 +601,9 @@ ($)
        my ($d) = @_;
        m#(\d{2,4})/(\d\d)/(\d\d)\s(\d\d):(\d\d)(?::(\d\d))?#
                or die "Unparseable date: $d\n";
-       my $y=$1; $y-=1900 if $y>1900;
+       my $y=$1;
+       $y+=100 if $y<70;
+       $y+=1900 if $y<1000;
        return timegm($6||0,$5,$4,$3,$2-1,$y);
 }
 
index 1b7e4b2cdbdf36e090c4b1f531dcc48222d8f8b1..64f21547c1ab99ef3ad98d57fe0fbadff9621838 100755 (executable)
@@ -251,8 +251,18 @@ done < "$tempdir"/backup-refs
 
 # The refs should be updated if their heads were rewritten
 git rev-parse --no-flags --revs-only --symbolic-full-name \
-       --default HEAD "$@" > "$tempdir"/raw-heads || exit
-sed -e '/^^/d' "$tempdir"/raw-heads >"$tempdir"/heads
+       --default HEAD "$@" > "$tempdir"/raw-refs || exit
+while read ref
+do
+       case "$ref" in ^?*) continue ;; esac
+
+       if git rev-parse --verify "$ref"^0 >/dev/null 2>&1
+       then
+               echo "$ref"
+       else
+               warn "WARNING: not rewriting '$ref' (not a committish)"
+       fi
+done >"$tempdir"/heads <"$tempdir"/raw-refs
 
 test -s "$tempdir"/heads ||
        die "You must specify a ref to rewrite."
@@ -310,7 +320,7 @@ git rev-list --reverse --topo-order --default HEAD \
        die "Could not get the commits"
 commits=$(wc -l <../revs | tr -d " ")
 
-test $commits -eq 0 && die "Found nothing to rewrite"
+test $commits -eq 0 && die_with_status 2 "Found nothing to rewrite"
 
 # Rewrite the commits
 report_progress ()
@@ -627,12 +637,12 @@ then
                                print H "$_:$f\n" or die;
                        }
                        close(H) or die;' || die "Unable to save state")
-       state_tree=$(/bin/echo -e "100644 blob $state_blob\tfilter.map" | git mktree)
+       state_tree=$(printf '100644 blob %s\tfilter.map\n' "$state_blob" | git mktree)
        if test -n "$state_commit"
        then
-               state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" -p "$state_commit")
+               state_commit=$(echo "Sync" | git commit-tree "$state_tree" -p "$state_commit")
        else
-               state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" )
+               state_commit=$(echo "Sync" | git commit-tree "$state_tree" )
        fi
        git update-ref "$state_branch" "$state_commit"
 fi
index 14c50782e096966b860d49cbaed7658e9f56958f..e5fd6101db3018aff5fca7047eb2958291b6b03d 100644 (file)
@@ -4,15 +4,6 @@
 # Copyright (c) 2010 Junio C Hamano.
 #
 
-# The whole contents of this file is run by dot-sourcing it from
-# inside a shell function.  It used to be that "return"s we see
-# below were not inside any function, and expected to return
-# to the function that dot-sourced us.
-#
-# However, older (9.x) versions of FreeBSD /bin/sh misbehave on such a
-# construct and continue to run the statements that follow such a "return".
-# As a work-around, we introduce an extra layer of a function
-# here, and immediately call it after defining it.
 git_rebase__am () {
 
 case "$action" in
@@ -27,6 +18,9 @@ skip)
        move_to_original_branch
        return
        ;;
+show-current-patch)
+       exec git am --show-current-patch
+       ;;
 esac
 
 if test -z "$rebase_root"
@@ -46,6 +40,7 @@ then
        # makes this easy
        git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \
                $allow_rerere_autoupdate --right-only "$revisions" \
+               $allow_empty_message \
                ${restrict_revision+^$restrict_revision}
        ret=$?
 else
@@ -101,5 +96,3 @@ fi
 move_to_original_branch
 
 }
-# ... and then we call the whole thing.
-git_rebase__am
index d47bd29593ad8711448293f3b6bf2e059323ff78..50323fc27351666440e76ae50806ee480e998da3 100644 (file)
@@ -199,12 +199,14 @@ make_patch () {
 
 die_with_patch () {
        echo "$1" > "$state_dir"/stopped-sha
+       git update-ref REBASE_HEAD "$1"
        make_patch "$1"
        die "$2"
 }
 
 exit_with_patch () {
        echo "$1" > "$state_dir"/stopped-sha
+       git update-ref REBASE_HEAD "$1"
        make_patch $1
        git rev-parse --verify HEAD > "$amend"
        gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
@@ -281,7 +283,7 @@ pick_one () {
 
        test -d "$rewritten" &&
                pick_one_preserving_merges "$@" && return
-       output eval git cherry-pick $allow_rerere_autoupdate \
+       output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
                        ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
                        "$strategy_args" $empty_args $ff "$@"
 
@@ -305,17 +307,14 @@ pick_one_preserving_merges () {
        esac
        sha1=$(git rev-parse $sha1)
 
-       if test -f "$state_dir"/current-commit
+       if test -f "$state_dir"/current-commit && test "$fast_forward" = t
        then
-               if test "$fast_forward" = t
-               then
-                       while read current_commit
-                       do
-                               git rev-parse HEAD > "$rewritten"/$current_commit
-                       done <"$state_dir"/current-commit
-                       rm "$state_dir"/current-commit ||
-                               die "$(gettext "Cannot write current commit's replacement sha1")"
-               fi
+               while read current_commit
+               do
+                       git rev-parse HEAD > "$rewritten"/$current_commit
+               done <"$state_dir"/current-commit
+               rm "$state_dir"/current-commit ||
+                       die "$(gettext "Cannot write current commit's replacement sha1")"
        fi
 
        echo $sha1 >> "$state_dir"/current-commit
@@ -396,7 +395,7 @@ pick_one_preserving_merges () {
                                        --sq-quote "$gpg_sign_opt")} \
                                $allow_rerere_autoupdate "$merge_args" \
                                "$strategy_args" \
-                               -m $(git rev-parse --sq-quote "$msg_content") \
+                               -m "$(git rev-parse --sq-quote "$msg_content")" \
                                "$new_parents"
                        then
                                printf "%s\n" "$msg_content" > "$GIT_DIR"/MERGE_MSG
@@ -406,6 +405,7 @@ pick_one_preserving_merges () {
                        ;;
                *)
                        output eval git cherry-pick $allow_rerere_autoupdate \
+                               $allow_empty_message \
                                ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
                                "$strategy_args" "$@" ||
                                die_with_patch $sha1 "$(eval_gettext "Could not pick \$sha1")"
@@ -559,7 +559,8 @@ do_next () {
 
                mark_action_done
                do_pick $sha1 "$rest"
-               git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} || {
+               git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} \
+                       $allow_empty_message || {
                        warn "$(eval_gettext "\
 Could not amend commit after successfully picking \$sha1... \$rest
 This is most likely due to an empty commit message, or the pre-commit hook
@@ -607,7 +608,7 @@ you are able to reword the commit.")"
                        # This is an intermediate commit; its message will only be
                        # used in case of trouble.  So use the long version:
                        do_with_author output git commit --amend --no-verify -F "$squash_msg" \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                               ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                die_failed_squash $sha1 "$rest"
                        ;;
                *)
@@ -615,13 +616,13 @@ you are able to reword the commit.")"
                        if test -f "$fixup_msg"
                        then
                                do_with_author git commit --amend --no-verify -F "$fixup_msg" \
-                                       ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                        die_failed_squash $sha1 "$rest"
                        else
                                cp "$squash_msg" "$GIT_DIR"/SQUASH_MSG || exit
                                rm -f "$GIT_DIR"/MERGE_MSG
                                do_with_author git commit --amend --no-verify -F "$GIT_DIR"/SQUASH_MSG -e \
-                                       ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                        die_failed_squash $sha1 "$rest"
                        fi
                        rm -f "$squash_msg" "$fixup_msg"
@@ -739,36 +740,39 @@ get_missing_commit_check_level () {
        printf '%s' "$check_level" | tr 'A-Z' 'a-z'
 }
 
-# The whole contents of this file is run by dot-sourcing it from
-# inside a shell function.  It used to be that "return"s we see
-# below were not inside any function, and expected to return
-# to the function that dot-sourced us.
+# Initiate an action. If there can be no
+# further action it may exec a command
+# or exit and not return.
 #
-# However, older (9.x) versions of FreeBSD /bin/sh misbehave on such a
-# construct and continue to run the statements that follow such a "return".
-# As a work-around, we introduce an extra layer of a function
-# here, and immediately call it after defining it.
-git_rebase__interactive () {
-
-case "$action" in
-continue)
-       if test ! -d "$rewritten"
-       then
-               exec git rebase--helper ${force_rebase:+--no-ff} --continue
-       fi
-       # do we have anything to commit?
-       if git diff-index --cached --quiet HEAD --
-       then
-               # Nothing to commit -- skip this commit
-
-               test ! -f "$GIT_DIR"/CHERRY_PICK_HEAD ||
-               rm "$GIT_DIR"/CHERRY_PICK_HEAD ||
-               die "$(gettext "Could not remove CHERRY_PICK_HEAD")"
-       else
-               if ! test -f "$author_script"
+# TODO: Consider a cleaner return model so it
+# never exits and always returns 0 if the
+# process is complete.
+#
+# Parameter 1 is the action to initiate.
+#
+# Returns 0 if the action was able to complete
+# and 1 if further processing is required.
+initiate_action () {
+       case "$1" in
+       continue)
+               if test ! -d "$rewritten"
                then
-                       gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
-                       die "$(eval_gettext "\
+                       exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                               --continue
+               fi
+               # do we have anything to commit?
+               if git diff-index --cached --quiet HEAD --
+               then
+                       # Nothing to commit -- skip this commit
+
+                       test ! -f "$GIT_DIR"/CHERRY_PICK_HEAD ||
+                       rm "$GIT_DIR"/CHERRY_PICK_HEAD ||
+                       die "$(gettext "Could not remove CHERRY_PICK_HEAD")"
+               else
+                       if ! test -f "$author_script"
+                       then
+                               gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
+                               die "$(eval_gettext "\
 You have staged changes in your working tree.
 If these changes are meant to be
 squashed into the previous commit, run:
@@ -783,83 +787,199 @@ In both cases, once you're done, continue with:
 
   git rebase --continue
 ")"
-               fi
-               . "$author_script" ||
-                       die "$(gettext "Error trying to find the author identity to amend commit")"
-               if test -f "$amend"
-               then
-                       current_head=$(git rev-parse --verify HEAD)
-                       test "$current_head" = $(cat "$amend") ||
-                       die "$(gettext "\
+                       fi
+                       . "$author_script" ||
+                               die "$(gettext "Error trying to find the author identity to amend commit")"
+                       if test -f "$amend"
+                       then
+                               current_head=$(git rev-parse --verify HEAD)
+                               test "$current_head" = $(cat "$amend") ||
+                               die "$(gettext "\
 You have uncommitted changes in your working tree. Please commit them
 first and then run 'git rebase --continue' again.")"
-                       do_with_author git commit --amend --no-verify -F "$msg" -e \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} ||
-                               die "$(gettext "Could not commit staged changes.")"
-               else
-                       do_with_author git commit --no-verify -F "$msg" -e \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} ||
-                               die "$(gettext "Could not commit staged changes.")"
+                               do_with_author git commit --amend --no-verify -F "$msg" -e \
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
+                                       die "$(gettext "Could not commit staged changes.")"
+                       else
+                               do_with_author git commit --no-verify -F "$msg" -e \
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
+                                       die "$(gettext "Could not commit staged changes.")"
+                       fi
                fi
-       fi
 
-       if test -r "$state_dir"/stopped-sha
+               if test -r "$state_dir"/stopped-sha
+               then
+                       record_in_rewritten "$(cat "$state_dir"/stopped-sha)"
+               fi
+
+               require_clean_work_tree "rebase"
+               do_rest
+               return 0
+               ;;
+       skip)
+               git rerere clear
+
+               if test ! -d "$rewritten"
+               then
+                       exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                               --continue
+               fi
+               do_rest
+               return 0
+               ;;
+       edit-todo)
+               git stripspace --strip-comments <"$todo" >"$todo".new
+               mv -f "$todo".new "$todo"
+               collapse_todo_ids
+               append_todo_help
+               gettext "
+You are editing the todo file of an ongoing interactive rebase.
+To continue rebase after editing, run:
+    git rebase --continue
+
+" | git stripspace --comment-lines >>"$todo"
+
+               git_sequence_editor "$todo" ||
+                       die "$(gettext "Could not execute editor")"
+               expand_todo_ids
+
+               exit
+               ;;
+       show-current-patch)
+               exec git show REBASE_HEAD --
+               ;;
+       *)
+               return 1 # continue
+               ;;
+       esac
+}
+
+setup_reflog_action () {
+       comment_for_reflog start
+
+       if test ! -z "$switch_to"
        then
-               record_in_rewritten "$(cat "$state_dir"/stopped-sha)"
+               GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to"
+               output git checkout "$switch_to" -- ||
+                       die "$(eval_gettext "Could not checkout \$switch_to")"
+
+               comment_for_reflog start
        fi
+}
 
-       require_clean_work_tree "rebase"
-       do_rest
-       return 0
-       ;;
-skip)
-       git rerere clear
+init_basic_state () {
+       orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
+       mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
+       rm -f "$(git rev-parse --git-path REBASE_HEAD)"
+
+       : > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
+       write_basic_state
+}
 
-       if test ! -d "$rewritten"
+init_revisions_and_shortrevisions () {
+       shorthead=$(git rev-parse --short $orig_head)
+       shortonto=$(git rev-parse --short $onto)
+       if test -z "$rebase_root"
+               # this is now equivalent to ! -z "$upstream"
        then
-               exec git rebase--helper ${force_rebase:+--no-ff} --continue
+               shortupstream=$(git rev-parse --short $upstream)
+               revisions=$upstream...$orig_head
+               shortrevisions=$shortupstream..$shorthead
+       else
+               revisions=$onto...$orig_head
+               shortrevisions=$shorthead
        fi
-       do_rest
-       return 0
-       ;;
-edit-todo)
-       git stripspace --strip-comments <"$todo" >"$todo".new
-       mv -f "$todo".new "$todo"
-       collapse_todo_ids
+}
+
+complete_action() {
+       test -s "$todo" || echo noop >> "$todo"
+       test -z "$autosquash" || git rebase--helper --rearrange-squash || exit
+       test -n "$cmd" && git rebase--helper --add-exec-commands "$cmd"
+
+       todocount=$(git stripspace --strip-comments <"$todo" | wc -l)
+       todocount=${todocount##* }
+
+cat >>"$todo" <<EOF
+
+$comment_char $(eval_ngettext \
+       "Rebase \$shortrevisions onto \$shortonto (\$todocount command)" \
+       "Rebase \$shortrevisions onto \$shortonto (\$todocount commands)" \
+       "$todocount")
+EOF
        append_todo_help
        gettext "
-You are editing the todo file of an ongoing interactive rebase.
-To continue rebase after editing, run:
-    git rebase --continue
+       However, if you remove everything, the rebase will be aborted.
 
-" | git stripspace --comment-lines >>"$todo"
+       " | git stripspace --comment-lines >>"$todo"
 
+       if test -z "$keep_empty"
+       then
+               printf '%s\n' "$comment_char $(gettext "Note that empty commits are commented out")" >>"$todo"
+       fi
+
+
+       has_action "$todo" ||
+               return 2
+
+       cp "$todo" "$todo".backup
+       collapse_todo_ids
        git_sequence_editor "$todo" ||
-               die "$(gettext "Could not execute editor")"
+               die_abort "$(gettext "Could not execute editor")"
+
+       has_action "$todo" ||
+               return 2
+
+       git rebase--helper --check-todo-list || {
+               ret=$?
+               checkout_onto
+               exit $ret
+       }
+
        expand_todo_ids
 
-       exit
-       ;;
-esac
+       test -d "$rewritten" || test -n "$force_rebase" ||
+       onto="$(git rebase--helper --skip-unnecessary-picks)" ||
+       die "Could not skip unnecessary pick commands"
 
-comment_for_reflog start
+       checkout_onto
+       if test -z "$rebase_root" && test ! -d "$rewritten"
+       then
+               require_clean_work_tree "rebase"
+               exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                       --continue
+       fi
+       do_rest
+}
 
-if test ! -z "$switch_to"
-then
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to"
-       output git checkout "$switch_to" -- ||
-               die "$(eval_gettext "Could not checkout \$switch_to")"
+git_rebase__interactive () {
+       initiate_action "$action"
+       ret=$?
+       if test $ret = 0; then
+               return 0
+       fi
 
-       comment_for_reflog start
-fi
+       setup_reflog_action
+       init_basic_state
+
+       init_revisions_and_shortrevisions
+
+       git rebase--helper --make-script ${keep_empty:+--keep-empty} \
+               $revisions ${restrict_revision+^$restrict_revision} >"$todo" ||
+       die "$(gettext "Could not generate todo list")"
 
-orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
-mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
+       complete_action
+}
+
+git_rebase__interactive__preserve_merges () {
+       initiate_action "$action"
+       ret=$?
+       if test $ret = 0; then
+               return 0
+       fi
+
+       setup_reflog_action
+       init_basic_state
 
-: > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
-write_basic_state
-if test t = "$preserve_merges"
-then
        if test -z "$rebase_root"
        then
                mkdir "$rewritten" &&
@@ -873,41 +993,17 @@ then
                echo $onto > "$rewritten"/root ||
                        die "$(gettext "Could not init rewritten commits")"
        fi
-       # No cherry-pick because our first pass is to determine
-       # parents to rewrite and skipping dropped commits would
-       # prematurely end our probe
-       merges_option=
-else
-       merges_option="--no-merges --cherry-pick"
-fi
-
-shorthead=$(git rev-parse --short $orig_head)
-shortonto=$(git rev-parse --short $onto)
-if test -z "$rebase_root"
-       # this is now equivalent to ! -z "$upstream"
-then
-       shortupstream=$(git rev-parse --short $upstream)
-       revisions=$upstream...$orig_head
-       shortrevisions=$shortupstream..$shorthead
-else
-       revisions=$onto...$orig_head
-       shortrevisions=$shorthead
-fi
-if test t != "$preserve_merges"
-then
-       git rebase--helper --make-script ${keep_empty:+--keep-empty} \
-               $revisions ${restrict_revision+^$restrict_revision} >"$todo" ||
-       die "$(gettext "Could not generate todo list")"
-else
+
+       init_revisions_and_shortrevisions
+
        format=$(git config --get rebase.instructionFormat)
        # the 'rev-list .. | sed' requires %m to parse; the instruction requires %H to parse
-       git rev-list $merges_option --format="%m%H ${format:-%s}" \
+       git rev-list --format="%m%H ${format:-%s}" \
                --reverse --left-right --topo-order \
                $revisions ${restrict_revision+^$restrict_revision} | \
                sed -n "s/^>//p" |
        while read -r sha1 rest
        do
-
                if test -z "$keep_empty" && is_empty_commit $sha1 && ! is_merge_commit $sha1
                then
                        comment_out="$comment_char "
@@ -934,11 +1030,8 @@ else
                        printf '%s\n' "${comment_out}pick $sha1 $rest" >>"$todo"
                fi
        done
-fi
 
-# Watch for commits that been dropped by --cherry-pick
-if test t = "$preserve_merges"
-then
+       # Watch for commits that been dropped by --cherry-pick
        mkdir "$dropped"
        # Save all non-cherry-picked changes
        git rev-list $revisions --left-right --cherry-pick | \
@@ -961,65 +1054,6 @@ then
                        rm "$rewritten"/$rev
                fi
        done
-fi
-
-test -s "$todo" || echo noop >> "$todo"
-test -z "$autosquash" || git rebase--helper --rearrange-squash || exit
-test -n "$cmd" && git rebase--helper --add-exec-commands "$cmd"
-
-todocount=$(git stripspace --strip-comments <"$todo" | wc -l)
-todocount=${todocount##* }
-
-cat >>"$todo" <<EOF
-
-$comment_char $(eval_ngettext \
-       "Rebase \$shortrevisions onto \$shortonto (\$todocount command)" \
-       "Rebase \$shortrevisions onto \$shortonto (\$todocount commands)" \
-       "$todocount")
-EOF
-append_todo_help
-gettext "
-However, if you remove everything, the rebase will be aborted.
-
-" | git stripspace --comment-lines >>"$todo"
-
-if test -z "$keep_empty"
-then
-       printf '%s\n' "$comment_char $(gettext "Note that empty commits are commented out")" >>"$todo"
-fi
-
-
-has_action "$todo" ||
-       return 2
-
-cp "$todo" "$todo".backup
-collapse_todo_ids
-git_sequence_editor "$todo" ||
-       die_abort "$(gettext "Could not execute editor")"
-
-has_action "$todo" ||
-       return 2
-
-git rebase--helper --check-todo-list || {
-       ret=$?
-       checkout_onto
-       exit $ret
-}
-
-expand_todo_ids
-
-test -d "$rewritten" || test -n "$force_rebase" ||
-onto="$(git rebase--helper --skip-unnecessary-picks)" ||
-die "Could not skip unnecessary pick commands"
-
-checkout_onto
-if test -z "$rebase_root" && test ! -d "$rewritten"
-then
-       require_clean_work_tree "rebase"
-       exec git rebase--helper ${force_rebase:+--no-ff} --continue
-fi
-do_rest
 
+       complete_action
 }
-# ... and then we call the whole thing.
-git_rebase__interactive
diff --git a/git-rebase--merge.sh b/git-rebase--merge.sh
index 06a4723d4db3db74ea17ace60d824e83cdee25e9..685f48ca49bcdac63f5577ac20daa2993be5e4a2 100644 (file)
@@ -27,7 +27,8 @@ continue_merge () {
        cmt=$(cat "$state_dir/current")
        if ! git diff-index --quiet --ignore-submodules HEAD --
        then
-               if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} --no-verify -C "$cmt"
+               if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \
+                       --no-verify -C "$cmt"
                then
                        echo "Commit failed, please do not call \"git commit\""
                        echo "directly, but instead do one of the following: "
@@ -57,6 +58,7 @@ call_merge () {
        echo "$msgnum" >"$state_dir/msgnum"
        cmt="$(cat "$state_dir/cmt.$msgnum")"
        echo "$cmt" > "$state_dir/current"
+       git update-ref REBASE_HEAD "$cmt"
        hd=$(git rev-parse --verify HEAD)
        cmt_name=$(git symbolic-ref HEAD 2> /dev/null || echo HEAD)
        eval GITHEAD_$cmt='"${cmt_name##refs/heads/}~$(($end - $msgnum))"'
@@ -102,15 +104,6 @@ finish_rb_merge () {
        say All done.
 }
 
-# The whole contents of this file is run by dot-sourcing it from
-# inside a shell function.  It used to be that "return"s we see
-# below were not inside any function, and expected to return
-# to the function that dot-sourced us.
-#
-# However, older (9.x) versions of FreeBSD /bin/sh misbehave on such a
-# construct and continue to run the statements that follow such a "return".
-# As a work-around, we introduce an extra layer of a function
-# here, and immediately call it after defining it.
 git_rebase__merge () {
 
 case "$action" in
@@ -137,11 +130,15 @@ skip)
        finish_rb_merge
        return
        ;;
+show-current-patch)
+       exec git show REBASE_HEAD --
+       ;;
 esac
 
 mkdir -p "$state_dir"
 echo "$onto_name" > "$state_dir/onto_name"
 write_basic_state
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
 
 msgnum=0
 for cmt in $(git rev-list --reverse --no-merges "$revisions")
@@ -165,5 +162,3 @@ done
 finish_rb_merge
 
 }
-# ... and then we call the whole thing.
-git_rebase__merge
diff --git a/git-rebase.sh b/git-rebase.sh
index fd72a35c65b43537b292445b87ffb7e682cc076a..fb64ee1fe42e801a57f1709c15617ddbc27f05af 100755 (executable)
@@ -24,6 +24,7 @@ m,merge!           use merging strategies to rebase
 i,interactive!     let the user edit the list of commits to rebase
 x,exec=!           add exec lines after each commit of the editable list
 k,keep-empty      preserve empty commits during rebase
+allow-empty-message allow rebasing commits with empty messages
 f,force-rebase!    force rebase even if branch is up to date
 X,strategy-option=! pass the argument through to the merge strategy
 stat!              display a diffstat of what changed upstream
@@ -45,6 +46,7 @@ abort!             abort and check out the original branch
 skip!              skip current patch and continue
 edit-todo!         edit the todo list during an interactive rebase
 quit!              abort but keep HEAD where it is
+show-current-patch! show the patch file being applied or merged
 "
 . git-sh-setup
 set_reflog_action rebase
@@ -89,6 +91,7 @@ action=
 preserve_merges=
 autosquash=
 keep_empty=
+allow_empty_message=
 test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
 case "$(git config --bool commit.gpgsign)" in
 true)  gpg_sign_opt=-S ;;
@@ -181,6 +184,7 @@ You can run "git stash pop" or "git stash drop" at any time.
 }
 
 finish_rebase () {
+       rm -f "$(git rev-parse --git-path REBASE_HEAD)"
        apply_autostash &&
        { git gc --auto || true; } &&
        rm -rf "$state_dir"
@@ -193,6 +197,7 @@ run_specific_rebase () {
                autosquash=
        fi
        . git-rebase--$type
+       git_rebase__$type${preserve_merges:+__preserve_merges}
        ret=$?
        if test $ret -eq 0
        then
@@ -245,7 +250,7 @@ do
        --verify)
                ok_to_skip_pre_rebase=
                ;;
-       --continue|--skip|--abort|--quit|--edit-todo)
+       --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
                test $total_argc -eq 2 || usage
                action=${1##--}
                ;;
@@ -262,6 +267,9 @@ do
        --keep-empty)
                keep_empty=yes
                ;;
+       --allow-empty-message)
+               allow_empty_message=--allow-empty-message
+               ;;
        --preserve-merges)
                preserve_merges=t
                test -z "$interactive_rebase" && interactive_rebase=implied
@@ -412,6 +420,10 @@ quit)
 edit-todo)
        run_specific_rebase
        ;;
+show-current-patch)
+       run_specific_rebase
+       die "BUG: run_specific_rebase is not supposed to return here"
+       ;;
 esac
 
 # Make sure no rebase is in progress
diff --git a/git-send-email.perl b/git-send-email.perl
index bbf4deaa0d42ef2a843c2240000d7e34dfff0b62..2fa7818ca9a8ac7363d17aef17b864f7361b3eb8 100755 (executable)
 use Term::ANSIColor;
 use File::Temp qw/ tempdir tempfile /;
 use File::Spec::Functions qw(catdir catfile);
-use Git::Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
 use Cwd qw(abs_path cwd);
 use Git;
 use Git::I18N;
-use Git::Mail::Address;
+use Net::Domain ();
+use Net::SMTP ();
+use Git::LoadCPAN::Mail::Address;
 
 Getopt::Long::Configure qw/ pass_through /;
 
@@ -57,6 +59,7 @@ sub usage {
     --[no-]cc               <str>  * Email Cc:
     --[no-]bcc              <str>  * Email Bcc:
     --subject               <str>  * Email "Subject:"
+    --reply-to              <str>  * Email "Reply-To:"
     --in-reply-to           <str>  * Email "In-Reply-To:"
     --[no-]xmailer                 * Add "X-Mailer:" header (default).
     --[no-]annotate                * Review each patch that will be sent in an editor.
@@ -167,13 +170,13 @@ sub format_2822_time {
 
 # Variables we fill in automatically, or via prompting:
 my (@to,$no_to,@initial_to,@cc,$no_cc,@initial_cc,@bcclist,$no_bcc,@xh,
-       $initial_reply_to,$initial_subject,@files,
+       $initial_in_reply_to,$reply_to,$initial_subject,@files,
        $author,$sender,$smtp_authpass,$annotate,$use_xmailer,$compose,$time);
 
 my $envelope_sender;
 
 # Example reply to:
-#$initial_reply_to = ''; #<20050203173208.GA23964@foobar.com>';
+#$initial_in_reply_to = ''; #<20050203173208.GA23964@foobar.com>';
 
 my $repo = eval { Git->repository() };
 my @repo = $repo ? ($repo) : ();
@@ -315,7 +318,8 @@ sub signal_handler {
     if !$help and $dump_aliases and @ARGV;
 $rc = GetOptions(
                    "sender|from=s" => \$sender,
-                    "in-reply-to=s" => \$initial_reply_to,
+                    "in-reply-to=s" => \$initial_in_reply_to,
+                   "reply-to=s" => \$reply_to,
                    "subject=s" => \$initial_subject,
                    "to=s" => \@initial_to,
                    "to-cmd=s" => \$to_cmd,
@@ -379,6 +383,10 @@ sub signal_handler {
 die __("Cannot run git format-patch from outside a repository\n")
        if $format_patch and not $repo;
 
+die __("`batch-size` and `relogin` must be specified together " .
+       "(via command-line or configuration option)\n")
+       if defined $relogin_delay and not defined $batch_size;
+
 # Now, let's fill any that aren't set in with defaults:
 
 sub read_config {
@@ -677,7 +685,8 @@ sub get_patch_subject {
 
        my $tpl_sender = $sender || $repoauthor || $repocommitter || '';
        my $tpl_subject = $initial_subject || '';
-       my $tpl_reply_to = $initial_reply_to || '';
+       my $tpl_in_reply_to = $initial_in_reply_to || '';
+       my $tpl_reply_to = $reply_to || '';
 
        print $c <<EOT1, Git::prefix_lines("GIT: ", __ <<EOT2), <<EOT3;
 From $tpl_sender # This line is ignored.
@@ -689,8 +698,9 @@ sub get_patch_subject {
 Clear the body content if you don't wish to send a summary.
 EOT2
 From: $tpl_sender
+Reply-To: $tpl_reply_to
 Subject: $tpl_subject
-In-Reply-To: $tpl_reply_to
+In-Reply-To: $tpl_in_reply_to
 
 EOT3
        for my $f (@files) {
@@ -704,57 +714,73 @@ sub get_patch_subject {
                do_edit($compose_filename);
        }
 
-       open my $c2, ">", $compose_filename . ".final"
-               or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!);
-
        open $c, "<", $compose_filename
                or die sprintf(__("Failed to open %s: %s"), $compose_filename, $!);
 
-       my $need_8bit_cte = file_has_nonascii($compose_filename);
-       my $in_body = 0;
-       my $summary_empty = 1;
        if (!defined $compose_encoding) {
                $compose_encoding = "UTF-8";
        }
-       while(<$c>) {
-               next if m/^GIT:/;
-               if ($in_body) {
-                       $summary_empty = 0 unless (/^\n$/);
-               } elsif (/^\n$/) {
-                       $in_body = 1;
-                       if ($need_8bit_cte) {
-                               print $c2 "MIME-Version: 1.0\n",
-                                        "Content-Type: text/plain; ",
-                                          "charset=$compose_encoding\n",
-                                        "Content-Transfer-Encoding: 8bit\n";
-                       }
-               } elsif (/^MIME-Version:/i) {
-                       $need_8bit_cte = 0;
-               } elsif (/^Subject:\s*(.+)\s*$/i) {
-                       $initial_subject = $1;
-                       my $subject = $initial_subject;
-                       $_ = "Subject: " .
-                               quote_subject($subject, $compose_encoding) .
-                               "\n";
-               } elsif (/^In-Reply-To:\s*(.+)\s*$/i) {
-                       $initial_reply_to = $1;
-                       next;
-               } elsif (/^From:\s*(.+)\s*$/i) {
-                       $sender = $1;
-                       next;
-               } elsif (/^(?:To|Cc|Bcc):/i) {
-                       print __("To/Cc/Bcc fields are not interpreted yet, they have been ignored\n");
-                       next;
+
+       my %parsed_email;
+       while (my $line = <$c>) {
+               next if $line =~ m/^GIT:/;
+               parse_header_line($line, \%parsed_email);
+               if ($line =~ /^$/) {
+                       $parsed_email{'body'} = filter_body($c);
                }
-               print $c2 $_;
        }
        close $c;
-       close $c2;
 
-       if ($summary_empty) {
+       open my $c2, ">", $compose_filename . ".final"
+       or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!);
+
+
+       if ($parsed_email{'From'}) {
+               $sender = delete($parsed_email{'From'});
+       }
+       if ($parsed_email{'In-Reply-To'}) {
+               $initial_in_reply_to = delete($parsed_email{'In-Reply-To'});
+       }
+       if ($parsed_email{'Reply-To'}) {
+               $reply_to = delete($parsed_email{'Reply-To'});
+       }
+       if ($parsed_email{'Subject'}) {
+               $initial_subject = delete($parsed_email{'Subject'});
+               print $c2 "Subject: " .
+                       quote_subject($initial_subject, $compose_encoding) .
+                       "\n";
+       }
+
+       if ($parsed_email{'MIME-Version'}) {
+               print $c2 "MIME-Version: $parsed_email{'MIME-Version'}\n",
+                               "Content-Type: $parsed_email{'Content-Type'};\n",
+                               "Content-Transfer-Encoding: $parsed_email{'Content-Transfer-Encoding'}\n";
+               delete($parsed_email{'MIME-Version'});
+               delete($parsed_email{'Content-Type'});
+               delete($parsed_email{'Content-Transfer-Encoding'});
+       } elsif (file_has_nonascii($compose_filename)) {
+               my $content_type = (delete($parsed_email{'Content-Type'}) or
+                       "text/plain; charset=$compose_encoding");
+               print $c2 "MIME-Version: 1.0\n",
+                       "Content-Type: $content_type\n",
+                       "Content-Transfer-Encoding: 8bit\n";
+       }
+       # Preserve unknown headers
+       foreach my $key (keys %parsed_email) {
+               next if $key eq 'body';
+               print $c2 "$key: $parsed_email{$key}";
+       }
+
+       if ($parsed_email{'body'}) {
+               print $c2 "\n$parsed_email{'body'}\n";
+               delete($parsed_email{'body'});
+       } else {
                print __("Summary email is empty, skipping it\n");
                $compose = -1;
        }
+
+       close $c2;
+
 } elsif ($annotate) {
        do_edit(@files);
 }
@@ -793,6 +819,32 @@ sub ask {
        return;
 }
 
+sub parse_header_line {
+       my $lines = shift;
+       my $parsed_line = shift;
+       my $addr_pat = join "|", qw(To Cc Bcc);
+
+       foreach (split(/\n/, $lines)) {
+               if (/^($addr_pat):\s*(.+)$/i) {
+                       $parsed_line->{$1} = [ parse_address_line($2) ];
+               } elsif (/^([^:]*):\s*(.+)\s*$/i) {
+                       $parsed_line->{$1} = $2;
+               }
+       }
+}
+
+sub filter_body {
+       my $c = shift;
+       my $body = "";
+       while (my $body_line = <$c>) {
+               if ($body_line !~ m/^GIT:/) {
+                       $body .= $body_line;
+               }
+       }
+       return $body;
+}
+
+
 my %broken_encoding;
 
 sub file_declares_8bit_cte {
@@ -873,16 +925,22 @@ sub expand_one_alias {
 @initial_cc = process_address_list(@initial_cc);
 @bcclist = process_address_list(@bcclist);
 
-if ($thread && !defined $initial_reply_to && $prompting) {
-       $initial_reply_to = ask(
+if ($thread && !defined $initial_in_reply_to && $prompting) {
+       $initial_in_reply_to = ask(
                __("Message-ID to be used as In-Reply-To for the first email (if any)? "),
                default => "",
                valid_re => qr/\@.*\./, confirm_only => 1);
 }
-if (defined $initial_reply_to) {
-       $initial_reply_to =~ s/^\s*<?//;
-       $initial_reply_to =~ s/>?\s*$//;
-       $initial_reply_to = "<$initial_reply_to>" if $initial_reply_to ne '';
+if (defined $initial_in_reply_to) {
+       $initial_in_reply_to =~ s/^\s*<?//;
+       $initial_in_reply_to =~ s/>?\s*$//;
+       $initial_in_reply_to = "<$initial_in_reply_to>" if $initial_in_reply_to ne '';
+}
+
+if (defined $reply_to) {
+       $reply_to =~ s/^\s+|\s+$//g;
+       ($reply_to) = expand_aliases($reply_to);
+       $reply_to = sanitize_address($reply_to);
 }
 
 if (!defined $smtp_server) {
@@ -902,7 +960,7 @@ sub expand_one_alias {
 }
 
 # Variables we set as part of the loop over files
-our ($message_id, %mail, $subject, $reply_to, $references, $message,
+our ($message_id, %mail, $subject, $in_reply_to, $references, $message,
        $needs_confirm, $message_num, $ask_default);
 
 sub extract_valid_address {
@@ -1143,10 +1201,8 @@ sub valid_fqdn {
 sub maildomain_net {
        my $maildomain;
 
-       if (eval { require Net::Domain; 1 }) {
-               my $domain = Net::Domain::domainname();
-               $maildomain = $domain if valid_fqdn($domain);
-       }
+       my $domain = Net::Domain::domainname();
+       $maildomain = $domain if valid_fqdn($domain);
 
        return $maildomain;
 }
@@ -1154,17 +1210,15 @@ sub maildomain_net {
 sub maildomain_mta {
        my $maildomain;
 
-       if (eval { require Net::SMTP; 1 }) {
-               for my $host (qw(mailhost localhost)) {
-                       my $smtp = Net::SMTP->new($host);
-                       if (defined $smtp) {
-                               my $domain = $smtp->domain;
-                               $smtp->quit;
+       for my $host (qw(mailhost localhost)) {
+               my $smtp = Net::SMTP->new($host);
+               if (defined $smtp) {
+                       my $domain = $smtp->domain;
+                       $smtp->quit;
 
-                               $maildomain = $domain if valid_fqdn($domain);
+                       $maildomain = $domain if valid_fqdn($domain);
 
-                               last if $maildomain;
-                       }
+                       last if $maildomain;
                }
        }
 
@@ -1311,11 +1365,14 @@ sub send_message {
        if ($use_xmailer) {
                $header .= "X-Mailer: git-send-email $gitversion\n";
        }
-       if ($reply_to) {
+       if ($in_reply_to) {
 
-               $header .= "In-Reply-To: $reply_to\n";
+               $header .= "In-Reply-To: $in_reply_to\n";
                $header .= "References: $references\n";
        }
+       if ($reply_to) {
+               $header .= "Reply-To: $reply_to\n";
+       }
        if (@xh) {
                $header .= join("\n", @xh) . "\n";
        }
@@ -1490,8 +1547,8 @@ sub send_message {
        return 1;
 }
 
-$reply_to = $initial_reply_to;
-$references = $initial_reply_to || '';
+$in_reply_to = $initial_in_reply_to;
+$references = $initial_in_reply_to || '';
 $subject = $initial_subject;
 $message_num = 0;
 
@@ -1701,9 +1758,9 @@ sub send_message {
 
        # set up for the next message
        if ($thread && $message_was_sent &&
-               ($chain_reply_to || !defined $reply_to || length($reply_to) == 0 ||
+               ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 ||
                $message_num == 1)) {
-               $reply_to = $message_id;
+               $in_reply_to = $message_id;
                if (length $references > 0) {
                        $references .= "\n $message_id";
                } else {
diff --git a/git-sh-i18n.sh b/git-sh-i18n.sh
index 1ef1889dbd806494b8659023a091920b07692035..9d065fb4bf9a308e64fa2d4aa5dff294b116f1a9 100644 (file)
@@ -17,15 +17,15 @@ export TEXTDOMAINDIR
 
 # First decide what scheme to use...
 GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough
-if test -n "@@USE_GETTEXT_SCHEME@@"
+if test -n "$GIT_GETTEXT_POISON"
+then
+       GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
+elif test -n "@@USE_GETTEXT_SCHEME@@"
 then
        GIT_INTERNAL_GETTEXT_SH_SCHEME="@@USE_GETTEXT_SCHEME@@"
 elif test -n "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS"
 then
        : no probing necessary
-elif test -n "$GIT_GETTEXT_POISON"
-then
-       GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
 elif type gettext.sh >/dev/null 2>&1
 then
        # GNU libintl's gettext.sh
diff --git a/git-stash.sh b/git-stash.sh
index fc8f8ae6401dddcceaa82f9e9c748f5c185536d6..94793c1a913abf569ff9101d935c355b9eb27648 100755 (executable)
@@ -39,7 +39,7 @@ fi
 no_changes () {
        git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
        git diff-files --quiet --ignore-submodules -- "$@" &&
-       (test -z "$untracked" || test -z "$(untracked_files)")
+       (test -z "$untracked" || test -z "$(untracked_files "$@")")
 }
 
 untracked_files () {
@@ -315,16 +315,18 @@ push_stash () {
        if test -z "$patch_mode"
        then
                test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
-               if test -n "$untracked"
+               if test -n "$untracked" && test $# = 0
                then
-                       git clean --force --quiet -d $CLEAN_X_OPTION -- "$@"
+                       git clean --force --quiet -d $CLEAN_X_OPTION
                fi
 
                if test $# != 0
                then
-                       git add -u -- "$@" |
-                       git checkout-index -z --force --stdin
-                       git diff-index -p --cached --binary HEAD -- "$@" | git apply --index -R
+                       test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
+                       test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
+                       git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
+                       git diff-index -p --cached --binary HEAD -- "$@" |
+                       git apply --index -R
                else
                        git reset --hard -q
                fi
diff --git a/git.c b/git.c
index c870b9719c21b2db23ea7ab5d56fdeda483cb02e..b73a550edd87cc205d09c60b258c62191de3648f 100644 (file)
--- a/git.c
+++ b/git.c
@@ -4,12 +4,30 @@
 #include "help.h"
 #include "run-command.h"
 
+#define RUN_SETUP              (1<<0)
+#define RUN_SETUP_GENTLY       (1<<1)
+#define USE_PAGER              (1<<2)
+/*
+ * require working tree to be present -- anything using this needs
+ * RUN_SETUP for reading from the configuration file.
+ */
+#define NEED_WORK_TREE         (1<<3)
+#define SUPPORT_SUPER_PREFIX   (1<<4)
+#define DELAY_PAGER_CONFIG     (1<<5)
+#define NO_PARSEOPT            (1<<6) /* parse-options is not used */
+
+struct cmd_struct {
+       const char *cmd;
+       int (*fn)(int, const char **, const char *);
+       unsigned int option;
+};
+
 const char git_usage_string[] =
-       "git [--version] [--help] [-C <path>] [-c name=value]\n"
-       "           [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
-       "           [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
-       "           [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
-       "           <command> [<args>]";
+       N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n"
+          "           [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
+          "           [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
+          "           [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
+          "           <command> [<args>]");
 
 const char git_more_info_string[] =
        N_("'git help -a' and 'git help -g' list available subcommands and some\n"
@@ -18,7 +36,7 @@ const char git_more_info_string[] =
 
 static int use_pager = -1;
 
-static void list_builtins(void);
+static void list_builtins(unsigned int exclude_option, char sep);
 
 static void commit_pager_choice(void) {
        switch (use_pager) {
@@ -92,7 +110,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--git-dir")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No directory given for --git-dir.\n" );
+                               fprintf(stderr, _("no directory given for --git-dir\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_DIR_ENVIRONMENT, (*argv)[1], 1);
@@ -106,7 +124,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--namespace")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No namespace given for --namespace.\n" );
+                               fprintf(stderr, _("no namespace given for --namespace\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_NAMESPACE_ENVIRONMENT, (*argv)[1], 1);
@@ -120,7 +138,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--work-tree")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No directory given for --work-tree.\n" );
+                               fprintf(stderr, _("no directory given for --work-tree\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
@@ -134,7 +152,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--super-prefix")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No prefix given for --super-prefix.\n" );
+                               fprintf(stderr, _("no prefix given for --super-prefix\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_SUPER_PREFIX_ENVIRONMENT, (*argv)[1], 1);
@@ -156,7 +174,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "-c")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "-c expects a configuration string\n" );
+                               fprintf(stderr, _("-c expects a configuration string\n" ));
                                usage(git_usage_string);
                        }
                        git_config_push_parameter((*argv)[1]);
@@ -194,22 +212,25 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "-C")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No directory given for -C.\n" );
+                               fprintf(stderr, _("no directory given for -C\n" ));
                                usage(git_usage_string);
                        }
                        if ((*argv)[1][0]) {
                                if (chdir((*argv)[1]))
-                                       die_errno("Cannot change to '%s'", (*argv)[1]);
+                                       die_errno("cannot change to '%s'", (*argv)[1]);
                                if (envchanged)
                                        *envchanged = 1;
                        }
                        (*argv)++;
                        (*argc)--;
                } else if (!strcmp(cmd, "--list-builtins")) {
-                       list_builtins();
+                       list_builtins(0, '\n');
+                       exit(0);
+               } else if (!strcmp(cmd, "--list-parseopt-builtins")) {
+                       list_builtins(NO_PARSEOPT, ' ');
                        exit(0);
                } else {
-                       fprintf(stderr, "Unknown option: %s\n", cmd);
+                       fprintf(stderr, _("unknown option: %s\n"), cmd);
                        usage(git_usage_string);
                }
 
@@ -247,7 +268,7 @@ static int handle_alias(int *argcp, const char ***argv)
                        if (ret >= 0)   /* normal exit */
                                exit(ret);
 
-                       die_errno("While expanding alias '%s': '%s'",
+                       die_errno("while expanding alias '%s': '%s'",
                            alias_command, alias_string + 1);
                }
                count = split_cmdline(alias_string, &new_argv);
@@ -256,8 +277,8 @@ static int handle_alias(int *argcp, const char ***argv)
                            split_cmdline_strerror(count));
                option_count = handle_options(&new_argv, &count, &envchanged);
                if (envchanged)
-                       die("alias '%s' changes environment variables\n"
-                                "You can use '!git' in the alias to do this.",
+                       die("alias '%s' changes environment variables.\n"
+                                "You can use '!git' in the alias to do this",
                                 alias_command);
                memmove(new_argv - option_count, new_argv,
                                count * sizeof(char *));
@@ -288,23 +309,6 @@ static int handle_alias(int *argcp, const char ***argv)
        return ret;
 }
 
-#define RUN_SETUP              (1<<0)
-#define RUN_SETUP_GENTLY       (1<<1)
-#define USE_PAGER              (1<<2)
-/*
- * require working tree to be present -- anything uses this needs
- * RUN_SETUP for reading from the configuration file.
- */
-#define NEED_WORK_TREE         (1<<3)
-#define SUPPORT_SUPER_PREFIX   (1<<4)
-#define DELAY_PAGER_CONFIG     (1<<5)
-
-struct cmd_struct {
-       const char *cmd;
-       int (*fn)(int, const char **, const char *);
-       int option;
-};
-
 static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 {
        int status, help;
@@ -367,18 +371,18 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 static struct cmd_struct commands[] = {
        { "add", cmd_add, RUN_SETUP | NEED_WORK_TREE },
        { "am", cmd_am, RUN_SETUP | NEED_WORK_TREE },
-       { "annotate", cmd_annotate, RUN_SETUP },
+       { "annotate", cmd_annotate, RUN_SETUP | NO_PARSEOPT },
        { "apply", cmd_apply, RUN_SETUP_GENTLY },
        { "archive", cmd_archive, RUN_SETUP_GENTLY },
        { "bisect--helper", cmd_bisect__helper, RUN_SETUP },
        { "blame", cmd_blame, RUN_SETUP },
        { "branch", cmd_branch, RUN_SETUP | DELAY_PAGER_CONFIG },
-       { "bundle", cmd_bundle, RUN_SETUP_GENTLY },
+       { "bundle", cmd_bundle, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "cat-file", cmd_cat_file, RUN_SETUP },
        { "check-attr", cmd_check_attr, RUN_SETUP },
        { "check-ignore", cmd_check_ignore, RUN_SETUP | NEED_WORK_TREE },
        { "check-mailmap", cmd_check_mailmap, RUN_SETUP },
-       { "check-ref-format", cmd_check_ref_format },
+       { "check-ref-format", cmd_check_ref_format, NO_PARSEOPT  },
        { "checkout", cmd_checkout, RUN_SETUP | NEED_WORK_TREE },
        { "checkout-index", cmd_checkout_index,
                RUN_SETUP | NEED_WORK_TREE},
@@ -388,30 +392,30 @@ static struct cmd_struct commands[] = {
        { "clone", cmd_clone },
        { "column", cmd_column, RUN_SETUP_GENTLY },
        { "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
-       { "commit-tree", cmd_commit_tree, RUN_SETUP },
-       { "config", cmd_config, RUN_SETUP_GENTLY },
+       { "commit-tree", cmd_commit_tree, RUN_SETUP | NO_PARSEOPT },
+       { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG },
        { "count-objects", cmd_count_objects, RUN_SETUP },
-       { "credential", cmd_credential, RUN_SETUP_GENTLY },
+       { "credential", cmd_credential, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "describe", cmd_describe, RUN_SETUP },
-       { "diff", cmd_diff },
-       { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE },
-       { "diff-index", cmd_diff_index, RUN_SETUP },
-       { "diff-tree", cmd_diff_tree, RUN_SETUP },
+       { "diff", cmd_diff, NO_PARSEOPT },
+       { "diff-files", cmd_diff_files, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "diff-index", cmd_diff_index, RUN_SETUP | NO_PARSEOPT },
+       { "diff-tree", cmd_diff_tree, RUN_SETUP | NO_PARSEOPT },
        { "difftool", cmd_difftool, RUN_SETUP | NEED_WORK_TREE },
        { "fast-export", cmd_fast_export, RUN_SETUP },
        { "fetch", cmd_fetch, RUN_SETUP },
-       { "fetch-pack", cmd_fetch_pack, RUN_SETUP },
+       { "fetch-pack", cmd_fetch_pack, RUN_SETUP | NO_PARSEOPT },
        { "fmt-merge-msg", cmd_fmt_merge_msg, RUN_SETUP },
        { "for-each-ref", cmd_for_each_ref, RUN_SETUP },
        { "format-patch", cmd_format_patch, RUN_SETUP },
        { "fsck", cmd_fsck, RUN_SETUP },
        { "fsck-objects", cmd_fsck, RUN_SETUP },
        { "gc", cmd_gc, RUN_SETUP },
-       { "get-tar-commit-id", cmd_get_tar_commit_id },
+       { "get-tar-commit-id", cmd_get_tar_commit_id, NO_PARSEOPT },
        { "grep", cmd_grep, RUN_SETUP_GENTLY },
        { "hash-object", cmd_hash_object },
        { "help", cmd_help },
-       { "index-pack", cmd_index_pack, RUN_SETUP_GENTLY },
+       { "index-pack", cmd_index_pack, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "init", cmd_init_db },
        { "init-db", cmd_init_db },
        { "interpret-trailers", cmd_interpret_trailers, RUN_SETUP_GENTLY },
@@ -419,27 +423,27 @@ static struct cmd_struct commands[] = {
        { "ls-files", cmd_ls_files, RUN_SETUP },
        { "ls-remote", cmd_ls_remote, RUN_SETUP_GENTLY },
        { "ls-tree", cmd_ls_tree, RUN_SETUP },
-       { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY },
-       { "mailsplit", cmd_mailsplit },
+       { "mailinfo", cmd_mailinfo, RUN_SETUP_GENTLY | NO_PARSEOPT },
+       { "mailsplit", cmd_mailsplit, NO_PARSEOPT },
        { "merge", cmd_merge, RUN_SETUP | NEED_WORK_TREE },
        { "merge-base", cmd_merge_base, RUN_SETUP },
        { "merge-file", cmd_merge_file, RUN_SETUP_GENTLY },
-       { "merge-index", cmd_merge_index, RUN_SETUP },
-       { "merge-ours", cmd_merge_ours, RUN_SETUP },
-       { "merge-recursive", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-recursive-ours", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE },
-       { "merge-tree", cmd_merge_tree, RUN_SETUP },
-       { "mktag", cmd_mktag, RUN_SETUP },
+       { "merge-index", cmd_merge_index, RUN_SETUP | NO_PARSEOPT },
+       { "merge-ours", cmd_merge_ours, RUN_SETUP | NO_PARSEOPT },
+       { "merge-recursive", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-recursive-ours", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-recursive-theirs", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-subtree", cmd_merge_recursive, RUN_SETUP | NEED_WORK_TREE | NO_PARSEOPT },
+       { "merge-tree", cmd_merge_tree, RUN_SETUP | NO_PARSEOPT },
+       { "mktag", cmd_mktag, RUN_SETUP | NO_PARSEOPT },
        { "mktree", cmd_mktree, RUN_SETUP },
        { "mv", cmd_mv, RUN_SETUP | NEED_WORK_TREE },
        { "name-rev", cmd_name_rev, RUN_SETUP },
        { "notes", cmd_notes, RUN_SETUP },
        { "pack-objects", cmd_pack_objects, RUN_SETUP },
-       { "pack-redundant", cmd_pack_redundant, RUN_SETUP },
+       { "pack-redundant", cmd_pack_redundant, RUN_SETUP | NO_PARSEOPT },
        { "pack-refs", cmd_pack_refs, RUN_SETUP },
-       { "patch-id", cmd_patch_id, RUN_SETUP_GENTLY },
+       { "patch-id", cmd_patch_id, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "pickaxe", cmd_blame, RUN_SETUP },
        { "prune", cmd_prune, RUN_SETUP },
        { "prune-packed", cmd_prune_packed, RUN_SETUP },
@@ -450,17 +454,18 @@ static struct cmd_struct commands[] = {
        { "receive-pack", cmd_receive_pack },
        { "reflog", cmd_reflog, RUN_SETUP },
        { "remote", cmd_remote, RUN_SETUP },
-       { "remote-ext", cmd_remote_ext },
-       { "remote-fd", cmd_remote_fd },
+       { "remote-ext", cmd_remote_ext, NO_PARSEOPT },
+       { "remote-fd", cmd_remote_fd, NO_PARSEOPT },
        { "repack", cmd_repack, RUN_SETUP },
        { "replace", cmd_replace, RUN_SETUP },
        { "rerere", cmd_rerere, RUN_SETUP },
        { "reset", cmd_reset, RUN_SETUP },
-       { "rev-list", cmd_rev_list, RUN_SETUP },
-       { "rev-parse", cmd_rev_parse },
+       { "rev-list", cmd_rev_list, RUN_SETUP | NO_PARSEOPT },
+       { "rev-parse", cmd_rev_parse, NO_PARSEOPT },
        { "revert", cmd_revert, RUN_SETUP | NEED_WORK_TREE },
        { "rm", cmd_rm, RUN_SETUP },
        { "send-pack", cmd_send_pack, RUN_SETUP },
+       { "serve", cmd_serve, RUN_SETUP },
        { "shortlog", cmd_shortlog, RUN_SETUP_GENTLY | USE_PAGER },
        { "show", cmd_show, RUN_SETUP },
        { "show-branch", cmd_show_branch, RUN_SETUP },
@@ -468,23 +473,24 @@ static struct cmd_struct commands[] = {
        { "stage", cmd_add, RUN_SETUP | NEED_WORK_TREE },
        { "status", cmd_status, RUN_SETUP | NEED_WORK_TREE },
        { "stripspace", cmd_stripspace },
-       { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX},
+       { "submodule--helper", cmd_submodule__helper, RUN_SETUP | SUPPORT_SUPER_PREFIX | NO_PARSEOPT },
        { "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
        { "tag", cmd_tag, RUN_SETUP | DELAY_PAGER_CONFIG },
-       { "unpack-file", cmd_unpack_file, RUN_SETUP },
-       { "unpack-objects", cmd_unpack_objects, RUN_SETUP },
+       { "unpack-file", cmd_unpack_file, RUN_SETUP | NO_PARSEOPT },
+       { "unpack-objects", cmd_unpack_objects, RUN_SETUP | NO_PARSEOPT },
        { "update-index", cmd_update_index, RUN_SETUP },
        { "update-ref", cmd_update_ref, RUN_SETUP },
        { "update-server-info", cmd_update_server_info, RUN_SETUP },
-       { "upload-archive", cmd_upload_archive },
-       { "upload-archive--writer", cmd_upload_archive_writer },
-       { "var", cmd_var, RUN_SETUP_GENTLY },
+       { "upload-archive", cmd_upload_archive, NO_PARSEOPT },
+       { "upload-archive--writer", cmd_upload_archive_writer, NO_PARSEOPT },
+       { "upload-pack", cmd_upload_pack },
+       { "var", cmd_var, RUN_SETUP_GENTLY | NO_PARSEOPT },
        { "verify-commit", cmd_verify_commit, RUN_SETUP },
        { "verify-pack", cmd_verify_pack },
        { "verify-tag", cmd_verify_tag, RUN_SETUP },
        { "version", cmd_version },
        { "whatchanged", cmd_whatchanged, RUN_SETUP },
-       { "worktree", cmd_worktree, RUN_SETUP },
+       { "worktree", cmd_worktree, RUN_SETUP | NO_PARSEOPT },
        { "write-tree", cmd_write_tree, RUN_SETUP },
 };
 
@@ -504,11 +510,15 @@ int is_builtin(const char *s)
        return !!get_builtin(s);
 }
 
-static void list_builtins(void)
+static void list_builtins(unsigned int exclude_option, char sep)
 {
        int i;
-       for (i = 0; i < ARRAY_SIZE(commands); i++)
-               printf("%s\n", commands[i].cmd);
+       for (i = 0; i < ARRAY_SIZE(commands); i++) {
+               if (exclude_option &&
+                   (commands[i].option & exclude_option))
+                       continue;
+               printf("%s%c", commands[i].cmd, sep);
+       }
 }
 
 #ifdef STRIP_EXTENSION
@@ -684,8 +694,8 @@ int cmd_main(int argc, const char **argv)
                if (errno != ENOENT)
                        break;
                if (was_alias) {
-                       fprintf(stderr, "Expansion of alias '%s' failed; "
-                               "'%s' is not a git command\n",
+                       fprintf(stderr, _("expansion of alias '%s' failed; "
+                                         "'%s' is not a git command\n"),
                                cmd, argv[0]);
                        exit(1);
                }
@@ -696,7 +706,7 @@ int cmd_main(int argc, const char **argv)
                        break;
        }
 
-       fprintf(stderr, "Failed to run command '%s': %s\n",
+       fprintf(stderr, _("failed to run command '%s': %s\n"),
                cmd, strerror(errno));
 
        return 1;
diff --git a/gitweb/INSTALL b/gitweb/INSTALL
index 408f2859d3c42becc333b8c0e4ee6366de7fdb14..a58e6b3c44b0ef6175df1417a3ed6f7775bd9953 100644 (file)
@@ -29,12 +29,11 @@ Requirements
 ------------
 
  - Core git tools
- - Perl
+ - Perl 5.8
  - Perl modules: CGI, Encode, Fcntl, File::Find, File::Basename.
  - web server
 
 The following optional Perl modules are required for extra features
- - Digest::MD5 - for gravatar support
  - CGI::Fast and FCGI - for running gitweb as FastCGI script
  - HTML::TagCloud - for fancy tag cloud in project list view
  - HTTP::Date or Time::ParseDate - to support If-Modified-Since for feeds
diff --git a/gitweb/gitweb.perl b/gitweb/gitweb.perl
index 2417057f2bc61a98e68dc6c817e456a21bf6044e..2594a4badb3d7b942b28b57ca036650328a1b050 100755 (executable)
@@ -20,6 +20,8 @@
 use File::Find qw();
 use File::Basename qw(basename);
 use Time::HiRes qw(gettimeofday tv_interval);
+use Digest::MD5 qw(md5_hex);
+
 binmode STDOUT, ':utf8';
 
 if (!defined($CGI::VERSION) || $CGI::VERSION < 4.08) {
@@ -490,7 +492,6 @@ sub evaluate_uri {
        # Currently available providers are gravatar and picon.
        # If an unknown provider is specified, the feature is disabled.
 
-       # Gravatar depends on Digest::MD5.
        # Picon currently relies on the indiana.edu database.
 
        # To enable system wide have in $GITWEB_CONFIG
@@ -1166,18 +1167,8 @@ sub configure_gitweb_features {
        our @snapshot_fmts = gitweb_get_feature('snapshot');
        @snapshot_fmts = filter_snapshot_fmts(@snapshot_fmts);
 
-       # check that the avatar feature is set to a known provider name,
-       # and for each provider check if the dependencies are satisfied.
-       # if the provider name is invalid or the dependencies are not met,
-       # reset $git_avatar to the empty string.
        our ($git_avatar) = gitweb_get_feature('avatar');
-       if ($git_avatar eq 'gravatar') {
-               $git_avatar = '' unless (eval { require Digest::MD5; 1; });
-       } elsif ($git_avatar eq 'picon') {
-               # no dependencies
-       } else {
-               $git_avatar = '';
-       }
+       $git_avatar = '' unless $git_avatar =~ /^(?:gravatar|picon)$/s;
 
        our @extra_branch_refs = gitweb_get_feature('extra-branch-refs');
        @extra_branch_refs = filter_and_validate_refs (@extra_branch_refs);
@@ -2167,7 +2158,7 @@ sub gravatar_url {
        my $size = shift;
        $avatar_cache{$email} ||=
                "//www.gravatar.com/avatar/" .
-                       Digest::MD5::md5_hex($email) . "?s=";
+                       md5_hex($email) . "?s=";
        return $avatar_cache{$email} . $size;
 }
 
diff --git a/grep.c b/grep.c
index 3d7cd0e96f1ee160a66dd500c58d4026bf24e34c..65b90c10a38c136d2ce9a42fee5c543cd7d4717d 100644 (file)
--- a/grep.c
+++ b/grep.c
@@ -18,6 +18,11 @@ static void std_output(struct grep_opt *opt, const void *buf, size_t size)
        fwrite(buf, size, 1, stdout);
 }
 
+static void color_set(char *dst, const char *color_bytes)
+{
+       xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
+}
+
 /*
  * Initialize the grep_defaults template with hardcoded defaults.
  * We could let the compiler do this, but without C99 initializers
@@ -2010,7 +2015,7 @@ static int grep_source_load_oid(struct grep_source *gs)
        enum object_type type;
 
        grep_read_lock();
-       gs->buf = read_sha1_file(gs->identifier, &type, &gs->size);
+       gs->buf = read_object_file(gs->identifier, &type, &gs->size);
        grep_read_unlock();
 
        if (!gs->buf)
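
The new color_set() helper in grep.c copies color escape sequences into fixed-size buffers through xsnprintf(), which dies on truncation rather than overflowing. A standalone sketch of the same bounded-copy idea using plain snprintf() (the buffer size and the error-return convention below are illustrative, not git's):

    #include <stdio.h>

    #define COLOR_MAXLEN 75   /* illustrative; git defines its own limit */

    /* Copy color_bytes into dst without overflowing the fixed buffer. */
    static int set_color(char dst[COLOR_MAXLEN], const char *color_bytes)
    {
            int n = snprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
            return (n >= 0 && n < COLOR_MAXLEN) ? 0 : -1;  /* -1 on truncation */
    }

    int main(void)
    {
            char buf[COLOR_MAXLEN];
            if (set_color(buf, "\033[1;31m") == 0)
                    printf("%sred%s\n", buf, "\033[m");
            return 0;
    }
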
diff --git a/hash.h b/hash.h
index 7d7a864f5dd43bb53ee6b31d391df3e15cbdbb08..7c8238bc2ebfded778351b58c16a3854617082c6 100644 (file)
--- a/hash.h
+++ b/hash.h
 #include "block-sha1/sha1.h"
 #endif
 
+#ifndef platform_SHA_CTX
+/*
+ * platform's underlying implementation of SHA-1; could be OpenSSL,
+ * blk_SHA, Apple CommonCrypto, etc...  Note that the relevant
+ * SHA-1 header may have already defined platform_SHA_CTX for our
+ * own implementations like block-sha1 and ppc-sha1, so we list
+ * the default for OpenSSL compatible SHA-1 implementations here.
+ */
+#define platform_SHA_CTX       SHA_CTX
+#define platform_SHA1_Init     SHA1_Init
+#define platform_SHA1_Update   SHA1_Update
+#define platform_SHA1_Final            SHA1_Final
+#endif
+
+#define git_SHA_CTX            platform_SHA_CTX
+#define git_SHA1_Init          platform_SHA1_Init
+#define git_SHA1_Update                platform_SHA1_Update
+#define git_SHA1_Final         platform_SHA1_Final
+
+#ifdef SHA1_MAX_BLOCK_SIZE
+#include "compat/sha1-chunked.h"
+#undef git_SHA1_Update
+#define git_SHA1_Update                git_SHA1_Update_Chunked
+#endif
+
 /*
  * Note that these constants are suitable for indexing the hash_algos array and
  * comparing against each other, but are otherwise arbitrary, so they should not
 /* Number of algorithms supported (including unknown). */
 #define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1)
 
-typedef void (*git_hash_init_fn)(void *ctx);
-typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len);
-typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx);
+/* A suitably aligned type for stack allocations of hash contexts. */
+union git_hash_ctx {
+       git_SHA_CTX sha1;
+};
+typedef union git_hash_ctx git_hash_ctx;
+
+typedef void (*git_hash_init_fn)(git_hash_ctx *ctx);
+typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len);
+typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx);
 
 struct git_hash_algo {
        /*
@@ -44,9 +75,6 @@ struct git_hash_algo {
        /* A four-byte version identifier, used in pack indices. */
        uint32_t format_id;
 
-       /* The size of a hash context (e.g. git_SHA_CTX). */
-       size_t ctxsz;
-
        /* The length of the hash in binary. */
        size_t rawsz;
 
diff --git a/http-backend.c b/http-backend.c
index 88d2a9bc40b4b02e96a6679e856e3a9d1a4a0359..bf9b9199e1056b042c3f7afaa308272bd7c647b6 100644 (file)
@@ -12,6 +12,7 @@
 #include "argv-array.h"
 #include "packfile.h"
 #include "object-store.h"
+#include "protocol.h"
 
 static const char content_type[] = "Content-Type";
 static const char content_length[] = "Content-Length";
@@ -468,8 +469,11 @@ static void get_info_refs(struct strbuf *hdr, char *arg)
                hdr_str(hdr, content_type, buf.buf);
                end_headers(hdr);
 
-               packet_write_fmt(1, "# service=git-%s\n", svc->name);
-               packet_flush(1);
+
+               if (determine_protocol_version_server() != protocol_v2) {
+                       packet_write_fmt(1, "# service=git-%s\n", svc->name);
+                       packet_flush(1);
+               }
 
                argv[0] = svc->name;
                run_service(argv, 0);
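
A protocol v2 client expects the capability advertisement itself rather than the "# service=..." banner, so get_info_refs() now emits the banner only for the older protocols. The banner is pkt-line framed: four hex digits giving the total length (including the four digits themselves), then the payload, then a flush-pkt. A standalone sketch of that framing, which packet_write_fmt() and packet_flush() handle in git proper:

    #include <stdio.h>
    #include <string.h>

    /* Frame a payload as a pkt-line: 4 hex digits of total length, then payload. */
    static void write_pkt_line(FILE *out, const char *payload)
    {
            size_t len = strlen(payload) + 4;
            fprintf(out, "%04zx%s", len, payload);
    }

    int main(void)
    {
            write_pkt_line(stdout, "# service=git-upload-pack\n");
            fputs("0000", stdout);   /* flush-pkt, as packet_flush() sends */
            return 0;
    }
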
diff --git a/http-push.c b/http-push.c
index 97fe22a7050474d2bc48ca0d483c10c6de92988d..c0998fd763b6178aa7c0d35603c9b56183d5157f 100644 (file)
@@ -362,8 +362,8 @@ static void start_put(struct transfer_request *request)
        ssize_t size;
        git_zstream stream;
 
-       unpacked = read_sha1_file(request->obj->oid.hash, &type, &len);
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
+       unpacked = read_object_file(&request->obj->oid, &type, &len);
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
 
        /* Set it up */
        git_deflate_init(&stream, zlib_compression_level);
diff --git a/http-walker.c b/http-walker.c
index 75d55d42a98503c8e74387b99476d31feeeff806..7cdfb2f24c76d2f09b39ae2fa97685ecec2d1630 100644 (file)
@@ -24,7 +24,7 @@ enum object_request_state {
 
 struct object_request {
        struct walker *walker;
-       unsigned char sha1[20];
+       struct object_id oid;
        struct alt_base *repo;
        enum object_request_state state;
        struct http_object_request *req;
@@ -58,7 +58,7 @@ static void start_object_request(struct walker *walker,
        struct active_request_slot *slot;
        struct http_object_request *req;
 
-       req = new_http_object_request(obj_req->repo->base, obj_req->sha1);
+       req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash);
        if (req == NULL) {
                obj_req->state = ABORTED;
                return;
@@ -84,7 +84,7 @@ static void finish_object_request(struct object_request *obj_req)
                return;
 
        if (obj_req->req->rename == 0)
-               walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1));
+               walker_say(obj_req->walker, "got %s\n", oid_to_hex(&obj_req->oid));
 }
 
 static void process_object_response(void *callback_data)
@@ -131,7 +131,7 @@ static int fill_active_slot(struct walker *walker)
        list_for_each_safe(pos, tmp, head) {
                obj_req = list_entry(pos, struct object_request, node);
                if (obj_req->state == WAITING) {
-                       if (has_sha1_file(obj_req->sha1))
+                       if (has_sha1_file(obj_req->oid.hash))
                                obj_req->state = COMPLETE;
                        else {
                                start_object_request(walker, obj_req);
@@ -150,7 +150,7 @@ static void prefetch(struct walker *walker, unsigned char *sha1)
 
        newreq = xmalloc(sizeof(*newreq));
        newreq->walker = walker;
-       hashcpy(newreq->sha1, sha1);
+       hashcpy(newreq->oid.hash, sha1);
        newreq->repo = data->alt;
        newreq->state = WAITING;
        newreq->req = NULL;
@@ -483,13 +483,13 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
 
        list_for_each(pos, head) {
                obj_req = list_entry(pos, struct object_request, node);
-               if (!hashcmp(obj_req->sha1, sha1))
+               if (!hashcmp(obj_req->oid.hash, sha1))
                        break;
        }
        if (obj_req == NULL)
                return error("Couldn't find request for %s in the queue", hex);
 
-       if (has_sha1_file(obj_req->sha1)) {
+       if (has_sha1_file(obj_req->oid.hash)) {
                if (obj_req->req != NULL)
                        abort_http_object_request(obj_req->req);
                abort_object_request(obj_req);
@@ -543,7 +543,7 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
        } else if (req->zret != Z_STREAM_END) {
                walker->corrupt_object_found++;
                ret = error("File %s (%s) corrupt", hex, req->url);
-       } else if (hashcmp(obj_req->sha1, req->real_sha1)) {
+       } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) {
                ret = error("File %s has bad hash", hex);
        } else if (req->rename < 0) {
                struct strbuf buf = STRBUF_INIT;
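
The http-walker.c hunks embed a struct object_id in each object_request instead of a bare 20-byte array, copying raw hashes in with hashcpy() and printing them with oid_to_hex(). A standalone sketch of the same shape (the struct and helper below are stand-ins for git's object_id API):

    #include <stdio.h>
    #include <string.h>

    #define RAWSZ 20

    struct object_id { unsigned char hash[RAWSZ]; };

    struct object_request {
            struct object_id oid;   /* was: unsigned char sha1[20] */
            int state;
    };

    /* Stand-in for git's oid_to_hex(): write 40 hex digits plus NUL. */
    static void oid_to_hex_buf(const struct object_id *oid, char *out)
    {
            int i;
            for (i = 0; i < RAWSZ; i++)
                    sprintf(out + 2 * i, "%02x", oid->hash[i]);
    }

    int main(void)
    {
            unsigned char raw[RAWSZ] = { 0xde, 0xad, 0xbe, 0xef };  /* rest zero */
            struct object_request req;
            char hex[2 * RAWSZ + 1];

            memset(&req, 0, sizeof(req));
            memcpy(req.oid.hash, raw, RAWSZ);   /* analogue of hashcpy() */
            oid_to_hex_buf(&req.oid, hex);
            printf("queued object %s\n", hex);
            return 0;
    }
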
diff --git a/http.c b/http.c
index 111e3c12c8e5edecf09289b6dc79fda232b55938..fed13b2169a49aa602fc2ff21d45e368e1734d79 100644 (file)
--- a/http.c
+++ b/http.c
@@ -63,6 +63,9 @@ static struct {
        { "tlsv1.1", CURL_SSLVERSION_TLSv1_1 },
        { "tlsv1.2", CURL_SSLVERSION_TLSv1_2 },
 #endif
+#if LIBCURL_VERSION_NUM >= 0x073400
+       { "tlsv1.3", CURL_SSLVERSION_TLSv1_3 },
+#endif
 };
 #if LIBCURL_VERSION_NUM >= 0x070903
 static const char *ssl_key;
@@ -70,6 +73,9 @@ static const char *ssl_key;
 #if LIBCURL_VERSION_NUM >= 0x070908
 static const char *ssl_capath;
 #endif
+#if LIBCURL_VERSION_NUM >= 0x071304
+static const char *curl_no_proxy;
+#endif
 #if LIBCURL_VERSION_NUM >= 0x072c00
 static const char *ssl_pinnedkey;
 #endif
@@ -78,7 +84,6 @@ static long curl_low_speed_limit = -1;
 static long curl_low_speed_time = -1;
 static int curl_ftp_no_epsv;
 static const char *curl_http_proxy;
-static const char *curl_no_proxy;
 static const char *http_proxy_authmethod;
 static struct {
        const char *name;
@@ -971,21 +976,6 @@ static void set_from_env(const char **var, const char *envname)
                *var = val;
 }
 
-static void protocol_http_header(void)
-{
-       if (get_protocol_version_config() > 0) {
-               struct strbuf protocol_header = STRBUF_INIT;
-
-               strbuf_addf(&protocol_header, GIT_PROTOCOL_HEADER ": version=%d",
-                           get_protocol_version_config());
-
-
-               extra_http_headers = curl_slist_append(extra_http_headers,
-                                                      protocol_header.buf);
-               strbuf_release(&protocol_header);
-       }
-}
-
 void http_init(struct remote *remote, const char *url, int proactive_auth)
 {
        char *low_speed_limit;
@@ -1016,8 +1006,6 @@ void http_init(struct remote *remote, const char *url, int proactive_auth)
        if (remote)
                var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
 
-       protocol_http_header();
-
        pragma_header = curl_slist_append(http_copy_default_headers(),
                "Pragma: no-cache");
        no_pragma_header = curl_slist_append(http_copy_default_headers(),
@@ -1261,14 +1249,14 @@ static struct fill_chain *fill_cfg;
 
 void add_fill_function(void *data, int (*fill)(void *))
 {
-       struct fill_chain *new = xmalloc(sizeof(*new));
+       struct fill_chain *new_fill = xmalloc(sizeof(*new_fill));
        struct fill_chain **linkp = &fill_cfg;
-       new->data = data;
-       new->fill = fill;
-       new->next = NULL;
+       new_fill->data = data;
+       new_fill->fill = fill;
+       new_fill->next = NULL;
        while (*linkp)
                linkp = &(*linkp)->next;
-       *linkp = new;
+       *linkp = new_fill;
 }
 
 void fill_active_slots(void)
@@ -1790,6 +1778,14 @@ static int http_request(const char *url,
 
        headers = curl_slist_append(headers, buf.buf);
 
+       /* Add additional headers here */
+       if (options && options->extra_headers) {
+               const struct string_list_item *item;
+               for_each_string_list_item(item, options->extra_headers) {
+                       headers = curl_slist_append(headers, item->string);
+               }
+       }
+
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "gzip");
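
A few http.c changes ride along here and are worth calling out. The SSL version table gains a "tlsv1.3" entry behind LIBCURL_VERSION_NUM >= 0x073400 (curl 7.52.0, which introduced CURL_SSLVERSION_TLSv1_3), so with a new enough libcurl a user should be able to request it through the existing http.sslVersion knob, e.g. "git config http.sslVersion tlsv1.3". The curl_no_proxy declaration moves under the LIBCURL_VERSION_NUM >= 0x071304 guard, presumably to match the CURLOPT_NOPROXY code that consumes it. And protocol_http_header(), which appended a global "Git-Protocol: version=N" header at http_init() time, goes away; the per-request extra_headers field added to http.h below appears to take over that job, letting callers attach such a header to individual requests instead.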
diff --git a/http.h b/http.h
index f7bd3b26b0da70e44402579e4912a1b20dcef16e..4df4a25e1abc232c27f1b00ba0a97b05a689db2e 100644 (file)
--- a/http.h
+++ b/http.h
@@ -172,6 +172,13 @@ struct http_get_options {
         * for details.
         */
        struct strbuf *base_url;
+
+       /*
+        * If not NULL, contains additional HTTP headers to be sent with the
+        * request. The strings in the list must not be freed until after the
+        * request has completed.
+        */
+       struct string_list *extra_headers;
 };
 
 /* Return values for http_get_*() */
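
The new extra_headers field lets a caller attach additional HTTP headers to a single request, with the documented caveat that the strings must outlive the request. A minimal sketch of how a caller might use it (not part of the patch), assuming the usual string_list/strbuf helpers and the existing http_get_strbuf() entry point; the url variable and the header value are only examples:

        struct http_get_options options = { 0 };
        struct string_list headers = STRING_LIST_INIT_DUP;
        struct strbuf result = STRBUF_INIT;

        /* e.g. advertise a wire-protocol version for this request only */
        string_list_append(&headers, "Git-Protocol: version=2");
        options.extra_headers = &headers;

        if (http_get_strbuf(url, &result, &options))
                error("request for %s failed", url);

        /* safe to free now: http_get_strbuf() has completed the request */
        string_list_clear(&headers, 0);
        strbuf_release(&result);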
diff --git a/imap-send.c b/imap-send.c
index 36c7c1b4f6195b2d2f00013611a5d0aa96ecf408..ffb0a6eca8ce632dc448883a47987bf48adea31b 100644 (file)
--- a/imap-send.c
+++ b/imap-send.c
@@ -1189,11 +1189,11 @@ static struct imap_store *imap_open_store(struct imap_server_conf *srvc, char *f
  */
 static void lf_to_crlf(struct strbuf *msg)
 {
-       char *new;
+       char *new_msg;
        size_t i, j;
        char lastc;
 
-       /* First pass: tally, in j, the size of the new string: */
+       /* First pass: tally, in j, the size of the new_msg string: */
        for (i = j = 0, lastc = '\0'; i < msg->len; i++) {
                if (msg->buf[i] == '\n' && lastc != '\r')
                        j++; /* a CR will need to be added here */
@@ -1201,18 +1201,18 @@ static void lf_to_crlf(struct strbuf *msg)
                j++;
        }
 
-       new = xmallocz(j);
+       new_msg = xmallocz(j);
 
        /*
-        * Second pass: write the new string.  Note that this loop is
+        * Second pass: write the new_msg string.  Note that this loop is
         * otherwise identical to the first pass.
         */
        for (i = j = 0, lastc = '\0'; i < msg->len; i++) {
                if (msg->buf[i] == '\n' && lastc != '\r')
-                       new[j++] = '\r';
-               lastc = new[j++] = msg->buf[i];
+                       new_msg[j++] = '\r';
+               lastc = new_msg[j++] = msg->buf[i];
        }
-       strbuf_attach(msg, new, j, j + 1);
+       strbuf_attach(msg, new_msg, j, j + 1);
 }
 
 /*
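
For illustration of the behaviour-preserving rename above: on the six-byte input "a\nb\r\nc", lf_to_crlf()'s first pass counts seven output bytes, since only the bare LF after "a" needs a CR inserted, and the second pass writes "a\r\nb\r\nc" into the freshly allocated new_msg buffer, which strbuf_attach() then hands back to the caller's strbuf.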
diff --git a/line-log.c b/line-log.c
index 545ad0f28bce0ac0f92a31019b92f7ef8a668356..ecdce08c4be24cc14109796d9a7c165c53753a88 100644 (file)
--- a/line-log.c
+++ b/line-log.c
@@ -151,29 +151,29 @@ static void range_set_union(struct range_set *out,
 
        assert(out->nr == 0);
        while (i < a->nr || j < b->nr) {
-               struct range *new;
+               struct range *new_range;
                if (i < a->nr && j < b->nr) {
                        if (ra[i].start < rb[j].start)
-                               new = &ra[i++];
+                               new_range = &ra[i++];
                        else if (ra[i].start > rb[j].start)
-                               new = &rb[j++];
+                               new_range = &rb[j++];
                        else if (ra[i].end < rb[j].end)
-                               new = &ra[i++];
+                               new_range = &ra[i++];
                        else
-                               new = &rb[j++];
+                               new_range = &rb[j++];
                } else if (i < a->nr)      /* b exhausted */
-                       new = &ra[i++];
+                       new_range = &ra[i++];
                else                       /* a exhausted */
-                       new = &rb[j++];
-               if (new->start == new->end)
+                       new_range = &rb[j++];
+               if (new_range->start == new_range->end)
                        ; /* empty range */
-               else if (!out->nr || out->ranges[out->nr-1].end < new->start) {
+               else if (!out->nr || out->ranges[out->nr-1].end < new_range->start) {
                        range_set_grow(out, 1);
-                       out->ranges[out->nr].start = new->start;
-                       out->ranges[out->nr].end = new->end;
+                       out->ranges[out->nr].start = new_range->start;
+                       out->ranges[out->nr].end = new_range->end;
                        out->nr++;
-               } else if (out->ranges[out->nr-1].end < new->end) {
-                       out->ranges[out->nr-1].end = new->end;
+               } else if (out->ranges[out->nr-1].end < new_range->end) {
+                       out->ranges[out->nr-1].end = new_range->end;
                }
        }
 }
@@ -501,8 +501,7 @@ static void fill_blob_sha1(struct commit *commit, struct diff_filespec *spec)
        unsigned mode;
        struct object_id oid;
 
-       if (get_tree_entry(commit->object.oid.hash, spec->path,
-                          oid.hash, &mode))
+       if (get_tree_entry(&commit->object.oid, spec->path, &oid, &mode))
                die("There is no path %s in the commit", spec->path);
        fill_filespec(spec, &oid, 1, mode);
 
@@ -696,18 +695,18 @@ static struct line_log_data *line_log_data_merge(struct line_log_data *a,
 static void add_line_range(struct rev_info *revs, struct commit *commit,
                           struct line_log_data *range)
 {
-       struct line_log_data *old = NULL;
-       struct line_log_data *new = NULL;
+       struct line_log_data *old_line = NULL;
+       struct line_log_data *new_line = NULL;
 
-       old = lookup_decoration(&revs->line_log_data, &commit->object);
-       if (old && range) {
-               new = line_log_data_merge(old, range);
-               free_line_log_data(old);
+       old_line = lookup_decoration(&revs->line_log_data, &commit->object);
+       if (old_line && range) {
+               new_line = line_log_data_merge(old_line, range);
+               free_line_log_data(old_line);
        } else if (range)
-               new = line_log_data_copy(range);
+               new_line = line_log_data_copy(range);
 
-       if (new)
-               add_decoration(&revs->line_log_data, &commit->object, new);
+       if (new_line)
+               add_decoration(&revs->line_log_data, &commit->object, new_line);
 }
 
 static void clear_commit_line_range(struct rev_info *revs, struct commit *commit)
@@ -1042,12 +1041,12 @@ static int process_diff_filepair(struct rev_info *rev,
 
 static struct diff_filepair *diff_filepair_dup(struct diff_filepair *pair)
 {
-       struct diff_filepair *new = xmalloc(sizeof(struct diff_filepair));
-       new->one = pair->one;
-       new->two = pair->two;
-       new->one->count++;
-       new->two->count++;
-       return new;
+       struct diff_filepair *new_filepair = xmalloc(sizeof(struct diff_filepair));
+       new_filepair->one = pair->one;
+       new_filepair->two = pair->two;
+       new_filepair->one->count++;
+       new_filepair->two->count++;
+       return new_filepair;
 }
 
 static void free_diffqueues(int n, struct diff_queue_struct *dq)
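
As a quick worked example of the union loop above (the rename from "new" to "new_range" does not change its behaviour): merging a = {[1,3), [5,8)} with b = {[2,6)} visits the ranges in order of their start, extends the last output range whenever the incoming range overlaps or touches it, and produces {[1,8)}; ranges with start == end are skipped as empty.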
diff --git a/list-objects-filter.c b/list-objects-filter.c
index 4356c45368e10bd7f41e15f91130dbfd3e4e9281..0ec83aaf1888903be4f1de38c8a94a594ef2130a 100644 (file)
--- a/list-objects-filter.c
+++ b/list-objects-filter.c
@@ -117,7 +117,7 @@ static enum list_objects_filter_result filter_blobs_limit(
                assert(obj->type == OBJ_BLOB);
                assert((obj->flags & SEEN) == 0);
 
-               t = sha1_object_info(obj->oid.hash, &object_length);
+               t = oid_object_info(&obj->oid, &object_length);
                if (t != OBJ_BLOB) { /* probably OBJ_NONE */
                        /*
                         * We DO NOT have the blob locally, so we cannot
diff --git a/log-tree.c b/log-tree.c
index fca29d4799da27164394a8c59da067a0522241fe..d1c0bedf244fce0c8894175bf800384c0474b312 100644 (file)
--- a/log-tree.c
+++ b/log-tree.c
@@ -177,7 +177,7 @@ static void show_parents(struct commit *commit, int abbrev, FILE *file)
        struct commit_list *p;
        for (p = commit->parents; p ; p = p->next) {
                struct commit *parent = p->item;
-               fprintf(file, " %s", find_unique_abbrev(parent->object.oid.hash, abbrev));
+               fprintf(file, " %s", find_unique_abbrev(&parent->object.oid, abbrev));
        }
 }
 
@@ -185,7 +185,7 @@ static void show_children(struct rev_info *opt, struct commit *commit, int abbre
 {
        struct commit_list *p = lookup_decoration(&opt->children, &commit->object);
        for ( ; p; p = p->next) {
-               fprintf(opt->diffopt.file, " %s", find_unique_abbrev(p->item->object.oid.hash, abbrev));
+               fprintf(opt->diffopt.file, " %s", find_unique_abbrev(&p->item->object.oid, abbrev));
        }
 }
 
@@ -499,7 +499,7 @@ static void show_one_mergetag(struct commit *commit,
        int status, nth;
        size_t payload_size, gpg_message_offset;
 
-       hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), oid.hash);
+       hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
        tag = lookup_tag(&oid);
        if (!tag)
                return; /* error message already given */
@@ -558,7 +558,7 @@ void show_log(struct rev_info *opt)
 
                if (!opt->graph)
                        put_revision_mark(opt, commit);
-               fputs(find_unique_abbrev(commit->object.oid.hash, abbrev_commit), opt->diffopt.file);
+               fputs(find_unique_abbrev(&commit->object.oid, abbrev_commit), opt->diffopt.file);
                if (opt->print_parents)
                        show_parents(commit, abbrev_commit, opt->diffopt.file);
                if (opt->children.name)
@@ -620,7 +620,8 @@ void show_log(struct rev_info *opt)
 
                if (!opt->graph)
                        put_revision_mark(opt, commit);
-               fputs(find_unique_abbrev(commit->object.oid.hash, abbrev_commit),
+               fputs(find_unique_abbrev(&commit->object.oid,
+                                        abbrev_commit),
                      opt->diffopt.file);
                if (opt->print_parents)
                        show_parents(commit, abbrev_commit, opt->diffopt.file);
@@ -628,8 +629,7 @@ void show_log(struct rev_info *opt)
                        show_children(opt, commit, abbrev_commit);
                if (parent)
                        fprintf(opt->diffopt.file, " (from %s)",
-                              find_unique_abbrev(parent->object.oid.hash,
-                                                 abbrev_commit));
+                              find_unique_abbrev(&parent->object.oid, abbrev_commit));
                fputs(diff_get_color_opt(&opt->diffopt, DIFF_RESET), opt->diffopt.file);
                show_decorations(opt, commit);
                if (opt->commit_format == CMIT_FMT_ONELINE) {
@@ -659,9 +659,6 @@ void show_log(struct rev_info *opt)
                show_mergetag(opt, commit);
        }
 
-       if (!get_cached_commit_buffer(commit, NULL))
-               return;
-
        if (opt->show_notes) {
                int raw;
                struct strbuf notebuf = STRBUF_INIT;
diff --git a/ls-refs.c b/ls-refs.c
new file mode 100644 (file)
index 0000000..a06f12e
--- /dev/null
+++ b/ls-refs.c
@@ -0,0 +1,96 @@
+#include "cache.h"
+#include "repository.h"
+#include "refs.h"
+#include "remote.h"
+#include "argv-array.h"
+#include "ls-refs.h"
+#include "pkt-line.h"
+
+/*
+ * Check if one of the prefixes is a prefix of the ref.
+ * If no prefixes were provided, all refs match.
+ */
+static int ref_match(const struct argv_array *prefixes, const char *refname)
+{
+       int i;
+
+       if (!prefixes->argc)
+               return 1; /* no restriction */
+
+       for (i = 0; i < prefixes->argc; i++) {
+               const char *prefix = prefixes->argv[i];
+
+               if (starts_with(refname, prefix))
+                       return 1;
+       }
+
+       return 0;
+}
+
+struct ls_refs_data {
+       unsigned peel;
+       unsigned symrefs;
+       struct argv_array prefixes;
+};
+
+static int send_ref(const char *refname, const struct object_id *oid,
+                   int flag, void *cb_data)
+{
+       struct ls_refs_data *data = cb_data;
+       const char *refname_nons = strip_namespace(refname);
+       struct strbuf refline = STRBUF_INIT;
+
+       if (!ref_match(&data->prefixes, refname))
+               return 0;
+
+       strbuf_addf(&refline, "%s %s", oid_to_hex(oid), refname_nons);
+       if (data->symrefs && flag & REF_ISSYMREF) {
+               struct object_id unused;
+               const char *symref_target = resolve_ref_unsafe(refname, 0,
+                                                              &unused,
+                                                              &flag);
+
+               if (!symref_target)
+                       die("'%s' is a symref but it is not?", refname);
+
+               strbuf_addf(&refline, " symref-target:%s", symref_target);
+       }
+
+       if (data->peel) {
+               struct object_id peeled;
+               if (!peel_ref(refname, &peeled))
+                       strbuf_addf(&refline, " peeled:%s", oid_to_hex(&peeled));
+       }
+
+       strbuf_addch(&refline, '\n');
+       packet_write(1, refline.buf, refline.len);
+
+       strbuf_release(&refline);
+       return 0;
+}
+
+int ls_refs(struct repository *r, struct argv_array *keys,
+           struct packet_reader *request)
+{
+       struct ls_refs_data data;
+
+       memset(&data, 0, sizeof(data));
+
+       while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+               const char *arg = request->line;
+               const char *out;
+
+               if (!strcmp("peel", arg))
+                       data.peel = 1;
+               else if (!strcmp("symrefs", arg))
+                       data.symrefs = 1;
+               else if (skip_prefix(arg, "ref-prefix ", &out))
+                       argv_array_push(&data.prefixes, out);
+       }
+
+       head_ref_namespaced(send_ref, &data);
+       for_each_namespaced_ref(send_ref, &data);
+       packet_flush(1);
+       argv_array_clear(&data.prefixes);
+       return 0;
+}
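
ls-refs is the protocol v2 server side of ref advertisement: the client sends its arguments as individual pkt-lines terminated by a flush packet, and the server answers with one pkt-line per ref and its own trailing flush. Reading the argument parser and send_ref() above, an exchange would look roughly like this (pkt-line framing omitted, object ids invented for illustration):

        # client arguments
        peel
        symrefs
        ref-prefix refs/heads/
        ref-prefix refs/tags/

        # server response, one ref per line
        91536083cdb16ef3c29638054642b50a34ea8c25 refs/heads/master
        3b1031798a00fdf2b5d2a9462e6f5842b2a32ec3 refs/heads/next
        7044c97be76e30e46a8d6c71a44d13091ac0ba87 refs/tags/v1.0 peeled:91536083cdb16ef3c29638054642b50a34ea8c25

Had no ref-prefix arguments been given, every ref would be advertised, and with "symrefs" set a symbolic ref such as HEAD would additionally carry a "symref-target:refs/heads/master" field.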
diff --git a/ls-refs.h b/ls-refs.h
new file mode 100644 (file)
index 0000000..b62877e
--- /dev/null
+++ b/ls-refs.h
@@ -0,0 +1,10 @@
+#ifndef LS_REFS_H
+#define LS_REFS_H
+
+struct repository;
+struct argv_array;
+struct packet_reader;
+extern int ls_refs(struct repository *r, struct argv_array *keys,
+                  struct packet_reader *request);
+
+#endif /* LS_REFS_H */
diff --git a/mailmap.c b/mailmap.c
index cb921b4db676e3db918ee16f419cd2b78e0bf57e..13f0d2884e25edef3fde5bdf72b89a770111e472 100644 (file)
--- a/mailmap.c
+++ b/mailmap.c
@@ -224,7 +224,7 @@ static int read_mailmap_blob(struct string_list *map,
        if (get_oid(name, &oid) < 0)
                return 0;
 
-       buf = read_sha1_file(oid.hash, &type, &size);
+       buf = read_object_file(&oid, &type, &size);
        if (!buf)
                return error("unable to read mailmap object at %s", name);
        if (type != OBJ_BLOB)
diff --git a/match-trees.c b/match-trees.c
index 396b7338df2c53009900fbd3df36dd06a7219493..72cc2baa3f96b2cbfa296335c7f0ff1094b6e96c 100644 (file)
--- a/match-trees.c
+++ b/match-trees.c
@@ -54,7 +54,7 @@ static void *fill_tree_desc_strict(struct tree_desc *desc,
        enum object_type type;
        unsigned long size;
 
-       buffer = read_sha1_file(hash->hash, &type, &size);
+       buffer = read_object_file(hash, &type, &size);
        if (!buffer)
                die("unable to read tree (%s)", oid_to_hex(hash));
        if (type != OBJ_TREE)
@@ -158,22 +158,20 @@ static void match_trees(const struct object_id *hash1,
 }
 
 /*
- * A tree "hash1" has a subdirectory at "prefix".  Come up with a
- * tree object by replacing it with another tree "hash2".
+ * A tree "oid1" has a subdirectory at "prefix".  Come up with a tree object by
+ * replacing it with another tree "oid2".
  */
-static int splice_tree(const unsigned char *hash1,
-                      const char *prefix,
-                      const unsigned char *hash2,
-                      unsigned char *result)
+static int splice_tree(const struct object_id *oid1, const char *prefix,
+                      const struct object_id *oid2, struct object_id *result)
 {
        char *subpath;
        int toplen;
        char *buf;
        unsigned long sz;
        struct tree_desc desc;
-       unsigned char *rewrite_here;
-       const unsigned char *rewrite_with;
-       unsigned char subtree[20];
+       struct object_id *rewrite_here;
+       const struct object_id *rewrite_with;
+       struct object_id subtree;
        enum object_type type;
        int status;
 
@@ -182,9 +180,9 @@ static int splice_tree(const unsigned char *hash1,
        if (*subpath)
                subpath++;
 
-       buf = read_sha1_file(hash1, &type, &sz);
+       buf = read_object_file(oid1, &type, &sz);
        if (!buf)
-               die("cannot read tree %s", sha1_to_hex(hash1));
+               die("cannot read tree %s", oid_to_hex(oid1));
        init_tree_desc(&desc, buf, sz);
 
        rewrite_here = NULL;
@@ -197,26 +195,26 @@ static int splice_tree(const unsigned char *hash1,
                if (strlen(name) == toplen &&
                    !memcmp(name, prefix, toplen)) {
                        if (!S_ISDIR(mode))
-                               die("entry %s in tree %s is not a tree",
-                                   name, sha1_to_hex(hash1));
-                       rewrite_here = (unsigned char *) oid->hash;
+                               die("entry %s in tree %s is not a tree", name,
+                                   oid_to_hex(oid1));
+                       rewrite_here = (struct object_id *)oid;
                        break;
                }
                update_tree_entry(&desc);
        }
        if (!rewrite_here)
-               die("entry %.*s not found in tree %s",
-                   toplen, prefix, sha1_to_hex(hash1));
+               die("entry %.*s not found in tree %s", toplen, prefix,
+                   oid_to_hex(oid1));
        if (*subpath) {
-               status = splice_tree(rewrite_here, subpath, hash2, subtree);
+               status = splice_tree(rewrite_here, subpath, oid2, &subtree);
                if (status)
                        return status;
-               rewrite_with = subtree;
+               rewrite_with = &subtree;
+       } else {
+               rewrite_with = oid2;
        }
-       else
-               rewrite_with = hash2;
-       hashcpy(rewrite_here, rewrite_with);
-       status = write_sha1_file(buf, sz, tree_type, result);
+       oidcpy(rewrite_here, rewrite_with);
+       status = write_object_file(buf, sz, tree_type, result);
        free(buf);
        return status;
 }
@@ -271,7 +269,7 @@ void shift_tree(const struct object_id *hash1,
                if (!*del_prefix)
                        return;
 
-               if (get_tree_entry(hash2->hash, del_prefix, shifted->hash, &mode))
+               if (get_tree_entry(hash2, del_prefix, shifted, &mode))
                        die("cannot find path %s in tree %s",
                            del_prefix, oid_to_hex(hash2));
                return;
@@ -280,7 +278,7 @@ void shift_tree(const struct object_id *hash1,
        if (!*add_prefix)
                return;
 
-       splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash);
+       splice_tree(hash1, add_prefix, hash2, shifted);
 }
 
 /*
@@ -298,12 +296,12 @@ void shift_tree_by(const struct object_id *hash1,
        unsigned candidate = 0;
 
        /* Can hash2 be a tree at shift_prefix in tree hash1? */
-       if (!get_tree_entry(hash1->hash, shift_prefix, sub1.hash, &mode1) &&
+       if (!get_tree_entry(hash1, shift_prefix, &sub1, &mode1) &&
            S_ISDIR(mode1))
                candidate |= 1;
 
        /* Can hash1 be a tree at shift_prefix in tree hash2? */
-       if (!get_tree_entry(hash2->hash, shift_prefix, sub2.hash, &mode2) &&
+       if (!get_tree_entry(hash2, shift_prefix, &sub2, &mode2) &&
            S_ISDIR(mode2))
                candidate |= 2;
 
@@ -334,7 +332,7 @@ void shift_tree_by(const struct object_id *hash1,
                 * shift tree2 down by adding shift_prefix above it
                 * to match tree1.
                 */
-               splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash);
+               splice_tree(hash1, shift_prefix, hash2, shifted);
        else
                /*
                 * shift tree2 up by removing shift_prefix from it
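
To make the splice_tree() recursion concrete: a call like splice_tree(&oid1, "a/b", &oid2, &result) (the path is purely illustrative) reads the tree oid1, locates the entry "a", recurses with the remaining prefix "b" on that subtree, overwrites the matching entry's oid, with oid2 at the innermost level or with the respliced subtree on the way back up, and writes each modified tree with write_object_file(), leaving the id of the new top-level tree in result. shift_tree() and shift_tree_by() are the callers, which now pass whole struct object_id pointers instead of raw hashes.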
diff --git a/mem-pool.c b/mem-pool.c
new file mode 100644 (file)
index 0000000..389d7af
--- /dev/null
+++ b/mem-pool.c
@@ -0,0 +1,55 @@
+/*
+ * Memory Pool implementation logic.
+ */
+
+#include "cache.h"
+#include "mem-pool.h"
+
+static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc)
+{
+       struct mp_block *p;
+
+       mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
+       p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
+       p->next_block = mem_pool->mp_block;
+       p->next_free = (char *)p->space;
+       p->end = p->next_free + block_alloc;
+       mem_pool->mp_block = p;
+
+       return p;
+}
+
+void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
+{
+       struct mp_block *p;
+       void *r;
+
+       /* round up to a 'uintmax_t' alignment */
+       if (len & (sizeof(uintmax_t) - 1))
+               len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
+
+       for (p = mem_pool->mp_block; p; p = p->next_block)
+               if (p->end - p->next_free >= len)
+                       break;
+
+       if (!p) {
+               if (len >= (mem_pool->block_alloc / 2)) {
+                       mem_pool->pool_alloc += len;
+                       return xmalloc(len);
+               }
+
+               p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc);
+       }
+
+       r = p->next_free;
+       p->next_free += len;
+       return r;
+}
+
+void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size)
+{
+       size_t len = st_mult(count, size);
+       void *r = mem_pool_alloc(mem_pool, len);
+       memset(r, 0, len);
+       return r;
+}
diff --git a/mem-pool.h b/mem-pool.h
new file mode 100644 (file)
index 0000000..829ad58
--- /dev/null
+++ b/mem-pool.h
@@ -0,0 +1,34 @@
+#ifndef MEM_POOL_H
+#define MEM_POOL_H
+
+struct mp_block {
+       struct mp_block *next_block;
+       char *next_free;
+       char *end;
+       uintmax_t space[FLEX_ARRAY]; /* more */
+};
+
+struct mem_pool {
+       struct mp_block *mp_block;
+
+       /*
+        * The amount of available memory to grow the pool by.
+        * This size does not include the overhead for the mp_block.
+        */
+       size_t block_alloc;
+
+       /* The total amount of memory allocated by the pool. */
+       size_t pool_alloc;
+};
+
+/*
+ * Alloc memory from the mem_pool.
+ */
+void *mem_pool_alloc(struct mem_pool *pool, size_t len);
+
+/*
+ * Allocate and zero memory from the memory pool.
+ */
+void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);
+
+#endif
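
mem-pool.h exposes allocation only; there is no per-object free, and the pool's blocks live as long as the struct mem_pool that owns them. Since this version ships no init helper, the caller sets up the struct itself. A minimal usage sketch (the cache_record struct and its values are invented for illustration):

        struct cache_record {
                int id;
                char name[32];
        };

        static void mem_pool_example(void)
        {
                /* fields: mp_block, block_alloc, pool_alloc */
                struct mem_pool pool = { NULL, 8 * 1024, 0 };
                struct cache_record *rec;
                char *label;

                rec = mem_pool_calloc(&pool, 1, sizeof(*rec)); /* zeroed */
                rec->id = 1;

                /* 64 bytes fits in a block; requests >= block_alloc/2 would
                 * go straight to xmalloc instead */
                label = mem_pool_alloc(&pool, 64);
                xsnprintf(label, 64, "record %d", rec->id);
        }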
diff --git a/merge-blobs.c b/merge-blobs.c
index 9b6eac22e4256d8f2bf82961b6e4f320d89fdeba..fa49c17287f4120b4bbb75acc6b92b3d339710cd 100644 (file)
--- a/merge-blobs.c
+++ b/merge-blobs.c
@@ -11,7 +11,7 @@ static int fill_mmfile_blob(mmfile_t *f, struct blob *obj)
        unsigned long size;
        enum object_type type;
 
-       buf = read_sha1_file(obj->object.oid.hash, &type, &size);
+       buf = read_object_file(&obj->object.oid, &type, &size);
        if (!buf)
                return -1;
        if (type != OBJ_BLOB) {
@@ -66,7 +66,7 @@ void *merge_blobs(const char *path, struct blob *base, struct blob *our, struct
                        return NULL;
                if (!our)
                        our = their;
-               return read_sha1_file(our->object.oid.hash, &type, size);
+               return read_object_file(&our->object.oid, &type, size);
        }
 
        if (fill_mmfile_blob(&f1, our) < 0)
diff --git a/merge-recursive.c b/merge-recursive.c
index cc5fa0a94965fad8821cdf81dfe84737f4dee043..9c05eb7f700eed0dc3e20a4c22f3a60e0aa21488 100644 (file)
--- a/merge-recursive.c
+++ b/merge-recursive.c
@@ -49,6 +49,67 @@ static unsigned int path_hash(const char *path)
        return ignore_case ? strihash(path) : strhash(path);
 }
 
+static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
+                                                     char *dir)
+{
+       struct dir_rename_entry key;
+
+       if (dir == NULL)
+               return NULL;
+       hashmap_entry_init(&key, strhash(dir));
+       key.dir = dir;
+       return hashmap_get(hashmap, &key, NULL);
+}
+
+static int dir_rename_cmp(const void *unused_cmp_data,
+                         const void *entry,
+                         const void *entry_or_key,
+                         const void *unused_keydata)
+{
+       const struct dir_rename_entry *e1 = entry;
+       const struct dir_rename_entry *e2 = entry_or_key;
+
+       return strcmp(e1->dir, e2->dir);
+}
+
+static void dir_rename_init(struct hashmap *map)
+{
+       hashmap_init(map, dir_rename_cmp, NULL, 0);
+}
+
+static void dir_rename_entry_init(struct dir_rename_entry *entry,
+                                 char *directory)
+{
+       hashmap_entry_init(entry, strhash(directory));
+       entry->dir = directory;
+       entry->non_unique_new_dir = 0;
+       strbuf_init(&entry->new_dir, 0);
+       string_list_init(&entry->possible_new_dirs, 0);
+}
+
+static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
+                                                   char *target_file)
+{
+       struct collision_entry key;
+
+       hashmap_entry_init(&key, strhash(target_file));
+       key.target_file = target_file;
+       return hashmap_get(hashmap, &key, NULL);
+}
+
+static int collision_cmp(void *unused_cmp_data,
+                        const struct collision_entry *e1,
+                        const struct collision_entry *e2,
+                        const void *unused_keydata)
+{
+       return strcmp(e1->target_file, e2->target_file);
+}
+
+static void collision_init(struct hashmap *map)
+{
+       hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+}
+
 static void flush_output(struct merge_options *o)
 {
        if (o->buffer_output < 2 && o->obuf.len) {
@@ -119,6 +180,7 @@ static int oid_eq(const struct object_id *a, const struct object_id *b)
 
 enum rename_type {
        RENAME_NORMAL = 0,
+       RENAME_DIR,
        RENAME_DELETE,
        RENAME_ONE_FILE_TO_ONE,
        RENAME_ONE_FILE_TO_TWO,
@@ -228,7 +290,7 @@ static void output_commit_title(struct merge_options *o, struct commit *commit)
                strbuf_addf(&o->obuf, "virtual %s\n",
                        merge_remote_util(commit)->name);
        else {
-               strbuf_add_unique_abbrev(&o->obuf, commit->object.oid.hash,
+               strbuf_add_unique_abbrev(&o->obuf, &commit->object.oid,
                                         DEFAULT_ABBREV);
                strbuf_addch(&o->obuf, ' ');
                if (parse_commit(commit) != 0)
@@ -275,32 +337,37 @@ static void init_tree_desc_from_tree(struct tree_desc *desc, struct tree *tree)
        init_tree_desc(desc, tree->buffer, tree->size);
 }
 
-static int git_merge_trees(int index_only,
+static int git_merge_trees(struct merge_options *o,
                           struct tree *common,
                           struct tree *head,
                           struct tree *merge)
 {
        int rc;
        struct tree_desc t[3];
-       struct unpack_trees_options opts;
 
-       memset(&opts, 0, sizeof(opts));
-       if (index_only)
-               opts.index_only = 1;
+       memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
+       if (o->call_depth)
+               o->unpack_opts.index_only = 1;
        else
-               opts.update = 1;
-       opts.merge = 1;
-       opts.head_idx = 2;
-       opts.fn = threeway_merge;
-       opts.src_index = &the_index;
-       opts.dst_index = &the_index;
-       setup_unpack_trees_porcelain(&opts, "merge");
+               o->unpack_opts.update = 1;
+       o->unpack_opts.merge = 1;
+       o->unpack_opts.head_idx = 2;
+       o->unpack_opts.fn = threeway_merge;
+       o->unpack_opts.src_index = &the_index;
+       o->unpack_opts.dst_index = &the_index;
+       setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
 
        init_tree_desc_from_tree(t+0, common);
        init_tree_desc_from_tree(t+1, head);
        init_tree_desc_from_tree(t+2, merge);
 
-       rc = unpack_trees(3, t, &opts);
+       rc = unpack_trees(3, t, &o->unpack_opts);
+       /*
+        * unpack_trees NULLifies src_index, but it's used in verify_uptodate,
+        * so set to the new index which will usually have modification
+        * timestamp info copied over.
+        */
+       o->unpack_opts.src_index = &the_index;
        cache_tree_free(&active_cache_tree);
        return rc;
 }
@@ -335,7 +402,7 @@ struct tree *write_tree_from_memory(struct merge_options *o)
        return result;
 }
 
-static int save_files_dirs(const unsigned char *sha1,
+static int save_files_dirs(const struct object_id *oid,
                struct strbuf *base, const char *path,
                unsigned int mode, int stage, void *context)
 {
@@ -360,6 +427,21 @@ static void get_files_dirs(struct merge_options *o, struct tree *tree)
        read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
 }
 
+static int get_tree_entry_if_blob(struct tree *tree,
+                                 const char *path,
+                                 struct object_id *hashy,
+                                 unsigned int *mode_o)
+{
+       int ret;
+
+       ret = get_tree_entry(&tree->object.oid, path, hashy, mode_o);
+       if (S_ISDIR(*mode_o)) {
+               oidcpy(hashy, &null_oid);
+               *mode_o = 0;
+       }
+       return ret;
+}
+
 /*
  * Returns an index_entry instance which doesn't have to correspond to
  * a real cache entry in Git's index.
@@ -370,12 +452,12 @@ static struct stage_data *insert_stage_data(const char *path,
 {
        struct string_list_item *item;
        struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
-       get_tree_entry(o->object.oid.hash, path,
-                       e->stages[1].oid.hash, &e->stages[1].mode);
-       get_tree_entry(a->object.oid.hash, path,
-                       e->stages[2].oid.hash, &e->stages[2].mode);
-       get_tree_entry(b->object.oid.hash, path,
-                       e->stages[3].oid.hash, &e->stages[3].mode);
+       get_tree_entry_if_blob(o, path,
+                              &e->stages[1].oid, &e->stages[1].mode);
+       get_tree_entry_if_blob(a, path,
+                              &e->stages[2].oid, &e->stages[2].mode);
+       get_tree_entry_if_blob(b, path,
+                              &e->stages[3].oid, &e->stages[3].mode);
        item = string_list_insert(entries, path);
        item->util = e;
        return e;
@@ -513,80 +595,31 @@ static void record_df_conflict_files(struct merge_options *o,
 
 struct rename {
        struct diff_filepair *pair;
+       /*
+        * Purpose of src_entry and dst_entry:
+        *
+        * If 'before' is renamed to 'after' then src_entry will contain
+        * the versions of 'before' from the merge_base, HEAD, and MERGE in
+        * stages 1, 2, and 3; dst_entry will contain the respective
+        * versions of 'after' in corresponding locations.  Thus, we have a
+        * total of six modes and oids, though some will be null.  (Stage 0
+        * is ignored; we're interested in handling conflicts.)
+        *
+        * Since we don't turn on break-rewrites by default, neither
+        * src_entry nor dst_entry can have all three of their stages have
+        * non-null oids, meaning at most four of the six will be non-null.
+        * Also, since this is a rename, both src_entry and dst_entry will
+        * have at least one non-null oid, meaning at least two will be
+        * non-null.  Of the six oids, a typical rename will have three be
+        * non-null.  Only two implies a rename/delete, and four implies a
+        * rename/add.
+        */
        struct stage_data *src_entry;
        struct stage_data *dst_entry;
+       unsigned add_turned_into_rename:1;
        unsigned processed:1;
 };
 
-/*
- * Get information of all renames which occurred between 'o_tree' and
- * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and
- * 'b_tree') to be able to associate the correct cache entries with
- * the rename information. 'tree' is always equal to either a_tree or b_tree.
- */
-static struct string_list *get_renames(struct merge_options *o,
-                                      struct tree *tree,
-                                      struct tree *o_tree,
-                                      struct tree *a_tree,
-                                      struct tree *b_tree,
-                                      struct string_list *entries)
-{
-       int i;
-       struct string_list *renames;
-       struct diff_options opts;
-
-       renames = xcalloc(1, sizeof(struct string_list));
-       if (!o->detect_rename)
-               return renames;
-
-       diff_setup(&opts);
-       opts.flags.recursive = 1;
-       opts.flags.rename_empty = 0;
-       opts.detect_rename = DIFF_DETECT_RENAME;
-       opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
-                           o->diff_rename_limit >= 0 ? o->diff_rename_limit :
-                           1000;
-       opts.rename_score = o->rename_score;
-       opts.show_rename_progress = o->show_rename_progress;
-       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
-       diff_setup_done(&opts);
-       diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
-       diffcore_std(&opts);
-       if (opts.needed_rename_limit > o->needed_rename_limit)
-               o->needed_rename_limit = opts.needed_rename_limit;
-       for (i = 0; i < diff_queued_diff.nr; ++i) {
-               struct string_list_item *item;
-               struct rename *re;
-               struct diff_filepair *pair = diff_queued_diff.queue[i];
-               if (pair->status != 'R') {
-                       diff_free_filepair(pair);
-                       continue;
-               }
-               re = xmalloc(sizeof(*re));
-               re->processed = 0;
-               re->pair = pair;
-               item = string_list_lookup(entries, re->pair->one->path);
-               if (!item)
-                       re->src_entry = insert_stage_data(re->pair->one->path,
-                                       o_tree, a_tree, b_tree, entries);
-               else
-                       re->src_entry = item->util;
-
-               item = string_list_lookup(entries, re->pair->two->path);
-               if (!item)
-                       re->dst_entry = insert_stage_data(re->pair->two->path,
-                                       o_tree, a_tree, b_tree, entries);
-               else
-                       re->dst_entry = item->util;
-               item = string_list_insert(renames, pair->one->path);
-               item->util = re;
-       }
-       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
-       diff_queued_diff.nr = 0;
-       diff_flush(&opts);
-       return renames;
-}
-
 static int update_stages(struct merge_options *opt, const char *path,
                         const struct diff_filespec *o,
                         const struct diff_filespec *a,
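
A concrete instance of the six-slot picture in the struct rename comment above: suppose the merge base and MERGE both contain "before" (MERGE with edits) and HEAD renames it to "after". Then src_entry holds the base's "before" in stage 1 and MERGE's in stage 3, with stage 2 null because HEAD no longer has that path, while dst_entry holds HEAD's "after" in stage 2 only; that is the typical three-of-six non-null case the comment mentions, and a rename/delete or rename/add shifts the count to two or four respectively.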
@@ -618,6 +651,27 @@ static int update_stages(struct merge_options *opt, const char *path,
        return 0;
 }
 
+static int update_stages_for_stage_data(struct merge_options *opt,
+                                       const char *path,
+                                       const struct stage_data *stage_data)
+{
+       struct diff_filespec o, a, b;
+
+       o.mode = stage_data->stages[1].mode;
+       oidcpy(&o.oid, &stage_data->stages[1].oid);
+
+       a.mode = stage_data->stages[2].mode;
+       oidcpy(&a.oid, &stage_data->stages[2].oid);
+
+       b.mode = stage_data->stages[3].mode;
+       oidcpy(&b.oid, &stage_data->stages[3].oid);
+
+       return update_stages(opt, path,
+                            is_null_oid(&o.oid) ? NULL : &o,
+                            is_null_oid(&a.oid) ? NULL : &a,
+                            is_null_oid(&b.oid) ? NULL : &b);
+}
+
 static void update_entry(struct stage_data *entry,
                         struct diff_filespec *o,
                         struct diff_filespec *a,
@@ -746,6 +800,20 @@ static int would_lose_untracked(const char *path)
        return !was_tracked(path) && file_exists(path);
 }
 
+static int was_dirty(struct merge_options *o, const char *path)
+{
+       struct cache_entry *ce;
+       int dirty = 1;
+
+       if (o->call_depth || !was_tracked(path))
+               return !dirty;
+
+       ce = cache_file_exists(path, strlen(path), ignore_case);
+       dirty = (ce->ce_stat_data.sd_mtime.sec > 0 &&
+                verify_uptodate(ce, &o->unpack_opts) != 0);
+       return dirty;
+}
+
 static int make_room_for_path(struct merge_options *o, const char *path)
 {
        int status, i;
@@ -823,7 +891,7 @@ static int update_file_flags(struct merge_options *o,
                        goto update_index;
                }
 
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf)
                        return err(o, _("cannot read object %s '%s'"), oid_to_hex(oid), path);
                if (type != OBJ_BLOB) {
@@ -1009,8 +1077,9 @@ static int merge_file_1(struct merge_options *o,
                        if ((merge_status < 0) || !result_buf.ptr)
                                ret = err(o, _("Failed to execute internal merge"));
 
-                       if (!ret && write_sha1_file(result_buf.ptr, result_buf.size,
-                                                   blob_type, result->oid.hash))
+                       if (!ret &&
+                           write_object_file(result_buf.ptr, result_buf.size,
+                                             blob_type, &result->oid))
                                ret = err(o, _("Unable to add %s to database"),
                                          a->path);
 
@@ -1094,6 +1163,38 @@ static int merge_file_one(struct merge_options *o,
        return merge_file_1(o, &one, &a, &b, branch1, branch2, mfi);
 }
 
+static int conflict_rename_dir(struct merge_options *o,
+                              struct diff_filepair *pair,
+                              const char *rename_branch,
+                              const char *other_branch)
+{
+       const struct diff_filespec *dest = pair->two;
+
+       if (!o->call_depth && would_lose_untracked(dest->path)) {
+               char *alt_path = unique_path(o, dest->path, rename_branch);
+
+               output(o, 1, _("Error: Refusing to lose untracked file at %s; "
+                              "writing to %s instead."),
+                      dest->path, alt_path);
+               /*
+                * Write the file in worktree at alt_path, but not in the
+                * index.  Instead, write to dest->path for the index but
+                * only at the higher appropriate stage.
+                */
+               if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+                       return -1;
+               free(alt_path);
+               return update_stages(o, dest->path, NULL,
+                                    rename_branch == o->branch1 ? dest : NULL,
+                                    rename_branch == o->branch1 ? NULL : dest);
+       }
+
+       /* Update dest->path both in index and in worktree */
+       if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
+               return -1;
+       return 0;
+}
+
 static int handle_change_delete(struct merge_options *o,
                                 const char *path, const char *old_path,
                                 const struct object_id *o_oid, int o_mode,
@@ -1107,7 +1208,8 @@ static int handle_change_delete(struct merge_options *o,
        const char *update_path = path;
        int ret = 0;
 
-       if (dir_in_way(path, !o->call_depth, 0)) {
+       if (dir_in_way(path, !o->call_depth, 0) ||
+           (!o->call_depth && would_lose_untracked(path))) {
                update_path = alt_path = unique_path(o, path, change_branch);
        }
 
@@ -1222,17 +1324,34 @@ static int handle_file(struct merge_options *o,
 
        add = filespec_from_entry(&other, dst_entry, stage ^ 1);
        if (add) {
+               int ren_src_was_dirty = was_dirty(o, rename->path);
                char *add_name = unique_path(o, rename->path, other_branch);
                if (update_file(o, 0, &add->oid, add->mode, add_name))
                        return -1;
 
-               remove_file(o, 0, rename->path, 0);
+               if (ren_src_was_dirty) {
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              rename->path);
+               }
+               /*
+                * Because the double negatives somehow keep confusing me...
+                *    1) update_wd iff !ren_src_was_dirty.
+                *    2) no_wd iff !update_wd
+                *    3) so, no_wd == !!ren_src_was_dirty == ren_src_was_dirty
+                */
+               remove_file(o, 0, rename->path, ren_src_was_dirty);
                dst_name = unique_path(o, rename->path, cur_branch);
        } else {
                if (dir_in_way(rename->path, !o->call_depth, 0)) {
                        dst_name = unique_path(o, rename->path, cur_branch);
                        output(o, 1, _("%s is a directory in %s adding as %s instead"),
                               rename->path, other_branch, dst_name);
+               } else if (!o->call_depth &&
+                          would_lose_untracked(rename->path)) {
+                       dst_name = unique_path(o, rename->path, cur_branch);
+                       output(o, 1, _("Refusing to lose untracked file at %s; "
+                                      "adding as %s instead"),
+                              rename->path, dst_name);
                }
        }
        if ((ret = update_file(o, 0, &rename->oid, rename->mode, dst_name)))
@@ -1358,11 +1477,43 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
                char *new_path2 = unique_path(o, path, ci->branch2);
                output(o, 1, _("Renaming %s to %s and %s to %s instead"),
                       a->path, new_path1, b->path, new_path2);
-               remove_file(o, 0, path, 0);
+               if (was_dirty(o, path))
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              path);
+               else if (would_lose_untracked(path))
+                       /*
+                        * Only way we get here is if both renames were from
+                        * a directory rename AND user had an untracked file
+                        * at the location where both files end up after the
+                        * two directory renames.  See testcase 10d of t6043.
+                        */
+                       output(o, 1, _("Refusing to lose untracked file at "
+                                      "%s, even though it's in the way."),
+                              path);
+               else
+                       remove_file(o, 0, path, 0);
                ret = update_file(o, 0, &mfi_c1.oid, mfi_c1.mode, new_path1);
                if (!ret)
                        ret = update_file(o, 0, &mfi_c2.oid, mfi_c2.mode,
                                          new_path2);
+               /*
+                * unpack_trees() actually populates the index for us for
+                * "normal" rename/rename(2to1) situations so that the
+                * correct entries are at the higher stages, which would
+                * make the call below to update_stages_for_stage_data
+                * unnecessary.  However, if either of the renames came
+                * from a directory rename, then unpack_trees() will not
+                * have gotten the right data loaded into the index, so we
+                * need to do so now.  (While it'd be tempting to move this
+                * call to update_stages_for_stage_data() to
+                * apply_directory_rename_modifications(), that would break
+                * our intermediate calls to would_lose_untracked() since
+                * those rely on the current in-memory index.  See also the
+                * big "NOTE" in update_stages()).
+                */
+               if (update_stages_for_stage_data(o, path, ci->dst_entry1))
+                       ret = -1;
+
                free(new_path2);
                free(new_path1);
        }
@@ -1370,6 +1521,754 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
        return ret;
 }
 
+/*
+ * Get the diff_filepairs changed between o_tree and tree.
+ */
+static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+                                              struct tree *o_tree,
+                                              struct tree *tree)
+{
+       struct diff_queue_struct *ret;
+       struct diff_options opts;
+
+       diff_setup(&opts);
+       opts.flags.recursive = 1;
+       opts.flags.rename_empty = 0;
+       opts.detect_rename = DIFF_DETECT_RENAME;
+       opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
+                           o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+                           1000;
+       opts.rename_score = o->rename_score;
+       opts.show_rename_progress = o->show_rename_progress;
+       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+       diff_setup_done(&opts);
+       diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
+       diffcore_std(&opts);
+       if (opts.needed_rename_limit > o->needed_rename_limit)
+               o->needed_rename_limit = opts.needed_rename_limit;
+
+       ret = xmalloc(sizeof(*ret));
+       *ret = diff_queued_diff;
+
+       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+       diff_queued_diff.nr = 0;
+       diff_queued_diff.queue = NULL;
+       diff_flush(&opts);
+       return ret;
+}
+
+static int tree_has_path(struct tree *tree, const char *path)
+{
+       struct object_id hashy;
+       unsigned int mode_o;
+
+       return !get_tree_entry(&tree->object.oid, path,
+                              &hashy, &mode_o);
+}
+
+/*
+ * Return a new string that replaces the beginning portion (which matches
+ * entry->dir), with entry->new_dir.  In perl-speak:
+ *   new_path_name = (old_path =~ s/entry->dir/entry->new_dir/);
+ * NOTE:
+ *   Caller must ensure that old_path starts with entry->dir + '/'.
+ */
+static char *apply_dir_rename(struct dir_rename_entry *entry,
+                             const char *old_path)
+{
+       struct strbuf new_path = STRBUF_INIT;
+       int oldlen, newlen;
+
+       if (entry->non_unique_new_dir)
+               return NULL;
+
+       oldlen = strlen(entry->dir);
+       newlen = entry->new_dir.len + (strlen(old_path) - oldlen) + 1;
+       strbuf_grow(&new_path, newlen);
+       strbuf_addbuf(&new_path, &entry->new_dir);
+       strbuf_addstr(&new_path, &old_path[oldlen]);
+
+       return strbuf_detach(&new_path, NULL);
+}
+
+static void get_renamed_dir_portion(const char *old_path, const char *new_path,
+                                   char **old_dir, char **new_dir)
+{
+       char *end_of_old, *end_of_new;
+       int old_len, new_len;
+
+       *old_dir = NULL;
+       *new_dir = NULL;
+
+       /*
+        * For
+        *    "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+        * the "e/foo.c" part is the same, we just want to know that
+        *    "a/b/c/d" was renamed to "a/b/some/thing/else"
+        * so, for this example, this function returns "a/b/c/d" in
+        * *old_dir and "a/b/some/thing/else" in *new_dir.
+        *
+        * Also, if the basename of the file changed, we don't care.  We
+        * want to know which portion of the directory, if any, changed.
+        */
+       end_of_old = strrchr(old_path, '/');
+       end_of_new = strrchr(new_path, '/');
+
+       if (end_of_old == NULL || end_of_new == NULL)
+               return;
+       while (*--end_of_new == *--end_of_old &&
+              end_of_old != old_path &&
+              end_of_new != new_path)
+               ; /* Do nothing; all in the while loop */
+       /*
+        * We've found the first non-matching character in the directory
+        * paths.  That means the current directory we were comparing
+        * represents the rename.  Move end_of_old and end_of_new back
+        * to the full directory name.
+        */
+       if (*end_of_old == '/')
+               end_of_old++;
+       if (*end_of_old != '/')
+               end_of_new++;
+       end_of_old = strchr(end_of_old, '/');
+       end_of_new = strchr(end_of_new, '/');
+
+       /*
+        * It may have been the case that old_path and new_path were the same
+        * directory all along.  Don't claim a rename if they're the same.
+        */
+       old_len = end_of_old - old_path;
+       new_len = end_of_new - new_path;
+
+       if (old_len != new_len || strncmp(old_path, new_path, old_len)) {
+               *old_dir = xstrndup(old_path, old_len);
+               *new_dir = xstrndup(new_path, new_len);
+       }
+}
+
+static void remove_hashmap_entries(struct hashmap *dir_renames,
+                                  struct string_list *items_to_remove)
+{
+       int i;
+       struct dir_rename_entry *entry;
+
+       for (i = 0; i < items_to_remove->nr; i++) {
+               entry = items_to_remove->items[i].util;
+               hashmap_remove(dir_renames, entry, NULL);
+       }
+       string_list_clear(items_to_remove, 0);
+}
+
+/*
+ * See if there is a directory rename for path, and if there are any file
+ * level conflicts for the renamed location.  If there is a rename and
+ * there are no conflicts, return the new name.  Otherwise, return NULL.
+ */
+static char *handle_path_level_conflicts(struct merge_options *o,
+                                        const char *path,
+                                        struct dir_rename_entry *entry,
+                                        struct hashmap *collisions,
+                                        struct tree *tree)
+{
+       char *new_path = NULL;
+       struct collision_entry *collision_ent;
+       int clean = 1;
+       struct strbuf collision_paths = STRBUF_INIT;
+
+       /*
+        * entry has the mapping of old directory name to new directory name
+        * that we want to apply to path.
+        */
+       new_path = apply_dir_rename(entry, path);
+
+       if (!new_path) {
+               /* This should only happen when entry->non_unique_new_dir set */
+               if (!entry->non_unique_new_dir)
+                       BUG("entry->non_unique_new_dir not set and !new_path");
+               output(o, 1, _("CONFLICT (directory rename split): "
+                              "Unclear where to place %s because directory "
+                              "%s was renamed to multiple other directories, "
+                              "with no destination getting a majority of the "
+                              "files."),
+                      path, entry->dir);
+               clean = 0;
+               return NULL;
+       }
+
+       /*
+        * The caller needs to have ensured that it has pre-populated
+        * collisions with all paths that map to new_path.  Do a quick check
+        * to ensure that's the case.
+        */
+       collision_ent = collision_find_entry(collisions, new_path);
+       if (collision_ent == NULL)
+               BUG("collision_ent is NULL");
+
+       /*
+        * Check for one-sided add/add/.../add conflicts, i.e.
+        * where implicit renames from the other side doing
+        * directory rename(s) can affect this side of history
+        * to put multiple paths into the same location.  Warn
+        * and bail on directory renames for such paths.
+        */
+       if (collision_ent->reported_already) {
+               clean = 0;
+       } else if (tree_has_path(tree, new_path)) {
+               collision_ent->reported_already = 1;
+               strbuf_add_separated_string_list(&collision_paths, ", ",
+                                                &collision_ent->source_files);
+               output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+                              "file/dir at %s in the way of implicit "
+                              "directory rename(s) putting the following "
+                              "path(s) there: %s."),
+                      new_path, collision_paths.buf);
+               clean = 0;
+       } else if (collision_ent->source_files.nr > 1) {
+               collision_ent->reported_already = 1;
+               strbuf_add_separated_string_list(&collision_paths, ", ",
+                                                &collision_ent->source_files);
+               output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+                              "more than one path to %s; implicit directory "
+                              "renames tried to put these paths there: %s"),
+                      new_path, collision_paths.buf);
+               clean = 0;
+       }
+
+       /* Free memory we no longer need */
+       strbuf_release(&collision_paths);
+       if (!clean && new_path) {
+               free(new_path);
+               return NULL;
+       }
+
+       return new_path;
+}
+
+/*
+ * There are a couple things we want to do at the directory level:
+ *   1. Check for both sides renaming to the same thing, in order to avoid
+ *      implicit renaming of files that should be left in place.  (See
+ *      testcase 6b in t6043 for details.)
+ *   2. Prune directory renames if there are still files left in the
+ *      original directory.  These represent a partial directory rename,
+ *      i.e. a rename where only some of the files within the directory
+ *      were renamed elsewhere.  (Technically, this could be done earlier
+ *      in get_directory_renames(), except that would prevent us from
+ *      doing the previous check and thus failing testcase 6b.)
+ *   3. Check for rename/rename(1to2) conflicts (at the directory level).
+ *      In the future, we could potentially record this info as well and
+ *      omit reporting rename/rename(1to2) conflicts for each path within
+ *      the affected directories, thus cleaning up the merge output.
+ *   NOTE: We do NOT check for rename/rename(2to1) conflicts at the
+ *         directory level, because merging directories is fine.  If it
+ *         causes conflicts for files within those merged directories, then
+ *         that should be detected at the individual path level.
+ */
+static void handle_directory_level_conflicts(struct merge_options *o,
+                                            struct hashmap *dir_re_head,
+                                            struct tree *head,
+                                            struct hashmap *dir_re_merge,
+                                            struct tree *merge)
+{
+       struct hashmap_iter iter;
+       struct dir_rename_entry *head_ent;
+       struct dir_rename_entry *merge_ent;
+
+       struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
+       struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
+
+       hashmap_iter_init(dir_re_head, &iter);
+       while ((head_ent = hashmap_iter_next(&iter))) {
+               merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
+               if (merge_ent &&
+                   !head_ent->non_unique_new_dir &&
+                   !merge_ent->non_unique_new_dir &&
+                   !strbuf_cmp(&head_ent->new_dir, &merge_ent->new_dir)) {
+                       /* 1. Renamed identically; remove it from both sides */
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+                       strbuf_release(&merge_ent->new_dir);
+               } else if (tree_has_path(head, head_ent->dir)) {
+                       /* 2. This wasn't a directory rename after all */
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+               }
+       }
+
+       remove_hashmap_entries(dir_re_head, &remove_from_head);
+       remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+
+       hashmap_iter_init(dir_re_merge, &iter);
+       while ((merge_ent = hashmap_iter_next(&iter))) {
+               head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
+               if (tree_has_path(merge, merge_ent->dir)) {
+                       /* 2. This wasn't a directory rename after all */
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+               } else if (head_ent &&
+                          !head_ent->non_unique_new_dir &&
+                          !merge_ent->non_unique_new_dir) {
+                       /* 3. rename/rename(1to2) */
+                       /*
+                        * We can assume it's not rename/rename(1to1) because
+                        * that was case (1), already checked above.  So we
+                        * know that head_ent->new_dir and merge_ent->new_dir
+                        * are different strings.
+                        */
+                       output(o, 1, _("CONFLICT (rename/rename): "
+                                      "Rename directory %s->%s in %s. "
+                                      "Rename directory %s->%s in %s"),
+                              head_ent->dir, head_ent->new_dir.buf, o->branch1,
+                              head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+                       strbuf_release(&merge_ent->new_dir);
+               }
+       }
+
+       remove_hashmap_entries(dir_re_head, &remove_from_head);
+       remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+}
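
An aside on the pattern used in both passes above: entries slated for removal are collected in a string_list (with the hashmap entry stashed in util) while iterating, and only removed from the hashmap afterwards, since deleting entries from a hashmap during iteration is not safe.  A minimal sketch of that deferred-removal step, assuming the hashmap and string-list APIs this series builds on; the patch's own remove_hashmap_entries() helper is the authoritative version:

static void remove_marked_entries(struct hashmap *map,
                                  struct string_list *to_remove)
{
        int i;

        for (i = 0; i < to_remove->nr; i++) {
                /* util holds the hashmap entry collected during iteration */
                struct dir_rename_entry *entry = to_remove->items[i].util;

                hashmap_remove(map, entry, NULL);
        }
        string_list_clear(to_remove, 0);
}
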
+
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
+                                            struct tree *tree)
+{
+       struct hashmap *dir_renames;
+       struct hashmap_iter iter;
+       struct dir_rename_entry *entry;
+       int i;
+
+       /*
+        * Typically, we think of a directory rename as all files from a
+        * certain directory being moved to a target directory.  However,
+        * what if someone first moved two files from the original
+        * directory in one commit, and then renamed the directory
+        * somewhere else in a later commit?  At merge time, we just know
+        * that files from the original directory went to two different
+        * places, and that the bulk of them ended up in the same place.
+        * We want each directory rename to represent where the bulk of the
+        * files from that directory end up; this function exists to find
+        * where the bulk of the files went.
+        *
+        * The first loop below simply iterates through the list of file
+        * renames, finding out how often each directory rename pair
+        * possibility occurs.
+        */
+       dir_renames = xmalloc(sizeof(struct hashmap));
+       dir_rename_init(dir_renames);
+       for (i = 0; i < pairs->nr; ++i) {
+               struct string_list_item *item;
+               int *count;
+               struct diff_filepair *pair = pairs->queue[i];
+               char *old_dir, *new_dir;
+
+               /* File not part of directory rename if it wasn't renamed */
+               if (pair->status != 'R')
+                       continue;
+
+               get_renamed_dir_portion(pair->one->path, pair->two->path,
+                                       &old_dir,        &new_dir);
+               if (!old_dir)
+                       /* Directory didn't change at all; ignore this one. */
+                       continue;
+
+               entry = dir_rename_find_entry(dir_renames, old_dir);
+               if (!entry) {
+                       entry = xmalloc(sizeof(struct dir_rename_entry));
+                       dir_rename_entry_init(entry, old_dir);
+                       hashmap_put(dir_renames, entry);
+               } else {
+                       free(old_dir);
+               }
+               item = string_list_lookup(&entry->possible_new_dirs, new_dir);
+               if (!item) {
+                       item = string_list_insert(&entry->possible_new_dirs,
+                                                 new_dir);
+                       item->util = xcalloc(1, sizeof(int));
+               } else {
+                       free(new_dir);
+               }
+               count = item->util;
+               *count += 1;
+       }
+
+       /*
+        * For each directory with files moved out of it, we find out which
+        * target directory received the most files so we can declare it to
+        * be the "winning" target location for the directory rename.  This
+        * winner gets recorded in new_dir.  If there is no winner
+        * (multiple target directories received the same number of files),
+        * we set non_unique_new_dir.  Once we've determined the winner (or
+        * that there is no winner), we no longer need possible_new_dirs.
+        */
+       hashmap_iter_init(dir_renames, &iter);
+       while ((entry = hashmap_iter_next(&iter))) {
+               int max = 0;
+               int bad_max = 0;
+               char *best = NULL;
+
+               for (i = 0; i < entry->possible_new_dirs.nr; i++) {
+                       int *count = entry->possible_new_dirs.items[i].util;
+
+                       if (*count == max)
+                               bad_max = max;
+                       else if (*count > max) {
+                               max = *count;
+                               best = entry->possible_new_dirs.items[i].string;
+                       }
+               }
+               if (bad_max == max)
+                       entry->non_unique_new_dir = 1;
+               else {
+                       assert(entry->new_dir.len == 0);
+                       strbuf_addstr(&entry->new_dir, best);
+               }
+               /*
+                * The relevant directory sub-portions of the original full
+                * filepaths were xstrndup'ed before inserting into
+                * possible_new_dirs, and instead of manually iterating the
+                * list and free'ing each, just lie and tell
+                * possible_new_dirs that it did the strdup'ing so that it
+                * will free them for us.
+                */
+               entry->possible_new_dirs.strdup_strings = 1;
+               string_list_clear(&entry->possible_new_dirs, 1);
+       }
+
+       return dir_renames;
+}
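
The winner-selection loop above is a majority vote with tie detection: the destination that received the most files wins, and a tie for first place means there is no unique winner.  A standalone illustration of the same logic (plain C, compilable on its own; the counts are made up):

#include <stdio.h>

int main(void)
{
        const char *dirs[] = { "newdir/", "otherdir/", "thirddir/" };
        int counts[] = { 5, 5, 2 };     /* files that moved to each candidate */
        int i, max = 0, bad_max = 0;
        const char *best = NULL;

        for (i = 0; i < 3; i++) {
                if (counts[i] == max)
                        bad_max = max;          /* tie for the current max */
                else if (counts[i] > max) {
                        max = counts[i];
                        best = dirs[i];
                }
        }
        if (bad_max == max)
                printf("no unique winner (non_unique_new_dir would be set)\n");
        else
                printf("winner: %s (%d files)\n", best, max);
        return 0;
}

With counts of 5, 5 and 2 the tie for first place is detected; with, say, 3, 3 and 5 the last candidate wins outright.
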
+
+static struct dir_rename_entry *check_dir_renamed(const char *path,
+                                                 struct hashmap *dir_renames)
+{
+       char *temp = xstrdup(path);
+       char *end;
+       struct dir_rename_entry *entry = NULL;
+
+       while ((end = strrchr(temp, '/'))) {
+               *end = '\0';
+               entry = dir_rename_find_entry(dir_renames, temp);
+               if (entry)
+                       break;
+       }
+       free(temp);
+       return entry;
+}
+
+static void compute_collisions(struct hashmap *collisions,
+                              struct hashmap *dir_renames,
+                              struct diff_queue_struct *pairs)
+{
+       int i;
+
+       /*
+        * Multiple files can be mapped to the same path due to directory
+        * renames done by the other side of history.  Since that other
+        * side of history could have merged multiple directories into one,
+        * if our side of history added the same file basename to each of
+        * those directories, then all N of them would get implicitly
+        * renamed by the directory rename detection into the same path,
+        * and we'd get an add/add/.../add conflict, with all of those adds
+        * coming from *this* side of history.  This is not representable in the
+        * index, and users aren't going to easily be able to make sense of
+        * it.  So we need to provide a good warning about what's
+        * happening, and fall back to no-directory-rename detection
+        * behavior for those paths.
+        *
+        * See testcases 9e and all of section 5 from t6043 for examples.
+        */
+       collision_init(collisions);
+
+       for (i = 0; i < pairs->nr; ++i) {
+               struct dir_rename_entry *dir_rename_ent;
+               struct collision_entry *collision_ent;
+               char *new_path;
+               struct diff_filepair *pair = pairs->queue[i];
+
+               if (pair->status != 'A' && pair->status != 'R')
+                       continue;
+               dir_rename_ent = check_dir_renamed(pair->two->path,
+                                                  dir_renames);
+               if (!dir_rename_ent)
+                       continue;
+
+               new_path = apply_dir_rename(dir_rename_ent, pair->two->path);
+               if (!new_path)
+                       /*
+                        * dir_rename_ent->non_unique_new_dir is true, which
+                        * means there is no directory rename for us to use,
+                        * which means it won't cause us any additional
+                        * collisions.
+                        */
+                       continue;
+               collision_ent = collision_find_entry(collisions, new_path);
+               if (!collision_ent) {
+                       collision_ent = xcalloc(1,
+                                               sizeof(struct collision_entry));
+                       hashmap_entry_init(collision_ent, strhash(new_path));
+                       hashmap_put(collisions, collision_ent);
+                       collision_ent->target_file = new_path;
+               } else {
+                       free(new_path);
+               }
+               string_list_insert(&collision_ent->source_files,
+                                  pair->two->path);
+       }
+}
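
To make the scenario described in the comment concrete: if the other side of history merged dir1/ and dir2/ into merged/, and this side added dir1/foo.txt and dir2/foo.txt, both additions would be implicitly renamed to the same target.  A standalone sketch of that collision (plain C, hypothetical paths):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* directory renames inferred from the other side of history */
        const char *src[] = { "dir1/", "dir2/" };
        const char *dst[] = { "merged/", "merged/" };
        /* paths added on this side of history */
        const char *added[] = { "dir1/foo.txt", "dir2/foo.txt" };
        char target[2][64];
        int i;

        for (i = 0; i < 2; i++) {
                snprintf(target[i], sizeof(target[i]), "%s%s",
                         dst[i], added[i] + strlen(src[i]));
                printf("%s -> %s\n", added[i], target[i]);
        }
        if (!strcmp(target[0], target[1]))
                printf("collision: both map to %s\n", target[0]);
        return 0;
}
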
+
+static char *check_for_directory_rename(struct merge_options *o,
+                                       const char *path,
+                                       struct tree *tree,
+                                       struct hashmap *dir_renames,
+                                       struct hashmap *dir_rename_exclusions,
+                                       struct hashmap *collisions,
+                                       int *clean_merge)
+{
+       char *new_path = NULL;
+       struct dir_rename_entry *entry = check_dir_renamed(path, dir_renames);
+       struct dir_rename_entry *oentry = NULL;
+
+       if (!entry)
+               return new_path;
+
+       /*
+        * This next part is a little weird.  We do not want to do an
+        * implicit rename into a directory we renamed on our side, because
+        * that will result in a spurious rename/rename(1to2) conflict.  An
+        * example:
+        *   Base commit: dumbdir/afile, otherdir/bfile
+        *   Side 1:      smrtdir/afile, otherdir/bfile
+        *   Side 2:      dumbdir/afile, dumbdir/bfile
+        * Here, while working on Side 1, we could notice that otherdir was
+        * renamed/merged to dumbdir, and change the diff_filepair for
+        * otherdir/bfile into a rename into dumbdir/bfile.  However, Side
+        * 2 will notice the rename from dumbdir to smrtdir, and do the
+        * transitive rename to move it from dumbdir/bfile to
+        * smrtdir/bfile.  That gives us bfile in dumbdir vs being in
+        * smrtdir, a rename/rename(1to2) conflict.  We really just want
+        * the file to end up in smrtdir.  And the way to achieve that is
+        * to not let Side 1 do the rename to dumbdir, since we know that is
+        * the source of one of our directory renames.
+        *
+        * That's why oentry and dir_rename_exclusions are here.
+        *
+        * As it turns out, this also prevents N-way transient rename
+        * confusion; See testcases 9c and 9d of t6043.
+        */
+       oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
+       if (oentry) {
+               output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+                              "to %s, because %s itself was renamed."),
+                      entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
+       } else {
+               new_path = handle_path_level_conflicts(o, path, entry,
+                                                      collisions, tree);
+               *clean_merge &= (new_path != NULL);
+       }
+
+       return new_path;
+}
+
+static void apply_directory_rename_modifications(struct merge_options *o,
+                                                struct diff_filepair *pair,
+                                                char *new_path,
+                                                struct rename *re,
+                                                struct tree *tree,
+                                                struct tree *o_tree,
+                                                struct tree *a_tree,
+                                                struct tree *b_tree,
+                                                struct string_list *entries,
+                                                int *clean)
+{
+       struct string_list_item *item;
+       int stage = (tree == a_tree ? 2 : 3);
+       int update_wd;
+
+       /*
+        * In all cases where we can do directory rename detection,
+        * unpack_trees() will have read pair->two->path into the
+        * index and the working copy.  We need to remove it so that
+        * we can instead place it at new_path.  It is guaranteed to
+        * not be untracked (unpack_trees() would have errored out
+        * saying the file would have been overwritten), but it might
+        * be dirty.
+        */
+       update_wd = !was_dirty(o, pair->two->path);
+       if (!update_wd)
+               output(o, 1, _("Refusing to lose dirty file at %s"),
+                      pair->two->path);
+       remove_file(o, 1, pair->two->path, !update_wd);
+
+       /* Find or create a new re->dst_entry */
+       item = string_list_lookup(entries, new_path);
+       if (item) {
+               /*
+                * Since we're renaming on this side of history, and it's
+                * due to a directory rename on the other side of history
+                * (which we only allow when the directory in question no
+                * longer exists on the other side of history), the
+                * original entry for re->dst_entry is no longer
+                * necessary...
+                */
+               re->dst_entry->processed = 1;
+
+               /*
+                * ...because we'll be using this new one.
+                */
+               re->dst_entry = item->util;
+       } else {
+               /*
+                * re->dst_entry is for the before-dir-rename path, and we
+                * need it to hold information for the after-dir-rename
+                * path.  Before creating a new entry, we need to mark the
+                * old one as unnecessary (...unless it is shared by
+                * src_entry, i.e. this didn't use to be a rename, in which
+                * case we can just allow the normal processing to happen
+                * for it).
+                */
+               if (pair->status == 'R')
+                       re->dst_entry->processed = 1;
+
+               re->dst_entry = insert_stage_data(new_path,
+                                                 o_tree, a_tree, b_tree,
+                                                 entries);
+               item = string_list_insert(entries, new_path);
+               item->util = re->dst_entry;
+       }
+
+       /*
+        * Update the stage_data with the information about the path we are
+        * moving into place.  That slot will be empty and available for us
+        * to write to because of the collision checks in
+        * handle_path_level_conflicts().  In other words,
+        * re->dst_entry->stages[stage].oid will be the null_oid, so it's
+        * open for us to write to.
+        *
+        * It may be tempting to actually update the index at this point as
+        * well, using update_stages_for_stage_data(), but as per the big
+        * "NOTE" in update_stages(), doing so will modify the current
+        * in-memory index which will break calls to would_lose_untracked()
+        * that we need to make.  Instead, we need to just make sure that
+        * the various conflict_rename_*() functions update the index
+        * explicitly rather than relying on unpack_trees() to have done it.
+        */
+       get_tree_entry(&tree->object.oid,
+                      pair->two->path,
+                      &re->dst_entry->stages[stage].oid,
+                      &re->dst_entry->stages[stage].mode);
+
+       /* Update pair status */
+       if (pair->status == 'A') {
+               /*
+                * Recording rename information for this add makes it look
+                * like a rename/delete conflict.  Make sure we can
+                * correctly handle this as an add that was moved to a new
+                * directory instead of reporting a rename/delete conflict.
+                */
+               re->add_turned_into_rename = 1;
+       }
+       /*
+        * We don't actually look at pair->status again, but it seems
+        * pedagogically correct to adjust it.
+        */
+       pair->status = 'R';
+
+       /*
+        * Finally, record the new location.
+        */
+       pair->two->path = new_path;
+}
+
+/*
+ * Get information of all renames which occurred in 'pairs', making use of
+ * any implicit directory renames inferred from the other side of history.
+ * We need the three trees in the merge ('o_tree', 'a_tree' and 'b_tree')
+ * to be able to associate the correct cache entries with the rename
+ * information; tree is always equal to either a_tree or b_tree.
+ */
+static struct string_list *get_renames(struct merge_options *o,
+                                      struct diff_queue_struct *pairs,
+                                      struct hashmap *dir_renames,
+                                      struct hashmap *dir_rename_exclusions,
+                                      struct tree *tree,
+                                      struct tree *o_tree,
+                                      struct tree *a_tree,
+                                      struct tree *b_tree,
+                                      struct string_list *entries,
+                                      int *clean_merge)
+{
+       int i;
+       struct hashmap collisions;
+       struct hashmap_iter iter;
+       struct collision_entry *e;
+       struct string_list *renames;
+
+       compute_collisions(&collisions, dir_renames, pairs);
+       renames = xcalloc(1, sizeof(struct string_list));
+
+       for (i = 0; i < pairs->nr; ++i) {
+               struct string_list_item *item;
+               struct rename *re;
+               struct diff_filepair *pair = pairs->queue[i];
+               char *new_path; /* non-NULL only with directory renames */
+
+               if (pair->status != 'A' && pair->status != 'R') {
+                       diff_free_filepair(pair);
+                       continue;
+               }
+               new_path = check_for_directory_rename(o, pair->two->path, tree,
+                                                     dir_renames,
+                                                     dir_rename_exclusions,
+                                                     &collisions,
+                                                     clean_merge);
+               if (pair->status != 'R' && !new_path) {
+                       diff_free_filepair(pair);
+                       continue;
+               }
+
+               re = xmalloc(sizeof(*re));
+               re->processed = 0;
+               re->add_turned_into_rename = 0;
+               re->pair = pair;
+               item = string_list_lookup(entries, re->pair->one->path);
+               if (!item)
+                       re->src_entry = insert_stage_data(re->pair->one->path,
+                                       o_tree, a_tree, b_tree, entries);
+               else
+                       re->src_entry = item->util;
+
+               item = string_list_lookup(entries, re->pair->two->path);
+               if (!item)
+                       re->dst_entry = insert_stage_data(re->pair->two->path,
+                                       o_tree, a_tree, b_tree, entries);
+               else
+                       re->dst_entry = item->util;
+               item = string_list_insert(renames, pair->one->path);
+               item->util = re;
+               if (new_path)
+                       apply_directory_rename_modifications(o, pair, new_path,
+                                                            re, tree, o_tree,
+                                                            a_tree, b_tree,
+                                                            entries,
+                                                            clean_merge);
+       }
+
+       hashmap_iter_init(&collisions, &iter);
+       while ((e = hashmap_iter_next(&iter))) {
+               free(e->target_file);
+               string_list_clear(&e->source_files, 0);
+       }
+       hashmap_free(&collisions, 1);
+       return renames;
+}
+
 static int process_renames(struct merge_options *o,
                           struct string_list *a_renames,
                           struct string_list *b_renames)
@@ -1528,7 +2427,19 @@ static int process_renames(struct merge_options *o,
                        dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
                        try_merge = 0;
 
-                       if (oid_eq(&src_other.oid, &null_oid)) {
+                       if (oid_eq(&src_other.oid, &null_oid) &&
+                           ren1->add_turned_into_rename) {
+                               setup_rename_conflict_info(RENAME_DIR,
+                                                          ren1->pair,
+                                                          NULL,
+                                                          branch1,
+                                                          branch2,
+                                                          ren1->dst_entry,
+                                                          NULL,
+                                                          o,
+                                                          NULL,
+                                                          NULL);
+                       } else if (oid_eq(&src_other.oid, &null_oid)) {
                                setup_rename_conflict_info(RENAME_DELETE,
                                                           ren1->pair,
                                                           NULL,
@@ -1625,6 +2536,105 @@ static int process_renames(struct merge_options *o,
        return clean_merge;
 }
 
+struct rename_info {
+       struct string_list *head_renames;
+       struct string_list *merge_renames;
+};
+
+static void initial_cleanup_rename(struct diff_queue_struct *pairs,
+                                  struct hashmap *dir_renames)
+{
+       struct hashmap_iter iter;
+       struct dir_rename_entry *e;
+
+       hashmap_iter_init(dir_renames, &iter);
+       while ((e = hashmap_iter_next(&iter))) {
+               free(e->dir);
+               strbuf_release(&e->new_dir);
+               /* possible_new_dirs already cleared in get_directory_renames */
+       }
+       hashmap_free(dir_renames, 1);
+       free(dir_renames);
+
+       free(pairs->queue);
+       free(pairs);
+}
+
+static int handle_renames(struct merge_options *o,
+                         struct tree *common,
+                         struct tree *head,
+                         struct tree *merge,
+                         struct string_list *entries,
+                         struct rename_info *ri)
+{
+       struct diff_queue_struct *head_pairs, *merge_pairs;
+       struct hashmap *dir_re_head, *dir_re_merge;
+       int clean = 1;
+
+       ri->head_renames = NULL;
+       ri->merge_renames = NULL;
+
+       if (!o->detect_rename)
+               return 1;
+
+       head_pairs = get_diffpairs(o, common, head);
+       merge_pairs = get_diffpairs(o, common, merge);
+
+       dir_re_head = get_directory_renames(head_pairs, head);
+       dir_re_merge = get_directory_renames(merge_pairs, merge);
+
+       handle_directory_level_conflicts(o,
+                                        dir_re_head, head,
+                                        dir_re_merge, merge);
+
+       ri->head_renames  = get_renames(o, head_pairs,
+                                       dir_re_merge, dir_re_head, head,
+                                       common, head, merge, entries,
+                                       &clean);
+       if (clean < 0)
+               goto cleanup;
+       ri->merge_renames = get_renames(o, merge_pairs,
+                                       dir_re_head, dir_re_merge, merge,
+                                       common, head, merge, entries,
+                                       &clean);
+       if (clean < 0)
+               goto cleanup;
+       clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+
+cleanup:
+       /*
+        * Some cleanup is deferred until final_cleanup_renames() because the
+        * data structures are still needed and referenced in
+        * process_entry().  But there are a few things we can free now.
+        */
+       initial_cleanup_rename(head_pairs, dir_re_head);
+       initial_cleanup_rename(merge_pairs, dir_re_merge);
+
+       return clean;
+}
+
+static void final_cleanup_rename(struct string_list *rename)
+{
+       const struct rename *re;
+       int i;
+
+       if (rename == NULL)
+               return;
+
+       for (i = 0; i < rename->nr; i++) {
+               re = rename->items[i].util;
+               diff_free_filepair(re->pair);
+       }
+       string_list_clear(rename, 1);
+       free(rename);
+}
+
+static void final_cleanup_renames(struct rename_info *re_info)
+{
+       final_cleanup_rename(re_info->head_renames);
+       final_cleanup_rename(re_info->merge_renames);
+}
+
 static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
 {
        return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
@@ -1636,7 +2646,7 @@ static int read_oid_strbuf(struct merge_options *o,
        void *buf;
        enum object_type type;
        unsigned long size;
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return err(o, _("cannot read object %s"), oid_to_hex(oid));
        if (type != OBJ_BLOB) {
@@ -1715,6 +2725,7 @@ static int handle_modify_delete(struct merge_options *o,
 
 static int merge_content(struct merge_options *o,
                         const char *path,
+                        int file_in_way,
                         struct object_id *o_oid, int o_mode,
                         struct object_id *a_oid, int a_mode,
                         struct object_id *b_oid, int b_mode,
@@ -1762,7 +2773,6 @@ static int merge_content(struct merge_options *o,
 
        if (mfi.clean && !df_conflict_remains &&
            oid_eq(&mfi.oid, a_oid) && mfi.mode == a_mode) {
-               int path_renamed_outside_HEAD;
                output(o, 3, _("Skipped %s (merged same as existing)"), path);
                /*
                 * The content merge resulted in the same file contents we
@@ -1770,8 +2780,7 @@ static int merge_content(struct merge_options *o,
                 * are recorded at the correct path (which may not be true
                 * if the merge involves a rename).
                 */
-               path_renamed_outside_HEAD = !path2 || !strcmp(path, path2);
-               if (!path_renamed_outside_HEAD) {
+               if (was_tracked(path)) {
                        add_cacheinfo(o, mfi.mode, &mfi.oid, path,
                                      0, (!o->call_depth), 0);
                        return mfi.clean;
@@ -1789,7 +2798,7 @@ static int merge_content(struct merge_options *o,
                                return -1;
        }
 
-       if (df_conflict_remains) {
+       if (df_conflict_remains || file_in_way) {
                char *new_path;
                if (o->call_depth) {
                        remove_file_from_cache(path);
@@ -1823,6 +2832,30 @@ static int merge_content(struct merge_options *o,
        return mfi.clean;
 }
 
+static int conflict_rename_normal(struct merge_options *o,
+                                 const char *path,
+                                 struct object_id *o_oid, unsigned int o_mode,
+                                 struct object_id *a_oid, unsigned int a_mode,
+                                 struct object_id *b_oid, unsigned int b_mode,
+                                 struct rename_conflict_info *ci)
+{
+       int clean_merge;
+       int file_in_the_way = 0;
+
+       if (was_dirty(o, path)) {
+               file_in_the_way = 1;
+               output(o, 1, _("Refusing to lose dirty file at %s"), path);
+       }
+
+       /* Merge the content and write it out */
+       clean_merge = merge_content(o, path, file_in_the_way,
+                                   o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+                                   ci);
+       if (clean_merge > 0 && file_in_the_way)
+               clean_merge = 0;
+       return clean_merge;
+}
+
 /* Per entry merge function */
 static int process_entry(struct merge_options *o,
                         const char *path, struct stage_data *entry)
@@ -1842,9 +2875,20 @@ static int process_entry(struct merge_options *o,
                switch (conflict_info->rename_type) {
                case RENAME_NORMAL:
                case RENAME_ONE_FILE_TO_ONE:
-                       clean_merge = merge_content(o, path,
-                                                   o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
-                                                   conflict_info);
+                       clean_merge = conflict_rename_normal(o,
+                                                            path,
+                                                            o_oid, o_mode,
+                                                            a_oid, a_mode,
+                                                            b_oid, b_mode,
+                                                            conflict_info);
+                       break;
+               case RENAME_DIR:
+                       clean_merge = 1;
+                       if (conflict_rename_dir(o,
+                                               conflict_info->pair1,
+                                               conflict_info->branch1,
+                                               conflict_info->branch2))
+                               clean_merge = -1;
                        break;
                case RENAME_DELETE:
                        clean_merge = 0;
@@ -1932,7 +2976,7 @@ static int process_entry(struct merge_options *o,
        } else if (a_oid && b_oid) {
                /* Case C: Added in both (check for same permissions) and */
                /* case D: Modified in both, but differently. */
-               clean_merge = merge_content(o, path,
+               clean_merge = merge_content(o, path, 0 /* file_in_way */,
                                            o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
                                            NULL);
        } else if (!o_oid && !a_oid && !b_oid) {
@@ -1973,7 +3017,7 @@ int merge_trees(struct merge_options *o,
                return 1;
        }
 
-       code = git_merge_trees(o->call_depth, common, head, merge);
+       code = git_merge_trees(o, common, head, merge);
 
        if (code != 0) {
                if (show(o, 4) || o->call_depth)
@@ -1984,7 +3028,8 @@ int merge_trees(struct merge_options *o,
        }
 
        if (unmerged_cache()) {
-               struct string_list *entries, *re_head, *re_merge;
+               struct string_list *entries;
+               struct rename_info re_info;
                int i;
                /*
                 * Only need the hashmap while processing entries, so
@@ -1998,10 +3043,9 @@ int merge_trees(struct merge_options *o,
                get_files_dirs(o, merge);
 
                entries = get_unmerged();
+               clean = handle_renames(o, common, head, merge, entries,
+                                      &re_info);
                record_df_conflict_files(o, entries);
-               re_head  = get_renames(o, head, common, head, merge, entries);
-               re_merge = get_renames(o, merge, common, head, merge, entries);
-               clean = process_renames(o, re_head, re_merge);
                if (clean < 0)
                        goto cleanup;
                for (i = entries->nr-1; 0 <= i; i--) {
@@ -2025,16 +3069,13 @@ int merge_trees(struct merge_options *o,
                }
 
 cleanup:
-               string_list_clear(re_merge, 0);
-               string_list_clear(re_head, 0);
+               final_cleanup_renames(&re_info);
+
                string_list_clear(entries, 1);
+               free(entries);
 
                hashmap_free(&o->current_file_dir_set, 1);
 
-               free(re_merge);
-               free(re_head);
-               free(entries);
-
                if (clean < 0)
                        return clean;
        }
@@ -2070,7 +3111,7 @@ int merge_recursive(struct merge_options *o,
 {
        struct commit_list *iter;
        struct commit *merged_common_ancestors;
-       struct tree *mrtree = mrtree;
+       struct tree *mrtree;
        int clean;
 
        if (show(o, 4)) {
@@ -2198,11 +3239,13 @@ int merge_recursive_generic(struct merge_options *o,
        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        clean = merge_recursive(o, head_commit, next_commit, ca,
                        result);
-       if (clean < 0)
+       if (clean < 0) {
+               rollback_lock_file(&lock);
                return clean;
+       }
 
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return err(o, _("Unable to write index."));
 
        return clean ? 0 : 1;
index 80d69d140195cc3ba1054050569e56bfc0277b56..d863cf88676ef321aad85d395f964efe5673a805 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef MERGE_RECURSIVE_H
 #define MERGE_RECURSIVE_H
 
+#include "unpack-trees.h"
 #include "string-list.h"
 
 struct merge_options {
@@ -27,6 +28,32 @@ struct merge_options {
        struct strbuf obuf;
        struct hashmap current_file_dir_set;
        struct string_list df_conflict_file_set;
+       struct unpack_trees_options unpack_opts;
+};
+
+/*
+ * For dir_rename_entry, directory names are stored as a full path from the
+ * toplevel of the repository and do not include a trailing '/'.  Also:
+ *
+ *   dir:                original name of directory being renamed
+ *   non_unique_new_dir: if true, could not determine new_dir
+ *   new_dir:            final name of directory being renamed
+ *   possible_new_dirs:  temporary used to help determine new_dir; see comments
+ *                       in get_directory_renames() for details
+ */
+struct dir_rename_entry {
+       struct hashmap_entry ent; /* must be the first member! */
+       char *dir;
+       unsigned non_unique_new_dir:1;
+       struct strbuf new_dir;
+       struct string_list possible_new_dirs;
+};
+
+struct collision_entry {
+       struct hashmap_entry ent; /* must be the first member! */
+       char *target_file;
+       struct string_list source_files;
+       unsigned reported_already:1;
 };
 
 /* merge_trees() but with recursive ancestor consolidation */
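
For readers wiring these structures up elsewhere, lookups would be keyed by the directory name following the hashmap conventions of this era.  The following is an illustrative sketch only, not the patch's code (merge-recursive.c keeps its own private helpers for this), and it assumes the four-argument hashmap_cmp_fn signature in use at the time:

static int dir_rename_cmp(const void *unused_cmp_data,
                          const void *entry,
                          const void *entry_or_key,
                          const void *unused_keydata)
{
        const struct dir_rename_entry *e1 = entry;
        const struct dir_rename_entry *e2 = entry_or_key;

        return strcmp(e1->dir, e2->dir);
}

static struct dir_rename_entry *lookup_dir_rename(struct hashmap *map,
                                                  char *dir)
{
        struct dir_rename_entry key;

        hashmap_entry_init(&key, strhash(dir));
        key.dir = dir;
        return hashmap_get(map, &key, NULL);
}

The map itself would be prepared with hashmap_init(map, dir_rename_cmp, NULL, 0) before any entries are added.
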
diff --git a/merge.c b/merge.c
index 195b5787005ee8f63bc10845f6d788aa4730a61b..f06a4773d4f4093d700c652accc79ae17f161a59 100644 (file)
--- a/merge.c
+++ b/merge.c
@@ -113,17 +113,23 @@ int checkout_fast_forward(const struct object_id *head,
        setup_unpack_trees_porcelain(&opts, "merge");
 
        trees[nr_trees] = parse_tree_indirect(head);
-       if (!trees[nr_trees++])
+       if (!trees[nr_trees++]) {
+               rollback_lock_file(&lock_file);
                return -1;
+       }
        trees[nr_trees] = parse_tree_indirect(remote);
-       if (!trees[nr_trees++])
+       if (!trees[nr_trees++]) {
+               rollback_lock_file(&lock_file);
                return -1;
+       }
        for (i = 0; i < nr_trees; i++) {
                parse_tree(trees[i]);
                init_tree_desc(t+i, trees[i]->buffer, trees[i]->size);
        }
-       if (unpack_trees(nr_trees, t, &opts))
+       if (unpack_trees(nr_trees, t, &opts)) {
+               rollback_lock_file(&lock_file);
                return -1;
+       }
        if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
                return error(_("unable to write new index file"));
        return 0;
index 45c98db0a057e93b5f3f84eba2008da86130010d..163849831c9f11316ce97c649b77c32cf2eed276 100644 (file)
@@ -578,6 +578,8 @@ static void threaded_lazy_init_name_hash(
 
 static void lazy_init_name_hash(struct index_state *istate)
 {
+       uint64_t start = getnanotime();
+
        if (istate->name_hash_initialized)
                return;
        hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
@@ -600,6 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
        }
 
        istate->name_hash_initialized = 1;
+       trace_performance_since(start, "initialize name hash");
 }
 
 /*
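
The two lines added above follow Git's usual performance-tracing pattern: record a start timestamp with getnanotime() and report the elapsed time with trace_performance_since(), which prints only when GIT_TRACE_PERFORMANCE is enabled.  A minimal sketch of the same pattern around a hypothetical helper:

static void build_some_cache(void)
{
        uint64_t start = getnanotime();

        /* ... expensive one-time initialization to be measured ... */

        trace_performance_since(start, "build some cache");
}
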
@@ -696,12 +699,12 @@ void adjust_dirname_case(struct index_state *istate, char *name)
                if (*ptr == '/') {
                        struct dir_entry *dir;
 
-                       ptr++;
-                       dir = find_dir_entry(istate, name, ptr - name + 1);
+                       dir = find_dir_entry(istate, name, ptr - name);
                        if (dir) {
                                memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
-                               startPtr = ptr;
+                               startPtr = ptr + 1;
                        }
+                       ptr++;
                }
        }
 }
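
With the corrected arithmetic, the length handed to find_dir_entry() covers the path up to, but not including, each '/', and startPtr only advances past the slash once a matching directory entry has been copied in.  A standalone illustration of the prefix lengths involved (plain C, not Git code):

#include <stdio.h>

int main(void)
{
        const char *name = "Foo/Bar/baz.c";
        const char *ptr;

        for (ptr = name; *ptr; ptr++)
                if (*ptr == '/')
                        printf("lookup prefix: \"%.*s\" (len %d)\n",
                               (int)(ptr - name), name, (int)(ptr - name));
        return 0;
}

This prints "Foo" (len 3) and "Foo/Bar" (len 7), matching the ptr - name length now passed to find_dir_entry().
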
index 17ee8602b3d2fbc2469a98e6cdeab0f2ff2b7ad3..e61988e503b0b2097afeb6716123d88429c0717d 100644 (file)
@@ -54,10 +54,10 @@ int notes_cache_write(struct notes_cache *c)
        if (!c->tree.dirty)
                return 0;
 
-       if (write_notes_tree(&c->tree, tree_oid.hash))
+       if (write_notes_tree(&c->tree, &tree_oid))
                return -1;
-       if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL,
-                       commit_oid.hash, NULL, NULL) < 0)
+       if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL,
+                       &commit_oid, NULL, NULL) < 0)
                return -1;
        if (update_ref("update notes cache", c->tree.update_ref, &commit_oid,
                       NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
@@ -77,7 +77,7 @@ char *notes_cache_get(struct notes_cache *c, struct object_id *key_oid,
        value_oid = get_note(&c->tree, key_oid);
        if (!value_oid)
                return NULL;
-       value = read_sha1_file(value_oid->hash, &type, &size);
+       value = read_object_file(value_oid, &type, &size);
 
        *outsize = size;
        return value;
@@ -88,7 +88,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid,
 {
        struct object_id value_oid;
 
-       if (write_sha1_file(data, size, "blob", value_oid.hash) < 0)
+       if (write_object_file(data, size, "blob", &value_oid) < 0)
                return -1;
        return add_note(&c->tree, key_oid, &value_oid, NULL);
 }
index 0f6573cb17cbbbc2219453bdca424fd7ebc156dc..8e0726a9418e3b24bc8e665057f607e18e7d4206 100644 (file)
@@ -322,7 +322,7 @@ static void write_note_to_worktree(const struct object_id *obj,
 {
        enum object_type type;
        unsigned long size;
-       void *buf = read_sha1_file(note->hash, &type, &size);
+       void *buf = read_object_file(note, &type, &size);
 
        if (!buf)
                die("cannot read note %s for object %s",
@@ -642,9 +642,8 @@ int notes_merge(struct notes_merge_options *o,
                struct commit_list *parents = NULL;
                commit_list_insert(remote, &parents); /* LIFO order */
                commit_list_insert(local, &parents);
-               create_notes_commit(local_tree, parents,
-                                   o->commit_msg.buf, o->commit_msg.len,
-                                   result_oid->hash);
+               create_notes_commit(local_tree, parents, o->commit_msg.buf,
+                                   o->commit_msg.len, result_oid);
        }
 
 found_result:
@@ -718,8 +717,8 @@ int notes_merge_commit(struct notes_merge_options *o,
                strbuf_setlen(&path, baselen);
        }
 
-       create_notes_commit(partial_tree, partial_commit->parents,
-                           msg, strlen(msg), result_oid->hash);
+       create_notes_commit(partial_tree, partial_commit->parents, msg,
+                           strlen(msg), result_oid);
        unuse_commit_buffer(partial_commit, buffer);
        if (o->verbosity >= 4)
                printf("Finalized notes merge commit: %s\n",
index 5c8e70c98fd26cca8bc690bc92653dd578a78c92..02407fe2a7327947dda6bf755405d2778ae450ae 100644 (file)
@@ -6,13 +6,13 @@
 
 void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
                         const char *msg, size_t msg_len,
-                        unsigned char *result_sha1)
+                        struct object_id *result_oid)
 {
        struct object_id tree_oid;
 
        assert(t->initialized);
 
-       if (write_notes_tree(t, tree_oid.hash))
+       if (write_notes_tree(t, &tree_oid))
                die("Failed to write notes tree to database");
 
        if (!parents) {
@@ -27,7 +27,8 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
                /* else: t->ref points to nothing, assume root/orphan commit */
        }
 
-       if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL))
+       if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL,
+                       NULL))
                die("Failed to commit notes tree to database");
 }
 
@@ -47,7 +48,7 @@ void commit_notes(struct notes_tree *t, const char *msg)
        strbuf_addstr(&buf, msg);
        strbuf_complete_line(&buf);
 
-       create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash);
+       create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid);
        strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
        update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0,
                   UPDATE_REFS_DIE_ON_ERR);
index 11905783989aa0d87be08e0c4311762b4647e9a4..5d79cbef512e3bdbbfafedbd7650036aff695f83 100644 (file)
@@ -15,7 +15,8 @@
  * The resulting commit SHA1 is stored in result_sha1.
  */
 void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
-                        const char *msg, size_t msg_len, unsigned char *result_sha1);
+                        const char *msg, size_t msg_len,
+                        struct object_id *result_oid);
 
 void commit_notes(struct notes_tree *t, const char *msg);
 
diff --git a/notes.c b/notes.c
index c7f21fae441067f7250caf0b4186fcbcbc7630bb..a386d450c4c812ef30d0fc661fe2c03e1d062a83 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -270,8 +270,8 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree,
                                if (!oidcmp(&l->val_oid, &entry->val_oid))
                                        return 0;
 
-                               ret = combine_notes(l->val_oid.hash,
-                                                   entry->val_oid.hash);
+                               ret = combine_notes(&l->val_oid,
+                                                   &entry->val_oid);
                                if (!ret && is_null_oid(&l->val_oid))
                                        note_tree_remove(t, tree, n, entry);
                                free(entry);
@@ -667,7 +667,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws)
                ret = tree_write_stack_finish_subtree(n);
                if (ret)
                        return ret;
-               ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash);
+               ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s);
                if (ret)
                        return ret;
                strbuf_release(&n->buf);
@@ -786,8 +786,8 @@ static int prune_notes_helper(const struct object_id *object_oid,
        return 0;
 }
 
-int combine_notes_concatenate(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_concatenate(struct object_id *cur_oid,
+                             const struct object_id *new_oid)
 {
        char *cur_msg = NULL, *new_msg = NULL, *buf;
        unsigned long cur_len, new_len, buf_len;
@@ -795,18 +795,18 @@ int combine_notes_concatenate(unsigned char *cur_sha1,
        int ret;
 
        /* read in both note blob objects */
-       if (!is_null_sha1(new_sha1))
-               new_msg = read_sha1_file(new_sha1, &new_type, &new_len);
+       if (!is_null_oid(new_oid))
+               new_msg = read_object_file(new_oid, &new_type, &new_len);
        if (!new_msg || !new_len || new_type != OBJ_BLOB) {
                free(new_msg);
                return 0;
        }
-       if (!is_null_sha1(cur_sha1))
-               cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len);
+       if (!is_null_oid(cur_oid))
+               cur_msg = read_object_file(cur_oid, &cur_type, &cur_len);
        if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
                free(cur_msg);
                free(new_msg);
-               hashcpy(cur_sha1, new_sha1);
+               oidcpy(cur_oid, new_oid);
                return 0;
        }
 
@@ -825,20 +825,20 @@ int combine_notes_concatenate(unsigned char *cur_sha1,
        free(new_msg);
 
        /* create a new blob object from buf */
-       ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1);
+       ret = write_object_file(buf, buf_len, blob_type, cur_oid);
        free(buf);
        return ret;
 }
 
-int combine_notes_overwrite(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_overwrite(struct object_id *cur_oid,
+                           const struct object_id *new_oid)
 {
-       hashcpy(cur_sha1, new_sha1);
+       oidcpy(cur_oid, new_oid);
        return 0;
 }
 
-int combine_notes_ignore(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_ignore(struct object_id *cur_oid,
+                        const struct object_id *new_oid)
 {
        return 0;
 }
@@ -848,17 +848,17 @@ int combine_notes_ignore(unsigned char *cur_sha1,
  * newlines removed.
  */
 static int string_list_add_note_lines(struct string_list *list,
-                                     const unsigned char *sha1)
+                                     const struct object_id *oid)
 {
        char *data;
        unsigned long len;
        enum object_type t;
 
-       if (is_null_sha1(sha1))
+       if (is_null_oid(oid))
                return 0;
 
        /* read_sha1_file NUL-terminates */
-       data = read_sha1_file(sha1, &t, &len);
+       data = read_object_file(oid, &t, &len);
        if (t != OBJ_BLOB || !data || !len) {
                free(data);
                return t != OBJ_BLOB || !data;
@@ -884,17 +884,17 @@ static int string_list_join_lines_helper(struct string_list_item *item,
        return 0;
 }
 
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+                               const struct object_id *new_oid)
 {
        struct string_list sort_uniq_list = STRING_LIST_INIT_DUP;
        struct strbuf buf = STRBUF_INIT;
        int ret = 1;
 
        /* read both note blob objects into unique_lines */
-       if (string_list_add_note_lines(&sort_uniq_list, cur_sha1))
+       if (string_list_add_note_lines(&sort_uniq_list, cur_oid))
                goto out;
-       if (string_list_add_note_lines(&sort_uniq_list, new_sha1))
+       if (string_list_add_note_lines(&sort_uniq_list, new_oid))
                goto out;
        string_list_remove_empty_items(&sort_uniq_list, 0);
        string_list_sort(&sort_uniq_list);
@@ -905,7 +905,7 @@ int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
                                 string_list_join_lines_helper, &buf))
                goto out;
 
-       ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1);
+       ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid);
 
 out:
        strbuf_release(&buf);
@@ -1012,7 +1012,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref,
                return;
        if (flags & NOTES_INIT_WRITABLE && read_ref(notes_ref, &object_oid))
                die("Cannot use notes ref %s", notes_ref);
-       if (get_tree_entry(object_oid.hash, "", oid.hash, &mode))
+       if (get_tree_entry(&object_oid, "", &oid, &mode))
                die("Failed to read notes tree referenced by %s (%s)",
                    notes_ref, oid_to_hex(&object_oid));
 
@@ -1123,11 +1123,12 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
        return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data);
 }
 
-int write_notes_tree(struct notes_tree *t, unsigned char *result)
+int write_notes_tree(struct notes_tree *t, struct object_id *result)
 {
        struct tree_write_stack root;
        struct write_each_note_data cb_data;
        int ret;
+       int flags;
 
        if (!t)
                t = &default_notes_tree;
@@ -1141,12 +1142,12 @@ int write_notes_tree(struct notes_tree *t, unsigned char *result)
        cb_data.next_non_note = t->first_non_note;
 
        /* Write tree objects representing current notes tree */
-       ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
-                               FOR_EACH_NOTE_YIELD_SUBTREES,
-                       write_each_note, &cb_data) ||
-               write_each_non_note_until(NULL, &cb_data) ||
-               tree_write_stack_finish_subtree(&root) ||
-               write_sha1_file(root.buf.buf, root.buf.len, tree_type, result);
+       flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
+               FOR_EACH_NOTE_YIELD_SUBTREES;
+       ret = for_each_note(t, flags, write_each_note, &cb_data) ||
+             write_each_non_note_until(NULL, &cb_data) ||
+             tree_write_stack_finish_subtree(&root) ||
+             write_object_file(root.buf.buf, root.buf.len, tree_type, result);
        strbuf_release(&root.buf);
        return ret;
 }
@@ -1216,7 +1217,7 @@ static void format_note(struct notes_tree *t, const struct object_id *object_oid
        if (!oid)
                return;
 
-       if (!(msg = read_sha1_file(oid->hash, &type, &msglen)) || type != OBJ_BLOB) {
+       if (!(msg = read_object_file(oid, &type, &msglen)) || type != OBJ_BLOB) {
                free(msg);
                return;
        }
diff --git a/notes.h b/notes.h
index 3848c2fb3f03510c1d0e1bd23ef6fbb3060b89c6..0433f45db55b5b807abcd633f507e9c75324d4e6 100644 (file)
--- a/notes.h
+++ b/notes.h
@@ -9,27 +9,32 @@
  * When adding a new note annotating the same object as an existing note, it is
  * up to the caller to decide how to combine the two notes. The decision is
  * made by passing in a function of the following form. The function accepts
- * two SHA1s -- of the existing note and the new note, respectively. The
+ * two object_ids -- of the existing note and the new note, respectively. The
  * function then combines the notes in whatever way it sees fit, and writes the
- * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return
+ * resulting oid into the first argument (cur_oid). A non-zero return
  * value indicates failure.
  *
- * The two given SHA1s shall both be non-NULL and different from each other.
- * Either of them (but not both) may be == null_sha1, which indicates an
- * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1,
+ * The two given object_ids shall both be non-NULL and different from each
+ * other. Either of them (but not both) may be == null_oid, which indicates an
+ * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid,
  * the note will be removed from the notes tree.
  *
  * The default combine_notes function (you get this when passing NULL) is
  * combine_notes_concatenate(), which appends the contents of the new note to
  * the contents of the existing note.
  */
-typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1);
+typedef int (*combine_notes_fn)(struct object_id *cur_oid,
+                               const struct object_id *new_oid);
 
 /* Common notes combinators */
-int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1);
+int combine_notes_concatenate(struct object_id *cur_oid,
+                             const struct object_id *new_oid);
+int combine_notes_overwrite(struct object_id *cur_oid,
+                           const struct object_id *new_oid);
+int combine_notes_ignore(struct object_id *cur_oid,
+                        const struct object_id *new_oid);
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+                               const struct object_id *new_oid);
 
 /*
  * Notes tree object
@@ -212,7 +217,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
  * Write the given notes_tree structure to the object database
  *
  * Creates a new tree object encapsulating the current state of the given
- * notes_tree, and stores its SHA1 into the 'result' argument.
+ * notes_tree, and stores its object id into the 'result' argument.
  *
  * Returns zero on success, non-zero on failure.
  *
@@ -220,7 +225,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
  * this function has returned zero. Please also remember to create a
  * corresponding commit object, and update the appropriate notes ref.
  */
-int write_notes_tree(struct notes_tree *t, unsigned char *result);
+int write_notes_tree(struct notes_tree *t, struct object_id *result);
 
 /* Flags controlling the operation of prune */
 #define NOTES_PRUNE_VERBOSE 1
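
With the combinator interface now taking object_id pointers, a caller-supplied combine_notes_fn has the same shape as the built-ins declared above.  Below is a hypothetical combinator, not part of this patch, that keeps whichever note blob is longer; it leans on read_object_file() and oidcpy() the same way notes.c does, and would be passed to init_notes() in place of NULL:

static int combine_notes_keep_longer(struct object_id *cur_oid,
                                     const struct object_id *new_oid)
{
        enum object_type cur_type = OBJ_BAD, new_type = OBJ_BAD;
        unsigned long cur_len = 0, new_len = 0;
        void *cur_msg = NULL, *new_msg = NULL;

        if (!is_null_oid(cur_oid))
                cur_msg = read_object_file(cur_oid, &cur_type, &cur_len);
        if (!is_null_oid(new_oid))
                new_msg = read_object_file(new_oid, &new_type, &new_len);

        if (cur_msg && cur_type != OBJ_BLOB)
                cur_len = 0;    /* treat a non-blob as an empty note */
        if (new_msg && new_type == OBJ_BLOB && new_len > cur_len)
                oidcpy(cur_oid, new_oid);       /* the new note wins */

        free(cur_msg);
        free(new_msg);
        return 0;
}
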
index 4c2cf7ff5d525b8559fc9099b1b957b0e61f3f97..a0a756f24f33621bbe83cbe69b18eef6790edacf 100644 (file)
--- a/object.c
+++ b/object.c
@@ -28,7 +28,7 @@ static const char *object_type_strings[] = {
        "tag",          /* OBJ_TAG = 4 */
 };
 
-const char *typename(unsigned int type)
+const char *type_name(unsigned int type)
 {
        if (type >= ARRAY_SIZE(object_type_strings))
                return NULL;
@@ -168,7 +168,7 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet)
                if (!quiet)
                        error("object %s is a %s, not a %s",
                              oid_to_hex(&obj->oid),
-                             typename(obj->type), typename(type));
+                             type_name(obj->type), type_name(type));
                return NULL;
        }
 }
@@ -246,7 +246,7 @@ struct object *parse_object(const struct object_id *oid)
        unsigned long size;
        enum object_type type;
        int eaten;
-       const unsigned char *repl = lookup_replace_object(oid->hash);
+       const struct object_id *repl = lookup_replace_object(oid);
        void *buffer;
        struct object *obj;
 
@@ -256,8 +256,8 @@ struct object *parse_object(const struct object_id *oid)
 
        if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
            (!obj && has_object_file(oid) &&
-            sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
-               if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
+            oid_object_info(oid, NULL) == OBJ_BLOB)) {
+               if (check_object_signature(repl, NULL, 0, NULL) < 0) {
                        error("sha1 mismatch %s", oid_to_hex(oid));
                        return NULL;
                }
@@ -265,11 +265,11 @@ struct object *parse_object(const struct object_id *oid)
                return lookup_object(oid->hash);
        }
 
-       buffer = read_sha1_file(oid->hash, &type, &size);
+       buffer = read_object_file(oid, &type, &size);
        if (buffer) {
-               if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
+               if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
                        free(buffer);
-                       error("sha1 mismatch %s", sha1_to_hex(repl));
+                       error("sha1 mismatch %s", oid_to_hex(repl));
                        return NULL;
                }
 
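As a hedged illustration of the renamed helpers used in parse_object() above (read_object_file(), type_name(), oid_to_hex()), a caller might print an object's type and size like this; the snippet assumes Git's cache.h/object.h and is only a sketch, not code from this series.

static void show_object_type(const struct object_id *oid)
{
        enum object_type type;
        unsigned long size;
        void *buf = read_object_file(oid, &type, &size);

        if (!buf)
                die("unable to read object %s", oid_to_hex(oid));
        printf("%s %lu\n", type_name(type), size);
        free(buf);
}
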
index 87563d90562b51859da8313b105a3e7785ed9d26..f13f85b2a94e3afc15debfbaf89416b5cda45acb 100644 (file)
--- a/object.h
+++ b/object.h
@@ -28,18 +28,22 @@ struct object_array {
 #define TYPE_BITS   3
 /*
  * object flag allocation:
- * revision.h:      0---------10                                26
- * fetch-pack.c:    0---5
- * walker.c:        0-2
- * upload-pack.c:       4       11----------------19
- * builtin/blame.c:               12-13
- * bisect.c:                               16
- * bundle.c:                               16
- * http-push.c:                            16-----19
- * commit.c:                               16-----19
- * sha1_name.c:                                     20
- * list-objects-filter.c:                             21
- * builtin/fsck.c:  0--3
+ * revision.h:               0---------10                                26
+ * fetch-pack.c:             0----5
+ * walker.c:                 0-2
+ * upload-pack.c:                4       11----------------19
+ * builtin/blame.c:                        12-13
+ * bisect.c:                                        16
+ * bundle.c:                                        16
+ * http-push.c:                                     16-----19
+ * commit.c:                                        16-----19
+ * sha1_name.c:                                              20
+ * list-objects-filter.c:                                      21
+ * builtin/fsck.c:           0--3
+ * builtin/index-pack.c:                                     2021
+ * builtin/pack-objects.c:                                   20
+ * builtin/reflog.c:                   10--12
+ * builtin/unpack-objects.c:                                 2021
  */
 #define FLAG_BITS  27
 
@@ -53,7 +57,7 @@ struct object {
        struct object_id oid;
 };
 
-extern const char *typename(unsigned int type);
+extern const char *type_name(unsigned int type);
 extern int type_from_string_gently(const char *str, ssize_t, int gentle);
 #define type_from_string(str) type_from_string_gently(str, -1, 0)
 
index a8df5ce2ab67bfdb5445e4c02e4ad65032abe9a0..41ae27fb19a94a4c44058d8850a9592c41b82f90 100644 (file)
--- a/pack-bitmap-write.c
+++ b/pack-bitmap-write.c
@@ -73,8 +73,7 @@ void bitmap_writer_build_type_index(struct pack_idx_entry **index,
                        break;
 
                default:
-                       real_type = sha1_object_info(entry->idx.oid.hash,
-                                                    NULL);
+                       real_type = oid_object_info(&entry->idx.oid, NULL);
                        break;
                }
 
@@ -440,19 +439,19 @@ void bitmap_writer_select_commits(struct commit **indexed_commits,
 }
 
 
-static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
+static int hashwrite_ewah_helper(void *f, const void *buf, size_t len)
 {
-       /* sha1write will die on error */
-       sha1write(f, buf, len);
+       /* hashwrite will die on error */
+       hashwrite(f, buf, len);
        return len;
 }
 
 /**
  * Write the bitmap index to disk
  */
-static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap)
+static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap)
 {
-       if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0)
+       if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0)
                die("Failed to write bitmap index");
 }
 
@@ -462,7 +461,7 @@ static const unsigned char *sha1_access(size_t pos, void *table)
        return index[pos]->oid.hash;
 }
 
-static void write_selected_commits_v1(struct sha1file *f,
+static void write_selected_commits_v1(struct hashfile *f,
                                      struct pack_idx_entry **index,
                                      uint32_t index_nr)
 {
@@ -477,15 +476,15 @@ static void write_selected_commits_v1(struct sha1file *f,
                if (commit_pos < 0)
                        die("BUG: trying to write commit not in index");
 
-               sha1write_be32(f, commit_pos);
-               sha1write_u8(f, stored->xor_offset);
-               sha1write_u8(f, stored->flags);
+               hashwrite_be32(f, commit_pos);
+               hashwrite_u8(f, stored->xor_offset);
+               hashwrite_u8(f, stored->flags);
 
                dump_bitmap(f, stored->write_as);
        }
 }
 
-static void write_hash_cache(struct sha1file *f,
+static void write_hash_cache(struct hashfile *f,
                             struct pack_idx_entry **index,
                             uint32_t index_nr)
 {
@@ -494,7 +493,7 @@ static void write_hash_cache(struct sha1file *f,
        for (i = 0; i < index_nr; ++i) {
                struct object_entry *entry = (struct object_entry *)index[i];
                uint32_t hash_value = htonl(entry->hash);
-               sha1write(f, &hash_value, sizeof(hash_value));
+               hashwrite(f, &hash_value, sizeof(hash_value));
        }
 }
 
@@ -511,13 +510,13 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        static uint16_t default_version = 1;
        static uint16_t flags = BITMAP_OPT_FULL_DAG;
        struct strbuf tmp_file = STRBUF_INIT;
-       struct sha1file *f;
+       struct hashfile *f;
 
        struct bitmap_disk_header header;
 
        int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX");
 
-       f = sha1fd(fd, tmp_file.buf);
+       f = hashfd(fd, tmp_file.buf);
 
        memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
        header.version = htons(default_version);
@@ -525,7 +524,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        header.entry_count = htonl(writer.selected_nr);
        hashcpy(header.checksum, writer.pack_checksum);
 
-       sha1write(f, &header, sizeof(header));
+       hashwrite(f, &header, sizeof(header));
        dump_bitmap(f, writer.commits);
        dump_bitmap(f, writer.trees);
        dump_bitmap(f, writer.blobs);
@@ -535,7 +534,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        if (options & BITMAP_OPT_HASH_CACHE)
                write_hash_cache(f, index, index_nr);
 
-       sha1close(f, NULL, CSUM_FSYNC);
+       hashclose(f, NULL, CSUM_FSYNC);
 
        if (adjust_shared_perm(tmp_file.buf))
                die_errno("unable to make temporary bitmap file readable");
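
The sha1file -> hashfile rename above does not change the write pattern. A minimal sketch of the csum-file API as used here (all names taken from the hunks above) might look like this:

static void write_with_trailing_checksum(int fd, const char *name,
                                         const void *buf, size_t len)
{
        struct hashfile *f = hashfd(fd, name);

        hashwrite(f, buf, len);
        /* CSUM_FSYNC writes the checksum trailer and fsyncs before close. */
        hashclose(f, NULL, CSUM_FSYNC);
}
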
index 2378f2599928abbb42d13a27b5baf3f531d9e47c..385d964bdd1691b962250ec5a49abf2253ca75c1 100644 (file)
--- a/pack-check.c
+++ b/pack-check.c
@@ -42,7 +42,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
        } while (len);
 
        index_crc = p->index_data;
-       index_crc += 2 + 256 + p->num_objects * (20/4) + nr;
+       index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
 
        return data_crc != ntohl(*index_crc);
 }
@@ -55,7 +55,7 @@ static int verify_packfile(struct packed_git *p,
 {
        off_t index_size = p->index_size;
        const unsigned char *index_base = p->index_data;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
        unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
        off_t offset = 0, pack_sig_ofs = 0;
        uint32_t nr_objects, i;
@@ -65,24 +65,24 @@ static int verify_packfile(struct packed_git *p,
        if (!is_pack_valid(p))
                return error("packfile %s cannot be accessed", p->pack_name);
 
-       git_SHA1_Init(&ctx);
+       the_hash_algo->init_fn(&ctx);
        do {
                unsigned long remaining;
                unsigned char *in = use_pack(p, w_curs, offset, &remaining);
                offset += remaining;
                if (!pack_sig_ofs)
-                       pack_sig_ofs = p->pack_size - 20;
+                       pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
                if (offset > pack_sig_ofs)
                        remaining -= (unsigned int)(offset - pack_sig_ofs);
-               git_SHA1_Update(&ctx, in, remaining);
+               the_hash_algo->update_fn(&ctx, in, remaining);
        } while (offset < pack_sig_ofs);
-       git_SHA1_Final(hash, &ctx);
+       the_hash_algo->final_fn(hash, &ctx);
        pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
        if (hashcmp(hash, pack_sig))
-               err = error("%s SHA1 checksum mismatch",
+               err = error("%s pack checksum mismatch",
                            p->pack_name);
-       if (hashcmp(index_base + index_size - 40, pack_sig))
-               err = error("%s SHA1 does not match its index",
+       if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+               err = error("%s pack checksum does not match its index",
                            p->pack_name);
        unuse_pack(w_curs);
 
@@ -127,7 +127,7 @@ static int verify_packfile(struct packed_git *p,
 
                if (type == OBJ_BLOB && big_file_threshold <= size) {
                        /*
-                        * Let check_sha1_signature() check it with
+                        * Let check_object_signature() check it with
                         * the streaming interface; no point slurping
                         * the data in-core only to discard.
                         */
@@ -142,7 +142,7 @@ static int verify_packfile(struct packed_git *p,
                        err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
                                    oid_to_hex(entries[i].oid.oid), p->pack_name,
                                    (uintmax_t)entries[i].offset);
-               else if (check_sha1_signature(entries[i].oid.hash, data, size, typename(type)))
+               else if (check_object_signature(entries[i].oid.oid, data, size, type_name(type)))
                        err = error("packed %s from %s is corrupt",
                                    oid_to_hex(entries[i].oid.oid), p->pack_name);
                else if (fn) {
@@ -166,8 +166,8 @@ int verify_pack_index(struct packed_git *p)
 {
        off_t index_size;
        const unsigned char *index_base;
-       git_SHA_CTX ctx;
-       unsigned char sha1[20];
+       git_hash_ctx ctx;
+       unsigned char hash[GIT_MAX_RAWSZ];
        int err = 0;
 
        if (open_pack_index(p))
@@ -176,11 +176,11 @@ int verify_pack_index(struct packed_git *p)
        index_base = p->index_data;
 
        /* Verify SHA1 sum of the index file */
-       git_SHA1_Init(&ctx);
-       git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20));
-       git_SHA1_Final(sha1, &ctx);
-       if (hashcmp(sha1, index_base + index_size - 20))
-               err = error("Packfile index for %s SHA1 mismatch",
+       the_hash_algo->init_fn(&ctx);
+       the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+       the_hash_algo->final_fn(hash, &ctx);
+       if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+               err = error("Packfile index for %s hash mismatch",
                            p->pack_name);
        return err;
 }
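
The conversion from the git_SHA1_* calls to the_hash_algo above follows a fixed pattern; a minimal sketch (a hypothetical helper, assuming the caller provides a buffer of at least the_hash_algo->rawsz bytes) is:

static void hash_buffer(unsigned char *hash /* >= the_hash_algo->rawsz bytes */,
                        const void *buf, size_t len)
{
        git_hash_ctx ctx;

        the_hash_algo->init_fn(&ctx);
        the_hash_algo->update_fn(&ctx, buf, len);
        the_hash_algo->final_fn(hash, &ctx);
}
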
index fea62841920c9647edfcfba249a59bfb91170d8e..d775c7406dd5a869a1ce4d28f6ef872e08476b77 100644 (file)
--- a/pack-write.c
+++ b/pack-write.c
@@ -46,7 +46,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                           int nr_objects, const struct pack_idx_option *opts,
                           const unsigned char *sha1)
 {
-       struct sha1file *f;
+       struct hashfile *f;
        struct pack_idx_entry **sorted_by_sha, **list, **last;
        off_t last_obj_offset = 0;
        uint32_t array[256];
@@ -68,7 +68,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
 
        if (opts->flags & WRITE_IDX_VERIFY) {
                assert(index_name);
-               f = sha1fd_check(index_name);
+               f = hashfd_check(index_name);
        } else {
                if (!index_name) {
                        struct strbuf tmp_file = STRBUF_INIT;
@@ -80,7 +80,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                        if (fd < 0)
                                die_errno("unable to create '%s'", index_name);
                }
-               f = sha1fd(fd, index_name);
+               f = hashfd(fd, index_name);
        }
 
        /* if last object's offset is >= 2^31 we should use index V2 */
@@ -91,7 +91,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                struct pack_idx_header hdr;
                hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
                hdr.idx_version = htonl(index_version);
-               sha1write(f, &hdr, sizeof(hdr));
+               hashwrite(f, &hdr, sizeof(hdr));
        }
 
        /*
@@ -110,7 +110,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                array[i] = htonl(next - sorted_by_sha);
                list = next;
        }
-       sha1write(f, array, 256 * 4);
+       hashwrite(f, array, 256 * 4);
 
        /*
         * Write the actual SHA1 entries..
@@ -120,9 +120,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                struct pack_idx_entry *obj = *list++;
                if (index_version < 2) {
                        uint32_t offset = htonl(obj->offset);
-                       sha1write(f, &offset, 4);
+                       hashwrite(f, &offset, 4);
                }
-               sha1write(f, obj->oid.hash, 20);
+               hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
                if ((opts->flags & WRITE_IDX_STRICT) &&
                    (i && !oidcmp(&list[-2]->oid, &obj->oid)))
                        die("The same object %s appears twice in the pack",
@@ -137,7 +137,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                for (i = 0; i < nr_objects; i++) {
                        struct pack_idx_entry *obj = *list++;
                        uint32_t crc32_val = htonl(obj->crc32);
-                       sha1write(f, &crc32_val, 4);
+                       hashwrite(f, &crc32_val, 4);
                }
 
                /* write the 32-bit offset table */
@@ -150,7 +150,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                                  ? (0x80000000 | nr_large_offset++)
                                  : obj->offset);
                        offset = htonl(offset);
-                       sha1write(f, &offset, 4);
+                       hashwrite(f, &offset, 4);
                }
 
                /* write the large offset table */
@@ -164,25 +164,25 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                                continue;
                        split[0] = htonl(offset >> 32);
                        split[1] = htonl(offset & 0xffffffff);
-                       sha1write(f, split, 8);
+                       hashwrite(f, split, 8);
                        nr_large_offset--;
                }
        }
 
-       sha1write(f, sha1, 20);
-       sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
+       hashwrite(f, sha1, the_hash_algo->rawsz);
+       hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
                            ? CSUM_CLOSE : CSUM_FSYNC));
        return index_name;
 }
 
-off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
+off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
 {
        struct pack_header hdr;
 
        hdr.hdr_signature = htonl(PACK_SIGNATURE);
        hdr.hdr_version = htonl(PACK_VERSION);
        hdr.hdr_entries = htonl(nr_entries);
-       sha1write(f, &hdr, sizeof(hdr));
+       hashwrite(f, &hdr, sizeof(hdr));
        return sizeof(hdr);
 }
 
@@ -203,20 +203,20 @@ off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
  * interested in the resulting SHA1 of pack data above partial_pack_offset.
  */
 void fixup_pack_header_footer(int pack_fd,
-                        unsigned char *new_pack_sha1,
+                        unsigned char *new_pack_hash,
                         const char *pack_name,
                         uint32_t object_count,
-                        unsigned char *partial_pack_sha1,
+                        unsigned char *partial_pack_hash,
                         off_t partial_pack_offset)
 {
        int aligned_sz, buf_sz = 8 * 1024;
-       git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
+       git_hash_ctx old_hash_ctx, new_hash_ctx;
        struct pack_header hdr;
        char *buf;
        ssize_t read_result;
 
-       git_SHA1_Init(&old_sha1_ctx);
-       git_SHA1_Init(&new_sha1_ctx);
+       the_hash_algo->init_fn(&old_hash_ctx);
+       the_hash_algo->init_fn(&new_hash_ctx);
 
        if (lseek(pack_fd, 0, SEEK_SET) != 0)
                die_errno("Failed seeking to start of '%s'", pack_name);
@@ -228,9 +228,9 @@ void fixup_pack_header_footer(int pack_fd,
                          pack_name);
        if (lseek(pack_fd, 0, SEEK_SET) != 0)
                die_errno("Failed seeking to start of '%s'", pack_name);
-       git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
+       the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
        hdr.hdr_entries = htonl(object_count);
-       git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr));
+       the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
        write_or_die(pack_fd, &hdr, sizeof(hdr));
        partial_pack_offset -= sizeof(hdr);
 
@@ -238,28 +238,28 @@ void fixup_pack_header_footer(int pack_fd,
        aligned_sz = buf_sz - sizeof(hdr);
        for (;;) {
                ssize_t m, n;
-               m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ?
+               m = (partial_pack_hash && partial_pack_offset < aligned_sz) ?
                        partial_pack_offset : aligned_sz;
                n = xread(pack_fd, buf, m);
                if (!n)
                        break;
                if (n < 0)
                        die_errno("Failed to checksum '%s'", pack_name);
-               git_SHA1_Update(&new_sha1_ctx, buf, n);
+               the_hash_algo->update_fn(&new_hash_ctx, buf, n);
 
                aligned_sz -= n;
                if (!aligned_sz)
                        aligned_sz = buf_sz;
 
-               if (!partial_pack_sha1)
+               if (!partial_pack_hash)
                        continue;
 
-               git_SHA1_Update(&old_sha1_ctx, buf, n);
+               the_hash_algo->update_fn(&old_hash_ctx, buf, n);
                partial_pack_offset -= n;
                if (partial_pack_offset == 0) {
-                       unsigned char sha1[20];
-                       git_SHA1_Final(sha1, &old_sha1_ctx);
-                       if (hashcmp(sha1, partial_pack_sha1) != 0)
+                       unsigned char hash[GIT_MAX_RAWSZ];
+                       the_hash_algo->final_fn(hash, &old_hash_ctx);
+                       if (hashcmp(hash, partial_pack_hash) != 0)
                                die("Unexpected checksum for %s "
                                    "(disk corruption?)", pack_name);
                        /*
@@ -267,23 +267,24 @@ void fixup_pack_header_footer(int pack_fd,
                         * pack, which also means making partial_pack_offset
                         * big enough not to matter anymore.
                         */
-                       git_SHA1_Init(&old_sha1_ctx);
+                       the_hash_algo->init_fn(&old_hash_ctx);
                        partial_pack_offset = ~partial_pack_offset;
                        partial_pack_offset -= MSB(partial_pack_offset, 1);
                }
        }
        free(buf);
 
-       if (partial_pack_sha1)
-               git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx);
-       git_SHA1_Final(new_pack_sha1, &new_sha1_ctx);
-       write_or_die(pack_fd, new_pack_sha1, 20);
+       if (partial_pack_hash)
+               the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
+       the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
+       write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
        fsync_or_die(pack_fd, pack_name);
 }
 
 char *index_pack_lockfile(int ip_out)
 {
-       char packname[46];
+       char packname[GIT_MAX_HEXSZ + 6];
+       const int len = the_hash_algo->hexsz + 6;
 
        /*
         * The first thing we expect from index-pack's output
@@ -292,9 +293,9 @@ char *index_pack_lockfile(int ip_out)
         * case, we need it to remove the corresponding .keep file
         * later on.  If we don't get that then tough luck with it.
         */
-       if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') {
+       if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') {
                const char *name;
-               packname[45] = 0;
+               packname[len-1] = 0;
                if (skip_prefix(packname, "keep\t", &name))
                        return xstrfmt("%s/pack/pack-%s.keep",
                                       get_object_directory(), name);
@@ -332,14 +333,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
        return n;
 }
 
-struct sha1file *create_tmp_packfile(char **pack_tmp_name)
+struct hashfile *create_tmp_packfile(char **pack_tmp_name)
 {
        struct strbuf tmpname = STRBUF_INIT;
        int fd;
 
        fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
        *pack_tmp_name = strbuf_detach(&tmpname, NULL);
-       return sha1fd(fd, *pack_tmp_name);
+       return hashfd(fd, *pack_tmp_name);
 }
 
 void finish_tmp_packfile(struct strbuf *name_buffer,
diff --git a/pack.h b/pack.h
index 8294341af174ad67693006dfcb49a7208732f45b..34a9d458b411927b7c1e121e88387e022841ea2a 100644 (file)
--- a/pack.h
+++ b/pack.h
@@ -81,7 +81,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry
 extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
 extern int verify_pack_index(struct packed_git *);
 extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
-extern off_t write_pack_header(struct sha1file *f, uint32_t);
+extern off_t write_pack_header(struct hashfile *f, uint32_t);
 extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
 extern char *index_pack_lockfile(int fd);
 
@@ -98,7 +98,7 @@ extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
 #define PH_ERROR_PROTOCOL      (-3)
 extern int read_pack_header(int fd, struct pack_header *);
 
-extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
+extern struct hashfile *create_tmp_packfile(char **pack_tmp_name);
 extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
 
 #endif
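
A hedged usage sketch for the two hashfile-based declarations above (not taken from this patch): open a temporary packfile and emit its header before streaming objects into it.

static struct hashfile *start_tmp_pack(char **tmp_name, uint32_t nr_objects)
{
        struct hashfile *f = create_tmp_packfile(tmp_name);

        /* write_pack_header() returns the number of header bytes written. */
        write_pack_header(f, nr_objects);
        return f;
}
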
index 0906b8f74184ebb83e20124c0946ecfd124653ee..0bc67d0e00966008f9f8a6fa1c1b6540569a7cc0 100644 (file)
--- a/packfile.c
+++ b/packfile.c
@@ -1108,13 +1108,13 @@ static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
 {
        int type;
        struct revindex_entry *revidx;
-       const unsigned char *sha1;
+       struct object_id oid;
        revidx = find_pack_revindex(p, obj_offset);
        if (!revidx)
                return OBJ_BAD;
-       sha1 = nth_packed_object_sha1(p, revidx->nr);
-       mark_bad_packed_object(p, sha1);
-       type = sha1_object_info(sha1, NULL);
+       nth_packed_object_oid(&oid, p, revidx->nr);
+       mark_bad_packed_object(p, oid.hash);
+       type = oid_object_info(&oid, NULL);
        if (type <= OBJ_NONE)
                return OBJ_BAD;
        return type;
@@ -1374,16 +1374,16 @@ int packed_object_info(struct packed_git *p, off_t obj_offset,
                *oi->disk_sizep = revidx[1].offset - obj_offset;
        }
 
-       if (oi->typep || oi->typename) {
+       if (oi->typep || oi->type_name) {
                enum object_type ptot;
                ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
                                             curpos);
                if (oi->typep)
                        *oi->typep = ptot;
-               if (oi->typename) {
-                       const char *tn = typename(ptot);
+               if (oi->type_name) {
+                       const char *tn = type_name(ptot);
                        if (tn)
-                               strbuf_addstr(oi->typename, tn);
+                               strbuf_addstr(oi->type_name, tn);
                }
                if (ptot < 0) {
                        type = OBJ_BAD;
@@ -1465,7 +1465,7 @@ struct unpack_entry_stack_ent {
        unsigned long size;
 };
 
-static void *read_object(const unsigned char *sha1, enum object_type *type,
+static void *read_object(const struct object_id *oid, enum object_type *type,
                         unsigned long *size)
 {
        struct object_info oi = OBJECT_INFO_INIT;
@@ -1474,7 +1474,7 @@ static void *read_object(const unsigned char *sha1, enum object_type *type,
        oi.sizep = size;
        oi.contentp = &content;
 
-       if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+       if (oid_object_info_extended(oid, &oi, 0) < 0)
                return NULL;
        return content;
 }
@@ -1514,11 +1514,11 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
                        struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
                        off_t len = revidx[1].offset - obj_offset;
                        if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
-                               const unsigned char *sha1 =
-                                       nth_packed_object_sha1(p, revidx->nr);
+                               struct object_id oid;
+                               nth_packed_object_oid(&oid, p, revidx->nr);
                                error("bad packed object CRC for %s",
-                                     sha1_to_hex(sha1));
-                               mark_bad_packed_object(p, sha1);
+                                     oid_to_hex(&oid));
+                               mark_bad_packed_object(p, oid.hash);
                                data = NULL;
                                goto out;
                        }
@@ -1601,16 +1601,16 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
                         * of a corrupted pack, and is better than failing outright.
                         */
                        struct revindex_entry *revidx;
-                       const unsigned char *base_sha1;
+                       struct object_id base_oid;
                        revidx = find_pack_revindex(p, obj_offset);
                        if (revidx) {
-                               base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+                               nth_packed_object_oid(&base_oid, p, revidx->nr);
                                error("failed to read delta base object %s"
                                      " at offset %"PRIuMAX" from %s",
-                                     sha1_to_hex(base_sha1), (uintmax_t)obj_offset,
+                                     oid_to_hex(&base_oid), (uintmax_t)obj_offset,
                                      p->pack_name);
-                               mark_bad_packed_object(p, base_sha1);
-                               base = read_object(base_sha1, &type, &base_size);
+                               mark_bad_packed_object(p, base_oid.hash);
+                               base = read_object(&base_oid, &type, &base_size);
                                external_base = base;
                        }
                }
@@ -1667,6 +1667,29 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
        return data;
 }
 
+int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result)
+{
+       const unsigned char *index_fanout = p->index_data;
+       const unsigned char *index_lookup;
+       int index_lookup_width;
+
+       if (!index_fanout)
+               BUG("bsearch_pack called without a valid pack-index");
+
+       index_lookup = index_fanout + 4 * 256;
+       if (p->index_version == 1) {
+               index_lookup_width = 24;
+               index_lookup += 4;
+       } else {
+               index_lookup_width = 20;
+               index_fanout += 8;
+               index_lookup += 8;
+       }
+
+       return bsearch_hash(oid->hash, (const uint32_t*)index_fanout,
+                           index_lookup, index_lookup_width, result);
+}
+
 const unsigned char *nth_packed_object_sha1(struct packed_git *p,
                                            uint32_t n)
 {
@@ -1733,52 +1756,18 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
 off_t find_pack_entry_one(const unsigned char *sha1,
                                  struct packed_git *p)
 {
-       const uint32_t *level1_ofs = p->index_data;
        const unsigned char *index = p->index_data;
-       unsigned hi, lo, stride;
-       static int debug_lookup = -1;
-
-       if (debug_lookup < 0)
-               debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+       struct object_id oid;
+       uint32_t result;
 
        if (!index) {
                if (open_pack_index(p))
                        return 0;
-               level1_ofs = p->index_data;
-               index = p->index_data;
-       }
-       if (p->index_version > 1) {
-               level1_ofs += 2;
-               index += 8;
-       }
-       index += 4 * 256;
-       hi = ntohl(level1_ofs[*sha1]);
-       lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
-       if (p->index_version > 1) {
-               stride = 20;
-       } else {
-               stride = 24;
-               index += 4;
        }
 
-       if (debug_lookup)
-               printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
-                      sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
-       while (lo < hi) {
-               unsigned mi = lo + (hi - lo) / 2;
-               int cmp = hashcmp(index + mi * stride, sha1);
-
-               if (debug_lookup)
-                       printf("lo %u hi %u rg %u mi %u\n",
-                              lo, hi, hi - lo, mi);
-               if (!cmp)
-                       return nth_packed_object_offset(p, mi);
-               if (cmp > 0)
-                       hi = mi;
-               else
-                       lo = mi+1;
-       }
+       hashcpy(oid.hash, sha1);
+       if (bsearch_pack(&oid, p, &result))
+               return nth_packed_object_offset(p, result);
        return 0;
 }
 
index efda10329ca09b6dc50caeeda139d9f512c922e6..a92c0b241cfa65066ab57995f0f902ddb79a3be4 100644 (file)
--- a/packfile.h
+++ b/packfile.h
@@ -80,6 +80,14 @@ extern struct packed_git *add_packed_git(const char *path, size_t path_len, int
  */
 extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
 
+/*
+ * Perform binary search on a pack-index for a given oid. Packfile is expected to
+ * have a valid pack-index.
+ *
+ * See 'bsearch_hash' for more information.
+ */
+int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result);
+
 /*
  * Return the SHA-1 of the nth object within the specified packfile.
  * Open the index if it is not already open.  The return value points
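
A small usage sketch for bsearch_pack(), mirroring the rewritten find_pack_entry_one() earlier in this series; the hypothetical helper assumes the pack-index has already been opened (e.g. via open_pack_index()).

static off_t lookup_offset_in_pack(struct packed_git *p,
                                   const struct object_id *oid)
{
        uint32_t pos;

        if (bsearch_pack(oid, p, &pos))
                return nth_packed_object_offset(p, pos);
        return 0;       /* not found in this pack */
}
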
index d02eb8b0151626bab833489d7139a9be34209a32..0f7059a8ab32a624775026d7dc2289245c87c192 100644 (file)
--- a/parse-options.c
+++ b/parse-options.c
@@ -317,14 +317,16 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
                return get_value(p, options, all_opts, flags ^ opt_flags);
        }
 
-       if (ambiguous_option)
-               return error("Ambiguous option: %s "
+       if (ambiguous_option) {
+               error("Ambiguous option: %s "
                        "(could be --%s%s or --%s%s)",
                        arg,
                        (ambiguous_flags & OPT_UNSET) ?  "no-" : "",
                        ambiguous_option->long_name,
                        (abbrev_flags & OPT_UNSET) ?  "no-" : "",
                        abbrev_option->long_name);
+               return -3;
+       }
        if (abbrev_option)
                return get_value(p, abbrev_option, all_opts, abbrev_flags);
        return -2;
@@ -425,6 +427,48 @@ void parse_options_start(struct parse_opt_ctx_t *ctx,
        parse_options_check(options);
 }
 
+/*
+ * TODO: we are not completing the --no-XXX form yet because there are
+ * many options that do not suppress it properly.
+ */
+static int show_gitcomp(struct parse_opt_ctx_t *ctx,
+                       const struct option *opts)
+{
+       for (; opts->type != OPTION_END; opts++) {
+               const char *suffix = "";
+
+               if (!opts->long_name)
+                       continue;
+               if (opts->flags & (PARSE_OPT_HIDDEN | PARSE_OPT_NOCOMPLETE))
+                       continue;
+
+               switch (opts->type) {
+               case OPTION_GROUP:
+                       continue;
+               case OPTION_STRING:
+               case OPTION_FILENAME:
+               case OPTION_INTEGER:
+               case OPTION_MAGNITUDE:
+               case OPTION_CALLBACK:
+                       if (opts->flags & PARSE_OPT_NOARG)
+                               break;
+                       if (opts->flags & PARSE_OPT_OPTARG)
+                               break;
+                       if (opts->flags & PARSE_OPT_LASTARG_DEFAULT)
+                               break;
+                       suffix = "=";
+                       break;
+               default:
+                       break;
+               }
+               if (opts->flags & PARSE_OPT_COMP_ARG)
+                       suffix = "=";
+               printf(" --%s%s", opts->long_name, suffix);
+       }
+       fputc('\n', stdout);
+       exit(0);
+}
+
 static int usage_with_options_internal(struct parse_opt_ctx_t *,
                                       const char * const *,
                                       const struct option *, int, int);
@@ -434,7 +478,6 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                       const char * const usagestr[])
 {
        int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
-       int err = 0;
 
        /* we must reset ->opt, unknown short option leave it dangling */
        ctx->opt = NULL;
@@ -455,11 +498,15 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                if (internal_help && ctx->total == 1 && !strcmp(arg + 1, "h"))
                        goto show_usage;
 
+               /* lone --git-completion-helper is asked by git-completion.bash */
+               if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper"))
+                       return show_gitcomp(ctx, options);
+
                if (arg[1] != '-') {
                        ctx->opt = arg + 1;
                        switch (parse_short_opt(ctx, options)) {
                        case -1:
-                               goto show_usage_error;
+                               return PARSE_OPT_ERROR;
                        case -2:
                                if (ctx->opt)
                                        check_typos(arg + 1, options);
@@ -472,7 +519,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        while (ctx->opt) {
                                switch (parse_short_opt(ctx, options)) {
                                case -1:
-                                       goto show_usage_error;
+                                       return PARSE_OPT_ERROR;
                                case -2:
                                        if (internal_help && *ctx->opt == 'h')
                                                goto show_usage;
@@ -504,9 +551,11 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        goto show_usage;
                switch (parse_long_opt(ctx, arg + 2, options)) {
                case -1:
-                       goto show_usage_error;
+                       return PARSE_OPT_ERROR;
                case -2:
                        goto unknown;
+               case -3:
+                       goto show_usage;
                }
                continue;
 unknown:
@@ -517,10 +566,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
        }
        return PARSE_OPT_DONE;
 
- show_usage_error:
-       err = 1;
  show_usage:
-       return usage_with_options_internal(ctx, usagestr, options, 0, err);
+       return usage_with_options_internal(ctx, usagestr, options, 0, 0);
 }
 
 int parse_options_end(struct parse_opt_ctx_t *ctx)
@@ -539,6 +586,7 @@ int parse_options(int argc, const char **argv, const char *prefix,
        parse_options_start(&ctx, argc, argv, prefix, options, flags);
        switch (parse_options_step(&ctx, options, usagestr)) {
        case PARSE_OPT_HELP:
+       case PARSE_OPT_ERROR:
                exit(129);
        case PARSE_OPT_NON_OPTION:
        case PARSE_OPT_DONE:
index af711227ae3aac7af07de0322a364ccac03b819d..dd14911a297a5b10705ecb31243c55a7dc2f193c 100644 (file)
--- a/parse-options.h
+++ b/parse-options.h
@@ -38,7 +38,9 @@ enum parse_opt_option_flags {
        PARSE_OPT_LASTARG_DEFAULT = 16,
        PARSE_OPT_NODASH = 32,
        PARSE_OPT_LITERAL_ARGHELP = 64,
-       PARSE_OPT_SHELL_EVAL = 256
+       PARSE_OPT_SHELL_EVAL = 256,
+       PARSE_OPT_NOCOMPLETE = 512,
+       PARSE_OPT_COMP_ARG = 1024
 };
 
 struct option;
@@ -89,6 +91,11 @@ typedef int parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
  *   PARSE_OPT_LITERAL_ARGHELP: says that argh shouldn't be enclosed in brackets
  *                             (i.e. '<argh>') in the help message.
  *                             Useful for options with multiple parameters.
+ *   PARSE_OPT_NOCOMPLETE: by default all visible options are completable
+ *                        by git-completion.bash. This option suppresses that.
+ *   PARSE_OPT_COMP_ARG: this option forces to git-completion.bash to
+ *                      complete an option as --name= not --name even if
+ *                      the option takes optional argument.
  *
  * `callback`::
  *   pointer to the callback to use for OPTION_CALLBACK or
@@ -112,19 +119,24 @@ struct option {
        intptr_t defval;
 };
 
+#define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \
+                                     PARSE_OPT_NOARG|(f), NULL, (b) }
+#define OPT_COUNTUP_F(s, l, v, h, f) { OPTION_COUNTUP, (s), (l), (v), NULL, \
+                                      (h), PARSE_OPT_NOARG|(f) }
+#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \
+                                         (h), PARSE_OPT_NOARG | (f), NULL, (i) }
+#define OPT_BOOL_F(s, l, v, h, f)   OPT_SET_INT_F(s, l, v, h, 1, f)
+
 #define OPT_END()                   { OPTION_END }
 #define OPT_ARGUMENT(l, h)          { OPTION_ARGUMENT, 0, (l), NULL, NULL, \
                                      (h), PARSE_OPT_NOARG}
 #define OPT_GROUP(h)                { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
-#define OPT_BIT(s, l, v, h, b)      { OPTION_BIT, (s), (l), (v), NULL, (h), \
-                                     PARSE_OPT_NOARG, NULL, (b) }
+#define OPT_BIT(s, l, v, h, b)      OPT_BIT_F(s, l, v, h, b, 0)
 #define OPT_NEGBIT(s, l, v, h, b)   { OPTION_NEGBIT, (s), (l), (v), NULL, \
                                      (h), PARSE_OPT_NOARG, NULL, (b) }
-#define OPT_COUNTUP(s, l, v, h)     { OPTION_COUNTUP, (s), (l), (v), NULL, \
-                                     (h), PARSE_OPT_NOARG }
-#define OPT_SET_INT(s, l, v, h, i)  { OPTION_SET_INT, (s), (l), (v), NULL, \
-                                     (h), PARSE_OPT_NOARG, NULL, (i) }
-#define OPT_BOOL(s, l, v, h)        OPT_SET_INT(s, l, v, h, 1)
+#define OPT_COUNTUP(s, l, v, h)     OPT_COUNTUP_F(s, l, v, h, 0)
+#define OPT_SET_INT(s, l, v, h, i)  OPT_SET_INT_F(s, l, v, h, i, 0)
+#define OPT_BOOL(s, l, v, h)        OPT_BOOL_F(s, l, v, h, 0)
 #define OPT_HIDDEN_BOOL(s, l, v, h) { OPTION_SET_INT, (s), (l), (v), NULL, \
                                      (h), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1}
 #define OPT_CMDMODE(s, l, v, h, i)  { OPTION_CMDMODE, (s), (l), (v), NULL, \
@@ -188,6 +200,7 @@ enum {
        PARSE_OPT_HELP = -1,
        PARSE_OPT_DONE,
        PARSE_OPT_NON_OPTION,
+       PARSE_OPT_ERROR,
        PARSE_OPT_UNKNOWN
 };
 
@@ -240,7 +253,7 @@ extern int parse_opt_passthru_argv(const struct option *, const char *, int);
        { OPTION_CALLBACK, 'q', "quiet", (var), NULL, N_("be more quiet"), \
          PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }
 #define OPT__DRY_RUN(var, h)  OPT_BOOL('n', "dry-run", (var), (h))
-#define OPT__FORCE(var, h)    OPT_COUNTUP('f', "force",   (var), (h))
+#define OPT__FORCE(var, h, f) OPT_COUNTUP_F('f', "force",   (var), (h), (f))
 #define OPT__ABBREV(var)  \
        { OPTION_CALLBACK, 0, "abbrev", (var), N_("n"), \
          N_("use <n> digits to display SHA-1s"),       \
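
The new *_F macro variants and completion flags above combine as in the following illustrative options array (not taken from this patch; N_() is the usual gettext marker assumed from gettext.h): a --force option that --git-completion-helper, and hence git-completion.bash, will not offer.

static int force;

static struct option example_options[] = {
        OPT__FORCE(&force, N_("force the (hypothetical) operation"),
                   PARSE_OPT_NOCOMPLETE),
        OPT_END()
};
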
diff --git a/perl/FromCPAN/.gitattributes b/perl/FromCPAN/.gitattributes
new file mode 100644 (file)
index 0000000..8b64fc5
--- /dev/null
+++ b/perl/FromCPAN/.gitattributes
@@ -0,0 +1 @@
+/Error.pm whitespace=-blank-at-eof
diff --git a/perl/FromCPAN/Error.pm b/perl/FromCPAN/Error.pm
new file mode 100644 (file)
index 0000000..8b95e2d
--- /dev/null
+++ b/perl/FromCPAN/Error.pm
@@ -0,0 +1,1040 @@
+# Error.pm
+#
+# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
+# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
+#
+# but modified ***significantly***
+
+package Error;
+
+use strict;
+use warnings;
+
+use vars qw($VERSION);
+use 5.004;
+
+$VERSION = "0.17025";
+
+use overload (
+       '""'       =>   'stringify',
+       '0+'       =>   'value',
+       'bool'     =>   sub { return 1; },
+       'fallback' =>   1
+);
+
+$Error::Depth = 0;     # Depth to pass to caller()
+$Error::Debug = 0;     # Generate verbose stack traces
+@Error::STACK = ();    # Clause stack for try
+$Error::THROWN = undef;        # last error thrown, a workaround until die $ref works
+
+my $LAST;              # Last error created
+my %ERROR;             # Last error associated with package
+
+sub _throw_Error_Simple
+{
+    my $args = shift;
+    return Error::Simple->new($args->{'text'});
+}
+
+$Error::ObjectifyCallback = \&_throw_Error_Simple;
+
+
+# Exported subs are defined in Error::subs
+
+use Scalar::Util ();
+
+sub import {
+    shift;
+    my @tags = @_;
+    local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
+
+    @tags = grep {
+       if( $_ eq ':warndie' ) {
+          Error::WarnDie->import();
+          0;
+       }
+       else {
+          1;
+       }
+    } @tags;
+
+    Error::subs->import(@tags);
+}
+
+# I really want to use last for the name of this method, but it is a keyword
+# which prevent the syntax  last Error
+
+sub prior {
+    shift; # ignore
+
+    return $LAST unless @_;
+
+    my $pkg = shift;
+    return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
+       unless ref($pkg);
+
+    my $obj = $pkg;
+    my $err = undef;
+    if($obj->isa('HASH')) {
+       $err = $obj->{'__Error__'}
+           if exists $obj->{'__Error__'};
+    }
+    elsif($obj->isa('GLOB')) {
+       $err = ${*$obj}{'__Error__'}
+           if exists ${*$obj}{'__Error__'};
+    }
+
+    $err;
+}
+
+sub flush {
+    shift; #ignore
+
+    unless (@_) {
+       $LAST = undef;
+       return;
+    }
+
+    my $pkg = shift;
+    return unless ref($pkg);
+
+    undef $ERROR{$pkg} if defined $ERROR{$pkg};
+}
+
+# Return as much information as possible about where the error
+# happened. The -stacktrace element only exists if $Error::DEBUG
+# was set when the error was created
+
+sub stacktrace {
+    my $self = shift;
+
+    return $self->{'-stacktrace'}
+       if exists $self->{'-stacktrace'};
+
+    my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
+
+    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+       unless($text =~ /\n$/s);
+
+    $text;
+}
+
+
+sub associate {
+    my $err = shift;
+    my $obj = shift;
+
+    return unless ref($obj);
+
+    if($obj->isa('HASH')) {
+       $obj->{'__Error__'} = $err;
+    }
+    elsif($obj->isa('GLOB')) {
+       ${*$obj}{'__Error__'} = $err;
+    }
+    $obj = ref($obj);
+    $ERROR{ ref($obj) } = $err;
+
+    return;
+}
+
+
+sub new {
+    my $self = shift;
+    my($pkg,$file,$line) = caller($Error::Depth);
+
+    my $err = bless {
+       '-package' => $pkg,
+       '-file'    => $file,
+       '-line'    => $line,
+       @_
+    }, $self;
+
+    $err->associate($err->{'-object'})
+       if(exists $err->{'-object'});
+
+    # To always create a stacktrace would be very inefficient, so
+    # we only do it if $Error::Debug is set
+
+    if($Error::Debug) {
+       require Carp;
+       local $Carp::CarpLevel = $Error::Depth;
+       my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
+       my $trace = Carp::longmess($text);
+       # Remove try calls from the trace
+       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+       $err->{'-stacktrace'} = $trace
+    }
+
+    $@ = $LAST = $ERROR{$pkg} = $err;
+}
+
+# Throw an error. this contains some very gory code.
+
+sub throw {
+    my $self = shift;
+    local $Error::Depth = $Error::Depth + 1;
+
+    # if we are not rethrow-ing then create the object to throw
+    $self = $self->new(@_) unless ref($self);
+
+    die $Error::THROWN = $self;
+}
+
+# syntactic sugar for
+#
+#    die with Error( ... );
+
+sub with {
+    my $self = shift;
+    local $Error::Depth = $Error::Depth + 1;
+
+    $self->new(@_);
+}
+
+# syntactic sugar for
+#
+#    record Error( ... ) and return;
+
+sub record {
+    my $self = shift;
+    local $Error::Depth = $Error::Depth + 1;
+
+    $self->new(@_);
+}
+
+# catch clause for
+#
+# try { ... } catch CLASS with { ... }
+
+sub catch {
+    my $pkg = shift;
+    my $code = shift;
+    my $clauses = shift || {};
+    my $catch = $clauses->{'catch'} ||= [];
+
+    unshift @$catch,  $pkg, $code;
+
+    $clauses;
+}
+
+# Object query methods
+
+sub object {
+    my $self = shift;
+    exists $self->{'-object'} ? $self->{'-object'} : undef;
+}
+
+sub file {
+    my $self = shift;
+    exists $self->{'-file'} ? $self->{'-file'} : undef;
+}
+
+sub line {
+    my $self = shift;
+    exists $self->{'-line'} ? $self->{'-line'} : undef;
+}
+
+sub text {
+    my $self = shift;
+    exists $self->{'-text'} ? $self->{'-text'} : undef;
+}
+
+# overload methods
+
+sub stringify {
+    my $self = shift;
+    defined $self->{'-text'} ? $self->{'-text'} : "Died";
+}
+
+sub value {
+    my $self = shift;
+    exists $self->{'-value'} ? $self->{'-value'} : undef;
+}
+
+package Error::Simple;
+
+use vars qw($VERSION);
+
+$VERSION = "0.17025";
+
+@Error::Simple::ISA = qw(Error);
+
+sub new {
+    my $self  = shift;
+    my $text  = "" . shift;
+    my $value = shift;
+    my(@args) = ();
+
+    local $Error::Depth = $Error::Depth + 1;
+
+    @args = ( -file => $1, -line => $2)
+       if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
+    push(@args, '-value', 0 + $value)
+       if defined($value);
+
+    $self->SUPER::new(-text => $text, @args);
+}
+
+sub stringify {
+    my $self = shift;
+    my $text = $self->SUPER::stringify;
+    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+       unless($text =~ /\n$/s);
+    $text;
+}
+
+##########################################################################
+##########################################################################
+
+# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
+# Peter Seibel <peter@weblogic.com>
+
+package Error::subs;
+
+use Exporter ();
+use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
+
+@EXPORT_OK   = qw(try with finally except otherwise);
+%EXPORT_TAGS = (try => \@EXPORT_OK);
+
+@ISA = qw(Exporter);
+
+sub run_clauses ($$$\@) {
+    my($clauses,$err,$wantarray,$result) = @_;
+    my $code = undef;
+
+    $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
+
+    CATCH: {
+
+       # catch
+       my $catch;
+       if(defined($catch = $clauses->{'catch'})) {
+           my $i = 0;
+
+           CATCHLOOP:
+           for( ; $i < @$catch ; $i += 2) {
+               my $pkg = $catch->[$i];
+               unless(defined $pkg) {
+                   #except
+                   splice(@$catch,$i,2,$catch->[$i+1]->($err));
+                   $i -= 2;
+                   next CATCHLOOP;
+               }
+               elsif(Scalar::Util::blessed($err) && $err->isa($pkg)) {
+                   $code = $catch->[$i+1];
+                   while(1) {
+                       my $more = 0;
+                       local($Error::THROWN, $@);
+                       my $ok = eval {
+                           $@ = $err;
+                           if($wantarray) {
+                               @{$result} = $code->($err,\$more);
+                           }
+                           elsif(defined($wantarray)) {
+                               @{$result} = ();
+                               $result->[0] = $code->($err,\$more);
+                           }
+                           else {
+                               $code->($err,\$more);
+                           }
+                           1;
+                       };
+                       if( $ok ) {
+                           next CATCHLOOP if $more;
+                           undef $err;
+                       }
+                       else {
+                           $err = $@ || $Error::THROWN;
+                               $err = $Error::ObjectifyCallback->({'text' =>$err})
+                                       unless ref($err);
+                       }
+                       last CATCH;
+                   };
+               }
+           }
+       }
+
+       # otherwise
+       my $owise;
+       if(defined($owise = $clauses->{'otherwise'})) {
+           my $code = $clauses->{'otherwise'};
+           my $more = 0;
+        local($Error::THROWN, $@);
+           my $ok = eval {
+               $@ = $err;
+               if($wantarray) {
+                   @{$result} = $code->($err,\$more);
+               }
+               elsif(defined($wantarray)) {
+                   @{$result} = ();
+                   $result->[0] = $code->($err,\$more);
+               }
+               else {
+                   $code->($err,\$more);
+               }
+               1;
+           };
+           if( $ok ) {
+               undef $err;
+           }
+           else {
+               $err = $@ || $Error::THROWN;
+
+               $err = $Error::ObjectifyCallback->({'text' =>$err})
+                       unless ref($err);
+           }
+       }
+    }
+    $err;
+}
+
+sub try (&;$) {
+    my $try = shift;
+    my $clauses = @_ ? shift : {};
+    my $ok = 0;
+    my $err = undef;
+    my @result = ();
+
+    unshift @Error::STACK, $clauses;
+
+    my $wantarray = wantarray();
+
+    do {
+       local $Error::THROWN = undef;
+       local $@ = undef;
+
+       $ok = eval {
+           if($wantarray) {
+               @result = $try->();
+           }
+           elsif(defined $wantarray) {
+               $result[0] = $try->();
+           }
+           else {
+               $try->();
+           }
+           1;
+       };
+
+       $err = $@ || $Error::THROWN
+           unless $ok;
+    };
+
+    shift @Error::STACK;
+
+    $err = run_clauses($clauses,$err,wantarray,@result)
+    unless($ok);
+
+    $clauses->{'finally'}->()
+       if(defined($clauses->{'finally'}));
+
+    if (defined($err))
+    {
+        if (Scalar::Util::blessed($err) && $err->can('throw'))
+        {
+            throw $err;
+        }
+        else
+        {
+            die $err;
+        }
+    }
+
+    wantarray ? @result : $result[0];
+}
+
+# Each clause adds a sub to the list of clauses. The finally clause is
+# always the last, and the otherwise clause is always added just before
+# the finally clause.
+#
+# All clauses, except the finally clause, add a sub which takes one argument
+# this argument will be the error being thrown. The sub will return a code ref
+# if that clause can handle that error, otherwise undef is returned.
+#
+# The otherwise clause adds a sub which unconditionally returns the users
+# code reference, this is why it is forced to be last.
+#
+# The catch clause is defined in Error.pm, as the syntax causes it to
+# be called as a method
+
+sub with (&;$) {
+    @_
+}
+
+sub finally (&) {
+    my $code = shift;
+    my $clauses = { 'finally' => $code };
+    $clauses;
+}
+
+# The except clause is a block which returns a hashref or a list of
+# key-value pairs, where the keys are the classes and the values are subs.
+
+sub except (&;$) {
+    my $code = shift;
+    my $clauses = shift || {};
+    my $catch = $clauses->{'catch'} ||= [];
+
+    my $sub = sub {
+       my $ref;
+       my(@array) = $code->($_[0]);
+       if(@array == 1 && ref($array[0])) {
+           $ref = $array[0];
+           $ref = [ %$ref ]
+               if(UNIVERSAL::isa($ref,'HASH'));
+       }
+       else {
+           $ref = \@array;
+       }
+       @$ref
+    };
+
+    unshift @{$catch}, undef, $sub;
+
+    $clauses;
+}
+
+sub otherwise (&;$) {
+    my $code = shift;
+    my $clauses = shift || {};
+
+    if(exists $clauses->{'otherwise'}) {
+       require Carp;
+       Carp::croak("Multiple otherwise clauses");
+    }
+
+    $clauses->{'otherwise'} = $code;
+
+    $clauses;
+}
+
+1;
+
+package Error::WarnDie;
+
+sub gen_callstack($)
+{
+    my ( $start ) = @_;
+
+    require Carp;
+    local $Carp::CarpLevel = $start;
+    my $trace = Carp::longmess("");
+    # Remove try calls from the trace
+    $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+    $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+    my @callstack = split( m/\n/, $trace );
+    return @callstack;
+}
+
+my $old_DIE;
+my $old_WARN;
+
+sub DEATH
+{
+    my ( $e ) = @_;
+
+    local $SIG{__DIE__} = $old_DIE if( defined $old_DIE );
+
+    die @_ if $^S;
+
+    my ( $etype, $message, $location, @callstack );
+    if ( ref($e) && $e->isa( "Error" ) ) {
+        $etype = "exception of type " . ref( $e );
+        $message = $e->text;
+        $location = $e->file . ":" . $e->line;
+        @callstack = split( m/\n/, $e->stacktrace );
+    }
+    else {
+        # Don't apply subsequent layer of message formatting
+        die $e if( $e =~ m/^\nUnhandled perl error caught at toplevel:\n\n/ );
+        $etype = "perl error";
+        my $stackdepth = 0;
+        while( caller( $stackdepth ) =~ m/^Error(?:$|::)/ ) {
+            $stackdepth++
+        }
+
+        @callstack = gen_callstack( $stackdepth + 1 );
+
+        $message = "$e";
+        chomp $message;
+
+        if ( $message =~ s/ at (.*?) line (\d+)\.$// ) {
+            $location = $1 . ":" . $2;
+        }
+        else {
+            my @caller = caller( $stackdepth );
+            $location = $caller[1] . ":" . $caller[2];
+        }
+    }
+
+    shift @callstack;
+    # Do it this way in case there are no elements; we don't print a spurious \n
+    my $callstack = join( "", map { "$_\n"} @callstack );
+
+    die "\nUnhandled $etype caught at toplevel:\n\n  $message\n\nThrown from: $location\n\nFull stack trace:\n\n$callstack\n";
+}
+
+sub TAXES
+{
+    my ( $message ) = @_;
+
+    local $SIG{__WARN__} = $old_WARN if( defined $old_WARN );
+
+    $message =~ s/ at .*? line \d+\.$//;
+    chomp $message;
+
+    my @callstack = gen_callstack( 1 );
+    my $location = shift @callstack;
+
+    # $location already starts with a leading space
+    $message .= $location;
+
+    # Do it this way in case there are no elements; we don't print a spurious \n
+    my $callstack = join( "", map { "$_\n"} @callstack );
+
+    warn "$message:\n$callstack";
+}
+
+sub import
+{
+    $old_DIE  = $SIG{__DIE__};
+    $old_WARN = $SIG{__WARN__};
+
+    $SIG{__DIE__}  = \&DEATH;
+    $SIG{__WARN__} = \&TAXES;
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+Error - Error/exception handling in an OO-ish way
+
+=head1 WARNING
+
+Using the "Error" module is B<no longer recommended> due to the black-magical
+nature of its syntactic sugar, which often tends to break. Its maintainers
+have stopped actively writing code that uses it, and discourage people
+from doing so. See the "SEE ALSO" section below for better recommendations.
+
+=head1 SYNOPSIS
+
+    use Error qw(:try);
+
+    throw Error::Simple( "A simple error");
+
+    sub xyz {
+        ...
+       record Error::Simple("A simple error")
+           and return;
+    }
+
+    unlink($file) or throw Error::Simple("$file: $!",$!);
+
+    try {
+       do_some_stuff();
+       die "error!" if $condition;
+       throw Error::Simple "Oops!" if $other_condition;
+    }
+    catch Error::IO with {
+       my $E = shift;
+       print STDERR "File ", $E->{'-file'}, " had a problem\n";
+    }
+    except {
+       my $E = shift;
+       my $general_handler=sub {send_message $E->{-description}};
+       return {
+           UserException1 => $general_handler,
+           UserException2 => $general_handler
+       };
+    }
+    otherwise {
+       print STDERR "Well I don't know what to say\n";
+    }
+    finally {
+       close_the_garage_door_already(); # Should be reliable
+    }; # Don't forget the trailing ; or you might be surprised
+
+=head1 DESCRIPTION
+
+The C<Error> package provides two interfaces. Firstly C<Error> provides
+a procedural interface to exception handling. Secondly C<Error> is a
+base class for errors/exceptions that can either be thrown, for
+subsequent catch, or can simply be recorded.
+
+Errors in the class C<Error> should not be thrown directly, but the
+user should throw errors from a sub-class of C<Error>.
+
+=head1 PROCEDURAL INTERFACE
+
+C<Error> exports subroutines to perform exception handling. These will
+be exported if the C<:try> tag is used in the C<use> line.
+
+=over 4
+
+=item try BLOCK CLAUSES
+
+C<try> is the main subroutine called by the user. All other subroutines
+exported are clauses to the try subroutine.
+
+The BLOCK will be evaluated and, if no error is thrown, try will return
+the result of the block.
+
+C<CLAUSES> are the subroutines below, which describe what to do in the
+event of an error being thrown within BLOCK.
+
+=item catch CLASS with BLOCK
+
+This clause will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
+to be caught and handled by evaluating C<BLOCK>.
+
+C<BLOCK> will be passed two arguments. The first will be the error
+being thrown. The second is a reference to a scalar variable. If this
+variable is set by the catch block then, on return from the catch
+block, try will continue processing as if the catch block was never
+found. The error will also be available in C<$@>.
+
+To propagate the error the catch block may call C<$err-E<gt>throw>.
+
+If the scalar referenced by the second argument is not set, and the
+error is not thrown again, then the current try block will return with
+the result from the catch block (see the example after this list).
+
+=item except BLOCK
+
+When C<try> is looking for a handler, if an except clause is found
+C<BLOCK> is evaluated. The return value from this block should be a
+HASHREF or a list of key-value pairs, where the keys are class names
+and the values are CODE references for the handler of errors of that
+type.
+
+=item otherwise BLOCK
+
+Catch any error by executing the code in C<BLOCK>
+
+When evaluated C<BLOCK> will be passed one argument, which will be the
+error being processed. The error will also be available in C<$@>.
+
+Only one otherwise block may be specified per try block
+
+=item finally BLOCK
+
+Execute the code in C<BLOCK> either after the code in the try block has
+successfully completed, or if the try block throws an error then
+C<BLOCK> will be executed after the handler has completed.
+
+If the handler throws an error then the error will be caught, the
+finally block will be executed and the error will be re-thrown.
+
+Only one finally block may be specified per try block
+
+=back
+
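+As a brief sketch (using a hypothetical C<My::Error> subclass of
+C<Error> and a hypothetical C<risky()> function), a catch block can
+decline to handle an error by setting the scalar it is passed, letting
+a later clause deal with it:
+
+    use Error qw(:try);
+
+    try {
+        risky();
+    }
+    catch My::Error with {
+        my ($E, $more) = @_;
+        if ($E->value) {
+            warn "handled: " . $E->text . "\n";
+        }
+        else {
+            $$more = 1;    # decline; keep looking for another handler
+        }
+    }
+    otherwise {
+        my $E = shift;
+        warn "unhandled: $E";
+    };
+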
+=head1 COMPATIBILITY
+
+L<Moose> exports a keyword called C<with> which clashes with Error's. This
+example returns a prototype mismatch error:
+
+    package MyTest;
+
+    use warnings;
+    use Moose;
+    use Error qw(:try);
+
+(Thanks to C<maik.hentsche@amd.com> for the report.).
+
+=head1 CLASS INTERFACE
+
+=head2 CONSTRUCTORS
+
+The C<Error> object is implemented as a HASH. This HASH is initialized
+with the arguments that are passed to its constructor. The elements
+that are used by, or are retrievable by, the C<Error> class are listed
+below; other classes may add to these.
+
+       -file
+       -line
+       -text
+       -value
+       -object
+
+If C<-file> or C<-line> are not specified in the constructor arguments
+then these will be initialized with the file name and line number where
+the constructor was called from.
+
+If the error is associated with an object then the object should be
+passed as the C<-object> argument. This will allow the C<Error> package
+to associate the error with the object.
+
+The C<Error> package remembers the last error created, and also the
+last error associated with a package. This could either be the last
+error created by a sub in that package, or the last error which passed
+an object blessed into that package as the C<-object> argument.
+
+=over 4
+
+=item Error->new()
+
+See the Error::Simple documentation.
+
+=item throw ( [ ARGS ] )
+
+Create a new C<Error> object and throw an error, which will be caught
+by a surrounding C<try> block, if there is one. Otherwise it will cause
+the program to exit.
+
+C<throw> may also be called on an existing error to re-throw it.
+
+=item with ( [ ARGS ] )
+
+Create a new C<Error> object and return it. This is defined for
+syntactic sugar, e.g.
+
+    die with Some::Error ( ... );
+
+=item record ( [ ARGS ] )
+
+Create a new C<Error> object and return it. This is defined for
+syntactic sugar, e.g.
+
+    record Some::Error ( ... )
+       and return;
+
+=back
+
+=head2 STATIC METHODS
+
+=over 4
+
+=item prior ( [ PACKAGE ] )
+
+Return the last error created, or the last error associated with
+C<PACKAGE>
+
+=item flush ( [ PACKAGE ] )
+
+Flush the last error created, or the last error associated with
+C<PACKAGE>. It is necessary to clear the error stack before exiting the
+package, or uncaught errors generated using C<record> will be reported.
+
+     $Error->flush;
+
+=cut
+
+=back
+
+=head2 OBJECT METHODS
+
+=over 4
+
+=item stacktrace
+
+If the variable C<$Error::Debug> was non-zero when the error was
+created, then C<stacktrace> returns a string created by calling
+C<Carp::longmess>. If the variable was zero the C<stacktrace> returns
+the text of the error appended with the filename and line number of
+where the error was created, providing the text does not end with a
+newline.
+
+=item object
+
+The object this error was associated with
+
+=item file
+
+The file where the constructor of this error was called from
+
+=item line
+
+The line where the constructor of this error was called from
+
+=item text
+
+The text of the error
+
+=item $err->associate($obj)
+
+Associates an error with an object to allow error propagation, i.e.:
+
+    $ber->encode(...) or
+        return Error->prior($ber)->associate($ldap);
+
+=back
+
+=head2 OVERLOAD METHODS
+
+=over 4
+
+=item stringify
+
+A method that converts the object into a string. This method may simply
+return the same as the C<text> method, or it may append more
+information, for example the file name and line number (see the example
+after this list).
+
+By default this method returns the C<-text> argument that was passed to
+the constructor, or the string C<"Died"> if none was given.
+
+=item value
+
+A method that will return a value that can be associated with the
+error. For example if an error was created due to the failure of a
+system call, then this may return the numeric value of C<$!> at the
+time.
+
+By default this method returns the C<-value> argument that was passed
+to the constructor.
+
+=back
+
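+For instance, re-using the C<unlink> example from the SYNOPSIS, the two
+overload methods are what you see when an error object is interpolated
+into a string or asked for its value:
+
+    try {
+        unlink($file) or throw Error::Simple("$file: $!", 0 + $!);
+    }
+    otherwise {
+        my $E = shift;
+        print STDERR "error: $E";    # stringify(): the text plus file and line
+        exit($E->value || 1);        # value(): the numeric value given above
+    };
+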
+=head1 PRE-DEFINED ERROR CLASSES
+
+=head2 Error::Simple
+
+This class can be used to hold simple error strings and values. Its
+constructor takes two arguments. The first is a text value, the second
+is a numeric value. These values are what will be returned by the
+overload methods.
+
+If the text value ends with C<at file line 1> as $@ strings do, then
+this information will be used to set the C<-file> and C<-line> arguments
+of the error object.
+
+This class is used internally if an eval'd block dies with an error
+that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified)
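+
+As a minimal sketch, an error carrying a message and a numeric value
+could be created and inspected like this:
+
+    my $err = Error::Simple->new("open failed", 11);
+    print $err->text;     # "open failed"
+    print $err->value;    # 11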
+
+
+=head1 $Error::ObjectifyCallback
+
+This variable holds a reference to a subroutine that converts errors that
+are plain strings to objects. It is used by Error.pm to convert textual
+errors to objects, and can be overridden by the user.
+
+It accepts a single argument which is a hash reference to named parameters.
+Currently the only named parameter passed is C<'text'> which is the text
+of the error, but others may be available in the future.
+
+For example the following code will cause Error.pm to throw objects of the
+class MyError::Bar by default:
+
+    sub throw_MyError_Bar
+    {
+        my $args = shift;
+        my $err = MyError::Bar->new();
+        $err->{'MyBarText'} = $args->{'text'};
+        return $err;
+    }
+
+    {
+        local $Error::ObjectifyCallback = \&throw_MyError_Bar;
+
+        # Error handling here.
+    }
+
+=cut
+
+=head1 MESSAGE HANDLERS
+
+C<Error> also provides handlers to extend the output of the C<warn()> perl
+function, and to handle the printing of a thrown C<Error> that is not caught
+or otherwise handled. These are not installed by default, but are requested
+using the C<:warndie> tag in the C<use> line.
+
+ use Error qw( :warndie );
+
+These new error handlers are installed in C<$SIG{__WARN__}> and
+C<$SIG{__DIE__}>. If these handlers are already defined when the tag is
+imported, the old values are stored, and used during the new code. Thus, to
+arrange for custom handling of warnings and errors, you will need to perform
+something like the following:
+
+ BEGIN {
+   $SIG{__WARN__} = sub {
+     print STDERR "My special warning handler: $_[0]"
+   };
+ }
+
+ use Error qw( :warndie );
+
+Note that setting C<$SIG{__WARN__}> after the C<:warndie> tag has been
+imported will overwrite the handler that C<Error> provides. If this cannot be
+avoided, then the tag can be explicitly C<import>ed later
+
+ use Error;
+
+ $SIG{__WARN__} = ...;
+
+ import Error qw( :warndie );
+
+=head2 EXAMPLE
+
+The C<__DIE__> handler turns messages such as
+
+ Can't call method "foo" on an undefined value at examples/warndie.pl line 16.
+
+into
+
+ Unhandled perl error caught at toplevel:
+
+   Can't call method "foo" on an undefined value
+
+ Thrown from: examples/warndie.pl:16
+
+ Full stack trace:
+
+         main::inner('undef') called at examples/warndie.pl line 20
+         main::outer('undef') called at examples/warndie.pl line 23
+
+=cut
+
+=head1 SEE ALSO
+
+See L<Exception::Class> for a different module providing Object-Oriented
+exception handling, along with a convenient syntax for declaring hierarchies
+for them. It doesn't provide Error's syntactic sugar of C<try { ... }>,
+C<catch { ... }>, etc. which may be a good thing or a bad thing based
+on what you want. (Because Error's syntactic sugar tends to break.)
+
+L<Error::Exception> aims to combine L<Error> and L<Exception::Class>
+"with correct stringification".
+
+L<TryCatch> and L<Try::Tiny> are similar in concept to Error.pm only providing
+a syntax that hopefully breaks less.
+
+=head1 KNOWN BUGS
+
+None, but that does not mean there are not any.
+
+=head1 AUTHORS
+
+Graham Barr <gbarr@pobox.com>
+
+The code that inspired me to write this was originally written by
+Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
+<jglick@sig.bsh.com>.
+
+C<:warndie> handlers added by Paul Evans <leonerd@leonerd.org.uk>
+
+=head1 MAINTAINER
+
+Shlomi Fish, L<http://www.shlomifish.org/> .
+
+=head1 PAST MAINTAINERS
+
+Arun Kumar U <u_arunkumar@yahoo.com>
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-8  Graham Barr. All rights reserved.
+This program is free software; you can redistribute it and/or modify it
+under the same terms as Perl itself.
+
+=cut
diff --git a/perl/FromCPAN/Mail/Address.pm b/perl/FromCPAN/Mail/Address.pm
new file mode 100644 (file)
index 0000000..683d490
--- /dev/null
@@ -0,0 +1,280 @@
+# Copyrights 1995-2018 by [Mark Overmeer].
+#  For other contributors see ChangeLog.
+# See the manual pages for details on the licensing terms.
+# Pod stripped from pm file by OODoc 2.02.
+# This code is part of the bundle MailTools.  Meta-POD processed with
+# OODoc into POD and HTML manual-pages.  See README.md for Copyright.
+# Licensed under the same terms as Perl itself.
+
+package Mail::Address;
+use vars '$VERSION';
+$VERSION = '2.20';
+
+use strict;
+
+use Carp;
+
+# use locale;   removed in version 1.78, because it causes taint problems
+
+sub Version { our $VERSION }
+
+
+
+# given a comment, attempt to extract a person's name
+sub _extract_name
+{   # This function can be called as method as well
+    my $self = @_ && ref $_[0] ? shift : undef;
+
+    local $_ = shift
+        or return '';
+
+    # Using encodings, too hard. See Mail::Message::Field::Full.
+    return '' if m/\=\?.*?\?\=/;
+
+    # trim whitespace
+    s/^\s+//;
+    s/\s+$//;
+    s/\s+/ /;
+
+    # Disregard numeric names (e.g. 123456.1234@compuserve.com)
+    return "" if /^[\d ]+$/;
+
+    s/^\((.*)\)$/$1/; # remove outermost parenthesis
+    s/^"(.*)"$/$1/;   # remove outer quotation marks
+    s/\(.*?\)//g;     # remove minimal embedded comments
+    s/\\//g;          # remove all escapes
+    s/^"(.*)"$/$1/;   # remove internal quotation marks
+    s/^([^\s]+) ?, ?(.*)$/$2 $1/; # reverse "Last, First M." if applicable
+    s/,.*//;
+
+    # Change casing only when the name contains only upper or only
+    # lower cased characters.
+    unless( m/[A-Z]/ && m/[a-z]/ )
+    {   # Set the case of the name to first char upper rest lower
+        s/\b(\w+)/\L\u$1/igo;  # Upcase first letter on name
+        s/\bMc(\w)/Mc\u$1/igo; # Scottish names such as 'McLeod'
+        s/\bo'(\w)/O'\u$1/igo; # Irish names such as 'O'Malley, O'Reilly'
+        s/\b(x*(ix)?v*(iv)?i*)\b/\U$1/igo; # Roman numerals, eg 'Level III Support'
+    }
+
+    # some cleanup
+    s/\[[^\]]*\]//g;
+    s/(^[\s'"]+|[\s'"]+$)//g;
+    s/\s{2,}/ /g;
+
+    $_;
+}
+
+sub _tokenise
+{   local $_ = join ',', @_;
+    my (@words,$snippet,$field);
+
+    s/\A\s+//;
+    s/[\r\n]+/ /g;
+
+    while ($_ ne '')
+    {   $field = '';
+        if(s/^\s*\(/(/ )    # (...)
+        {   my $depth = 0;
+
+     PAREN: while(s/^(\(([^\(\)\\]|\\.)*)//)
+            {   $field .= $1;
+                $depth++;
+                while(s/^(([^\(\)\\]|\\.)*\)\s*)//)
+                {   $field .= $1;
+                    last PAREN unless --$depth;
+                   $field .= $1 if s/^(([^\(\)\\]|\\.)+)//;
+                }
+            }
+
+            carp "Unmatched () '$field' '$_'"
+                if $depth;
+
+            $field =~ s/\s+\Z//;
+            push @words, $field;
+
+            next;
+        }
+
+        if( s/^("(?:[^"\\]+|\\.)*")\s*//       # "..."
+         || s/^(\[(?:[^\]\\]+|\\.)*\])\s*//    # [...]
+         || s/^([^\s()<>\@,;:\\".[\]]+)\s*//
+         || s/^([()<>\@,;:\\".[\]])\s*//
+          )
+        {   push @words, $1;
+            next;
+        }
+
+        croak "Unrecognised line: $_";
+    }
+
+    push @words, ",";
+    \@words;
+}
+
+sub _find_next
+{   my ($idx, $tokens, $len) = @_;
+
+    while($idx < $len)
+    {   my $c = $tokens->[$idx];
+        return $c if $c eq ',' || $c eq ';' || $c eq '<';
+        $idx++;
+    }
+
+    "";
+}
+
+sub _complete
+{   my ($class, $phrase, $address, $comment) = @_;
+
+    @$phrase || @$comment || @$address
+       or return undef;
+
+    my $o = $class->new(join(" ",@$phrase), join("",@$address), join(" ",@$comment));
+    @$phrase = @$address = @$comment = ();
+    $o;
+}
+
+#------------
+
+sub new(@)
+{   my $class = shift;
+    bless [@_], $class;
+}
+
+
+sub parse(@)
+{   my $class = shift;
+    my @line  = grep {defined} @_;
+    my $line  = join '', @line;
+
+    my (@phrase, @comment, @address, @objs);
+    my ($depth, $idx) = (0, 0);
+
+    my $tokens  = _tokenise @line;
+    my $len     = @$tokens;
+    my $next    = _find_next $idx, $tokens, $len;
+
+    local $_;
+    for(my $idx = 0; $idx < $len; $idx++)
+    {   $_ = $tokens->[$idx];
+
+        if(substr($_,0,1) eq '(') { push @comment, $_ }
+        elsif($_ eq '<')    { $depth++ }
+        elsif($_ eq '>')    { $depth-- if $depth }
+        elsif($_ eq ',' || $_ eq ';')
+        {   warn "Unmatched '<>' in $line" if $depth;
+            my $o = $class->_complete(\@phrase, \@address, \@comment);
+            push @objs, $o if defined $o;
+            $depth = 0;
+            $next = _find_next $idx+1, $tokens, $len;
+        }
+        elsif($depth)       { push @address, $_ }
+        elsif($next eq '<') { push @phrase,  $_ }
+        elsif( /^[.\@:;]$/ || !@address || $address[-1] =~ /^[.\@:;]$/ )
+        {   push @address, $_ }
+        else
+        {   warn "Unmatched '<>' in $line" if $depth;
+            my $o = $class->_complete(\@phrase, \@address, \@comment);
+            push @objs, $o if defined $o;
+            $depth = 0;
+            push @address, $_;
+        }
+    }
+    @objs;
+}
+
+#------------
+
+sub phrase  { shift->set_or_get(0, @_) }
+sub address { shift->set_or_get(1, @_) }
+sub comment { shift->set_or_get(2, @_) }
+
+sub set_or_get($)
+{   my ($self, $i) = (shift, shift);
+    @_ or return $self->[$i];
+
+    my $val = $self->[$i];
+    $self->[$i] = shift if @_;
+    $val;
+}
+
+
+my $atext = '[\-\w !#$%&\'*+/=?^`{|}~]';
+sub format
+{   my @addrs;
+
+    foreach (@_)
+    {   my ($phrase, $email, $comment) = @$_;
+        my @addr;
+
+        if(defined $phrase && length $phrase)
+        {   push @addr
+              , $phrase =~ /^(?:\s*$atext\s*)+$/o ? $phrase
+              : $phrase =~ /(?<!\\)"/             ? $phrase
+              :                                    qq("$phrase");
+
+            push @addr, "<$email>"
+                if defined $email && length $email;
+        }
+        elsif(defined $email && length $email)
+        {   push @addr, $email;
+        }
+
+        if(defined $comment && $comment =~ /\S/)
+        {   $comment =~ s/^\s*\(?/(/;
+            $comment =~ s/\)?\s*$/)/;
+        }
+
+        push @addr, $comment
+            if defined $comment && length $comment;
+
+        push @addrs, join(" ", @addr)
+            if @addr;
+    }
+
+    join ", ", @addrs;
+}
+
+#------------
+
+sub name
+{   my $self   = shift;
+    my $phrase = $self->phrase;
+    my $addr   = $self->address;
+
+    $phrase    = $self->comment
+        unless defined $phrase && length $phrase;
+
+    my $name   = $self->_extract_name($phrase);
+
+    # first.last@domain address
+    if($name eq '' && $addr =~ /([^\%\.\@_]+([\._][^\%\.\@_]+)+)[\@\%]/)
+    {   ($name  = $1) =~ s/[\._]+/ /g;
+       $name   = _extract_name $name;
+    }
+
+    if($name eq '' && $addr =~ m#/g=#i)    # X400 style address
+    {   my ($f) = $addr =~ m#g=([^/]*)#i;
+       my ($l) = $addr =~ m#s=([^/]*)#i;
+       $name   = _extract_name "$f $l";
+    }
+
+    length $name ? $name : undef;
+}
+
+
+sub host
+{   my $addr = shift->address || '';
+    my $i    = rindex $addr, '@';
+    $i >= 0 ? substr($addr, $i+1) : undef;
+}
+
+
+sub user
+{   my $addr = shift->address || '';
+    my $i    = rindex $addr, '@';
+    $i >= 0 ? substr($addr,0,$i) : $addr;
+}
+
+1;
index 9d60d7948b22254e6f61cc0d984b4ef40f27bc4f..16ebcc612ce4acb4fba6511d5b388184934cb22a 100644 (file)
@@ -9,7 +9,10 @@ package Git;
 
 use 5.008;
 use strict;
+use warnings;
 
+use File::Temp ();
+use File::Spec ();
 
 BEGIN {
 
@@ -101,7 +104,7 @@ =head1 DESCRIPTION
 
 
 use Carp qw(carp croak); # but croak is bad - throw instead
-use Git::Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
 use Cwd qw(abs_path cwd);
 use IPC::Open2 qw(open2);
 use Fcntl qw(SEEK_SET SEEK_CUR);
@@ -189,7 +192,6 @@ sub repository {
                };
 
                if ($dir) {
-                       _verify_require();
                        File::Spec->file_name_is_absolute($dir) or $dir = $opts{Directory} . '/' . $dir;
                        $opts{Repository} = abs_path($dir);
 
@@ -534,7 +536,9 @@ sub version {
 sub get_tz_offset {
        # some systems don't handle or mishandle %z, so be creative.
        my $t = shift || time;
-       my $gm = timegm(localtime($t));
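+       # localtime() gives the year as an offset from 1900; convert it to a
+       # full four-digit year so that timegm() interprets it unambiguously.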
+       my @t = localtime($t);
+       $t[5] += 1900;
+       my $gm = timegm(@t);
        my $sign = qw( + + - )[ $gm <=> $t ];
        return sprintf("%s%02d%02d", $sign, (gmtime(abs($t - $gm)))[2,1]);
 }
@@ -1288,8 +1292,6 @@ sub temp_release {
 sub _temp_cache {
        my ($self, $name) = _maybe_self(@_);
 
-       _verify_require();
-
        my $temp_fd = \$TEMP_FILEMAP{$name};
        if (defined $$temp_fd and $$temp_fd->opened) {
                if ($TEMP_FILES{$$temp_fd}{locked}) {
@@ -1323,11 +1325,6 @@ sub _temp_cache {
        $$temp_fd;
 }
 
-sub _verify_require {
-       eval { require File::Temp; require File::Spec; };
-       $@ and throw Error::Simple($@);
-}
-
 =item temp_reset ( FILEHANDLE )
 
 Truncates and resets the position of the C<FILEHANDLE>.
@@ -1692,7 +1689,6 @@ sub DESTROY {
 # Pipe implementation for ActiveState Perl.
 
 package Git::activestate_pipe;
-use strict;
 
 sub TIEHANDLE {
        my ($class, @params) = @_;
diff --git a/perl/Git/Error.pm b/perl/Git/Error.pm
deleted file mode 100644 (file)
index 09bbc97..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-package Git::Error;
-use 5.008;
-use strict;
-use warnings;
-
-=head1 NAME
-
-Git::Error - Wrapper for the L<Error> module, in case it's not installed
-
-=head1 DESCRIPTION
-
-Wraps the import function for the L<Error> module.
-
-This module is only intended to be used for code shipping in the
-C<git.git> repository. Use it for anything else at your peril!
-
-=cut
-
-sub import {
-    shift;
-    my $caller = caller;
-
-    eval {
-       require Error;
-       1;
-    } or do {
-       my $error = $@ || "Zombie Error";
-
-       my $Git_Error_pm_path = $INC{"Git/Error.pm"} || die "BUG: Should have our own path from %INC!";
-
-       require File::Basename;
-       my $Git_Error_pm_root = File::Basename::dirname($Git_Error_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_Error_pm_path'!";
-
-       require File::Spec;
-       my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_Error_pm_root, 'FromCPAN');
-       die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" unless -d $Git_pm_FromCPAN_root;
-
-       local @INC = ($Git_pm_FromCPAN_root, @INC);
-       require Error;
-    };
-
-    unshift @_, $caller;
-    goto &Error::import;
-}
-
-1;
diff --git a/perl/Git/FromCPAN/Error.pm b/perl/Git/FromCPAN/Error.pm
deleted file mode 100644 (file)
index 6098135..0000000
+++ /dev/null
@@ -1,827 +0,0 @@
-# Error.pm
-#
-# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
-# This program is free software; you can redistribute it and/or
-# modify it under the same terms as Perl itself.
-#
-# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
-# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
-#
-# but modified ***significantly***
-
-package Error;
-
-use strict;
-use vars qw($VERSION);
-use 5.004;
-
-$VERSION = "0.15009";
-
-use overload (
-       '""'       =>   'stringify',
-       '0+'       =>   'value',
-       'bool'     =>   sub { return 1; },
-       'fallback' =>   1
-);
-
-$Error::Depth = 0;     # Depth to pass to caller()
-$Error::Debug = 0;     # Generate verbose stack traces
-@Error::STACK = ();    # Clause stack for try
-$Error::THROWN = undef;        # last error thrown, a workaround until die $ref works
-
-my $LAST;              # Last error created
-my %ERROR;             # Last error associated with package
-
-sub throw_Error_Simple
-{
-    my $args = shift;
-    return Error::Simple->new($args->{'text'});
-}
-
-$Error::ObjectifyCallback = \&throw_Error_Simple;
-
-
-# Exported subs are defined in Error::subs
-
-sub import {
-    shift;
-    local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
-    Error::subs->import(@_);
-}
-
-# I really want to use last for the name of this method, but it is a keyword
-# which prevent the syntax  last Error
-
-sub prior {
-    shift; # ignore
-
-    return $LAST unless @_;
-
-    my $pkg = shift;
-    return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
-       unless ref($pkg);
-
-    my $obj = $pkg;
-    my $err = undef;
-    if($obj->isa('HASH')) {
-       $err = $obj->{'__Error__'}
-           if exists $obj->{'__Error__'};
-    }
-    elsif($obj->isa('GLOB')) {
-       $err = ${*$obj}{'__Error__'}
-           if exists ${*$obj}{'__Error__'};
-    }
-
-    $err;
-}
-
-sub flush {
-    shift; #ignore
-
-    unless (@_) {
-       $LAST = undef;
-       return;
-    }
-
-    my $pkg = shift;
-    return unless ref($pkg);
-
-    undef $ERROR{$pkg} if defined $ERROR{$pkg};
-}
-
-# Return as much information as possible about where the error
-# happened. The -stacktrace element only exists if $Error::DEBUG
-# was set when the error was created
-
-sub stacktrace {
-    my $self = shift;
-
-    return $self->{'-stacktrace'}
-       if exists $self->{'-stacktrace'};
-
-    my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
-
-    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
-       unless($text =~ /\n$/s);
-
-    $text;
-}
-
-# Allow error propagation, ie
-#
-# $ber->encode(...) or
-#    return Error->prior($ber)->associate($ldap);
-
-sub associate {
-    my $err = shift;
-    my $obj = shift;
-
-    return unless ref($obj);
-
-    if($obj->isa('HASH')) {
-       $obj->{'__Error__'} = $err;
-    }
-    elsif($obj->isa('GLOB')) {
-       ${*$obj}{'__Error__'} = $err;
-    }
-    $obj = ref($obj);
-    $ERROR{ ref($obj) } = $err;
-
-    return;
-}
-
-sub new {
-    my $self = shift;
-    my($pkg,$file,$line) = caller($Error::Depth);
-
-    my $err = bless {
-       '-package' => $pkg,
-       '-file'    => $file,
-       '-line'    => $line,
-       @_
-    }, $self;
-
-    $err->associate($err->{'-object'})
-       if(exists $err->{'-object'});
-
-    # To always create a stacktrace would be very inefficient, so
-    # we only do it if $Error::Debug is set
-
-    if($Error::Debug) {
-       require Carp;
-       local $Carp::CarpLevel = $Error::Depth;
-       my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
-       my $trace = Carp::longmess($text);
-       # Remove try calls from the trace
-       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
-       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
-       $err->{'-stacktrace'} = $trace
-    }
-
-    $@ = $LAST = $ERROR{$pkg} = $err;
-}
-
-# Throw an error. this contains some very gory code.
-
-sub throw {
-    my $self = shift;
-    local $Error::Depth = $Error::Depth + 1;
-
-    # if we are not rethrow-ing then create the object to throw
-    $self = $self->new(@_) unless ref($self);
-
-    die $Error::THROWN = $self;
-}
-
-# syntactic sugar for
-#
-#    die with Error( ... );
-
-sub with {
-    my $self = shift;
-    local $Error::Depth = $Error::Depth + 1;
-
-    $self->new(@_);
-}
-
-# syntactic sugar for
-#
-#    record Error( ... ) and return;
-
-sub record {
-    my $self = shift;
-    local $Error::Depth = $Error::Depth + 1;
-
-    $self->new(@_);
-}
-
-# catch clause for
-#
-# try { ... } catch CLASS with { ... }
-
-sub catch {
-    my $pkg = shift;
-    my $code = shift;
-    my $clauses = shift || {};
-    my $catch = $clauses->{'catch'} ||= [];
-
-    unshift @$catch,  $pkg, $code;
-
-    $clauses;
-}
-
-# Object query methods
-
-sub object {
-    my $self = shift;
-    exists $self->{'-object'} ? $self->{'-object'} : undef;
-}
-
-sub file {
-    my $self = shift;
-    exists $self->{'-file'} ? $self->{'-file'} : undef;
-}
-
-sub line {
-    my $self = shift;
-    exists $self->{'-line'} ? $self->{'-line'} : undef;
-}
-
-sub text {
-    my $self = shift;
-    exists $self->{'-text'} ? $self->{'-text'} : undef;
-}
-
-# overload methods
-
-sub stringify {
-    my $self = shift;
-    defined $self->{'-text'} ? $self->{'-text'} : "Died";
-}
-
-sub value {
-    my $self = shift;
-    exists $self->{'-value'} ? $self->{'-value'} : undef;
-}
-
-package Error::Simple;
-
-@Error::Simple::ISA = qw(Error);
-
-sub new {
-    my $self  = shift;
-    my $text  = "" . shift;
-    my $value = shift;
-    my(@args) = ();
-
-    local $Error::Depth = $Error::Depth + 1;
-
-    @args = ( -file => $1, -line => $2)
-       if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
-    push(@args, '-value', 0 + $value)
-       if defined($value);
-
-    $self->SUPER::new(-text => $text, @args);
-}
-
-sub stringify {
-    my $self = shift;
-    my $text = $self->SUPER::stringify;
-    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
-       unless($text =~ /\n$/s);
-    $text;
-}
-
-##########################################################################
-##########################################################################
-
-# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
-# Peter Seibel <peter@weblogic.com>
-
-package Error::subs;
-
-use Exporter ();
-use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
-
-@EXPORT_OK   = qw(try with finally except otherwise);
-%EXPORT_TAGS = (try => \@EXPORT_OK);
-
-@ISA = qw(Exporter);
-
-
-sub blessed {
-       my $item = shift;
-       local $@; # don't kill an outer $@
-       ref $item and eval { $item->can('can') };
-}
-
-
-sub run_clauses ($$$\@) {
-    my($clauses,$err,$wantarray,$result) = @_;
-    my $code = undef;
-
-    $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
-
-    CATCH: {
-
-       # catch
-       my $catch;
-       if(defined($catch = $clauses->{'catch'})) {
-           my $i = 0;
-
-           CATCHLOOP:
-           for( ; $i < @$catch ; $i += 2) {
-               my $pkg = $catch->[$i];
-               unless(defined $pkg) {
-                   #except
-                   splice(@$catch,$i,2,$catch->[$i+1]->());
-                   $i -= 2;
-                   next CATCHLOOP;
-               }
-               elsif(blessed($err) && $err->isa($pkg)) {
-                   $code = $catch->[$i+1];
-                   while(1) {
-                       my $more = 0;
-                       local($Error::THROWN);
-                       my $ok = eval {
-                           if($wantarray) {
-                               @{$result} = $code->($err,\$more);
-                           }
-                           elsif(defined($wantarray)) {
-                               @{$result} = ();
-                               $result->[0] = $code->($err,\$more);
-                           }
-                           else {
-                               $code->($err,\$more);
-                           }
-                           1;
-                       };
-                       if( $ok ) {
-                           next CATCHLOOP if $more;
-                           undef $err;
-                       }
-                       else {
-                           $err = defined($Error::THROWN)
-                                   ? $Error::THROWN : $@;
-                $err = $Error::ObjectifyCallback->({'text' =>$err})
-                    unless ref($err);
-                       }
-                       last CATCH;
-                   };
-               }
-           }
-       }
-
-       # otherwise
-       my $owise;
-       if(defined($owise = $clauses->{'otherwise'})) {
-           my $code = $clauses->{'otherwise'};
-           my $more = 0;
-           my $ok = eval {
-               if($wantarray) {
-                   @{$result} = $code->($err,\$more);
-               }
-               elsif(defined($wantarray)) {
-                   @{$result} = ();
-                   $result->[0] = $code->($err,\$more);
-               }
-               else {
-                   $code->($err,\$more);
-               }
-               1;
-           };
-           if( $ok ) {
-               undef $err;
-           }
-           else {
-               $err = defined($Error::THROWN)
-                       ? $Error::THROWN : $@;
-
-        $err = $Error::ObjectifyCallback->({'text' =>$err})
-            unless ref($err);
-           }
-       }
-    }
-    $err;
-}
-
-sub try (&;$) {
-    my $try = shift;
-    my $clauses = @_ ? shift : {};
-    my $ok = 0;
-    my $err = undef;
-    my @result = ();
-
-    unshift @Error::STACK, $clauses;
-
-    my $wantarray = wantarray();
-
-    do {
-       local $Error::THROWN = undef;
-    local $@ = undef;
-
-       $ok = eval {
-           if($wantarray) {
-               @result = $try->();
-           }
-           elsif(defined $wantarray) {
-               $result[0] = $try->();
-           }
-           else {
-               $try->();
-           }
-           1;
-       };
-
-       $err = defined($Error::THROWN) ? $Error::THROWN : $@
-           unless $ok;
-    };
-
-    shift @Error::STACK;
-
-    $err = run_clauses($clauses,$err,wantarray,@result)
-       unless($ok);
-
-    $clauses->{'finally'}->()
-       if(defined($clauses->{'finally'}));
-
-    if (defined($err))
-    {
-        if (blessed($err) && $err->can('throw'))
-        {
-            throw $err;
-        }
-        else
-        {
-            die $err;
-        }
-    }
-
-    wantarray ? @result : $result[0];
-}
-
-# Each clause adds a sub to the list of clauses. The finally clause is
-# always the last, and the otherwise clause is always added just before
-# the finally clause.
-#
-# All clauses, except the finally clause, add a sub which takes one argument
-# this argument will be the error being thrown. The sub will return a code ref
-# if that clause can handle that error, otherwise undef is returned.
-#
-# The otherwise clause adds a sub which unconditionally returns the users
-# code reference, this is why it is forced to be last.
-#
-# The catch clause is defined in Error.pm, as the syntax causes it to
-# be called as a method
-
-sub with (&;$) {
-    @_
-}
-
-sub finally (&) {
-    my $code = shift;
-    my $clauses = { 'finally' => $code };
-    $clauses;
-}
-
-# The except clause is a block which returns a hashref or a list of
-# key-value pairs, where the keys are the classes and the values are subs.
-
-sub except (&;$) {
-    my $code = shift;
-    my $clauses = shift || {};
-    my $catch = $clauses->{'catch'} ||= [];
-
-    my $sub = sub {
-       my $ref;
-       my(@array) = $code->($_[0]);
-       if(@array == 1 && ref($array[0])) {
-           $ref = $array[0];
-           $ref = [ %$ref ]
-               if(UNIVERSAL::isa($ref,'HASH'));
-       }
-       else {
-           $ref = \@array;
-       }
-       @$ref
-    };
-
-    unshift @{$catch}, undef, $sub;
-
-    $clauses;
-}
-
-sub otherwise (&;$) {
-    my $code = shift;
-    my $clauses = shift || {};
-
-    if(exists $clauses->{'otherwise'}) {
-       require Carp;
-       Carp::croak("Multiple otherwise clauses");
-    }
-
-    $clauses->{'otherwise'} = $code;
-
-    $clauses;
-}
-
-1;
-__END__
-
-=head1 NAME
-
-Error - Error/exception handling in an OO-ish way
-
-=head1 SYNOPSIS
-
-    use Error qw(:try);
-
-    throw Error::Simple( "A simple error");
-
-    sub xyz {
-        ...
-       record Error::Simple("A simple error")
-           and return;
-    }
-
-    unlink($file) or throw Error::Simple("$file: $!",$!);
-
-    try {
-       do_some_stuff();
-       die "error!" if $condition;
-       throw Error::Simple -text => "Oops!" if $other_condition;
-    }
-    catch Error::IO with {
-       my $E = shift;
-       print STDERR "File ", $E->{'-file'}, " had a problem\n";
-    }
-    except {
-       my $E = shift;
-       my $general_handler=sub {send_message $E->{-description}};
-       return {
-           UserException1 => $general_handler,
-           UserException2 => $general_handler
-       };
-    }
-    otherwise {
-       print STDERR "Well I don't know what to say\n";
-    }
-    finally {
-       close_the_garage_door_already(); # Should be reliable
-    }; # Don't forget the trailing ; or you might be surprised
-
-=head1 DESCRIPTION
-
-The C<Error> package provides two interfaces. Firstly C<Error> provides
-a procedural interface to exception handling. Secondly C<Error> is a
-base class for errors/exceptions that can either be thrown, for
-subsequent catch, or can simply be recorded.
-
-Errors in the class C<Error> should not be thrown directly, but the
-user should throw errors from a sub-class of C<Error>.
-
-=head1 PROCEDURAL INTERFACE
-
-C<Error> exports subroutines to perform exception handling. These will
-be exported if the C<:try> tag is used in the C<use> line.
-
-=over 4
-
-=item try BLOCK CLAUSES
-
-C<try> is the main subroutine called by the user. All other subroutines
-exported are clauses to the try subroutine.
-
-The BLOCK will be evaluated and, if no error is throw, try will return
-the result of the block.
-
-C<CLAUSES> are the subroutines below, which describe what to do in the
-event of an error being thrown within BLOCK.
-
-=item catch CLASS with BLOCK
-
-This clauses will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
-to be caught and handled by evaluating C<BLOCK>.
-
-C<BLOCK> will be passed two arguments. The first will be the error
-being thrown. The second is a reference to a scalar variable. If this
-variable is set by the catch block then, on return from the catch
-block, try will continue processing as if the catch block was never
-found.
-
-To propagate the error the catch block may call C<$err-E<gt>throw>
-
-If the scalar reference by the second argument is not set, and the
-error is not thrown. Then the current try block will return with the
-result from the catch block.
-
-=item except BLOCK
-
-When C<try> is looking for a handler, if an except clause is found
-C<BLOCK> is evaluated. The return value from this block should be a
-HASHREF or a list of key-value pairs, where the keys are class names
-and the values are CODE references for the handler of errors of that
-type.
-
-=item otherwise BLOCK
-
-Catch any error by executing the code in C<BLOCK>
-
-When evaluated C<BLOCK> will be passed one argument, which will be the
-error being processed.
-
-Only one otherwise block may be specified per try block
-
-=item finally BLOCK
-
-Execute the code in C<BLOCK> either after the code in the try block has
-successfully completed, or if the try block throws an error then
-C<BLOCK> will be executed after the handler has completed.
-
-If the handler throws an error then the error will be caught, the
-finally block will be executed and the error will be re-thrown.
-
-Only one finally block may be specified per try block
-
-=back
-
-=head1 CLASS INTERFACE
-
-=head2 CONSTRUCTORS
-
-The C<Error> object is implemented as a HASH. This HASH is initialized
-with the arguments that are passed to its constructor. The elements
-that are used by, or are retrievable by the C<Error> class are listed
-below, other classes may add to these.
-
-       -file
-       -line
-       -text
-       -value
-       -object
-
-If C<-file> or C<-line> are not specified in the constructor arguments
-then these will be initialized with the file name and line number where
-the constructor was called from.
-
-If the error is associated with an object then the object should be
-passed as the C<-object> argument. This will allow the C<Error> package
-to associate the error with the object.
-
-The C<Error> package remembers the last error created, and also the
-last error associated with a package. This could either be the last
-error created by a sub in that package, or the last error which passed
-an object blessed into that package as the C<-object> argument.
-
-=over 4
-
-=item throw ( [ ARGS ] )
-
-Create a new C<Error> object and throw an error, which will be caught
-by a surrounding C<try> block, if there is one. Otherwise it will cause
-the program to exit.
-
-C<throw> may also be called on an existing error to re-throw it.
-
-=item with ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
-    die with Some::Error ( ... );
-
-=item record ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
-    record Some::Error ( ... )
-       and return;
-
-=back
-
-=head2 STATIC METHODS
-
-=over 4
-
-=item prior ( [ PACKAGE ] )
-
-Return the last error created, or the last error associated with
-C<PACKAGE>
-
-=item flush ( [ PACKAGE ] )
-
-Flush the last error created, or the last error associated with
-C<PACKAGE>.It is necessary to clear the error stack before exiting the
-package or uncaught errors generated using C<record> will be reported.
-
-     $Error->flush;
-
-=cut
-
-=back
-
-=head2 OBJECT METHODS
-
-=over 4
-
-=item stacktrace
-
-If the variable C<$Error::Debug> was non-zero when the error was
-created, then C<stacktrace> returns a string created by calling
-C<Carp::longmess>. If the variable was zero the C<stacktrace> returns
-the text of the error appended with the filename and line number of
-where the error was created, providing the text does not end with a
-newline.
-
-=item object
-
-The object this error was associated with
-
-=item file
-
-The file where the constructor of this error was called from
-
-=item line
-
-The line where the constructor of this error was called from
-
-=item text
-
-The text of the error
-
-=back
-
-=head2 OVERLOAD METHODS
-
-=over 4
-
-=item stringify
-
-A method that converts the object into a string. This method may simply
-return the same as the C<text> method, or it may append more
-information. For example the file name and line number.
-
-By default this method returns the C<-text> argument that was passed to
-the constructor, or the string C<"Died"> if none was given.
-
-=item value
-
-A method that will return a value that can be associated with the
-error. For example if an error was created due to the failure of a
-system call, then this may return the numeric value of C<$!> at the
-time.
-
-By default this method returns the C<-value> argument that was passed
-to the constructor.
-
-=back
-
-=head1 PRE-DEFINED ERROR CLASSES
-
-=over 4
-
-=item Error::Simple
-
-This class can be used to hold simple error strings and values. Its
-constructor takes two arguments. The first is a text value, the second
-is a numeric value. These values are what will be returned by the
-overload methods.
-
-If the text value ends with C<at file line 1> as $@ strings do, then
-this information will be used to set the C<-file> and C<-line> arguments
-of the error object.
-
-This class is used internally if an eval'd block die's with an error
-that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified)
-
-=back
-
-=head1 $Error::ObjectifyCallback
-
-This variable holds a reference to a subroutine that converts errors that
-are plain strings to objects. It is used by Error.pm to convert textual
-errors to objects, and can be overridden by the user.
-
-It accepts a single argument which is a hash reference to named parameters.
-Currently the only named parameter passed is C<'text'> which is the text
-of the error, but others may be available in the future.
-
-For example the following code will cause Error.pm to throw objects of the
-class MyError::Bar by default:
-
-    sub throw_MyError_Bar
-    {
-        my $args = shift;
-        my $err = MyError::Bar->new();
-        $err->{'MyBarText'} = $args->{'text'};
-        return $err;
-    }
-
-    {
-        local $Error::ObjectifyCallback = \&throw_MyError_Bar;
-
-        # Error handling here.
-    }
-
-=head1 KNOWN BUGS
-
-None, but that does not mean there are not any.
-
-=head1 AUTHORS
-
-Graham Barr <gbarr@pobox.com>
-
-The code that inspired me to write this was originally written by
-Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
-<jglick@sig.bsh.com>.
-
-=head1 MAINTAINER
-
-Shlomi Fish <shlomif@iglu.org.il>
-
-=head1 PAST MAINTAINERS
-
-Arun Kumar U <u_arunkumar@yahoo.com>
-
-=cut
diff --git a/perl/Git/FromCPAN/Mail/Address.pm b/perl/Git/FromCPAN/Mail/Address.pm
deleted file mode 100644 (file)
index 13b2ff7..0000000
+++ /dev/null
@@ -1,276 +0,0 @@
-# Copyrights 1995-2017 by [Mark Overmeer <perl@overmeer.net>].
-#  For other contributors see ChangeLog.
-# See the manual pages for details on the licensing terms.
-# Pod stripped from pm file by OODoc 2.02.
-package Mail::Address;
-use vars '$VERSION';
-$VERSION = '2.19';
-
-use strict;
-
-use Carp;
-
-# use locale;   removed in version 1.78, because it causes taint problems
-
-sub Version { our $VERSION }
-
-
-
-# given a comment, attempt to extract a person's name
-sub _extract_name
-{   # This function can be called as method as well
-    my $self = @_ && ref $_[0] ? shift : undef;
-
-    local $_ = shift
-        or return '';
-
-    # Using encodings, too hard. See Mail::Message::Field::Full.
-    return '' if m/\=\?.*?\?\=/;
-
-    # trim whitespace
-    s/^\s+//;
-    s/\s+$//;
-    s/\s+/ /;
-
-    # Disregard numeric names (e.g. 123456.1234@compuserve.com)
-    return "" if /^[\d ]+$/;
-
-    s/^\((.*)\)$/$1/; # remove outermost parenthesis
-    s/^"(.*)"$/$1/;   # remove outer quotation marks
-    s/\(.*?\)//g;     # remove minimal embedded comments
-    s/\\//g;          # remove all escapes
-    s/^"(.*)"$/$1/;   # remove internal quotation marks
-    s/^([^\s]+) ?, ?(.*)$/$2 $1/; # reverse "Last, First M." if applicable
-    s/,.*//;
-
-    # Change casing only when the name contains only upper or only
-    # lower cased characters.
-    unless( m/[A-Z]/ && m/[a-z]/ )
-    {   # Set the case of the name to first char upper rest lower
-        s/\b(\w+)/\L\u$1/igo;  # Upcase first letter on name
-        s/\bMc(\w)/Mc\u$1/igo; # Scottish names such as 'McLeod'
-        s/\bo'(\w)/O'\u$1/igo; # Irish names such as 'O'Malley, O'Reilly'
-        s/\b(x*(ix)?v*(iv)?i*)\b/\U$1/igo; # Roman numerals, eg 'Level III Support'
-    }
-
-    # some cleanup
-    s/\[[^\]]*\]//g;
-    s/(^[\s'"]+|[\s'"]+$)//g;
-    s/\s{2,}/ /g;
-
-    $_;
-}
-
-sub _tokenise
-{   local $_ = join ',', @_;
-    my (@words,$snippet,$field);
-
-    s/\A\s+//;
-    s/[\r\n]+/ /g;
-
-    while ($_ ne '')
-    {   $field = '';
-        if(s/^\s*\(/(/ )    # (...)
-        {   my $depth = 0;
-
-     PAREN: while(s/^(\(([^\(\)\\]|\\.)*)//)
-            {   $field .= $1;
-                $depth++;
-                while(s/^(([^\(\)\\]|\\.)*\)\s*)//)
-                {   $field .= $1;
-                    last PAREN unless --$depth;
-                   $field .= $1 if s/^(([^\(\)\\]|\\.)+)//;
-                }
-            }
-
-            carp "Unmatched () '$field' '$_'"
-                if $depth;
-
-            $field =~ s/\s+\Z//;
-            push @words, $field;
-
-            next;
-        }
-
-        if( s/^("(?:[^"\\]+|\\.)*")\s*//       # "..."
-         || s/^(\[(?:[^\]\\]+|\\.)*\])\s*//    # [...]
-         || s/^([^\s()<>\@,;:\\".[\]]+)\s*//
-         || s/^([()<>\@,;:\\".[\]])\s*//
-          )
-        {   push @words, $1;
-            next;
-        }
-
-        croak "Unrecognised line: $_";
-    }
-
-    push @words, ",";
-    \@words;
-}
-
-sub _find_next
-{   my ($idx, $tokens, $len) = @_;
-
-    while($idx < $len)
-    {   my $c = $tokens->[$idx];
-        return $c if $c eq ',' || $c eq ';' || $c eq '<';
-        $idx++;
-    }
-
-    "";
-}
-
-sub _complete
-{   my ($class, $phrase, $address, $comment) = @_;
-
-    @$phrase || @$comment || @$address
-       or return undef;
-
-    my $o = $class->new(join(" ",@$phrase), join("",@$address), join(" ",@$comment));
-    @$phrase = @$address = @$comment = ();
-    $o;
-}
-
-#------------
-
-sub new(@)
-{   my $class = shift;
-    bless [@_], $class;
-}
-
-
-sub parse(@)
-{   my $class = shift;
-    my @line  = grep {defined} @_;
-    my $line  = join '', @line;
-
-    my (@phrase, @comment, @address, @objs);
-    my ($depth, $idx) = (0, 0);
-
-    my $tokens  = _tokenise @line;
-    my $len     = @$tokens;
-    my $next    = _find_next $idx, $tokens, $len;
-
-    local $_;
-    for(my $idx = 0; $idx < $len; $idx++)
-    {   $_ = $tokens->[$idx];
-
-        if(substr($_,0,1) eq '(') { push @comment, $_ }
-        elsif($_ eq '<')    { $depth++ }
-        elsif($_ eq '>')    { $depth-- if $depth }
-        elsif($_ eq ',' || $_ eq ';')
-        {   warn "Unmatched '<>' in $line" if $depth;
-            my $o = $class->_complete(\@phrase, \@address, \@comment);
-            push @objs, $o if defined $o;
-            $depth = 0;
-            $next = _find_next $idx+1, $tokens, $len;
-        }
-        elsif($depth)       { push @address, $_ }
-        elsif($next eq '<') { push @phrase,  $_ }
-        elsif( /^[.\@:;]$/ || !@address || $address[-1] =~ /^[.\@:;]$/ )
-        {   push @address, $_ }
-        else
-        {   warn "Unmatched '<>' in $line" if $depth;
-            my $o = $class->_complete(\@phrase, \@address, \@comment);
-            push @objs, $o if defined $o;
-            $depth = 0;
-            push @address, $_;
-        }
-    }
-    @objs;
-}
-
-#------------
-
-sub phrase  { shift->set_or_get(0, @_) }
-sub address { shift->set_or_get(1, @_) }
-sub comment { shift->set_or_get(2, @_) }
-
-sub set_or_get($)
-{   my ($self, $i) = (shift, shift);
-    @_ or return $self->[$i];
-
-    my $val = $self->[$i];
-    $self->[$i] = shift if @_;
-    $val;
-}
-
-
-my $atext = '[\-\w !#$%&\'*+/=?^`{|}~]';
-sub format
-{   my @addrs;
-
-    foreach (@_)
-    {   my ($phrase, $email, $comment) = @$_;
-        my @addr;
-
-        if(defined $phrase && length $phrase)
-        {   push @addr
-              , $phrase =~ /^(?:\s*$atext\s*)+$/o ? $phrase
-              : $phrase =~ /(?<!\\)"/             ? $phrase
-              :                                    qq("$phrase");
-
-            push @addr, "<$email>"
-                if defined $email && length $email;
-        }
-        elsif(defined $email && length $email)
-        {   push @addr, $email;
-        }
-
-        if(defined $comment && $comment =~ /\S/)
-        {   $comment =~ s/^\s*\(?/(/;
-            $comment =~ s/\)?\s*$/)/;
-        }
-
-        push @addr, $comment
-            if defined $comment && length $comment;
-
-        push @addrs, join(" ", @addr)
-            if @addr;
-    }
-
-    join ", ", @addrs;
-}
-
-#------------
-
-sub name
-{   my $self   = shift;
-    my $phrase = $self->phrase;
-    my $addr   = $self->address;
-
-    $phrase    = $self->comment
-        unless defined $phrase && length $phrase;
-
-    my $name   = $self->_extract_name($phrase);
-
-    # first.last@domain address
-    if($name eq '' && $addr =~ /([^\%\.\@_]+([\._][^\%\.\@_]+)+)[\@\%]/)
-    {   ($name  = $1) =~ s/[\._]+/ /g;
-       $name   = _extract_name $name;
-    }
-
-    if($name eq '' && $addr =~ m#/g=#i)    # X400 style address
-    {   my ($f) = $addr =~ m#g=([^/]*)#i;
-       my ($l) = $addr =~ m#s=([^/]*)#i;
-       $name   = _extract_name "$f $l";
-    }
-
-    length $name ? $name : undef;
-}
-
-
-sub host
-{   my $addr = shift->address || '';
-    my $i    = rindex $addr, '@';
-    $i >= 0 ? substr($addr, $i+1) : undef;
-}
-
-
-sub user
-{   my $addr = shift->address || '';
-    my $i    = rindex $addr, '@';
-    $i >= 0 ? substr($addr,0,$i) : $addr;
-}
-
-1;
diff --git a/perl/Git/LoadCPAN.pm b/perl/Git/LoadCPAN.pm
new file mode 100644 (file)
index 0000000..e5585e7
--- /dev/null
@@ -0,0 +1,104 @@
+package Git::LoadCPAN;
+use 5.008;
+use strict;
+use warnings;
+
+=head1 NAME
+
+Git::LoadCPAN - Wrapper for loading modules from the CPAN (OS) or Git's own copy
+
+=head1 DESCRIPTION
+
+The Perl code in Git depends on some modules from the CPAN, but we
+don't want to make those a hard requirement for anyone building from
+source.
+
+Therefore the L<Git::LoadCPAN> namespace shipped with Git contains
+wrapper modules like C<Git::LoadCPAN::Module::Name> that will first
+attempt to load C<Module::Name> from the OS, and if that doesn't work
+will fall back on C<FromCPAN::Module::Name> shipped with Git itself.
+
+Distributors will usually not ship Git's Git::FromCPAN tree at all,
+via the C<NO_PERL_CPAN_FALLBACKS> option, preferring to use their own
+packaging of CPAN modules instead.
+
+This module is only intended to be used for code shipping in the
+C<git.git> repository. Use it for anything else at your peril!
+
+=cut
+
+# NO_PERL_CPAN_FALLBACKS_STR evades the sed search-replace from the
+# Makefile, and allows for detecting whether the module is loaded from
+# perl/Git as opposed to perl/build/Git, which is useful for one-off
+# testing without having Error.pm et al installed.
+use constant NO_PERL_CPAN_FALLBACKS_STR => '@@' . 'NO_PERL_CPAN_FALLBACKS' . '@@';
+use constant NO_PERL_CPAN_FALLBACKS => (
+       q[@@NO_PERL_CPAN_FALLBACKS@@] ne ''
+       and
+       q[@@NO_PERL_CPAN_FALLBACKS@@] ne NO_PERL_CPAN_FALLBACKS_STR
+);
+
+sub import {
+       shift;
+       my $caller = caller;
+       my %args = @_;
+       my $module = exists $args{module} ? delete $args{module} : die "BUG: Expected 'module' parameter!";
+       my $import = exists $args{import} ? delete $args{import} : die "BUG: Expected 'import' parameter!";
+       die "BUG: Too many arguments!" if keys %args;
+
+       # Foo::Bar to Foo/Bar.pm
+       my $package_pm = $module;
+       $package_pm =~ s[::][/]g;
+       $package_pm .= '.pm';
+
+       eval {
+               require $package_pm;
+               1;
+       } or do {
+               my $error = $@ || "Zombie Error";
+
+               if (NO_PERL_CPAN_FALLBACKS) {
+                       chomp(my $error = sprintf <<'THEY_PROMISED', $module);
+BUG: The '%s' module is not here, but NO_PERL_CPAN_FALLBACKS was set!
+
+Git needs this Perl module from the CPAN, and will by default ship
+with a copy of it. This Git was built with NO_PERL_CPAN_FALLBACKS,
+meaning that whoever built it promised to provide this module.
+
+You're seeing this error because they broke that promise, and we can't
+load our fallback version, since we were asked not to install it.
+
+If you're seeing this error and didn't package Git yourself the
+package you're using is broken, or your system is broken. This error
+won't appear if Git is built without NO_PERL_CPAN_FALLBACKS (instead
+we'll use our fallback version of the module).
+THEY_PROMISED
+                       die $error;
+               }
+
+               my $Git_LoadCPAN_pm_path = $INC{"Git/LoadCPAN.pm"} || die "BUG: Should have our own path from %INC!";
+
+               require File::Basename;
+               my $Git_LoadCPAN_pm_root = File::Basename::dirname($Git_LoadCPAN_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_LoadCPAN_pm_path'!";
+
+               require File::Spec;
+               my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_LoadCPAN_pm_root, '..', 'FromCPAN');
+               die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" unless -d $Git_pm_FromCPAN_root;
+
+               local @INC = ($Git_pm_FromCPAN_root, @INC);
+               require $package_pm;
+       };
+
+       if ($import) {
+               no strict 'refs';
+               *{"${caller}::import"} = sub {
+                       shift;
+                       use strict 'refs';
+                       unshift @_, $module;
+                       goto &{"${module}::import"};
+               };
+               use strict 'refs';
+       }
+}
+
+1;
diff --git a/perl/Git/LoadCPAN/Error.pm b/perl/Git/LoadCPAN/Error.pm
new file mode 100644 (file)
index 0000000..c6d2c45
--- /dev/null
@@ -0,0 +1,10 @@
+package Git::LoadCPAN::Error;
+use 5.008;
+use strict;
+use warnings;
+use Git::LoadCPAN (
+       module => 'Error',
+       import => 1,
+);
+
+1;
diff --git a/perl/Git/LoadCPAN/Mail/Address.pm b/perl/Git/LoadCPAN/Mail/Address.pm
new file mode 100644 (file)
index 0000000..f70a4f0
--- /dev/null
@@ -0,0 +1,10 @@
+package Git::LoadCPAN::Mail::Address;
+use 5.008;
+use strict;
+use warnings;
+use Git::LoadCPAN (
+       module => 'Mail::Address',
+       import => 0,
+);
+
+1;
diff --git a/perl/Git/Mail/Address.pm b/perl/Git/Mail/Address.pm
deleted file mode 100755 (executable)
index 2ce3e84..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-package Git::Mail::Address;
-use 5.008;
-use strict;
-use warnings;
-
-=head1 NAME
-
-Git::Mail::Address - Wrapper for the L<Mail::Address> module, in case it's not installed
-
-=head1 DESCRIPTION
-
-This module is only intended to be used for code shipping in the
-C<git.git> repository. Use it for anything else at your peril!
-
-=cut
-
-eval {
-    require Mail::Address;
-    1;
-} or do {
-    require Git::FromCPAN::Mail::Address;
-};
-
-1;
index bc4eed3d75461444f8af0e27e2930ccb25663312..991a5885e9230b1f55bd6f3b7f7b53321bf9e562 100644 (file)
@@ -1405,7 +1405,7 @@ sub parse_svn_date {
                $ENV{TZ} = 'UTC';
 
                my $epoch_in_UTC =
-                   Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y - 1900);
+                   Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y);
 
                # Determine our local timezone (including DST) at the
                # time of $epoch_in_UTC.  $Git::SVN::Log::TZ stored the
index 2827ca772a3703f71bc588d0f6cacd5caa318fe7..555eb2a50746bb8f129a8c14c9b6d61169093436 100644 (file)
@@ -91,6 +91,12 @@ void packet_flush(int fd)
        write_or_die(fd, "0000", 4);
 }
 
+void packet_delim(int fd)
+{
+       packet_trace("0001", 4, 1);
+       write_or_die(fd, "0001", 4);
+}
+
 int packet_flush_gently(int fd)
 {
        packet_trace("0000", 4, 1);
@@ -105,6 +111,12 @@ void packet_buf_flush(struct strbuf *buf)
        strbuf_add(buf, "0000", 4);
 }
 
+void packet_buf_delim(struct strbuf *buf)
+{
+       packet_trace("0001", 4, 1);
+       strbuf_add(buf, "0001", 4);
+}
+
 static void set_packet_header(char *buf, const int size)
 {
        static char hexchar[] = "0123456789abcdef";
@@ -203,6 +215,22 @@ void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
        va_end(args);
 }
 
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
+{
+       size_t orig_len, n;
+
+       orig_len = buf->len;
+       strbuf_addstr(buf, "0000");
+       strbuf_add(buf, data, len);
+       n = buf->len - orig_len;
+
+       if (n > LARGE_PACKET_MAX)
+               die("protocol error: impossibly long line");
+
+       set_packet_header(&buf->buf[orig_len], n);
+       packet_trace(data, len, 1);
+}
+
 int write_packetized_from_fd(int fd_in, int fd_out)
 {
        static char buf[LARGE_PACKET_DATA_MAX];
@@ -280,28 +308,43 @@ static int packet_length(const char *linelen)
        return (val < 0) ? val : (val << 8) | hex2chr(linelen + 2);
 }
 
-int packet_read(int fd, char **src_buf, size_t *src_len,
-               char *buffer, unsigned size, int options)
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+                                               size_t *src_len, char *buffer,
+                                               unsigned size, int *pktlen,
+                                               int options)
 {
-       int len, ret;
+       int len;
        char linelen[4];
 
-       ret = get_packet_data(fd, src_buf, src_len, linelen, 4, options);
-       if (ret < 0)
-               return ret;
+       if (get_packet_data(fd, src_buffer, src_len, linelen, 4, options) < 0) {
+               *pktlen = -1;
+               return PACKET_READ_EOF;
+       }
+
        len = packet_length(linelen);
-       if (len < 0)
+
+       if (len < 0) {
                die("protocol error: bad line length character: %.4s", linelen);
-       if (!len) {
+       } else if (!len) {
                packet_trace("0000", 4, 0);
-               return 0;
+               *pktlen = 0;
+               return PACKET_READ_FLUSH;
+       } else if (len == 1) {
+               packet_trace("0001", 4, 0);
+               *pktlen = 0;
+               return PACKET_READ_DELIM;
+       } else if (len < 4) {
+               die("protocol error: bad line length %d", len);
        }
+
        len -= 4;
-       if (len >= size)
+       if ((unsigned)len >= size)
                die("protocol error: bad line length %d", len);
-       ret = get_packet_data(fd, src_buf, src_len, buffer, len, options);
-       if (ret < 0)
-               return ret;
+
+       if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
+               *pktlen = -1;
+               return PACKET_READ_EOF;
+       }
 
        if ((options & PACKET_READ_CHOMP_NEWLINE) &&
            len && buffer[len-1] == '\n')
@@ -309,7 +352,19 @@ int packet_read(int fd, char **src_buf, size_t *src_len,
 
        buffer[len] = 0;
        packet_trace(buffer, len, 0);
-       return len;
+       *pktlen = len;
+       return PACKET_READ_NORMAL;
+}
+
+int packet_read(int fd, char **src_buffer, size_t *src_len,
+               char *buffer, unsigned size, int options)
+{
+       int pktlen = -1;
+
+       packet_read_with_status(fd, src_buffer, src_len, buffer, size,
+                               &pktlen, options);
+
+       return pktlen;
 }
 
 static char *packet_read_line_generic(int fd,
@@ -377,3 +432,53 @@ ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out)
        }
        return sb_out->len - orig_len;
 }
+
+/* Packet Reader Functions */
+void packet_reader_init(struct packet_reader *reader, int fd,
+                       char *src_buffer, size_t src_len,
+                       int options)
+{
+       memset(reader, 0, sizeof(*reader));
+
+       reader->fd = fd;
+       reader->src_buffer = src_buffer;
+       reader->src_len = src_len;
+       reader->buffer = packet_buffer;
+       reader->buffer_size = sizeof(packet_buffer);
+       reader->options = options;
+}
+
+enum packet_read_status packet_reader_read(struct packet_reader *reader)
+{
+       if (reader->line_peeked) {
+               reader->line_peeked = 0;
+               return reader->status;
+       }
+
+       reader->status = packet_read_with_status(reader->fd,
+                                                &reader->src_buffer,
+                                                &reader->src_len,
+                                                reader->buffer,
+                                                reader->buffer_size,
+                                                &reader->pktlen,
+                                                reader->options);
+
+       if (reader->status == PACKET_READ_NORMAL)
+               reader->line = reader->buffer;
+       else
+               reader->line = NULL;
+
+       return reader->status;
+}
+
+enum packet_read_status packet_reader_peek(struct packet_reader *reader)
+{
+       /* Only allow peeking a single line */
+       if (reader->line_peeked)
+               return reader->status;
+
+       /* Peek a line by reading it and setting peeked flag */
+       packet_reader_read(reader);
+       reader->line_peeked = 1;
+       return reader->status;
+}
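
The packet_reader functions above wrap packet_read_with_status() and add one-line lookahead. As a rough caller sketch (not part of this patch; dump_packets() is a hypothetical helper that assumes git's own pkt-line.h and a file descriptor carrying pkt-line framed input):

    #include "cache.h"
    #include "pkt-line.h"

    /* Hypothetical consumer: print pkt-lines from fd until flush or EOF. */
    static void dump_packets(int fd)
    {
        struct packet_reader reader;

        packet_reader_init(&reader, fd, NULL, 0,
                           PACKET_READ_CHOMP_NEWLINE |
                           PACKET_READ_GENTLE_ON_EOF);

        while (1) {
            switch (packet_reader_read(&reader)) {
            case PACKET_READ_NORMAL:
                /* reader.line points into reader.buffer, reader.pktlen bytes long */
                fprintf(stderr, "line: %s\n", reader.line);
                break;
            case PACKET_READ_DELIM:
                fprintf(stderr, "delim\n");
                break;
            case PACKET_READ_FLUSH:
            case PACKET_READ_EOF:
                return;
            }
        }
    }

packet_reader_peek() can be called the same way to inspect the status of the next line without consuming it; the remote-curl.c changes further down rely on that to let discover_version() look at the first packet of the response before the ref advertisement is consumed.
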
index 3dad583e2d02264c4a831c939ae0e13a54de2ff6..5b28d43472db41a59f0a44845953f163748593b0 100644 (file)
  * side can't, we stay with pure read/write interfaces.
  */
 void packet_flush(int fd);
+void packet_delim(int fd);
 void packet_write_fmt(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 void packet_buf_flush(struct strbuf *buf);
+void packet_buf_delim(struct strbuf *buf);
 void packet_write(int fd_out, const char *buf, size_t size);
 void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
 int packet_flush_gently(int fd);
 int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
 int write_packetized_from_fd(int fd_in, int fd_out);
@@ -65,6 +68,23 @@ int write_packetized_from_buf(const char *src_in, size_t len, int fd_out);
 int packet_read(int fd, char **src_buffer, size_t *src_len, char
                *buffer, unsigned size, int options);
 
+/*
+ * Read a packetized line into a buffer like the 'packet_read()' function but
+ * return an 'enum packet_read_status' which indicates the status of the read.
+ * The number of bytes read will be assigned to *pktlen if the status of the
+ * read was 'PACKET_READ_NORMAL'.
+ */
+enum packet_read_status {
+       PACKET_READ_EOF,
+       PACKET_READ_NORMAL,
+       PACKET_READ_FLUSH,
+       PACKET_READ_DELIM,
+};
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+                                               size_t *src_len, char *buffer,
+                                               unsigned size, int *pktlen,
+                                               int options);
+
 /*
  * Convenience wrapper for packet_read that is not gentle, and sets the
  * CHOMP_NEWLINE option. The return value is NULL for a flush packet,
@@ -96,6 +116,64 @@ char *packet_read_line_buf(char **src_buf, size_t *src_len, int *size);
  */
 ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
 
+struct packet_reader {
+       /* source file descriptor */
+       int fd;
+
+       /* source buffer and its size */
+       char *src_buffer;
+       size_t src_len;
+
+       /* buffer that pkt-lines are read into and its size */
+       char *buffer;
+       unsigned buffer_size;
+
+       /* options to be used during reads */
+       int options;
+
+       /* status of the last read */
+       enum packet_read_status status;
+
+       /* length of data read during the last read */
+       int pktlen;
+
+       /* the last line read */
+       const char *line;
+
+       /* indicates if a line has been peeked */
+       int line_peeked;
+};
+
+/*
+ * Initialize a 'struct packet_reader' object which is an
+ * abstraction around the 'packet_read_with_status()' function.
+ */
+extern void packet_reader_init(struct packet_reader *reader, int fd,
+                              char *src_buffer, size_t src_len,
+                              int options);
+
+/*
+ * Perform a packet read and return the status of the read.
+ * The values of 'pktlen' and 'line' are updated based on the status of the
+ * read as follows:
+ *
+ * PACKET_READ_EOF: 'pktlen' is set to '-1' and 'line' is set to NULL
+ * PACKET_READ_NORMAL: 'pktlen' is set to the number of bytes read
+ *                    'line' is set to point at the read line
+ * PACKET_READ_FLUSH or PACKET_READ_DELIM: 'pktlen' is set to '0' and 'line' to NULL
+ */
+extern enum packet_read_status packet_reader_read(struct packet_reader *reader);
+
+/*
+ * Peek the next packet line without consuming it and return the status.
+ * The next call to 'packet_reader_read()' will perform a read of the same line
+ * that was peeked, consuming the line.
+ *
+ * Peeking multiple times without calling 'packet_reader_read()' will return
+ * the same result.
+ */
+extern enum packet_read_status packet_reader_peek(struct packet_reader *reader);
+
 #define DEFAULT_PACKET_MAX 1000
 #define LARGE_PACKET_MAX 65520
 #define LARGE_PACKET_DATA_MAX (LARGE_PACKET_MAX - 4)
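
For reference, the framing these helpers speak is the pkt-line format: each packet starts with a four-hex-digit length that counts itself plus the payload, with "0000" reserved as a flush packet and "0001" (new here) as a delimiter packet; lengths 2 and 3 are invalid, which is why packet_read_with_status() above dies on them. A standalone illustration, not git code (frame() is a hypothetical helper):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: emit "<4-hex-digit length><payload>". */
    static void frame(char *out, const char *payload)
    {
        size_t n = strlen(payload) + 4; /* the length covers itself */
        sprintf(out, "%04zx%s", n, payload);
    }

    int main(void)
    {
        char buf[64];

        frame(buf, "hello\n");
        fputs(buf, stdout); /* prints "000ahello" followed by a newline */
        return 0;
    }
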
index 2a83255e4eeaf8e8ce74516880b0333b23d6f583..4d08d4487460f839f39f667c714f1bb85fa1e144 100644 (file)
@@ -78,6 +78,7 @@ static void preload_index(struct index_state *index,
 {
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
+       uint64_t start = getnanotime();
 
        if (!core_preload_index)
                return;
@@ -108,6 +109,7 @@ static void preload_index(struct index_state *index,
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
+       trace_performance_since(start, "preload index");
 }
 #endif
 
index f7ce4902301490d73bdd79bd396cf7bbe5f893ea..34fe891fc03672fa042257e4d32630882868eadf 100644 (file)
--- a/pretty.c
+++ b/pretty.c
@@ -549,7 +549,7 @@ static void add_merge_info(const struct pretty_print_context *pp,
                struct object_id *oidp = &parent->item->object.oid;
                strbuf_addch(sb, ' ');
                if (pp->abbrev)
-                       strbuf_add_unique_abbrev(sb, oidp->hash, pp->abbrev);
+                       strbuf_add_unique_abbrev(sb, oidp, pp->abbrev);
                else
                        strbuf_addstr(sb, oid_to_hex(oidp));
                parent = parent->next;
@@ -1156,7 +1156,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                return 1;
        case 'h':               /* abbreviated commit hash */
                strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_COMMIT));
-               strbuf_add_unique_abbrev(sb, commit->object.oid.hash,
+               strbuf_add_unique_abbrev(sb, &commit->object.oid,
                                         c->pretty_ctx->abbrev);
                strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_RESET));
                return 1;
@@ -1164,7 +1164,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                strbuf_addstr(sb, oid_to_hex(&commit->tree->object.oid));
                return 1;
        case 't':               /* abbreviated tree hash */
-               strbuf_add_unique_abbrev(sb, commit->tree->object.oid.hash,
+               strbuf_add_unique_abbrev(sb, &commit->tree->object.oid,
                                         c->pretty_ctx->abbrev);
                return 1;
        case 'P':               /* parent hashes */
@@ -1178,7 +1178,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                for (p = commit->parents; p; p = p->next) {
                        if (p != commit->parents)
                                strbuf_addch(sb, ' ');
-                       strbuf_add_unique_abbrev(sb, p->item->object.oid.hash,
+                       strbuf_add_unique_abbrev(sb, &p->item->object.oid,
                                                 c->pretty_ctx->abbrev);
                }
                return 1;
index 43012b7eb6e18bc48d93e6b96a3f3f57fe8ef79f..5e636785d14f8ef5283561d4606fface080bf3c0 100644 (file)
@@ -8,6 +8,8 @@ static enum protocol_version parse_protocol_version(const char *value)
                return protocol_v0;
        else if (!strcmp(value, "1"))
                return protocol_v1;
+       else if (!strcmp(value, "2"))
+               return protocol_v2;
        else
                return protocol_unknown_version;
 }
index 1b2bc94a8d9f3c008bb76a0dfcdd640f99756fe3..2ad35e433c1e6f5f0d08c9d7500dc35782429f51 100644 (file)
@@ -5,6 +5,7 @@ enum protocol_version {
        protocol_unknown_version = -1,
        protocol_v0 = 0,
        protocol_v1 = 1,
+       protocol_v2 = 2,
 };
 
 /*
diff --git a/quote.c b/quote.c
index 37d26868656fe8c8c65de41580ab5ae9e89981fc..c95dd2cafbaa85c9c443a229134842bf06ce3200 100644 (file)
--- a/quote.c
+++ b/quote.c
@@ -118,9 +118,15 @@ static char *sq_dequote_step(char *arg, char **next)
                                *next = NULL;
                        return arg;
                case '\\':
-                       c = *++src;
-                       if (need_bs_quote(c) && *++src == '\'') {
-                               *dst++ = c;
+                       /*
+                        * Allow backslashed characters outside of
+                        * single-quotes only if they need escaping,
+                        * and only if we resume the single-quoted part
+                        * afterward.
+                        */
+                       if (need_bs_quote(src[1]) && src[2] == '\'') {
+                               *dst++ = src[1];
+                               src += 2;
                                continue;
                        }
                /* Fallthrough */
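
The rewritten case '\\' branch above only accepts a backslashed character between single-quoted runs when it actually needs the quoting (a single quote or '!') and when the quoted part resumes immediately afterwards. A worked example against the public sq_dequote() entry point (a sketch assuming git's quote.h; demo() and the two inputs are made up for illustration):

    #include "cache.h"
    #include "quote.h"

    static void demo(void)
    {
        char ok[]  = "'it'\\''s'";    /* the sq-quoted string  'it'\''s'  */
        char bad[] = "'foo'\\x'bar'"; /* the string  'foo'\x'bar'         */

        printf("%s\n", sq_dequote(ok)); /* prints: it's */

        if (!sq_dequote(bad))           /* 'x' needs no quoting, so the  */
            printf("rejected\n");       /* backslash sequence is refused */
    }
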
index 25cfd99d1cae0bd1180ec0c7476a013859a0f2b5..a6ea33a5db8b616420572cda4b857ac6d7d43cb1 100644 (file)
@@ -78,7 +78,7 @@ static void add_recent_object(const struct object_id *oid,
         * later processing, and the revision machinery expects
         * commits and tags to have been parsed.
         */
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        if (type < 0)
                die("unable to get object info for %s", oid_to_hex(oid));
 
@@ -95,7 +95,7 @@ static void add_recent_object(const struct object_id *oid,
                break;
        default:
                die("unknown object type for %s: %s",
-                   oid_to_hex(oid), typename(type));
+                   oid_to_hex(oid), type_name(type));
        }
 
        if (!obj)
index 9925a94a6ba6b98693efff59a81878e0ffc42a76..10f1c6bb8a316e85448445afc3478c832d61709c 100644 (file)
@@ -62,6 +62,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
        replace_index_entry_in_base(istate, old, ce);
        remove_name_hash(istate, old);
        free(old);
+       ce->ce_flags &= ~CE_HASHED;
        set_index_entry(istate, nr, ce);
        ce->ce_flags |= CE_UPDATE_IN_BASE;
        mark_fsmonitor_invalid(istate, ce);
@@ -70,20 +71,20 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
 
 void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
 {
-       struct cache_entry *old = istate->cache[nr], *new;
+       struct cache_entry *old_entry = istate->cache[nr], *new_entry;
        int namelen = strlen(new_name);
 
-       new = xmalloc(cache_entry_size(namelen));
-       copy_cache_entry(new, old);
-       new->ce_flags &= ~CE_HASHED;
-       new->ce_namelen = namelen;
-       new->index = 0;
-       memcpy(new->name, new_name, namelen + 1);
+       new_entry = xmalloc(cache_entry_size(namelen));
+       copy_cache_entry(new_entry, old_entry);
+       new_entry->ce_flags &= ~CE_HASHED;
+       new_entry->ce_namelen = namelen;
+       new_entry->index = 0;
+       memcpy(new_entry->name, new_name, namelen + 1);
 
-       cache_tree_invalidate_path(istate, old->name);
-       untracked_cache_remove_from_index(istate, old->name);
+       cache_tree_invalidate_path(istate, old_entry->name);
+       untracked_cache_remove_from_index(istate, old_entry->name);
        remove_index_entry_at(istate, nr);
-       add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+       add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
 }
 
 void fill_stat_data(struct stat_data *sd, struct stat *st)
@@ -184,7 +185,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
        if (strbuf_readlink(&sb, ce->name, expected_size))
                return -1;
 
-       buffer = read_sha1_file(ce->oid.hash, &type, &size);
+       buffer = read_object_file(&ce->oid, &type, &size);
        if (buffer) {
                if (size == sb.len)
                        match = memcmp(buffer, sb.buf, size);
@@ -615,26 +616,26 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
                                           struct cache_entry *alias)
 {
        int len;
-       struct cache_entry *new;
+       struct cache_entry *new_entry;
 
        if (alias->ce_flags & CE_ADDED)
                die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
 
        /* Ok, create the new entry using the name of the existing alias */
        len = ce_namelen(alias);
-       new = xcalloc(1, cache_entry_size(len));
-       memcpy(new->name, alias->name, len);
-       copy_cache_entry(new, ce);
+       new_entry = xcalloc(1, cache_entry_size(len));
+       memcpy(new_entry->name, alias->name, len);
+       copy_cache_entry(new_entry, ce);
        save_or_free_index_entry(istate, ce);
-       return new;
+       return new_entry;
 }
 
 void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
 {
-       unsigned char sha1[20];
-       if (write_sha1_file("", 0, blob_type, sha1))
+       struct object_id oid;
+       if (write_object_file("", 0, blob_type, &oid))
                die("cannot create an empty blob in the object database");
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, &oid);
 }
 
 int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
@@ -1324,7 +1325,8 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
 
        size = ce_size(ce);
        updated = xmalloc(size);
-       memcpy(updated, ce, size);
+       copy_cache_entry(updated, ce);
+       memcpy(updated->name, ce->name, ce->ce_namelen + 1);
        fill_stat_cache_info(updated, &st);
        /*
         * If ignore_valid is not set, we should leave CE_VALID bit
@@ -1371,6 +1373,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        const char *typechange_fmt;
        const char *added_fmt;
        const char *unmerged_fmt;
+       uint64_t start = getnanotime();
 
        modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
        deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
@@ -1378,7 +1381,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
        unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
        for (i = 0; i < istate->cache_nr; i++) {
-               struct cache_entry *ce, *new;
+               struct cache_entry *ce, *new_entry;
                int cache_errno = 0;
                int changed = 0;
                int filtered = 0;
@@ -1407,10 +1410,10 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                if (filtered)
                        continue;
 
-               new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
-               if (new == ce)
+               new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
+               if (new_entry == ce)
                        continue;
-               if (!new) {
+               if (!new_entry) {
                        const char *fmt;
 
                        if (really && cache_errno == EINVAL) {
@@ -1439,8 +1442,9 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                        continue;
                }
 
-               replace_index_entry(istate, i, new);
+               replace_index_entry(istate, i, new_entry);
        }
+       trace_performance_since(start, "refresh index");
        return has_errors;
 }
 
@@ -1544,8 +1548,8 @@ int verify_ce_order;
 
 static int verify_hdr(struct cache_header *hdr, unsigned long size)
 {
-       git_SHA_CTX c;
-       unsigned char sha1[20];
+       git_hash_ctx c;
+       unsigned char hash[GIT_MAX_RAWSZ];
        int hdr_version;
 
        if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
@@ -1557,10 +1561,10 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size)
        if (!verify_index_checksum)
                return 0;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, size - 20);
-       git_SHA1_Final(sha1, &c);
-       if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+       the_hash_algo->final_fn(hash, &c);
+       if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
                return error("bad index file sha1 signature");
        return 0;
 }
@@ -1790,7 +1794,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
                die_errno("cannot stat the open index");
 
        mmap_size = xsize_t(st.st_size);
-       if (mmap_size < sizeof(struct cache_header) + 20)
+       if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
                die("index file smaller than expected");
 
        mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -1802,7 +1806,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
        if (verify_hdr(hdr, mmap_size) < 0)
                goto unmap;
 
-       hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+       hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
        istate->version = ntohl(hdr->hdr_version);
        istate->cache_nr = ntohl(hdr->hdr_entries);
        istate->cache_alloc = alloc_nr(istate->cache_nr);
@@ -1830,7 +1834,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
        istate->timestamp.sec = st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
 
-       while (src_offset <= mmap_size - 20 - 8) {
+       while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
                /* After an array of active_nr index entries,
                 * there can be arbitrary number of extended
                 * sections, each of which is prefixed with
@@ -1871,6 +1875,7 @@ static void freshen_shared_index(const char *shared_index, int warn)
 int read_index_from(struct index_state *istate, const char *path,
                    const char *gitdir)
 {
+       uint64_t start = getnanotime();
        struct split_index *split_index;
        int ret;
        char *base_sha1_hex;
@@ -1881,6 +1886,7 @@ int read_index_from(struct index_state *istate, const char *path,
                return istate->cache_nr;
 
        ret = do_read_index(istate, path, 0);
+       trace_performance_since(start, "read cache %s", path);
 
        split_index = istate->split_index;
        if (!split_index || is_null_sha1(split_index->base_sha1)) {
@@ -1904,6 +1910,7 @@ int read_index_from(struct index_state *istate, const char *path,
        freshen_shared_index(base_path, 0);
        merge_base_index(istate);
        post_read_index_from(istate);
+       trace_performance_since(start, "read cache %s", base_path);
        free(base_path);
        return ret;
 }
@@ -1956,11 +1963,11 @@ int unmerged_index(const struct index_state *istate)
 static unsigned char write_buffer[WRITE_BUFFER_SIZE];
 static unsigned long write_buffer_len;
 
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
 {
        unsigned int buffered = write_buffer_len;
        if (buffered) {
-               git_SHA1_Update(context, write_buffer, buffered);
+               the_hash_algo->update_fn(context, write_buffer, buffered);
                if (write_in_full(fd, write_buffer, buffered) < 0)
                        return -1;
                write_buffer_len = 0;
@@ -1968,7 +1975,7 @@ static int ce_write_flush(git_SHA_CTX *context, int fd)
        return 0;
 }
 
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
 {
        while (len) {
                unsigned int buffered = write_buffer_len;
@@ -1990,7 +1997,7 @@ static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
        return 0;
 }
 
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
                                  unsigned int ext, unsigned int sz)
 {
        ext = htonl(ext);
@@ -1999,26 +2006,26 @@ static int write_index_ext_header(git_SHA_CTX *context, int fd,
                (ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
 }
 
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
 {
        unsigned int left = write_buffer_len;
 
        if (left) {
                write_buffer_len = 0;
-               git_SHA1_Update(context, write_buffer, left);
+               the_hash_algo->update_fn(context, write_buffer, left);
        }
 
-       /* Flush first if not enough space for SHA1 signature */
-       if (left + 20 > WRITE_BUFFER_SIZE) {
+       /* Flush first if not enough space for hash signature */
+       if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
                if (write_in_full(fd, write_buffer, left) < 0)
                        return -1;
                left = 0;
        }
 
-       /* Append the SHA1 signature at the end */
-       git_SHA1_Final(write_buffer + left, context);
-       hashcpy(sha1, write_buffer + left);
-       left += 20;
+       /* Append the hash signature at the end */
+       the_hash_algo->final_fn(write_buffer + left, context);
+       hashcpy(hash, write_buffer + left);
+       left += the_hash_algo->rawsz;
        return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
 }
 
@@ -2099,17 +2106,19 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
        }
 }
 
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
                          struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
 {
        int size;
-       int saved_namelen = saved_namelen; /* compiler workaround */
        int result;
+       unsigned int saved_namelen;
+       int stripped_name = 0;
        static unsigned char padding[8] = { 0x00 };
 
        if (ce->ce_flags & CE_STRIP_NAME) {
                saved_namelen = ce_namelen(ce);
                ce->ce_namelen = 0;
+               stripped_name = 1;
        }
 
        if (ce->ce_flags & CE_EXTENDED)
@@ -2149,7 +2158,7 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
                strbuf_splice(previous_name, common, to_remove,
                              ce->name + common, ce_namelen(ce) - common);
        }
-       if (ce->ce_flags & CE_STRIP_NAME) {
+       if (stripped_name) {
                ce->ce_namelen = saved_namelen;
                ce->ce_flags &= ~CE_STRIP_NAME;
        }
@@ -2166,7 +2175,7 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        int fd;
        ssize_t n;
        struct stat st;
-       unsigned char sha1[20];
+       unsigned char hash[GIT_MAX_RAWSZ];
 
        if (!istate->initialized)
                return 0;
@@ -2178,14 +2187,14 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        if (fstat(fd, &st))
                goto out;
 
-       if (st.st_size < sizeof(struct cache_header) + 20)
+       if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
                goto out;
 
-       n = pread_in_full(fd, sha1, 20, st.st_size - 20);
-       if (n != 20)
+       n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+       if (n != the_hash_algo->rawsz)
                goto out;
 
-       if (hashcmp(istate->sha1, sha1))
+       if (hashcmp(istate->sha1, hash))
                goto out;
 
        close(fd);
@@ -2233,8 +2242,9 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile
 static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
                          int strip_extensions)
 {
+       uint64_t start = getnanotime();
        int newfd = tempfile->fd;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        struct cache_header hdr;
        int i, err = 0, removed, extended, hdr_version;
        struct cache_entry **cache = istate->cache;
@@ -2272,7 +2282,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
        hdr.hdr_version = htonl(hdr_version);
        hdr.hdr_entries = htonl(entries - removed);
 
-       git_SHA1_Init(&c);
+       the_hash_algo->init_fn(&c);
        if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
                return -1;
 
@@ -2373,6 +2383,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
                return -1;
        istate->timestamp.sec = (unsigned int)st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
+       trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
        return 0;
 }
 
@@ -2531,6 +2542,12 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
        int new_shared_index, ret;
        struct split_index *si = istate->split_index;
 
+       if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
+               if (flags & COMMIT_LOCK)
+                       rollback_lock_file(lock);
+               return 0;
+       }
+
        if (istate->fsmonitor_last_update)
                fill_fsmonitor_bitmap(istate);
 
@@ -2676,7 +2693,7 @@ void *read_blob_data_from_index(const struct index_state *istate,
        }
        if (pos < 0)
                return NULL;
-       data = read_sha1_file(istate->cache[pos]->oid.hash, &type, &sz);
+       data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return NULL;
index f9e25aea7a97e18b5723c8fa379da859d6dc9fcf..9a333e21b51a0583415e23f905876291dd2ef349 100644 (file)
@@ -529,12 +529,12 @@ static void end_align_handler(struct ref_formatting_stack **stack)
 
 static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
 {
-       struct ref_formatting_stack *new;
+       struct ref_formatting_stack *new_stack;
 
        push_stack_element(&state->stack);
-       new = state->stack;
-       new->at_end = end_align_handler;
-       new->at_end_data = &atomv->atom->u.align;
+       new_stack = state->stack;
+       new_stack->at_end = end_align_handler;
+       new_stack->at_end_data = &atomv->atom->u.align;
 }
 
 static void if_then_else_handler(struct ref_formatting_stack **stack)
@@ -574,16 +574,16 @@ static void if_then_else_handler(struct ref_formatting_stack **stack)
 
 static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
 {
-       struct ref_formatting_stack *new;
+       struct ref_formatting_stack *new_stack;
        struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1);
 
        if_then_else->str = atomv->atom->u.if_then_else.str;
        if_then_else->cmp_status = atomv->atom->u.if_then_else.cmp_status;
 
        push_stack_element(&state->stack);
-       new = state->stack;
-       new->at_end = if_then_else_handler;
-       new->at_end_data = if_then_else;
+       new_stack = state->stack;
+       new_stack->at_end = if_then_else_handler;
+       new_stack->at_end_data = if_then_else;
 }
 
 static int is_empty(const char *s)
@@ -728,7 +728,7 @@ int verify_ref_format(struct ref_format *format)
 static void *get_obj(const struct object_id *oid, struct object **obj, unsigned long *sz, int *eaten)
 {
        enum object_type type;
-       void *buf = read_sha1_file(oid->hash, &type, sz);
+       void *buf = read_object_file(oid, &type, sz);
 
        if (buf)
                *obj = parse_object_buffer(oid, type, *sz, buf, eaten);
@@ -737,18 +737,18 @@ static void *get_obj(const struct object_id *oid, struct object **obj, unsigned
        return buf;
 }
 
-static int grab_objectname(const char *name, const unsigned char *sha1,
+static int grab_objectname(const char *name, const struct object_id *oid,
                           struct atom_value *v, struct used_atom *atom)
 {
        if (starts_with(name, "objectname")) {
                if (atom->u.objectname.option == O_SHORT) {
-                       v->s = xstrdup(find_unique_abbrev(sha1, DEFAULT_ABBREV));
+                       v->s = xstrdup(find_unique_abbrev(oid, DEFAULT_ABBREV));
                        return 1;
                } else if (atom->u.objectname.option == O_FULL) {
-                       v->s = xstrdup(sha1_to_hex(sha1));
+                       v->s = xstrdup(oid_to_hex(oid));
                        return 1;
                } else if (atom->u.objectname.option == O_LENGTH) {
-                       v->s = xstrdup(find_unique_abbrev(sha1, atom->u.objectname.length));
+                       v->s = xstrdup(find_unique_abbrev(oid, atom->u.objectname.length));
                        return 1;
                } else
                        die("BUG: unknown %%(objectname) option");
@@ -769,13 +769,13 @@ static void grab_common_values(struct atom_value *val, int deref, struct object
                if (deref)
                        name++;
                if (!strcmp(name, "objecttype"))
-                       v->s = typename(obj->type);
+                       v->s = type_name(obj->type);
                else if (!strcmp(name, "objectsize")) {
                        v->value = sz;
                        v->s = xstrfmt("%lu", sz);
                }
                else if (deref)
-                       grab_objectname(name, obj->oid.hash, v, &used_atom[i]);
+                       grab_objectname(name, &obj->oid, v, &used_atom[i]);
        }
 }
 
@@ -795,7 +795,7 @@ static void grab_tag_values(struct atom_value *val, int deref, struct object *ob
                if (!strcmp(name, "tag"))
                        v->s = tag->tag;
                else if (!strcmp(name, "type") && tag->tagged)
-                       v->s = typename(tag->tagged->type);
+                       v->s = type_name(tag->tagged->type);
                else if (!strcmp(name, "object") && tag->tagged)
                        v->s = xstrdup(oid_to_hex(&tag->tagged->oid));
        }
@@ -1249,8 +1249,8 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname,
        if (atom->u.remote_ref.option == RR_REF)
                *s = show_ref(&atom->u.remote_ref.refname, refname);
        else if (atom->u.remote_ref.option == RR_TRACK) {
-               if (stat_tracking_info(branch, &num_ours,
-                                      &num_theirs, NULL)) {
+               if (stat_tracking_info(branch, &num_ours, &num_theirs,
+                                      NULL, AHEAD_BEHIND_FULL) < 0) {
                        *s = xstrdup(msgs.gone);
                } else if (!num_ours && !num_theirs)
                        *s = "";
@@ -1267,8 +1267,8 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname,
                        free((void *)to_free);
                }
        } else if (atom->u.remote_ref.option == RR_TRACKSHORT) {
-               if (stat_tracking_info(branch, &num_ours,
-                                      &num_theirs, NULL))
+               if (stat_tracking_info(branch, &num_ours, &num_theirs,
+                                      NULL, AHEAD_BEHIND_FULL) < 0)
                        return;
 
                if (!num_ours && !num_theirs)
@@ -1354,15 +1354,31 @@ static const char *get_refname(struct used_atom *atom, struct ref_array_item *re
        return show_ref(&atom->u.refname, ref->refname);
 }
 
+static void get_object(struct ref_array_item *ref, const struct object_id *oid,
+                      int deref, struct object **obj)
+{
+       int eaten;
+       unsigned long size;
+       void *buf = get_obj(oid, obj, &size, &eaten);
+       if (!buf)
+               die(_("missing object %s for %s"),
+                   oid_to_hex(oid), ref->refname);
+       if (!*obj)
+               die(_("parse_object_buffer failed on %s for %s"),
+                   oid_to_hex(oid), ref->refname);
+
+       grab_values(ref->value, deref, *obj, buf, size);
+       if (!eaten)
+               free(buf);
+}
+
 /*
  * Parse the object referred by ref, and grab needed value.
  */
 static void populate_value(struct ref_array_item *ref)
 {
-       void *buf;
        struct object *obj;
-       int eaten, i;
-       unsigned long size;
+       int i;
        const struct object_id *tagged;
 
        ref->value = xcalloc(used_atom_cnt, sizeof(struct atom_value));
@@ -1439,7 +1455,7 @@ static void populate_value(struct ref_array_item *ref)
                                v->s = xstrdup(buf + 1);
                        }
                        continue;
-               } else if (!deref && grab_objectname(name, ref->objectname.hash, v, atom)) {
+               } else if (!deref && grab_objectname(name, &ref->objectname, v, atom)) {
                        continue;
                } else if (!strcmp(name, "HEAD")) {
                        if (atom->u.head && !strcmp(ref->refname, atom->u.head))
@@ -1478,22 +1494,12 @@ static void populate_value(struct ref_array_item *ref)
        for (i = 0; i < used_atom_cnt; i++) {
                struct atom_value *v = &ref->value[i];
                if (v->s == NULL)
-                       goto need_obj;
+                       break;
        }
-       return;
-
- need_obj:
-       buf = get_obj(&ref->objectname, &obj, &size, &eaten);
-       if (!buf)
-               die(_("missing object %s for %s"),
-                   oid_to_hex(&ref->objectname), ref->refname);
-       if (!obj)
-               die(_("parse_object_buffer failed on %s for %s"),
-                   oid_to_hex(&ref->objectname), ref->refname);
+       if (used_atom_cnt <= i)
+               return;
 
-       grab_values(ref->value, 0, obj, buf, size);
-       if (!eaten)
-               free(buf);
+       get_object(ref, &ref->objectname, 0, &obj);
 
        /*
         * If there is no atom that wants to know about tagged
@@ -1514,16 +1520,7 @@ static void populate_value(struct ref_array_item *ref)
         * is not consistent with what deref_tag() does
         * which peels the onion to the core.
         */
-       buf = get_obj(tagged, &obj, &size, &eaten);
-       if (!buf)
-               die(_("missing object %s for %s"),
-                   oid_to_hex(tagged), ref->refname);
-       if (!obj)
-               die(_("parse_object_buffer failed on %s for %s"),
-                   oid_to_hex(tagged), ref->refname);
-       grab_values(ref->value, 1, obj, buf, size);
-       if (!eaten)
-               free(buf);
+       get_object(ref, tagged, 1, &obj);
 }
 
 /*
diff --git a/refs.c b/refs.c
index 20ba82b4343ff2ef72cea32deec8a8d7fbd6def7..10f69da2b859140e5d9bc61b395f737b2458dc6b 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -13,6 +13,7 @@
 #include "tag.h"
 #include "submodule.h"
 #include "worktree.h"
+#include "argv-array.h"
 
 /*
  * List of all available backends
@@ -301,7 +302,7 @@ enum peel_status peel_object(const struct object_id *name, struct object_id *oid
        struct object *o = lookup_unknown_object(name->hash);
 
        if (o->type == OBJ_NONE) {
-               int type = sha1_object_info(name->hash, NULL);
+               int type = oid_object_info(name, NULL);
                if (type < 0 || !object_as_type(o, type, 0))
                        return PEEL_INVALID;
        }
@@ -501,6 +502,19 @@ int refname_match(const char *abbrev_name, const char *full_name)
        return 0;
 }
 
+/*
+ * Given a 'prefix', expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'.
+ */
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix)
+{
+       const char **p;
+       int len = strlen(prefix);
+
+       for (p = ref_rev_parse_rules; *p; p++)
+               argv_array_pushf(prefixes, *p, len, prefix);
+}
+
 /*
  * *string and *len will only be substituted, and *string returned (for
  * later free()ing) if the string passed in is a magic short-hand form
diff --git a/refs.h b/refs.h
index 01be5ae32fb01298ff6c0738ac4adfe42643b682..93b6dce9444d2fe96e220a9462be028132c7d12e 100644 (file)
--- a/refs.h
+++ b/refs.h
@@ -139,6 +139,13 @@ int resolve_gitlink_ref(const char *submodule, const char *refname,
  */
 int refname_match(const char *abbrev_name, const char *full_name);
 
+/*
+ * Given a 'prefix', expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'.
+ */
+struct argv_array;
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix);
+
 int expand_ref(const char *str, int len, struct object_id *oid, char **ref);
 int dwim_ref(const char *str, int len, struct object_id *oid, char **ref);
 int dwim_log(const char *str, int len, struct object_id *oid, char **ref);
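
expand_ref_prefix() runs a short name through the same ref_rev_parse_rules table that dwim_ref() uses, so a name like "master" expands to candidates such as "refs/heads/master" and "refs/tags/master" (the exact list comes from the table and is not repeated here). A usage sketch assuming git's argv-array.h (show_prefixes() is hypothetical):

    #include "cache.h"
    #include "refs.h"
    #include "argv-array.h"

    static void show_prefixes(const char *short_name)
    {
        struct argv_array prefixes = ARGV_ARRAY_INIT;
        int i;

        expand_ref_prefix(&prefixes, short_name);
        for (i = 0; i < prefixes.argc; i++)
            fprintf(stderr, "%s\n", prefixes.argv[i]);
        argv_array_clear(&prefixes);
    }
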
index 023243fd5f1833f3c5f0b6fd3cd82b2e0c69644e..65288c647278aa27790b13c0360f756686dadf7a 100644 (file)
@@ -68,17 +68,21 @@ struct snapshot {
        int mmapped;
 
        /*
-        * The contents of the `packed-refs` file. If the file was
-        * already sorted, this points at the mmapped contents of the
-        * file. If not, this points at heap-allocated memory
-        * containing the contents, sorted. If there were no contents
-        * (e.g., because the file didn't exist), `buf` and `eof` are
-        * both NULL.
+        * The contents of the `packed-refs` file:
+        *
+        * - buf -- a pointer to the start of the memory
+        * - start -- a pointer to the first byte of actual references
+        *   (i.e., after the header line, if one is present)
+        * - eof -- a pointer just past the end of the reference
+        *   contents
+        *
+        * If the `packed-refs` file was already sorted, `buf` points
+        * at the mmapped contents of the file. If not, it points at
+        * heap-allocated memory containing the contents, sorted. If
+        * there were no contents (e.g., because the file didn't
+        * exist), `buf`, `start`, and `eof` are all NULL.
         */
-       char *buf, *eof;
-
-       /* The size of the header line, if any; otherwise, 0: */
-       size_t header_len;
+       char *buf, *start, *eof;
 
        /*
         * What is the peeled state of the `packed-refs` file that
@@ -169,8 +173,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot)
        } else {
                free(snapshot->buf);
        }
-       snapshot->buf = snapshot->eof = NULL;
-       snapshot->header_len = 0;
+       snapshot->buf = snapshot->start = snapshot->eof = NULL;
 }
 
 /*
@@ -319,13 +322,14 @@ static void sort_snapshot(struct snapshot *snapshot)
        size_t len, i;
        char *new_buffer, *dst;
 
-       pos = snapshot->buf + snapshot->header_len;
+       pos = snapshot->start;
        eof = snapshot->eof;
-       len = eof - pos;
 
-       if (!len)
+       if (pos == eof)
                return;
 
+       len = eof - pos;
+
        /*
         * Initialize records based on a crude estimate of the number
         * of references in the file (we'll grow it below if needed):
@@ -391,9 +395,8 @@ static void sort_snapshot(struct snapshot *snapshot)
         * place:
         */
        clear_snapshot_buffer(snapshot);
-       snapshot->buf = new_buffer;
+       snapshot->buf = snapshot->start = new_buffer;
        snapshot->eof = new_buffer + len;
-       snapshot->header_len = 0;
 
 cleanup:
        free(records);
@@ -442,23 +445,26 @@ static const char *find_end_of_record(const char *p, const char *end)
  */
 static void verify_buffer_safe(struct snapshot *snapshot)
 {
-       const char *buf = snapshot->buf + snapshot->header_len;
+       const char *start = snapshot->start;
        const char *eof = snapshot->eof;
        const char *last_line;
 
-       if (buf == eof)
+       if (start == eof)
                return;
 
-       last_line = find_start_of_record(buf, eof - 1);
+       last_line = find_start_of_record(start, eof - 1);
        if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
                die_invalid_line(snapshot->refs->path,
                                 last_line, eof - last_line);
 }
 
+#define SMALL_FILE_SIZE (32*1024)
+
 /*
  * Depending on `mmap_strategy`, either mmap or read the contents of
  * the `packed-refs` file into the snapshot. Return 1 if the file
- * existed and was read, or 0 if the file was absent. Die on errors.
+ * existed and was read, or 0 if the file was absent or empty. Die on
+ * errors.
  */
 static int load_contents(struct snapshot *snapshot)
 {
@@ -489,24 +495,23 @@ static int load_contents(struct snapshot *snapshot)
                die_errno("couldn't stat %s", snapshot->refs->path);
        size = xsize_t(st.st_size);
 
-       switch (mmap_strategy) {
-       case MMAP_NONE:
+       if (!size) {
+               return 0;
+       } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
                snapshot->buf = xmalloc(size);
                bytes_read = read_in_full(fd, snapshot->buf, size);
                if (bytes_read < 0 || bytes_read != size)
                        die_errno("couldn't read %s", snapshot->refs->path);
-               snapshot->eof = snapshot->buf + size;
                snapshot->mmapped = 0;
-               break;
-       case MMAP_TEMPORARY:
-       case MMAP_OK:
+       } else {
                snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
-               snapshot->eof = snapshot->buf + size;
                snapshot->mmapped = 1;
-               break;
        }
        close(fd);
 
+       snapshot->start = snapshot->buf;
+       snapshot->eof = snapshot->buf + size;
+
        return 1;
 }
 
@@ -515,9 +520,11 @@ static int load_contents(struct snapshot *snapshot)
  * `refname` starts. If `mustexist` is true and the reference doesn't
  * exist, then return NULL. If `mustexist` is false and the reference
  * doesn't exist, then return the point where that reference would be
- * inserted. In the latter mode, `refname` doesn't have to be a proper
- * reference name; for example, one could search for "refs/replace/"
- * to find the start of any replace references.
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
  *
  * The record is sought using a binary search, so `snapshot->buf` must
  * be sorted.
@@ -539,7 +546,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
         * preceding records all have reference names that come
         * *before* `refname`.
         */
-       const char *lo = snapshot->buf + snapshot->header_len;
+       const char *lo = snapshot->start;
 
        /*
         * A pointer to the first character of a record whose
@@ -547,7 +554,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
         */
        const char *hi = snapshot->eof;
 
-       while (lo < hi) {
+       while (lo != hi) {
                const char *mid, *rec;
                int cmp;
 
@@ -616,9 +623,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
 
        /* If the file has a header line, process it: */
        if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
-               struct strbuf tmp = STRBUF_INIT;
-               char *p;
-               const char *eol;
+               char *tmp, *p, *eol;
                struct string_list traits = STRING_LIST_INIT_NODUP;
 
                eol = memchr(snapshot->buf, '\n',
@@ -628,9 +633,9 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
                                              snapshot->buf,
                                              snapshot->eof - snapshot->buf);
 
-               strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
+               tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
 
-               if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+               if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
                        die_invalid_line(refs->path,
                                         snapshot->buf,
                                         snapshot->eof - snapshot->buf);
@@ -647,10 +652,10 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
                /* perhaps other traits later as well */
 
                /* The "+ 1" is for the LF character. */
-               snapshot->header_len = eol + 1 - snapshot->buf;
+               snapshot->start = eol + 1;
 
                string_list_clear(&traits, 0);
-               strbuf_release(&tmp);
+               free(tmp);
        }
 
        verify_buffer_safe(snapshot);
@@ -671,13 +676,12 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
                 * We don't want to leave the file mmapped, so we are
                 * forced to make a copy now:
                 */
-               size_t size = snapshot->eof -
-                       (snapshot->buf + snapshot->header_len);
+               size_t size = snapshot->eof - snapshot->start;
                char *buf_copy = xmalloc(size);
 
-               memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+               memcpy(buf_copy, snapshot->start, size);
                clear_snapshot_buffer(snapshot);
-               snapshot->buf = buf_copy;
+               snapshot->buf = snapshot->start = buf_copy;
                snapshot->eof = buf_copy + size;
        }
 
@@ -924,7 +928,12 @@ static struct ref_iterator *packed_ref_iterator_begin(
         */
        snapshot = get_snapshot(refs);
 
-       if (!snapshot->buf)
+       if (prefix && *prefix)
+               start = find_reference_location(snapshot, prefix, 0);
+       else
+               start = snapshot->start;
+
+       if (start == snapshot->eof)
                return empty_ref_iterator_begin();
 
        iter = xcalloc(1, sizeof(*iter));
@@ -934,11 +943,6 @@ static struct ref_iterator *packed_ref_iterator_begin(
        iter->snapshot = snapshot;
        acquire_snapshot(snapshot);
 
-       if (prefix && *prefix)
-               start = find_reference_location(snapshot, prefix, 0);
-       else
-               start = snapshot->buf + snapshot->header_len;
-
        iter->pos = start;
        iter->eof = snapshot->eof;
        strbuf_init(&iter->refname_buf, 0);
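
For orientation, the buf/start/eof pointers introduced above carve up a sorted packed-refs file of roughly the following shape, where buf points at the first byte, start at the first reference line (just past the optional header), and eof one past the last byte. The object names below are placeholders:

    # pack-refs with: peeled fully-peeled sorted
    1111111111111111111111111111111111111111 refs/heads/master
    2222222222222222222222222222222222222222 refs/tags/v1.0
    ^3333333333333333333333333333333333333333

With this layout, find_reference_location() binary-searches only the start..eof region, and an empty or header-only file simply ends up with start == eof, which the iterator code above turns into an empty iterator.
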
index 6ec535243565d4b1a50e22868966b4aec33776f8..4f779eee54b0836a19131c22d53d143259bd83c2 100644 (file)
@@ -1,6 +1,7 @@
 #include "cache.h"
 #include "config.h"
 #include "remote.h"
+#include "connect.h"
 #include "strbuf.h"
 #include "walker.h"
 #include "http.h"
@@ -13,6 +14,8 @@
 #include "credential.h"
 #include "sha1-array.h"
 #include "send-pack.h"
+#include "protocol.h"
+#include "quote.h"
 
 static struct remote *remote;
 /* always ends with a trailing slash */
@@ -145,7 +148,15 @@ static int set_option(const char *name, const char *value)
                        return -1;
                return 0;
        } else if (!strcmp(name, "push-option")) {
-               string_list_append(&options.push_options, value);
+               if (*value != '"')
+                       string_list_append(&options.push_options, value);
+               else {
+                       struct strbuf unquoted = STRBUF_INIT;
+                       if (unquote_c_style(&unquoted, value, NULL) < 0)
+                               die("invalid quoting in push-option value");
+                       string_list_append_nodup(&options.push_options,
+                                                strbuf_detach(&unquoted, NULL));
+               }
                return 0;
 
 #if LIBCURL_VERSION_NUM >= 0x070a08
@@ -175,12 +186,13 @@ static int set_option(const char *name, const char *value)
 }
 
 struct discovery {
-       const char *service;
+       char *service;
        char *buf_alloc;
        char *buf;
        size_t len;
        struct ref *refs;
        struct oid_array shallow;
+       enum protocol_version version;
        unsigned proto_git : 1;
 };
 static struct discovery *last_discovery;
@@ -188,8 +200,31 @@ static struct discovery *last_discovery;
 static struct ref *parse_git_refs(struct discovery *heads, int for_push)
 {
        struct ref *list = NULL;
-       get_remote_heads(-1, heads->buf, heads->len, &list,
-                        for_push ? REF_NORMAL : 0, NULL, &heads->shallow);
+       struct packet_reader reader;
+
+       packet_reader_init(&reader, -1, heads->buf, heads->len,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       heads->version = discover_version(&reader);
+       switch (heads->version) {
+       case protocol_v2:
+               /*
+                * Do nothing.  This isn't a list of refs but rather a
+                * capability advertisement.  The client would have run
+                * 'stateless-connect', so we'll dump this capability listing
+                * and let it request the refs itself.
+                */
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &list, for_push ? REF_NORMAL : 0,
+                                NULL, &heads->shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
+
        return list;
 }
 
@@ -250,6 +285,7 @@ static void free_discovery(struct discovery *d)
                free(d->shallow.oid);
                free(d->buf_alloc);
                free_refs(d->refs);
+               free(d->service);
                free(d);
        }
 }
@@ -281,6 +317,19 @@ static int show_http_message(struct strbuf *type, struct strbuf *charset,
        return 0;
 }
 
+static int get_protocol_http_header(enum protocol_version version,
+                                   struct strbuf *header)
+{
+       if (version > 0) {
+               strbuf_addf(header, GIT_PROTOCOL_HEADER ": version=%d",
+                           version);
+
+               return 1;
+       }
+
+       return 0;
+}
+
 static struct discovery *discover_refs(const char *service, int for_push)
 {
        struct strbuf exp = STRBUF_INIT;
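get_protocol_http_header() above just renders the requested wire-protocol version as an extra HTTP header. A small sketch of its effect, assuming the usual enum protocol_version values from protocol.h (GIT_PROTOCOL_HEADER expands to "Git-Protocol"):

	struct strbuf header = STRBUF_INIT;

	/* yields "Git-Protocol: version=2"; protocol_v0 adds no header */
	if (get_protocol_http_header(protocol_v2, &header))
		string_list_append(&extra_headers, header.buf);
	strbuf_release(&header);
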
@@ -289,9 +338,12 @@ static struct discovery *discover_refs(const char *service, int for_push)
        struct strbuf buffer = STRBUF_INIT;
        struct strbuf refs_url = STRBUF_INIT;
        struct strbuf effective_url = STRBUF_INIT;
+       struct strbuf protocol_header = STRBUF_INIT;
+       struct string_list extra_headers = STRING_LIST_INIT_DUP;
        struct discovery *last = last_discovery;
        int http_ret, maybe_smart = 0;
        struct http_get_options http_options;
+       enum protocol_version version = get_protocol_version_config();
 
        if (last && !strcmp(service, last->service))
                return last;
@@ -308,11 +360,24 @@ static struct discovery *discover_refs(const char *service, int for_push)
                strbuf_addf(&refs_url, "service=%s", service);
        }
 
+       /*
+        * NEEDSWORK: If we are trying to use protocol v2 and we are planning
+        * to perform a push, then fall back to v0 since the client does not
+        * yet know how to push using v2.
+        */
+       if (version == protocol_v2 && !strcmp("git-receive-pack", service))
+               version = protocol_v0;
+
+       /* Add the extra Git-Protocol header */
+       if (get_protocol_http_header(version, &protocol_header))
+               string_list_append(&extra_headers, protocol_header.buf);
+
        memset(&http_options, 0, sizeof(http_options));
        http_options.content_type = &type;
        http_options.charset = &charset;
        http_options.effective_url = &effective_url;
        http_options.base_url = &url;
+       http_options.extra_headers = &extra_headers;
        http_options.initial_request = 1;
        http_options.no_cache = 1;
        http_options.keep_error = 1;
@@ -336,7 +401,7 @@ static struct discovery *discover_refs(const char *service, int for_push)
                warning(_("redirecting to %s"), url.buf);
 
        last= xcalloc(1, sizeof(*last_discovery));
-       last->service = service;
+       last->service = xstrdup(service);
        last->buf_alloc = strbuf_detach(&buffer, &last->len);
        last->buf = last->buf_alloc;
 
@@ -351,6 +416,8 @@ static struct discovery *discover_refs(const char *service, int for_push)
                 * pkt-line matches our request.
                 */
                line = packet_read_line_buf(&last->buf, &last->len, NULL);
+               if (!line)
+                       die("invalid server response; expected service, got flush packet");
 
                strbuf_reset(&exp);
                strbuf_addf(&exp, "# service=%s", service);
@@ -366,6 +433,9 @@ static struct discovery *discover_refs(const char *service, int for_push)
                        ;
 
                last->proto_git = 1;
+       } else if (maybe_smart &&
+                  last->len > 5 && starts_with(last->buf + 4, "version 2")) {
+               last->proto_git = 1;
        }
 
        if (last->proto_git)
@@ -379,6 +449,8 @@ static struct discovery *discover_refs(const char *service, int for_push)
        strbuf_release(&charset);
        strbuf_release(&effective_url);
        strbuf_release(&buffer);
+       strbuf_release(&protocol_header);
+       string_list_clear(&extra_headers, 0);
        last_discovery = last;
        return last;
 }
@@ -415,6 +487,7 @@ struct rpc_state {
        char *service_url;
        char *hdr_content_type;
        char *hdr_accept;
+       char *protocol_header;
        char *buf;
        size_t alloc;
        size_t len;
@@ -601,6 +674,10 @@ static int post_rpc(struct rpc_state *rpc)
        headers = curl_slist_append(headers, needs_100_continue ?
                "Expect: 100-continue" : "Expect:");
 
+       /* Add the extra Git-Protocol header */
+       if (rpc->protocol_header)
+               headers = curl_slist_append(headers, rpc->protocol_header);
+
 retry:
        slot = get_active_slot();
 
@@ -741,6 +818,11 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads)
        strbuf_addf(&buf, "Accept: application/x-%s-result", svc);
        rpc->hdr_accept = strbuf_detach(&buf, NULL);
 
+       if (get_protocol_http_header(heads->version, &buf))
+               rpc->protocol_header = strbuf_detach(&buf, NULL);
+       else
+               rpc->protocol_header = NULL;
+
        while (!err) {
                int n = packet_read(rpc->out, NULL, NULL, rpc->buf, rpc->alloc, 0);
                if (!n)
@@ -768,6 +850,7 @@ static int rpc_service(struct rpc_state *rpc, struct discovery *heads)
        free(rpc->service_url);
        free(rpc->hdr_content_type);
        free(rpc->hdr_accept);
+       free(rpc->protocol_header);
        free(rpc->buf);
        strbuf_release(&buf);
        return err;
@@ -1045,6 +1128,202 @@ static void parse_push(struct strbuf *buf)
        free(specs);
 }
 
+/*
+ * Used to represent the state of a connection to an HTTP server when
+ * communicating using git's wire-protocol version 2.
+ */
+struct proxy_state {
+       char *service_name;
+       char *service_url;
+       struct curl_slist *headers;
+       struct strbuf request_buffer;
+       int in;
+       int out;
+       struct packet_reader reader;
+       size_t pos;
+       int seen_flush;
+};
+
+static void proxy_state_init(struct proxy_state *p, const char *service_name,
+                            enum protocol_version version)
+{
+       struct strbuf buf = STRBUF_INIT;
+
+       memset(p, 0, sizeof(*p));
+       p->service_name = xstrdup(service_name);
+
+       p->in = 0;
+       p->out = 1;
+       strbuf_init(&p->request_buffer, 0);
+
+       strbuf_addf(&buf, "%s%s", url.buf, p->service_name);
+       p->service_url = strbuf_detach(&buf, NULL);
+
+       p->headers = http_copy_default_headers();
+
+       strbuf_addf(&buf, "Content-Type: application/x-%s-request", p->service_name);
+       p->headers = curl_slist_append(p->headers, buf.buf);
+       strbuf_reset(&buf);
+
+       strbuf_addf(&buf, "Accept: application/x-%s-result", p->service_name);
+       p->headers = curl_slist_append(p->headers, buf.buf);
+       strbuf_reset(&buf);
+
+       p->headers = curl_slist_append(p->headers, "Transfer-Encoding: chunked");
+
+       /* Add the Git-Protocol header */
+       if (get_protocol_http_header(version, &buf))
+               p->headers = curl_slist_append(p->headers, buf.buf);
+
+       packet_reader_init(&p->reader, p->in, NULL, 0,
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       strbuf_release(&buf);
+}
+
+static void proxy_state_clear(struct proxy_state *p)
+{
+       free(p->service_name);
+       free(p->service_url);
+       curl_slist_free_all(p->headers);
+       strbuf_release(&p->request_buffer);
+}
+
+/*
+ * CURLOPT_READFUNCTION callback function.
+ * Attempts to copy over a single packet-line at a time into the
+ * curl provided buffer.
+ */
+static size_t proxy_in(char *buffer, size_t eltsize,
+                      size_t nmemb, void *userdata)
+{
+       size_t max;
+       struct proxy_state *p = userdata;
+       size_t avail = p->request_buffer.len - p->pos;
+
+
+       if (eltsize != 1)
+               BUG("curl read callback called with size = %"PRIuMAX" != 1",
+                   (uintmax_t)eltsize);
+       max = nmemb;
+
+       if (!avail) {
+               if (p->seen_flush) {
+                       p->seen_flush = 0;
+                       return 0;
+               }
+
+               strbuf_reset(&p->request_buffer);
+               switch (packet_reader_read(&p->reader)) {
+               case PACKET_READ_EOF:
+                       die("unexpected EOF when reading from parent process");
+               case PACKET_READ_NORMAL:
+                       packet_buf_write_len(&p->request_buffer, p->reader.line,
+                                            p->reader.pktlen);
+                       break;
+               case PACKET_READ_DELIM:
+                       packet_buf_delim(&p->request_buffer);
+                       break;
+               case PACKET_READ_FLUSH:
+                       packet_buf_flush(&p->request_buffer);
+                       p->seen_flush = 1;
+                       break;
+               }
+               p->pos = 0;
+               avail = p->request_buffer.len;
+       }
+
+       if (max < avail)
+               avail = max;
+       memcpy(buffer, p->request_buffer.buf + p->pos, avail);
+       p->pos += avail;
+       return avail;
+}
+
+static size_t proxy_out(char *buffer, size_t eltsize,
+                       size_t nmemb, void *userdata)
+{
+       size_t size;
+       struct proxy_state *p = userdata;
+
+       if (eltsize != 1)
+               BUG("curl read callback called with size = %"PRIuMAX" != 1",
+                   (uintmax_t)eltsize);
+       size = nmemb;
+
+       write_or_die(p->out, buffer, size);
+       return size;
+}
+
+/* Issues a request to the HTTP server configured in `p` */
+static int proxy_request(struct proxy_state *p)
+{
+       struct active_request_slot *slot;
+
+       slot = get_active_slot();
+
+       curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0);
+       curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, p->service_url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, p->headers);
+
+       /* Setup function to read request from client */
+       curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, proxy_in);
+       curl_easy_setopt(slot->curl, CURLOPT_READDATA, p);
+
+       /* Setup function to write server response to client */
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, proxy_out);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, p);
+
+       if (run_slot(slot, NULL) != HTTP_OK)
+               return -1;
+
+       return 0;
+}
+
+static int stateless_connect(const char *service_name)
+{
+       struct discovery *discover;
+       struct proxy_state p;
+
+       /*
+        * Run the info/refs request and see if the server supports protocol
+        * v2.  If and only if the server supports v2 can we successfully
+        * establish a stateless connection; otherwise we need to tell the
+        * client to fall back to the other transport helper functions to
+        * complete its request.
+        */
+       discover = discover_refs(service_name, 0);
+       if (discover->version != protocol_v2) {
+               printf("fallback\n");
+               fflush(stdout);
+               return -1;
+       } else {
+               /* Stateless Connection established */
+               printf("\n");
+               fflush(stdout);
+       }
+
+       proxy_state_init(&p, service_name, discover->version);
+
+       /*
+        * Dump the capability listing that we got from the server earlier
+        * during the info/refs request.
+        */
+       write_or_die(p.out, discover->buf, discover->len);
+
+       /* Peek the next packet line.  Until we see EOF keep sending POSTs */
+       while (packet_reader_peek(&p.reader) != PACKET_READ_EOF) {
+               if (proxy_request(&p)) {
+                       /* an error occurred during the request; stop proxying */
+                       break;
+               }
+       }
+
+       proxy_state_clear(&p);
+       return 0;
+}
+
 int cmd_main(int argc, const char **argv)
 {
        struct strbuf buf = STRBUF_INIT;
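proxy_in() above implements curl's CURLOPT_READFUNCTION contract: curl calls it repeatedly to pull bytes of the request body, and returning 0 terminates the (chunked) upload. A stripped-down sketch of that contract without the pkt-line framing (the callback and its strbuf source are hypothetical):

static size_t read_cb(char *buffer, size_t eltsize, size_t nmemb, void *userdata)
{
	struct strbuf *body = userdata;		/* bytes still to upload */
	size_t max = eltsize * nmemb;
	size_t n = body->len < max ? body->len : max;

	memcpy(buffer, body->buf, n);
	strbuf_remove(body, 0, n);		/* drop what curl consumed */
	return n;				/* 0 once drained: end of body */
}
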
@@ -1113,12 +1392,16 @@ int cmd_main(int argc, const char **argv)
                        fflush(stdout);
 
                } else if (!strcmp(buf.buf, "capabilities")) {
+                       printf("stateless-connect\n");
                        printf("fetch\n");
                        printf("option\n");
                        printf("push\n");
                        printf("check-connectivity\n");
                        printf("\n");
                        fflush(stdout);
+               } else if (skip_prefix(buf.buf, "stateless-connect ", &arg)) {
+                       if (!stateless_connect(arg))
+                               break;
                } else {
                        error("remote-curl: unknown command '%s' from git", buf.buf);
                        return 1;
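Taken together, the new capability and command give roughly this helper dialogue; a sketch based only on what this patch prints (the service name shown is the usual fetch service):

/*
 * git:    capabilities
 * helper: stateless-connect
 *         fetch
 *         option
 *         push
 *         check-connectivity
 *         <blank line>
 * git:    stateless-connect git-upload-pack
 * helper: <blank line>     (server speaks v2; raw v2 packets are proxied)
 *     or
 * helper: fallback         (server does not speak v2; use fetch/push instead)
 */
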
index bcebb4c789567eb4017a3a0132ba55c59c427991..c4bb9a8ba920c1b344b910e7f6e59915044f4f9a 100644 (file)
@@ -61,7 +61,7 @@ static char *read_ref_note(const struct object_id *oid)
        init_notes(NULL, notes_ref, NULL, 0);
        if (!(note_oid = get_note(NULL, oid)))
                return NULL;    /* note tree not found */
-       if (!(msg = read_sha1_file(note_oid->hash, &type, &msglen)))
+       if (!(msg = read_object_file(note_oid, &type, &msglen)))
                error("Empty notes tree. %s", notes_ref);
        else if (!msglen || type != OBJ_BLOB) {
                error("Note contains unusable content. "
@@ -108,7 +108,7 @@ static int note2mark_cb(const struct object_id *object_oid,
        enum object_type type;
        struct rev_note note;
 
-       if (!(msg = read_sha1_file(note_oid->hash, &type, &msglen)) ||
+       if (!(msg = read_object_file(note_oid, &type, &msglen)) ||
                        !msglen || type != OBJ_BLOB) {
                free(msg);
                return 1;
index 4e93753e1988afd4a01559951f96142c6dc2e73d..91eb010ca983c5493bbc17c5652ef31060390226 100644 (file)
--- a/remote.c
+++ b/remote.c
@@ -22,6 +22,7 @@ static struct refspec s_tag_refspec = {
        "refs/tags/*"
 };
 
+/* See TAG_REFSPEC for the string version */
 const struct refspec *tag_refspec = &s_tag_refspec;
 
 struct counted_string {
@@ -103,6 +104,17 @@ static void add_fetch_refspec(struct remote *remote, const char *ref)
        remote->fetch_refspec[remote->fetch_refspec_nr++] = ref;
 }
 
+void add_prune_tags_to_fetch_refspec(struct remote *remote)
+{
+       int nr = remote->fetch_refspec_nr;
+       int bufsize = nr + 1;
+       int size = sizeof(struct refspec);
+
+       remote->fetch = xrealloc(remote->fetch, size * bufsize);
+       memcpy(&remote->fetch[nr], tag_refspec, size);
+       add_fetch_refspec(remote, xstrdup(TAG_REFSPEC));
+}
+
 static void add_url(struct remote *remote, const char *url)
 {
        ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc);
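add_prune_tags_to_fetch_refspec() grows both the parsed refspec array and its string form with the tag refspec, so the remote behaves as if "fetch = refs/tags/*:refs/tags/*" (TAG_REFSPEC, declared in remote.h below) were configured. A hedged usage sketch; the remote name and the prune_tags check are simplified examples:

	struct remote *remote = remote_get("origin");	/* example remote */

	if (remote->prune_tags == 1)	/* simplified; real callers also consult fetch config */
		add_prune_tags_to_fetch_refspec(remote);
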
@@ -173,6 +185,7 @@ static struct remote *make_remote(const char *name, int len)
 
        ret = xcalloc(1, sizeof(struct remote));
        ret->prune = -1;  /* unspecified */
+       ret->prune_tags = -1;  /* unspecified */
        ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
        remotes[remotes_nr++] = ret;
        ret->name = xstrndup(name, len);
@@ -391,6 +404,8 @@ static int handle_config(const char *key, const char *value, void *cb)
                remote->skip_default_update = git_config_bool(key, value);
        else if (!strcmp(subkey, "prune"))
                remote->prune = git_config_bool(key, value);
+       else if (!strcmp(subkey, "prunetags"))
+               remote->prune_tags = git_config_bool(key, value);
        else if (!strcmp(subkey, "url")) {
                const char *v;
                if (git_config_string(&v, key, value))
@@ -1361,7 +1376,7 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds
                        continue; /* not a tag */
                if (string_list_has_string(&dst_tag, ref->name))
                        continue; /* they already have it */
-               if (sha1_object_info(ref->new_oid.hash, NULL) != OBJ_TAG)
+               if (oid_object_info(&ref->new_oid, NULL) != OBJ_TAG)
                        continue; /* be conservative */
                item = string_list_append(&src_tag, ref->name);
                item->util = ref;
@@ -1970,33 +1985,33 @@ static void unmark_and_free(struct commit_list *list, unsigned int mark)
 int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
 {
        struct object *o;
-       struct commit *old, *new;
+       struct commit *old_commit, *new_commit;
        struct commit_list *list, *used;
        int found = 0;
 
        /*
-        * Both new and old must be commit-ish and new is descendant of
-        * old.  Otherwise we require --force.
+        * Both new_commit and old_commit must be commit-ish and new_commit
+        * is a descendant of old_commit.  Otherwise we require --force.
         */
        o = deref_tag(parse_object(old_oid), NULL, 0);
        if (!o || o->type != OBJ_COMMIT)
                return 0;
-       old = (struct commit *) o;
+       old_commit = (struct commit *) o;
 
        o = deref_tag(parse_object(new_oid), NULL, 0);
        if (!o || o->type != OBJ_COMMIT)
                return 0;
-       new = (struct commit *) o;
+       new_commit = (struct commit *) o;
 
-       if (parse_commit(new) < 0)
+       if (parse_commit(new_commit) < 0)
                return 0;
 
        used = list = NULL;
-       commit_list_insert(new, &list);
+       commit_list_insert(new_commit, &list);
        while (list) {
-               new = pop_most_recent_commit(&list, TMP_MARK);
-               commit_list_insert(new, &used);
-               if (new == old) {
+               new_commit = pop_most_recent_commit(&list, TMP_MARK);
+               commit_list_insert(new_commit, &used);
+               if (new_commit == old_commit) {
                        found = 1;
                        break;
                }
@@ -2007,16 +2022,23 @@ int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
 }
 
 /*
- * Compare a branch with its upstream, and save their differences (number
- * of commits) in *num_ours and *num_theirs. The name of the upstream branch
- * (or NULL if no upstream is defined) is returned via *upstream_name, if it
- * is not itself NULL.
+ * Look up the upstream branch for the given branch and, if present,
+ * optionally compute the commit ahead/behind values for the pair.
+ *
+ * If abf is AHEAD_BEHIND_FULL, compute the full ahead/behind and return the
+ * counts in *num_ours and *num_theirs.  If abf is AHEAD_BEHIND_QUICK, skip
+ * the (potentially expensive) a/b computation (*num_ours and *num_theirs are
+ * set to zero).
+ *
+ * The name of the upstream branch (or NULL if no upstream is defined) is
+ * returned via *upstream_name, if it is not itself NULL.
  *
  * Returns -1 if num_ours and num_theirs could not be filled in (e.g., no
- * upstream defined, or ref does not exist), 0 otherwise.
+ * upstream defined, or ref does not exist).  Returns 0 if the commits are
+ * identical.  Returns 1 if commits are different.
  */
 int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
-                      const char **upstream_name)
+                      const char **upstream_name, enum ahead_behind_flags abf)
 {
        struct object_id oid;
        struct commit *ours, *theirs;
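A sketch of a caller that only needs to know whether a branch and its upstream differ, following the contract spelled out above (branch is assumed to come from branch_get()):

	int ours, theirs;
	const char *upstream;
	int sti = stat_tracking_info(branch, &ours, &theirs, &upstream,
				     AHEAD_BEHIND_QUICK);

	if (sti < 0)
		;	/* no upstream configured, or its ref is gone */
	else if (!sti)
		;	/* branch and upstream point at the same commit */
	else
		;	/* they differ; ours/theirs stay 0 in QUICK mode */
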
@@ -2044,11 +2066,15 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
        if (!ours)
                return -1;
 
+       *num_theirs = *num_ours = 0;
+
        /* are we the same? */
-       if (theirs == ours) {
-               *num_theirs = *num_ours = 0;
+       if (theirs == ours)
                return 0;
-       }
+       if (abf == AHEAD_BEHIND_QUICK)
+               return 1;
+       if (abf != AHEAD_BEHIND_FULL)
+               BUG("stat_tracking_info: invalid abf '%d'", abf);
 
        /* Run "rev-list --left-right ours...theirs" internally... */
        argv_array_push(&argv, ""); /* ignored */
@@ -2064,8 +2090,6 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
                die("revision walk setup failed");
 
        /* ... and count the commits on each side. */
-       *num_ours = 0;
-       *num_theirs = 0;
        while (1) {
                struct commit *c = get_revision(&revs);
                if (!c)
@@ -2081,20 +2105,22 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
        clear_commit_marks(theirs, ALL_REV_FLAGS);
 
        argv_array_clear(&argv);
-       return 0;
+       return 1;
 }
 
 /*
  * Return true when there is anything to report, otherwise false.
  */
-int format_tracking_info(struct branch *branch, struct strbuf *sb)
+int format_tracking_info(struct branch *branch, struct strbuf *sb,
+                        enum ahead_behind_flags abf)
 {
-       int ours, theirs;
+       int ours, theirs, sti;
        const char *full_base;
        char *base;
        int upstream_is_gone = 0;
 
-       if (stat_tracking_info(branch, &ours, &theirs, &full_base) < 0) {
+       sti = stat_tracking_info(branch, &ours, &theirs, &full_base, abf);
+       if (sti < 0) {
                if (!full_base)
                        return 0;
                upstream_is_gone = 1;
@@ -2108,10 +2134,17 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb)
                if (advice_status_hints)
                        strbuf_addstr(sb,
                                _("  (use \"git branch --unset-upstream\" to fixup)\n"));
-       } else if (!ours && !theirs) {
+       } else if (!sti) {
                strbuf_addf(sb,
                        _("Your branch is up to date with '%s'.\n"),
                        base);
+       } else if (abf == AHEAD_BEHIND_QUICK) {
+               strbuf_addf(sb,
+                           _("Your branch and '%s' refer to different commits.\n"),
+                           base);
+               if (advice_status_hints)
+                       strbuf_addf(sb, _("  (use \"%s\" for details)\n"),
+                                   "git status --ahead-behind");
        } else if (!theirs) {
                strbuf_addf(sb,
                        Q_("Your branch is ahead of '%s' by %d commit.\n",
index 1f6611be214363a4be363fad959135a9d123cee0..2b3180f94dbc185a315255938c2de6cc6aef0b13 100644 (file)
--- a/remote.h
+++ b/remote.h
@@ -47,6 +47,7 @@ struct remote {
        int skip_default_update;
        int mirror;
        int prune;
+       int prune_tags;
 
        const char *receivepack;
        const char *uploadpack;
@@ -150,10 +151,17 @@ int check_ref_type(const struct ref *ref, int flags);
 void free_refs(struct ref *ref);
 
 struct oid_array;
-extern struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
+struct packet_reader;
+struct argv_array;
+extern struct ref **get_remote_heads(struct packet_reader *reader,
                                     struct ref **list, unsigned int flags,
                                     struct oid_array *extra_have,
-                                    struct oid_array *shallow);
+                                    struct oid_array *shallow_points);
+
+/* Used for protocol v2 in order to retrieve refs from a remote */
+extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+                                   struct ref **list, int for_push,
+                                   const struct argv_array *ref_prefixes);
 
 int resolve_remote_symref(struct ref *ref, struct ref *list);
 int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
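get_remote_heads() now consumes a packet_reader rather than a raw buffer or file descriptor. A minimal sketch of the new calling convention, mirroring parse_git_refs() in remote-curl.c above (fd stands for an already-open connection):

	struct packet_reader reader;
	struct ref *refs = NULL;
	struct oid_array shallow = OID_ARRAY_INIT;

	packet_reader_init(&reader, fd, NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_GENTLE_ON_EOF);
	get_remote_heads(&reader, &refs, 0, NULL, &shallow);
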
@@ -257,10 +265,18 @@ enum match_refs_flags {
        MATCH_REFS_FOLLOW_TAGS  = (1 << 3)
 };
 
+/* Flags for --ahead-behind option. */
+enum ahead_behind_flags {
+       AHEAD_BEHIND_UNSPECIFIED = -1,
+       AHEAD_BEHIND_QUICK       =  0,  /* just eq/neq reporting */
+       AHEAD_BEHIND_FULL        =  1,  /* traditional a/b reporting */
+};
+
 /* Reporting of tracking info */
 int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
-                      const char **upstream_name);
-int format_tracking_info(struct branch *branch, struct strbuf *sb);
+                      const char **upstream_name, enum ahead_behind_flags abf);
+int format_tracking_info(struct branch *branch, struct strbuf *sb,
+                        enum ahead_behind_flags abf);
 
 struct ref *get_local_heads(void);
 /*
@@ -297,4 +313,8 @@ extern int parseopt_push_cas_option(const struct option *, const char *arg, int
 extern int is_empty_cas(const struct push_cas_option *);
 void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *);
 
+#define TAG_REFSPEC "refs/tags/*:refs/tags/*"
+
+void add_prune_tags_to_fetch_refspec(struct remote *remote);
+
 #endif
index 3e49965d050829ad883e571d9b7c4596991aa974..336357394d8b1eac1414fe4577272b0002463a86 100644 (file)
@@ -8,8 +8,8 @@
  * sha1.
  */
 static struct replace_object {
-       unsigned char original[20];
-       unsigned char replacement[20];
+       struct object_id original;
+       struct object_id replacement;
 } **replace_object;
 
 static int replace_object_alloc, replace_object_nr;
@@ -17,7 +17,7 @@ static int replace_object_alloc, replace_object_nr;
 static const unsigned char *replace_sha1_access(size_t index, void *table)
 {
        struct replace_object **replace = table;
-       return replace[index]->original;
+       return replace[index]->original.hash;
 }
 
 static int replace_object_pos(const unsigned char *sha1)
@@ -29,7 +29,7 @@ static int replace_object_pos(const unsigned char *sha1)
 static int register_replace_object(struct replace_object *replace,
                                   int ignore_dups)
 {
-       int pos = replace_object_pos(replace->original);
+       int pos = replace_object_pos(replace->original.hash);
 
        if (0 <= pos) {
                if (ignore_dups)
@@ -59,14 +59,14 @@ static int register_replace_ref(const char *refname,
        const char *hash = slash ? slash + 1 : refname;
        struct replace_object *repl_obj = xmalloc(sizeof(*repl_obj));
 
-       if (strlen(hash) != 40 || get_sha1_hex(hash, repl_obj->original)) {
+       if (get_oid_hex(hash, &repl_obj->original)) {
                free(repl_obj);
                warning("bad replace ref name: %s", refname);
                return 0;
        }
 
        /* Copy sha1 from the read ref */
-       hashcpy(repl_obj->replacement, oid->hash);
+       oidcpy(&repl_obj->replacement, oid);
 
        /* Register new object */
        if (register_replace_object(repl_obj, 1))
@@ -92,16 +92,16 @@ static void prepare_replace_object(void)
 #define MAXREPLACEDEPTH 5
 
 /*
- * If a replacement for object sha1 has been set up, return the
+ * If a replacement for object oid has been set up, return the
  * replacement object's name (replaced recursively, if necessary).
- * The return value is either sha1 or a pointer to a
+ * The return value is either oid or a pointer to a
  * permanently-allocated value.  This function always respects replace
  * references, regardless of the value of check_replace_refs.
  */
-const unsigned char *do_lookup_replace_object(const unsigned char *sha1)
+const struct object_id *do_lookup_replace_object(const struct object_id *oid)
 {
        int pos, depth = MAXREPLACEDEPTH;
-       const unsigned char *cur = sha1;
+       const struct object_id *cur = oid;
 
        prepare_replace_object();
 
@@ -109,11 +109,11 @@ const unsigned char *do_lookup_replace_object(const unsigned char *sha1)
        do {
                if (--depth < 0)
                        die("replace depth too high for object %s",
-                           sha1_to_hex(sha1));
+                           oid_to_hex(oid));
 
-               pos = replace_object_pos(cur);
+               pos = replace_object_pos(cur->hash);
                if (0 <= pos)
-                       cur = replace_object[pos]->replacement;
+                       cur = &replace_object[pos]->replacement;
        } while (0 <= pos);
 
        return cur;
index 79203c6c1eae0db5515030b138e9e40e219af5f5..18cae2d11c9a86aae0ed352a8f7606b142c5c183 100644 (file)
--- a/rerere.c
+++ b/rerere.c
@@ -719,11 +719,9 @@ static void update_paths(struct string_list *update)
                        item->string);
        }
 
-       if (active_cache_changed) {
-               if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
-                       die("Unable to write new index file");
-       } else
-               rollback_lock_file(&index_lock);
+       if (write_locked_index(&the_index, &index_lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
+               die("Unable to write new index file");
 }
 
 static void remove_variant(struct rerere_id *id)
@@ -981,8 +979,8 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
                        break;
                i = ce_stage(ce) - 1;
                if (!mmfile[i].ptr) {
-                       mmfile[i].ptr = read_sha1_file(ce->oid.hash, &type,
-                                                      &size);
+                       mmfile[i].ptr = read_object_file(&ce->oid, &type,
+                                                        &size);
                        mmfile[i].size = size;
                }
        }
index b40f3173d3fe5ef5c06c00ff8994060a9078669d..aed95b4b35fbb187bb96242afcf6b4d8e3d2008b 100644 (file)
@@ -24,7 +24,7 @@ void record_resolve_undo(struct index_state *istate, struct cache_entry *ce)
        if (!lost->util)
                lost->util = xcalloc(1, sizeof(*ui));
        ui = lost->util;
-       hashcpy(ui->sha1[stage - 1], ce->oid.hash);
+       oidcpy(&ui->oid[stage - 1], &ce->oid);
        ui->mode[stage - 1] = ce->ce_mode;
 }
 
@@ -44,7 +44,7 @@ void resolve_undo_write(struct strbuf *sb, struct string_list *resolve_undo)
                for (i = 0; i < 3; i++) {
                        if (!ui->mode[i])
                                continue;
-                       strbuf_add(sb, ui->sha1[i], 20);
+                       strbuf_add(sb, ui->oid[i].hash, the_hash_algo->rawsz);
                }
        }
 }
@@ -55,6 +55,7 @@ struct string_list *resolve_undo_read(const char *data, unsigned long size)
        size_t len;
        char *endptr;
        int i;
+       const unsigned rawsz = the_hash_algo->rawsz;
 
        resolve_undo = xcalloc(1, sizeof(*resolve_undo));
        resolve_undo->strdup_strings = 1;
@@ -87,11 +88,11 @@ struct string_list *resolve_undo_read(const char *data, unsigned long size)
                for (i = 0; i < 3; i++) {
                        if (!ui->mode[i])
                                continue;
-                       if (size < 20)
+                       if (size < rawsz)
                                goto error;
-                       hashcpy(ui->sha1[i], (const unsigned char *)data);
-                       size -= 20;
-                       data += 20;
+                       memcpy(ui->oid[i].hash, (const unsigned char *)data, rawsz);
+                       size -= rawsz;
+                       data += rawsz;
                }
        }
        return resolve_undo;
@@ -145,7 +146,7 @@ int unmerge_index_entry_at(struct index_state *istate, int pos)
                struct cache_entry *nce;
                if (!ru->mode[i])
                        continue;
-               nce = make_cache_entry(ru->mode[i], ru->sha1[i],
+               nce = make_cache_entry(ru->mode[i], ru->oid[i].hash,
                                       name, i + 1, 0);
                if (matched)
                        nce->ce_flags |= CE_MATCHED;
index 46306455edddb94a554a7a2fcadf49a30861f599..87291904bd34e0e7f3a3601b6742f5345391824d 100644 (file)
@@ -3,7 +3,7 @@
 
 struct resolve_undo_info {
        unsigned int mode[3];
-       unsigned char sha1[3][20];
+       struct object_id oid[3];
 };
 
 extern void record_resolve_undo(struct index_state *, struct cache_entry *);
index 5ce9b93baa72685893dd30b62c067bff1f18d636..b42c836d7a64a67779c587954bcab90d919aaffb 100644 (file)
@@ -113,7 +113,8 @@ void mark_parents_uninteresting(struct commit *commit)
                         * it is popped next time around, we won't be trying
                         * to parse it and get an error.
                         */
-                       if (!has_object_file(&commit->object.oid))
+                       if (!commit->object.parsed &&
+                           !has_object_file(&commit->object.oid))
                                commit->object.parsed = 1;
 
                        if (commit->object.flags & UNINTERESTING)
@@ -1065,14 +1066,9 @@ static int limit_list(struct rev_info *revs)
                        return -1;
                if (obj->flags & UNINTERESTING) {
                        mark_parents_uninteresting(commit);
-                       if (revs->show_all)
-                               p = &commit_list_insert(commit, p)->next;
                        slop = still_interesting(list, date, slop, &interesting_cache);
                        if (slop)
                                continue;
-                       /* If showing all, add the whole pending list to the end */
-                       if (revs->show_all)
-                               *p = list;
                        break;
                }
                if (revs->min_age != -1 && (commit->date > revs->min_age))
@@ -1864,8 +1860,6 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                revs->dense = 1;
        } else if (!strcmp(arg, "--sparse")) {
                revs->dense = 0;
-       } else if (!strcmp(arg, "--show-all")) {
-               revs->show_all = 1;
        } else if (!strcmp(arg, "--in-commit-order")) {
                revs->tree_blobs_in_commit_order = 1;
        } else if (!strcmp(arg, "--remove-empty")) {
@@ -3094,8 +3088,6 @@ enum commit_action get_commit_action(struct rev_info *revs, struct commit *commi
                return commit_ignore;
        if (revs->unpacked && has_sha1_pack(commit->object.oid.hash))
                return commit_ignore;
-       if (revs->show_all)
-               return commit_show;
        if (commit->object.flags & UNINTERESTING)
                return commit_ignore;
        if (revs->min_age != -1 &&
@@ -3194,7 +3186,6 @@ enum commit_action simplify_commit(struct rev_info *revs, struct commit *commit)
        enum commit_action action = get_commit_action(revs, commit);
 
        if (action == commit_show &&
-           !revs->show_all &&
            revs->prune && revs->dense && want_ancestry(revs)) {
                /*
                 * --full-diff on simplified parents is no good: it
index 3dee97bfb97a4abd636ad7055eb84cf5dea79558..b8c47b98e22562ef320197b4ddbc8f0c3ee40f98 100644 (file)
@@ -90,7 +90,6 @@ struct rev_info {
        unsigned int    dense:1,
                        prune:1,
                        no_walk:2,
-                       show_all:1,
                        remove_empty_trees:1,
                        simplify_history:1,
                        topo_order:1,
index a483d5904a3ec1acae8908dd2e699fa00bcaaa9d..84899e423f098bb8cd61eee86bcc484139b1482f 100644 (file)
@@ -621,7 +621,7 @@ static void trace_run_command(const struct child_process *cp)
        if (!trace_want(&trace_default_key))
                return;
 
-       strbuf_addf(&buf, "trace: run_command:");
+       strbuf_addstr(&buf, "trace: run_command:");
        if (cp->dir) {
                strbuf_addstr(&buf, " cd ");
                sq_quote_buf_pretty(&buf, cp->dir);
index 2112d3b27ad21e1d457b08a3aa71a811b5e8ee79..19025a7aca82a7066b9a2d40d4d50406a9749a5f 100644 (file)
@@ -37,14 +37,14 @@ int option_parse_push_signed(const struct option *opt,
        die("bad %s argument: %s", opt->long_name, arg);
 }
 
-static void feed_object(const unsigned char *sha1, FILE *fh, int negative)
+static void feed_object(const struct object_id *oid, FILE *fh, int negative)
 {
-       if (negative && !has_sha1_file(sha1))
+       if (negative && !has_sha1_file(oid->hash))
                return;
 
        if (negative)
                putc('^', fh);
-       fputs(sha1_to_hex(sha1), fh);
+       fputs(oid_to_hex(oid), fh);
        putc('\n', fh);
 }
 
@@ -89,13 +89,13 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc
         */
        po_in = xfdopen(po.in, "w");
        for (i = 0; i < extra->nr; i++)
-               feed_object(extra->oid[i].hash, po_in, 1);
+               feed_object(&extra->oid[i], po_in, 1);
 
        while (refs) {
                if (!is_null_oid(&refs->old_oid))
-                       feed_object(refs->old_oid.hash, po_in, 1);
+                       feed_object(&refs->old_oid, po_in, 1);
                if (!is_null_oid(&refs->new_oid))
-                       feed_object(refs->new_oid.hash, po_in, 0);
+                       feed_object(&refs->new_oid, po_in, 0);
                refs = refs->next;
        }
 
@@ -137,6 +137,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc
 static int receive_unpack_status(int in)
 {
        const char *line = packet_read_line(in, NULL);
+       if (!line)
+               return error(_("unexpected flush packet while reading remote unpack status"));
        if (!skip_prefix(line, "unpack ", &line))
                return error(_("unable to parse remote unpack status: %s"), line);
        if (strcmp(line, "ok"))
index 5bfdc4044233d5f809f9f1fbc55ebe3da477e3f0..667f35ebdffbc1ef730e310cbea9b3dbce786e21 100644 (file)
@@ -282,7 +282,7 @@ struct commit_message {
 
 static const char *short_commit_name(struct commit *commit)
 {
-       return find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
+       return find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV);
 }
 
 static int get_message(struct commit *commit, struct commit_message *out)
@@ -339,7 +339,7 @@ static void print_advice(int show_hint, struct replay_opts *opts)
 static int write_message(const void *buf, size_t len, const char *filename,
                         int append_eol)
 {
-       static struct lock_file msg_file;
+       struct lock_file msg_file = LOCK_INIT;
 
        int msg_fd = hold_lock_file_for_update(&msg_file, filename, 0);
        if (msg_fd < 0)
@@ -352,10 +352,8 @@ static int write_message(const void *buf, size_t len, const char *filename,
                rollback_lock_file(&msg_file);
                return error_errno(_("could not write eol to '%s'"), filename);
        }
-       if (commit_lock_file(&msg_file) < 0) {
-               rollback_lock_file(&msg_file);
-               return error(_("failed to finalize '%s'."), filename);
-       }
+       if (commit_lock_file(&msg_file) < 0)
+               return error(_("failed to finalize '%s'"), filename);
 
        return 0;
 }
@@ -485,7 +483,7 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
        struct tree *result, *next_tree, *base_tree, *head_tree;
        int clean;
        char **xopt;
-       static struct lock_file index_lock;
+       struct lock_file index_lock = LOCK_INIT;
 
        if (hold_locked_index(&index_lock, LOCK_REPORT_ON_ERROR) < 0)
                return -1;
@@ -514,18 +512,19 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
                fputs(o.obuf.buf, stdout);
        strbuf_release(&o.obuf);
        diff_warn_rename_limit("merge.renamelimit", o.needed_rename_limit, 0);
-       if (clean < 0)
+       if (clean < 0) {
+               rollback_lock_file(&index_lock);
                return clean;
+       }
 
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &index_lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                /*
                 * TRANSLATORS: %s will be "revert", "cherry-pick" or
                 * "rebase -i".
                 */
                return error(_("%s: Unable to write new index file"),
                        _(action_name(opts)));
-       rollback_lock_file(&index_lock);
 
        if (!clean)
                append_conflicts_hint(msgbuf);
@@ -1113,7 +1112,7 @@ static int try_to_commit(struct strbuf *msg, const char *author,
                commit_list_insert(current_head, &parents);
        }
 
-       if (write_cache_as_tree(tree.hash, 0, NULL)) {
+       if (write_cache_as_tree(&tree, 0, NULL)) {
                res = error(_("git write-tree failed to write a tree"));
                goto out;
        }
@@ -1149,8 +1148,8 @@ static int try_to_commit(struct strbuf *msg, const char *author,
                goto out;
        }
 
-       if (commit_tree_extended(msg->buf, msg->len, tree.hash, parents,
-                                oid->hash, author, opts->gpg_sign, extra)) {
+       if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
+                                oid, author, opts->gpg_sign, extra)) {
                res = error(_("failed to write commit object"));
                goto out;
        }
@@ -1475,7 +1474,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                 * that represents the "current" state for merge-recursive
                 * to work on.
                 */
-               if (write_cache_as_tree(head.hash, 0, NULL))
+               if (write_cache_as_tree(&head, 0, NULL))
                        return error(_("your index file is unmerged."));
        } else {
                unborn = get_oid("HEAD", &head);
@@ -1705,7 +1704,7 @@ static int prepare_revs(struct replay_opts *opts)
 
 static int read_and_refresh_cache(struct replay_opts *opts)
 {
-       static struct lock_file index_lock;
+       struct lock_file index_lock = LOCK_INIT;
        int index_fd = hold_locked_index(&index_lock, 0);
        if (read_index_preload(&the_index, NULL) < 0) {
                rollback_lock_file(&index_lock);
@@ -1713,13 +1712,13 @@ static int read_and_refresh_cache(struct replay_opts *opts)
                        _(action_name(opts)));
        }
        refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
-       if (the_index.cache_changed && index_fd >= 0) {
-               if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) {
+       if (index_fd >= 0) {
+               if (write_locked_index(&the_index, &index_lock,
+                                      COMMIT_LOCK | SKIP_IF_UNCHANGED)) {
                        return error(_("git %s: failed to refresh the index"),
                                _(action_name(opts)));
                }
        }
-       rollback_lock_file(&index_lock);
        return 0;
 }
 
@@ -1869,22 +1868,31 @@ static int count_commands(struct todo_list *todo_list)
        return count;
 }
 
+static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
+{
+       int fd;
+       ssize_t len;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return error_errno(_("could not open '%s'"), path);
+       len = strbuf_read(sb, fd, 0);
+       close(fd);
+       if (len < 0)
+               return error(_("could not read '%s'."), path);
+       return len;
+}
+
 static int read_populate_todo(struct todo_list *todo_list,
                        struct replay_opts *opts)
 {
        struct stat st;
        const char *todo_file = get_todo_path(opts);
-       int fd, res;
+       int res;
 
        strbuf_reset(&todo_list->buf);
-       fd = open(todo_file, O_RDONLY);
-       if (fd < 0)
-               return error_errno(_("could not open '%s'"), todo_file);
-       if (strbuf_read(&todo_list->buf, fd, 0) < 0) {
-               close(fd);
-               return error(_("could not read '%s'."), todo_file);
-       }
-       close(fd);
+       if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0)
+               return -1;
 
        res = stat(todo_file, &st);
        if (res)
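strbuf_read_file_or_whine() factors out the open/read/close/report pattern that the hunks below keep deleting; a short usage sketch (path is a placeholder):

	struct strbuf buf = STRBUF_INIT;

	if (strbuf_read_file_or_whine(&buf, path) < 0)
		return -1;	/* the helper has already reported the error */
	/* ... work with buf.buf / buf.len ... */
	strbuf_release(&buf);
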
@@ -2099,16 +2107,14 @@ static int create_seq_dir(void)
 
 static int save_head(const char *head)
 {
-       static struct lock_file head_lock;
+       struct lock_file head_lock = LOCK_INIT;
        struct strbuf buf = STRBUF_INIT;
        int fd;
        ssize_t written;
 
        fd = hold_lock_file_for_update(&head_lock, git_path_head_file(), 0);
-       if (fd < 0) {
-               rollback_lock_file(&head_lock);
+       if (fd < 0)
                return error_errno(_("could not lock HEAD"));
-       }
        strbuf_addf(&buf, "%s\n", head);
        written = write_in_full(fd, buf.buf, buf.len);
        strbuf_release(&buf);
@@ -2117,10 +2123,8 @@ static int save_head(const char *head)
                return error_errno(_("could not write to '%s'"),
                                   git_path_head_file());
        }
-       if (commit_lock_file(&head_lock) < 0) {
-               rollback_lock_file(&head_lock);
-               return error(_("failed to finalize '%s'."), git_path_head_file());
-       }
+       if (commit_lock_file(&head_lock) < 0)
+               return error(_("failed to finalize '%s'"), git_path_head_file());
        return 0;
 }
 
@@ -2224,7 +2228,7 @@ int sequencer_rollback(struct replay_opts *opts)
 
 static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
 {
-       static struct lock_file todo_lock;
+       struct lock_file todo_lock = LOCK_INIT;
        const char *todo_path = get_todo_path(opts);
        int next = todo_list->current, offset, fd;
 
@@ -2244,7 +2248,7 @@ static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
                        todo_list->buf.len - offset) < 0)
                return error_errno(_("could not write to '%s'"), todo_path);
        if (commit_lock_file(&todo_lock) < 0)
-               return error(_("failed to finalize '%s'."), todo_path);
+               return error(_("failed to finalize '%s'"), todo_path);
 
        if (is_rebase_i(opts)) {
                const char *done_path = rebase_path_done();
@@ -2314,6 +2318,9 @@ static int make_patch(struct commit *commit, struct replay_opts *opts)
        p = short_commit_name(commit);
        if (write_message(p, strlen(p), rebase_path_stopped_sha(), 1) < 0)
                return -1;
+       if (update_ref("rebase", "REBASE_HEAD", &commit->object.oid,
+                      NULL, REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
+               res |= error(_("could not update %s"), "REBASE_HEAD");
 
        strbuf_addf(&buf, "%s/patch", get_dir(opts));
        memset(&log_tree_opt, 0, sizeof(log_tree_opt));
@@ -2565,6 +2572,7 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
                        unlink(rebase_path_author_script());
                        unlink(rebase_path_stopped_sha());
                        unlink(rebase_path_amend());
+                       delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
                }
                if (item->command <= TODO_SQUASH) {
                        if (is_rebase_i(opts))
@@ -2868,9 +2876,10 @@ int sequencer_pick_revisions(struct replay_opts *opts)
 
                if (!get_oid(name, &oid)) {
                        if (!lookup_commit_reference_gently(&oid, 1)) {
-                               enum object_type type = sha1_object_info(oid.hash, NULL);
+                               enum object_type type = oid_object_info(&oid,
+                                                                       NULL);
                                return error(_("%s: can't cherry-pick a %s"),
-                                       name, typename(type));
+                                       name, type_name(type));
                        }
                } else
                        return error(_("%s: bad revision"), name);
@@ -3151,20 +3160,13 @@ int check_todo_list(void)
        struct strbuf todo_file = STRBUF_INIT;
        struct todo_list todo_list = TODO_LIST_INIT;
        struct strbuf missing = STRBUF_INIT;
-       int advise_to_edit_todo = 0, res = 0, fd, i;
+       int advise_to_edit_todo = 0, res = 0, i;
 
        strbuf_addstr(&todo_file, rebase_path_todo());
-       fd = open(todo_file.buf, O_RDONLY);
-       if (fd < 0) {
-               res = error_errno(_("could not open '%s'"), todo_file.buf);
-               goto leave_check;
-       }
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               res = error(_("could not read '%s'."), todo_file.buf);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+               res = -1;
                goto leave_check;
        }
-       close(fd);
        advise_to_edit_todo = res =
                parse_insn_buffer(todo_list.buf.buf, &todo_list);
 
@@ -3180,17 +3182,10 @@ int check_todo_list(void)
 
        todo_list_release(&todo_list);
        strbuf_addstr(&todo_file, ".backup");
-       fd = open(todo_file.buf, O_RDONLY);
-       if (fd < 0) {
-               res = error_errno(_("could not open '%s'"), todo_file.buf);
-               goto leave_check;
-       }
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               res = error(_("could not read '%s'."), todo_file.buf);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+               res = -1;
                goto leave_check;
        }
-       close(fd);
        strbuf_release(&todo_file);
        res = !!parse_insn_buffer(todo_list.buf.buf, &todo_list);
 
@@ -3271,15 +3266,8 @@ int skip_unnecessary_picks(void)
        }
        strbuf_release(&buf);
 
-       fd = open(todo_file, O_RDONLY);
-       if (fd < 0) {
-               return error_errno(_("could not open '%s'"), todo_file);
-       }
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               return error(_("could not read '%s'."), todo_file);
-       }
-       close(fd);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+               return -1;
        if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
                todo_list_release(&todo_list);
                return -1;
@@ -3370,17 +3358,11 @@ int rearrange_squash(void)
        const char *todo_file = rebase_path_todo();
        struct todo_list todo_list = TODO_LIST_INIT;
        struct hashmap subject2item;
-       int res = 0, rearranged = 0, *next, *tail, fd, i;
+       int res = 0, rearranged = 0, *next, *tail, i;
        char **subjects;
 
-       fd = open(todo_file, O_RDONLY);
-       if (fd < 0)
-               return error_errno(_("could not open '%s'"), todo_file);
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               return error(_("could not read '%s'."), todo_file);
-       }
-       close(fd);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+               return -1;
        if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
                todo_list_release(&todo_list);
                return -1;
diff --git a/serve.c b/serve.c
new file mode 100644 (file)
index 0000000..a5a7b2f
--- /dev/null
+++ b/serve.c
@@ -0,0 +1,257 @@
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "pkt-line.h"
+#include "version.h"
+#include "argv-array.h"
+#include "ls-refs.h"
+#include "serve.h"
+#include "upload-pack.h"
+
+static int always_advertise(struct repository *r,
+                           struct strbuf *value)
+{
+       return 1;
+}
+
+static int agent_advertise(struct repository *r,
+                          struct strbuf *value)
+{
+       if (value)
+               strbuf_addstr(value, git_user_agent_sanitized());
+       return 1;
+}
+
+struct protocol_capability {
+       /*
+        * The name of the capability.  The server uses this name when
+        * advertising this capability, and the client uses this name to
+        * specify this capability.
+        */
+       const char *name;
+
+       /*
+        * Function queried to see if a capability should be advertised.
+        * Optionally a value can be specified by adding it to 'value'.
+        * If a value is added to 'value', the server will advertise this
+        * capability as "<name>=<value>" instead of "<name>".
+        */
+       int (*advertise)(struct repository *r, struct strbuf *value);
+
+       /*
+        * Function called when a client requests the capability as a command.
+        * The function will be provided the capabilities requested via 'keys'
+        * as well as a struct packet_reader 'request' which the command should
+        * use to read the command-specific part of the request.  Every command
+        * MUST read until a flush packet is seen before sending a response.
+        *
+        * This field should be NULL for capabilities which are not commands.
+        */
+       int (*command)(struct repository *r,
+                      struct argv_array *keys,
+                      struct packet_reader *request);
+};
+
+static struct protocol_capability capabilities[] = {
+       { "agent", agent_advertise, NULL },
+       { "ls-refs", always_advertise, ls_refs },
+       { "fetch", upload_pack_advertise, upload_pack_v2 },
+};
+
+static void advertise_capabilities(void)
+{
+       struct strbuf capability = STRBUF_INIT;
+       struct strbuf value = STRBUF_INIT;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
+               struct protocol_capability *c = &capabilities[i];
+
+               if (c->advertise(the_repository, &value)) {
+                       strbuf_addstr(&capability, c->name);
+
+                       if (value.len) {
+                               strbuf_addch(&capability, '=');
+                               strbuf_addbuf(&capability, &value);
+                       }
+
+                       strbuf_addch(&capability, '\n');
+                       packet_write(1, capability.buf, capability.len);
+               }
+
+               strbuf_reset(&capability);
+               strbuf_reset(&value);
+       }
+
+       packet_flush(1);
+       strbuf_release(&capability);
+       strbuf_release(&value);
+}
+
+static struct protocol_capability *get_capability(const char *key)
+{
+       int i;
+
+       if (!key)
+               return NULL;
+
+       for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
+               struct protocol_capability *c = &capabilities[i];
+               const char *out;
+               if (skip_prefix(key, c->name, &out) && (!*out || *out == '='))
+                       return c;
+       }
+
+       return NULL;
+}
+
+static int is_valid_capability(const char *key)
+{
+       const struct protocol_capability *c = get_capability(key);
+
+       return c && c->advertise(the_repository, NULL);
+}
+
+static int is_command(const char *key, struct protocol_capability **command)
+{
+       const char *out;
+
+       if (skip_prefix(key, "command=", &out)) {
+               struct protocol_capability *cmd = get_capability(out);
+
+               if (*command)
+                       die("command '%s' requested after already requesting command '%s'",
+                           out, (*command)->name);
+               if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command)
+                       die("invalid command '%s'", out);
+
+               *command = cmd;
+               return 1;
+       }
+
+       return 0;
+}
+
+int has_capability(const struct argv_array *keys, const char *capability,
+                  const char **value)
+{
+       int i;
+       for (i = 0; i < keys->argc; i++) {
+               const char *out;
+               if (skip_prefix(keys->argv[i], capability, &out) &&
+                   (!*out || *out == '=')) {
+                       if (value) {
+                               if (*out == '=')
+                                       out++;
+                               *value = out;
+                       }
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+enum request_state {
+       PROCESS_REQUEST_KEYS,
+       PROCESS_REQUEST_DONE,
+};
+
+static int process_request(void)
+{
+       enum request_state state = PROCESS_REQUEST_KEYS;
+       struct packet_reader reader;
+       struct argv_array keys = ARGV_ARRAY_INIT;
+       struct protocol_capability *command = NULL;
+
+       packet_reader_init(&reader, 0, NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       /*
+        * Check to see if the client closed their end before sending another
+        * request.  If so, we can terminate the connection.
+        */
+       if (packet_reader_peek(&reader) == PACKET_READ_EOF)
+               return 1;
+       reader.options = PACKET_READ_CHOMP_NEWLINE;
+
+       while (state != PROCESS_REQUEST_DONE) {
+               switch (packet_reader_peek(&reader)) {
+               case PACKET_READ_EOF:
+                       BUG("Should have already died when seeing EOF");
+               case PACKET_READ_NORMAL:
+                       /* collect request; a sequence of keys and values */
+                       if (is_command(reader.line, &command) ||
+                           is_valid_capability(reader.line))
+                               argv_array_push(&keys, reader.line);
+                       else
+                               die("unknown capability '%s'", reader.line);
+
+                       /* Consume the peeked line */
+                       packet_reader_read(&reader);
+                       break;
+               case PACKET_READ_FLUSH:
+                       /*
+                        * If no command and no keys were given then the client
+                        * wanted to terminate the connection.
+                        */
+                       if (!keys.argc)
+                               return 1;
+
+                       /*
+                        * The flush packet isn't consumed here like it is in
+                        * the other parts of this switch statement.  This is
+                        * so that the command can read the flush packet and
+                        * see the end of the request in the same way it would
+                        * if command-specific arguments were provided after a
+                        * delim packet.
+                        */
+                       state = PROCESS_REQUEST_DONE;
+                       break;
+               case PACKET_READ_DELIM:
+                       /* Consume the peeked line */
+                       packet_reader_read(&reader);
+
+                       state = PROCESS_REQUEST_DONE;
+                       break;
+               }
+       }
+
+       if (!command)
+               die("no command requested");
+
+       command->command(the_repository, &keys, &reader);
+
+       argv_array_clear(&keys);
+       return 0;
+}
+
+/* Main serve loop for protocol version 2 */
+void serve(struct serve_options *options)
+{
+       if (options->advertise_capabilities || !options->stateless_rpc) {
+               /* serve by default supports v2 */
+               packet_write_fmt(1, "version 2\n");
+
+               advertise_capabilities();
+               /*
+                * If only the list of capabilities was requested, exit
+                * immediately after advertising capabilities
+                */
+               if (options->advertise_capabilities)
+                       return;
+       }
+
+       /*
+        * If stateless-rpc was requested then exit after
+        * a single request/response exchange
+        */
+       if (options->stateless_rpc) {
+               process_request();
+       } else {
+               for (;;)
+                       if (process_request())
+                               break;
+       }
+}
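
For illustration only: a hypothetical extra entry for the capabilities[] table above might look like the sketch below, as it would sit inside serve.c next to the existing callbacks (so the file's includes cover it). The capability name and both callbacks are invented here and are not part of this patch; an advertise-only capability (like "agent") would leave the command pointer NULL, while a command capability must read its part of the request up to the flush packet before responding.

/* Sketch only: a hypothetical capability, not part of this series. */
static int example_advertise(struct repository *r, struct strbuf *value)
{
        if (value)
                strbuf_addstr(value, "1");      /* advertised as "example=1" */
        return 1;
}

static int example_command(struct repository *r, struct argv_array *keys,
                           struct packet_reader *request)
{
        /* Read command-specific arguments until the flush packet is consumed. */
        while (packet_reader_read(request) == PACKET_READ_NORMAL)
                ; /* parse request->line here */
        packet_flush(1);        /* minimal (empty) response */
        return 0;
}

/*
 * It would then be registered with one more line in capabilities[]:
 *      { "example", example_advertise, example_command },
 */
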
diff --git a/serve.h b/serve.h
new file mode 100644 (file)
index 0000000..fe65ba9
--- /dev/null
+++ b/serve.h
@@ -0,0 +1,15 @@
+#ifndef SERVE_H
+#define SERVE_H
+
+struct argv_array;
+extern int has_capability(const struct argv_array *keys, const char *capability,
+                         const char **value);
+
+struct serve_options {
+       unsigned advertise_capabilities;
+       unsigned stateless_rpc;
+};
+#define SERVE_OPTIONS_INIT { 0 }
+extern void serve(struct serve_options *options);
+
+#endif /* SERVE_H */
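
A rough usage sketch for the new header (the caller below is hypothetical and not shown in this hunk): serve() either prints the capability advertisement and returns, answers a single stateless-rpc request, or loops over requests until the client hangs up.

/* Sketch only: a hypothetical caller of the serve.h API. */
#include "cache.h"
#include "serve.h"

static int example_serve_main(int advertise_only, int stateless)
{
        struct serve_options opts = SERVE_OPTIONS_INIT;

        opts.advertise_capabilities = advertise_only;
        opts.stateless_rpc = stateless;
        serve(&opts);   /* advertise and/or process protocol v2 requests */
        return 0;
}

Within a command callback, has_capability(keys, "agent", &value) would report whether the client sent an agent capability and, if so, where its value starts.
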
diff --git a/setup.c b/setup.c
index 6fac1bb58a7b2b2521f126d14aabbc8682b14fa0..664453fcef7f3b75f56d000dc52f42a0aff42fb6 100644 (file)
--- a/setup.c
+++ b/setup.c
@@ -119,7 +119,7 @@ char *prefix_path(const char *prefix, int len, const char *path)
 {
        char *r = prefix_path_gently(prefix, len, NULL, path);
        if (!r)
-               die("'%s' is outside repository", path);
+               die(_("'%s' is outside repository"), path);
        return r;
 }
 
@@ -160,7 +160,7 @@ int check_filename(const char *prefix, const char *arg)
                free(to_free);
                return 0; /* file does not exist */
        }
-       die_errno("failed to stat '%s'", arg);
+       die_errno(_("failed to stat '%s'"), arg);
 }
 
 static void NORETURN die_verify_filename(const char *prefix,
@@ -230,7 +230,7 @@ void verify_filename(const char *prefix,
                     int diagnose_misspelt_rev)
 {
        if (*arg == '-')
-               die("option '%s' must come before non-option arguments", arg);
+               die(_("option '%s' must come before non-option arguments"), arg);
        if (looks_like_pathspec(arg) || check_filename(prefix, arg))
                return;
        die_verify_filename(prefix, arg, diagnose_misspelt_rev);
@@ -385,14 +385,14 @@ void setup_work_tree(void)
                return;
 
        if (work_tree_config_is_bogus)
-               die("unable to set up work tree using invalid config");
+               die(_("unable to set up work tree using invalid config"));
 
        work_tree = get_git_work_tree();
        git_dir = get_git_dir();
        if (!is_absolute_path(git_dir))
                git_dir = real_path(get_git_dir());
        if (!work_tree || chdir(work_tree))
-               die("This operation must be run in a work tree");
+               die(_("this operation must be run in a work tree"));
 
        /*
         * Make sure subsequent git processes find correct worktree
@@ -530,17 +530,17 @@ void read_gitfile_error_die(int error_code, const char *path, const char *dir)
                /* non-fatal; follow return path */
                break;
        case READ_GITFILE_ERR_OPEN_FAILED:
-               die_errno("Error opening '%s'", path);
+               die_errno(_("error opening '%s'"), path);
        case READ_GITFILE_ERR_TOO_LARGE:
-               die("Too large to be a .git file: '%s'", path);
+               die(_("too large to be a .git file: '%s'"), path);
        case READ_GITFILE_ERR_READ_FAILED:
-               die("Error reading %s", path);
+               die(_("error reading %s"), path);
        case READ_GITFILE_ERR_INVALID_FORMAT:
-               die("Invalid gitfile format: %s", path);
+               die(_("invalid gitfile format: %s"), path);
        case READ_GITFILE_ERR_NO_PATH:
-               die("No path in gitfile: %s", path);
+               die(_("no path in gitfile: %s"), path);
        case READ_GITFILE_ERR_NOT_A_REPO:
-               die("Not a git repository: %s", dir);
+               die(_("not a git repository: %s"), dir);
        default:
                die("BUG: unknown error code");
        }
@@ -639,7 +639,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
        int offset;
 
        if (PATH_MAX - 40 < strlen(gitdirenv))
-               die("'$%s' too big", GIT_DIR_ENVIRONMENT);
+               die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT);
 
        gitfile = (char*)read_gitfile(gitdirenv);
        if (gitfile) {
@@ -653,7 +653,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
                        free(gitfile);
                        return NULL;
                }
-               die("Not a git repository: '%s'", gitdirenv);
+               die(_("not a git repository: '%s'"), gitdirenv);
        }
 
        if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) {
@@ -682,12 +682,12 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
                else {
                        char *core_worktree;
                        if (chdir(gitdirenv))
-                               die_errno("Could not chdir to '%s'", gitdirenv);
+                               die_errno(_("cannot chdir to '%s'"), gitdirenv);
                        if (chdir(git_work_tree_cfg))
-                               die_errno("Could not chdir to '%s'", git_work_tree_cfg);
+                               die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg);
                        core_worktree = xgetcwd();
                        if (chdir(cwd->buf))
-                               die_errno("Could not come back to cwd");
+                               die_errno(_("cannot come back to cwd"));
                        set_git_work_tree(core_worktree);
                        free(core_worktree);
                }
@@ -715,7 +715,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
        if (offset >= 0) {      /* cwd inside worktree? */
                set_git_dir(real_path(gitdirenv));
                if (chdir(worktree))
-                       die_errno("Could not chdir to '%s'", worktree);
+                       die_errno(_("cannot chdir to '%s'"), worktree);
                strbuf_addch(cwd, '/');
                free(gitfile);
                return cwd->buf + offset;
@@ -743,7 +743,7 @@ static const char *setup_discovered_git_dir(const char *gitdir,
                if (offset != cwd->len && !is_absolute_path(gitdir))
                        gitdir = to_free = real_pathdup(gitdir, 1);
                if (chdir(cwd->buf))
-                       die_errno("Could not come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
                free(to_free);
                return ret;
@@ -753,7 +753,7 @@ static const char *setup_discovered_git_dir(const char *gitdir,
        if (is_bare_repository_cfg > 0) {
                set_git_dir(offset == cwd->len ? gitdir : real_path(gitdir));
                if (chdir(cwd->buf))
-                       die_errno("Could not come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                return NULL;
        }
 
@@ -792,7 +792,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset,
 
                gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset);
                if (chdir(cwd->buf))
-                       die_errno("Could not come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
        }
 
@@ -800,7 +800,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset,
        inside_work_tree = 0;
        if (offset != cwd->len) {
                if (chdir(cwd->buf))
-                       die_errno("Cannot come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                root_len = offset_1st_component(cwd->buf);
                strbuf_setlen(cwd, offset > root_len ? offset : root_len);
                set_git_dir(cwd->buf);
@@ -813,9 +813,9 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset,
 static const char *setup_nongit(const char *cwd, int *nongit_ok)
 {
        if (!nongit_ok)
-               die(_("Not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
+               die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
        if (chdir(cwd))
-               die_errno(_("Cannot come back to cwd"));
+               die_errno(_("cannot come back to cwd"));
        *nongit_ok = 1;
        return NULL;
 }
@@ -824,7 +824,7 @@ static dev_t get_device_or_die(const char *path, const char *prefix, int prefix_
 {
        struct stat buf;
        if (stat(path, &buf)) {
-               die_errno("failed to stat '%*s%s%s'",
+               die_errno(_("failed to stat '%*s%s%s'"),
                                prefix_len,
                                prefix ? prefix : "",
                                prefix ? "/" : "", path);
@@ -1066,13 +1066,13 @@ const char *setup_git_directory_gently(int *nongit_ok)
                break;
        case GIT_DIR_DISCOVERED:
                if (dir.len < cwd.len && chdir(dir.buf))
-                       die(_("Cannot change to '%s'"), dir.buf);
+                       die(_("cannot change to '%s'"), dir.buf);
                prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len,
                                                  &repo_fmt, nongit_ok);
                break;
        case GIT_DIR_BARE:
                if (dir.len < cwd.len && chdir(dir.buf))
-                       die(_("Cannot change to '%s'"), dir.buf);
+                       die(_("cannot change to '%s'"), dir.buf);
                prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok);
                break;
        case GIT_DIR_HIT_CEILING:
@@ -1085,7 +1085,7 @@ const char *setup_git_directory_gently(int *nongit_ok)
                        strbuf_release(&dir);
                        return NULL;
                }
-               die(_("Not a git repository (or any parent up to mount point %s)\n"
+               die(_("not a git repository (or any parent up to mount point %s)\n"
                      "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
                    dir.buf);
        default:
@@ -1168,7 +1168,7 @@ int git_config_perm(const char *var, const char *value)
        /* A filemode value was given: 0xxx */
 
        if ((i & 0600) != 0600)
-               die(_("Problem with core.sharedRepository filemode value "
+               die(_("problem with core.sharedRepository filemode value "
                    "(0%.3o).\nThe owner of files must always have "
                    "read and write permissions."), i);
 
@@ -1211,7 +1211,7 @@ void sanitize_stdfds(void)
        while (fd != -1 && fd < 2)
                fd = dup(fd);
        if (fd == -1)
-               die_errno("open /dev/null or dup failed");
+               die_errno(_("open /dev/null or dup failed"));
        if (fd > 2)
                close(fd);
 }
@@ -1226,12 +1226,12 @@ int daemonize(void)
                case 0:
                        break;
                case -1:
-                       die_errno("fork failed");
+                       die_errno(_("fork failed"));
                default:
                        exit(0);
        }
        if (setsid() == -1)
-               die_errno("setsid failed");
+               die_errno(_("setsid failed"));
        close(0);
        close(1);
        close(2);
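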
index 4cf3ebd9212f6d5c9b9829373e58c34b83f0a548..8d0b1db3e27c99b80faa2867565de237581802f4 100644 (file)
@@ -99,3 +99,31 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
        } while (lo < hi);
        return -lo-1;
 }
+
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+                const unsigned char *table, size_t stride, uint32_t *result)
+{
+       uint32_t hi, lo;
+
+       hi = ntohl(fanout_nbo[*sha1]);
+       lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1]));
+
+       while (lo < hi) {
+               unsigned mi = lo + (hi - lo) / 2;
+               int cmp = hashcmp(table + mi * stride, sha1);
+
+               if (!cmp) {
+                       if (result)
+                               *result = mi;
+                       return 1;
+               }
+               if (cmp > 0)
+                       hi = mi;
+               else
+                       lo = mi + 1;
+       }
+
+       if (result)
+               *result = lo;
+       return 0;
+}
index cf5314f402ce78f0d5ab2bd72ee7f334b6394e04..7678b23b36c291c7b1b6656e0c42fc14ea12da15 100644 (file)
@@ -7,4 +7,26 @@ extern int sha1_pos(const unsigned char *sha1,
                    void *table,
                    size_t nr,
                    sha1_access_fn fn);
+
+/*
+ * Searches for sha1 in table, using the given fanout table to determine the
+ * interval to search, then using binary search. Returns 1 if found, 0 if not.
+ *
+ * Takes the following parameters:
+ *
+ *  - sha1: the hash to search for
+ *  - fanout_nbo: a 256-element array of NETWORK-order 32-bit integers; the
+ *    integer at position i represents the number of elements in table whose
+ *    first byte is less than or equal to i
+ *  - table: a sorted list of hashes with optional extra information in between
+ *  - stride: distance between two consecutive elements in table (should be
+ *    GIT_MAX_RAWSZ or greater)
+ *  - result: if not NULL, this function stores the element index of the
+ *    position found (if the search is successful) or the index of the least
+ *    element that is greater than sha1 (if the search is not successful)
+ *
+ * This function does not verify the validity of the fanout table.
+ */
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+                const unsigned char *table, size_t stride, uint32_t *result);
 #endif
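
A minimal sketch of how bsearch_hash() might be called, assuming an idx-style layout in which 'fanout' points at the 256-entry network-order fanout table and 'table' at the sorted hashes spaced GIT_MAX_RAWSZ bytes apart (both buffers are hypothetical here):

/* Sketch only: hypothetical lookup in a sorted, fanout-indexed hash table. */
#include "cache.h"
#include "sha1-lookup.h"

static int example_lookup(const unsigned char *sha1,
                          const uint32_t *fanout,       /* 256 network-order counts */
                          const unsigned char *table)   /* sorted GIT_MAX_RAWSZ-byte entries */
{
        uint32_t pos;

        if (bsearch_hash(sha1, fanout, table, GIT_MAX_RAWSZ, &pos))
                return 1;       /* found: entry is at table + pos * GIT_MAX_RAWSZ */
        /* not found: pos is where the hash would be inserted */
        return 0;
}
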
index 314ff55b4769a4262a76e3fa4c639f7d9c9f4b88..77ccaab928529d37aa971168c54e31358e12ef8e 100644 (file)
@@ -32,6 +32,9 @@
 #include "fetch-object.h"
 #include "object-store.h"
 
+/* The maximum size for an object header. */
+#define MAX_HEADER_LEN 32
+
 const unsigned char null_sha1[GIT_MAX_RAWSZ];
 const struct object_id null_oid;
 const struct object_id empty_tree_oid = {
@@ -41,32 +44,32 @@ const struct object_id empty_blob_oid = {
        EMPTY_BLOB_SHA1_BIN_LITERAL
 };
 
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
 {
-       git_SHA1_Init((git_SHA_CTX *)ctx);
+       git_SHA1_Init(&ctx->sha1);
 }
 
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
 {
-       git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+       git_SHA1_Update(&ctx->sha1, data, len);
 }
 
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
 {
-       git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+       git_SHA1_Final(hash, &ctx->sha1);
 }
 
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
 {
        die("trying to init unknown hash");
 }
 
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
 {
        die("trying to update unknown hash");
 }
 
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
 {
        die("trying to finalize unknown hash");
 }
@@ -77,7 +80,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
                0x00000000,
                0,
                0,
-               0,
                git_hash_unknown_init,
                git_hash_unknown_update,
                git_hash_unknown_final,
@@ -88,7 +90,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
                "sha-1",
                /* "sha1", big-endian */
                0x73686131,
-               sizeof(git_SHA_CTX),
                GIT_SHA1_RAWSZ,
                GIT_SHA1_HEXSZ,
                git_hash_sha1_init,
@@ -790,31 +791,31 @@ void *xmmap(void *start, size_t length,
  * With "map" == NULL, try reading the object named with "sha1" using
  * the streaming interface and rehash it to do the same.
  */
-int check_sha1_signature(const unsigned char *sha1, void *map,
-                        unsigned long size, const char *type)
+int check_object_signature(const struct object_id *oid, void *map,
+                          unsigned long size, const char *type)
 {
-       unsigned char real_sha1[20];
+       struct object_id real_oid;
        enum object_type obj_type;
        struct git_istream *st;
-       git_SHA_CTX c;
-       char hdr[32];
+       git_hash_ctx c;
+       char hdr[MAX_HEADER_LEN];
        int hdrlen;
 
        if (map) {
-               hash_sha1_file(map, size, type, real_sha1);
-               return hashcmp(sha1, real_sha1) ? -1 : 0;
+               hash_object_file(map, size, type, &real_oid);
+               return oidcmp(oid, &real_oid) ? -1 : 0;
        }
 
-       st = open_istream(sha1, &obj_type, &size, NULL);
+       st = open_istream(oid, &obj_type, &size, NULL);
        if (!st)
                return -1;
 
        /* Generate the header */
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1;
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
 
        /* Sha1.. */
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, hdrlen);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
        for (;;) {
                char buf[1024 * 16];
                ssize_t readlen = read_istream(st, buf, sizeof(buf));
@@ -825,11 +826,11 @@ int check_sha1_signature(const unsigned char *sha1, void *map,
                }
                if (!readlen)
                        break;
-               git_SHA1_Update(&c, buf, readlen);
+               the_hash_algo->update_fn(&c, buf, readlen);
        }
-       git_SHA1_Final(real_sha1, &c);
+       the_hash_algo->final_fn(real_oid.hash, &c);
        close_istream(st);
-       return hashcmp(sha1, real_sha1) ? -1 : 0;
+       return oidcmp(oid, &real_oid) ? -1 : 0;
 }
 
 int git_open_cloexec(const char *name, int flags)
@@ -1101,8 +1102,8 @@ static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
        }
 
        type = type_from_string_gently(type_buf, type_len, 1);
-       if (oi->typename)
-               strbuf_add(oi->typename, type_buf, type_len);
+       if (oi->type_name)
+               strbuf_add(oi->type_name, type_buf, type_len);
        /*
         * Set type to 0 if its an unknown object and
         * we're obtaining the type using '--allow-unknown-type'
@@ -1157,7 +1158,7 @@ static int sha1_loose_object_info(struct repository *r,
        unsigned long mapsize;
        void *map;
        git_zstream stream;
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        struct strbuf hdrbuf = STRBUF_INIT;
        unsigned long size_scratch;
 
@@ -1172,7 +1173,7 @@ static int sha1_loose_object_info(struct repository *r,
         * return value implicitly indicates whether the
         * object even exists.
         */
-       if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) {
+       if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
                const char *path;
                struct stat st;
                if (stat_sha1_file(r, sha1, &st, &path) < 0)
@@ -1229,24 +1230,25 @@ static int sha1_loose_object_info(struct repository *r,
 
 int fetch_if_missing = 1;
 
-int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
+int oid_object_info_extended(const struct object_id *oid, struct object_info *oi, unsigned flags)
 {
        static struct object_info blank_oi = OBJECT_INFO_INIT;
        struct pack_entry e;
        int rtype;
-       const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
-                                   lookup_replace_object(sha1) :
-                                   sha1;
+       const struct object_id *real = oid;
        int already_retried = 0;
 
-       if (is_null_sha1(real))
+       if (flags & OBJECT_INFO_LOOKUP_REPLACE)
+               real = lookup_replace_object(oid);
+
+       if (is_null_oid(real))
                return -1;
 
        if (!oi)
                oi = &blank_oi;
 
        if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
-               struct cached_object *co = find_cached_object(real);
+               struct cached_object *co = find_cached_object(real->hash);
                if (co) {
                        if (oi->typep)
                                *(oi->typep) = co->type;
@@ -1256,8 +1258,8 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                                *(oi->disk_sizep) = 0;
                        if (oi->delta_base_sha1)
                                hashclr(oi->delta_base_sha1);
-                       if (oi->typename)
-                               strbuf_addstr(oi->typename, typename(co->type));
+                       if (oi->type_name)
+                               strbuf_addstr(oi->type_name, type_name(co->type));
                        if (oi->contentp)
                                *oi->contentp = xmemdupz(co->buf, co->size);
                        oi->whence = OI_CACHED;
@@ -1266,17 +1268,22 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
        }
 
        while (1) {
-               if (find_pack_entry(the_repository, real, &e))
+               if (find_pack_entry(the_repository, real->hash, &e))
                        break;
 
+               if (flags & OBJECT_INFO_IGNORE_LOOSE)
+                       return -1;
+
                /* Most likely it's a loose object. */
-               if (!sha1_loose_object_info(the_repository, real, oi, flags))
+               if (!sha1_loose_object_info(the_repository, real->hash, oi, flags))
                        return 0;
 
                /* Not a loose object; someone else may have just packed it. */
-               reprepare_packed_git(the_repository);
-               if (find_pack_entry(the_repository, real, &e))
-                       break;
+               if (!(flags & OBJECT_INFO_QUICK)) {
+                       reprepare_packed_git(the_repository);
+                       if (find_pack_entry(the_repository, real->hash, &e))
+                               break;
+               }
 
                /* Check if it is a missing object */
                if (fetch_if_missing && repository_format_partial_clone &&
@@ -1285,7 +1292,7 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                         * TODO Investigate having fetch_object() return
                         * TODO error/success and stopping the music here.
                         */
-                       fetch_object(repository_format_partial_clone, real);
+                       fetch_object(repository_format_partial_clone, real->hash);
                        already_retried = 1;
                        continue;
                }
@@ -1301,8 +1308,8 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                return 0;
        rtype = packed_object_info(e.p, e.offset, oi);
        if (rtype < 0) {
-               mark_bad_packed_object(e.p, real);
-               return sha1_object_info_extended(real, oi, 0);
+               mark_bad_packed_object(e.p, real->hash);
+               return oid_object_info_extended(real, oi, 0);
        } else if (oi->whence == OI_PACKED) {
                oi->u.packed.offset = e.offset;
                oi->u.packed.pack = e.p;
@@ -1314,15 +1321,15 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
 }
 
 /* returns enum object_type or negative */
-int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
+int oid_object_info(const struct object_id *oid, unsigned long *sizep)
 {
        enum object_type type;
        struct object_info oi = OBJECT_INFO_INIT;
 
        oi.typep = &type;
        oi.sizep = sizep;
-       if (sha1_object_info_extended(sha1, &oi,
-                                     OBJECT_INFO_LOOKUP_REPLACE) < 0)
+       if (oid_object_info_extended(oid, &oi,
+                                    OBJECT_INFO_LOOKUP_REPLACE) < 0)
                return -1;
        return type;
 }
@@ -1330,24 +1337,27 @@ int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
 static void *read_object(const unsigned char *sha1, enum object_type *type,
                         unsigned long *size)
 {
+       struct object_id oid;
        struct object_info oi = OBJECT_INFO_INIT;
        void *content;
        oi.typep = type;
        oi.sizep = size;
        oi.contentp = &content;
 
-       if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+       hashcpy(oid.hash, sha1);
+
+       if (oid_object_info_extended(&oid, &oi, 0) < 0)
                return NULL;
        return content;
 }
 
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
-                     unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+                       struct object_id *oid)
 {
        struct cached_object *co;
 
-       hash_sha1_file(buf, len, typename(type), sha1);
-       if (has_sha1_file(sha1) || find_cached_object(sha1))
+       hash_object_file(buf, len, type_name(type), oid);
+       if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
                return 0;
        ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
        co = &cached_objects[cached_object_nr++];
@@ -1355,7 +1365,7 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
        co->type = type;
        co->buf = xmalloc(len);
        memcpy(co->buf, buf, len);
-       hashcpy(co->sha1, sha1);
+       hashcpy(co->sha1, oid->hash);
        return 0;
 }
 
@@ -1364,65 +1374,65 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
  * deal with them should arrange to call read_object() and give error
  * messages themselves.
  */
-void *read_sha1_file_extended(const unsigned char *sha1,
-                             enum object_type *type,
-                             unsigned long *size,
-                             int lookup_replace)
+void *read_object_file_extended(const struct object_id *oid,
+                               enum object_type *type,
+                               unsigned long *size,
+                               int lookup_replace)
 {
        void *data;
        const struct packed_git *p;
        const char *path;
        struct stat st;
-       const unsigned char *repl = lookup_replace ? lookup_replace_object(sha1)
-                                                  : sha1;
+       const struct object_id *repl = lookup_replace ? lookup_replace_object(oid)
+                                                     : oid;
 
        errno = 0;
-       data = read_object(repl, type, size);
+       data = read_object(repl->hash, type, size);
        if (data)
                return data;
 
        if (errno && errno != ENOENT)
-               die_errno("failed to read object %s", sha1_to_hex(sha1));
+               die_errno("failed to read object %s", oid_to_hex(oid));
 
        /* die if we replaced an object with one that does not exist */
-       if (repl != sha1)
+       if (repl != oid)
                die("replacement %s not found for %s",
-                   sha1_to_hex(repl), sha1_to_hex(sha1));
+                   oid_to_hex(repl), oid_to_hex(oid));
 
-       if (!stat_sha1_file(the_repository, repl, &st, &path))
+       if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
                die("loose object %s (stored in %s) is corrupt",
-                   sha1_to_hex(repl), path);
+                   oid_to_hex(repl), path);
 
-       if ((p = has_packed_and_bad(repl)) != NULL)
+       if ((p = has_packed_and_bad(repl->hash)) != NULL)
                die("packed object %s (stored in %s) is corrupt",
-                   sha1_to_hex(repl), p->pack_name);
+                   oid_to_hex(repl), p->pack_name);
 
        return NULL;
 }
 
-void *read_object_with_reference(const unsigned char *sha1,
+void *read_object_with_reference(const struct object_id *oid,
                                 const char *required_type_name,
                                 unsigned long *size,
-                                unsigned char *actual_sha1_return)
+                                struct object_id *actual_oid_return)
 {
        enum object_type type, required_type;
        void *buffer;
        unsigned long isize;
-       unsigned char actual_sha1[20];
+       struct object_id actual_oid;
 
        required_type = type_from_string(required_type_name);
-       hashcpy(actual_sha1, sha1);
+       oidcpy(&actual_oid, oid);
        while (1) {
                int ref_length = -1;
                const char *ref_type = NULL;
 
-               buffer = read_sha1_file(actual_sha1, &type, &isize);
+               buffer = read_object_file(&actual_oid, &type, &isize);
                if (!buffer)
                        return NULL;
                if (type == required_type) {
                        *size = isize;
-                       if (actual_sha1_return)
-                               hashcpy(actual_sha1_return, actual_sha1);
+                       if (actual_oid_return)
+                               oidcpy(actual_oid_return, &actual_oid);
                        return buffer;
                }
                /* Handle references */
@@ -1436,32 +1446,32 @@ void *read_object_with_reference(const unsigned char *sha1,
                }
                ref_length = strlen(ref_type);
 
-               if (ref_length + 40 > isize ||
+               if (ref_length + GIT_SHA1_HEXSZ > isize ||
                    memcmp(buffer, ref_type, ref_length) ||
-                   get_sha1_hex((char *) buffer + ref_length, actual_sha1)) {
+                   get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
                        free(buffer);
                        return NULL;
                }
                free(buffer);
                /* Now we have the ID of the referred-to object in
-                * actual_sha1.  Check again. */
+                * actual_oid.  Check again. */
        }
 }
 
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
-                                    const char *type, unsigned char *sha1,
-                                    char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+                                     const char *type, struct object_id *oid,
+                                     char *hdr, int *hdrlen)
 {
-       git_SHA_CTX c;
+       git_hash_ctx c;
 
        /* Generate the header */
        *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
 
        /* Sha1.. */
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, *hdrlen);
-       git_SHA1_Update(&c, buf, len);
-       git_SHA1_Final(sha1, &c);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, *hdrlen);
+       the_hash_algo->update_fn(&c, buf, len);
+       the_hash_algo->final_fn(oid->hash, &c);
 }
 
 /*
@@ -1514,12 +1524,12 @@ static int write_buffer(int fd, const void *buf, size_t len)
        return 0;
 }
 
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
-                   unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+                    struct object_id *oid)
 {
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        int hdrlen = sizeof(hdr);
-       write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
        return 0;
 }
 
@@ -1577,19 +1587,20 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename)
        return fd;
 }
 
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
-                             const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+                             int hdrlen, const void *buf, unsigned long len,
+                             time_t mtime)
 {
        int fd, ret;
        unsigned char compressed[4096];
        git_zstream stream;
-       git_SHA_CTX c;
-       unsigned char parano_sha1[20];
+       git_hash_ctx c;
+       struct object_id parano_oid;
        static struct strbuf tmp_file = STRBUF_INIT;
        static struct strbuf filename = STRBUF_INIT;
 
        strbuf_reset(&filename);
-       sha1_file_name(the_repository, &filename, sha1);
+       sha1_file_name(the_repository, &filename, oid->hash);
 
        fd = create_tmpfile(&tmp_file, filename.buf);
        if (fd < 0) {
@@ -1603,14 +1614,14 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
        git_deflate_init(&stream, zlib_compression_level);
        stream.next_out = compressed;
        stream.avail_out = sizeof(compressed);
-       git_SHA1_Init(&c);
+       the_hash_algo->init_fn(&c);
 
        /* First header.. */
        stream.next_in = (unsigned char *)hdr;
        stream.avail_in = hdrlen;
        while (git_deflate(&stream, 0) == Z_OK)
                ; /* nothing */
-       git_SHA1_Update(&c, hdr, hdrlen);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
 
        /* Then the data itself.. */
        stream.next_in = (void *)buf;
@@ -1618,7 +1629,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
        do {
                unsigned char *in0 = stream.next_in;
                ret = git_deflate(&stream, Z_FINISH);
-               git_SHA1_Update(&c, in0, stream.next_in - in0);
+               the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
                if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
                        die("unable to write sha1 file");
                stream.next_out = compressed;
@@ -1626,13 +1637,16 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
        } while (ret == Z_OK);
 
        if (ret != Z_STREAM_END)
-               die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+               die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+                   ret);
        ret = git_deflate_end_gently(&stream);
        if (ret != Z_OK)
-               die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
-       git_SHA1_Final(parano_sha1, &c);
-       if (hashcmp(sha1, parano_sha1) != 0)
-               die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+               die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+                   ret);
+       the_hash_algo->final_fn(parano_oid.hash, &c);
+       if (oidcmp(oid, &parano_oid) != 0)
+               die("confused by unstable object source data for %s",
+                   oid_to_hex(oid));
 
        close_sha1_file(fd);
 
@@ -1665,58 +1679,60 @@ static int freshen_packed_object(const unsigned char *sha1)
        return 1;
 }
 
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+                     struct object_id *oid)
 {
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        int hdrlen = sizeof(hdr);
 
        /* Normally if we have it in the pack then we do not bother writing
         * it out into .git/objects/??/?{38} file.
         */
-       write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
-       if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+       if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
                return 0;
-       return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+       return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
 }
 
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
-                            struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+                              const char *type, struct object_id *oid,
+                              unsigned flags)
 {
        char *header;
        int hdrlen, status = 0;
 
        /* type string, SP, %lu of the length plus NUL must fit this */
-       hdrlen = strlen(type) + 32;
+       hdrlen = strlen(type) + MAX_HEADER_LEN;
        header = xmalloc(hdrlen);
-       write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+       write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
 
        if (!(flags & HASH_WRITE_OBJECT))
                goto cleanup;
        if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
                goto cleanup;
-       status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+       status = write_loose_object(oid, header, hdrlen, buf, len, 0);
 
 cleanup:
        free(header);
        return status;
 }
 
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
 {
        void *buf;
        unsigned long len;
        enum object_type type;
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        int hdrlen;
        int ret;
 
-       if (has_loose_object(sha1))
+       if (has_loose_object(oid->hash))
                return 0;
-       buf = read_object(sha1, &type, &len);
+       buf = read_object(oid->hash, &type, &len);
        if (!buf)
-               return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
-       ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+               return error("cannot read sha1_file for %s", oid_to_hex(oid));
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+       ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
        free(buf);
 
        return ret;
@@ -1724,10 +1740,12 @@ int force_object_loose(const unsigned char *sha1, time_t mtime)
 
 int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
 {
+       struct object_id oid;
        if (!startup_info->have_repository)
                return 0;
-       return sha1_object_info_extended(sha1, NULL,
-                                        flags | OBJECT_INFO_SKIP_CACHED) >= 0;
+       hashcpy(oid.hash, sha1);
+       return oid_object_info_extended(&oid, NULL,
+                                       flags | OBJECT_INFO_SKIP_CACHED) >= 0;
 }
 
 int has_object_file(const struct object_id *oid)
@@ -1799,9 +1817,9 @@ static int index_mem(struct object_id *oid, void *buf, size_t size,
        }
 
        if (write_object)
-               ret = write_sha1_file(buf, size, typename(type), oid->hash);
+               ret = write_object_file(buf, size, type_name(type), oid);
        else
-               ret = hash_sha1_file(buf, size, typename(type), oid->hash);
+               ret = hash_object_file(buf, size, type_name(type), oid);
        if (re_allocated)
                free(buf);
        return ret;
@@ -1821,11 +1839,11 @@ static int index_stream_convert_blob(struct object_id *oid, int fd,
                                 get_conv_flags(flags));
 
        if (write_object)
-               ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
-                                     oid->hash);
+               ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+                                       oid);
        else
-               ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
-                                    oid->hash);
+               ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+                                      oid);
        strbuf_release(&sbuf);
        return ret;
 }
@@ -1893,7 +1911,7 @@ static int index_stream(struct object_id *oid, int fd, size_t size,
                        enum object_type type, const char *path,
                        unsigned flags)
 {
-       return index_bulk_checkin(oid->hash, fd, size, type, path, flags);
+       return index_bulk_checkin(oid, fd, size, type, path, flags);
 }
 
 int index_fd(struct object_id *oid, int fd, struct stat *st,
@@ -1939,8 +1957,8 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne
                if (strbuf_readlink(&sb, path, st->st_size))
                        return error_errno("readlink(\"%s\")", path);
                if (!(flags & HASH_WRITE_OBJECT))
-                       hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
-               else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+                       hash_object_file(sb.buf, sb.len, blob_type, oid);
+               else if (write_object_file(sb.buf, sb.len, blob_type, oid))
                        rc = error("%s: failed to insert into database", path);
                strbuf_release(&sb);
                break;
@@ -1967,14 +1985,14 @@ int read_pack_header(int fd, struct pack_header *header)
        return 0;
 }
 
-void assert_sha1_type(const unsigned char *sha1, enum object_type expect)
+void assert_oid_type(const struct object_id *oid, enum object_type expect)
 {
-       enum object_type type = sha1_object_info(sha1, NULL);
+       enum object_type type = oid_object_info(oid, NULL);
        if (type < 0)
-               die("%s is not a valid object", sha1_to_hex(sha1));
+               die("%s is not a valid object", oid_to_hex(oid));
        if (type != expect)
-               die("%s is not a valid '%s' object", sha1_to_hex(sha1),
-                   typename(expect));
+               die("%s is not a valid '%s' object", oid_to_hex(oid),
+                   type_name(expect));
 }
 
 int for_each_file_in_obj_subdir(unsigned int subdir_nr,
@@ -2125,14 +2143,14 @@ static int check_stream_sha1(git_zstream *stream,
                             const char *path,
                             const unsigned char *expected_sha1)
 {
-       git_SHA_CTX c;
+       git_hash_ctx c;
        unsigned char real_sha1[GIT_MAX_RAWSZ];
        unsigned char buf[4096];
        unsigned long total_read;
        int status = Z_OK;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, stream->total_out);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, stream->total_out);
 
        /*
         * We already read some bytes into hdr, but the ones up to the NUL
@@ -2151,7 +2169,7 @@ static int check_stream_sha1(git_zstream *stream,
                if (size - total_read < stream->avail_out)
                        stream->avail_out = size - total_read;
                status = git_inflate(stream, Z_FINISH);
-               git_SHA1_Update(&c, buf, stream->next_out - buf);
+               the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
                total_read += stream->next_out - buf;
        }
        git_inflate_end(stream);
@@ -2166,7 +2184,7 @@ static int check_stream_sha1(git_zstream *stream,
                return -1;
        }
 
-       git_SHA1_Final(real_sha1, &c);
+       the_hash_algo->final_fn(real_sha1, &c);
        if (hashcmp(expected_sha1, real_sha1)) {
                error("sha1 mismatch for %s (expected %s)", path,
                      sha1_to_hex(expected_sha1));
@@ -2177,7 +2195,7 @@ static int check_stream_sha1(git_zstream *stream,
 }
 
 int read_loose_object(const char *path,
-                     const unsigned char *expected_sha1,
+                     const struct object_id *expected_oid,
                      enum object_type *type,
                      unsigned long *size,
                      void **contents)
@@ -2186,7 +2204,7 @@ int read_loose_object(const char *path,
        void *map = NULL;
        unsigned long mapsize;
        git_zstream stream;
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
 
        *contents = NULL;
 
@@ -2209,19 +2227,19 @@ int read_loose_object(const char *path,
        }
 
        if (*type == OBJ_BLOB) {
-               if (check_stream_sha1(&stream, hdr, *size, path, expected_sha1) < 0)
+               if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
                        goto out;
        } else {
-               *contents = unpack_sha1_rest(&stream, hdr, *size, expected_sha1);
+               *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
                if (!*contents) {
                        error("unable to unpack contents of %s", path);
                        git_inflate_end(&stream);
                        goto out;
                }
-               if (check_sha1_signature(expected_sha1, *contents,
-                                        *size, typename(*type))) {
+               if (check_object_signature(expected_oid, *contents,
+                                        *size, type_name(*type))) {
                        error("sha1 mismatch for %s (expected %s)", path,
-                             sha1_to_hex(expected_sha1));
+                             oid_to_hex(expected_oid));
                        free(*contents);
                        goto out;
                }
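
As a quick illustration of the renamed, struct object_id based entry points above (a sketch only; the revision string and the surrounding function are invented):

/* Sketch only: querying and reading an object via the oid-based API. */
#include "cache.h"

static void example_show_object(const char *rev)
{
        struct object_id oid;
        struct object_info oi = OBJECT_INFO_INIT;
        enum object_type type;
        unsigned long size;
        void *buf;

        if (get_oid(rev, &oid))
                die("not a valid object name: %s", rev);
        oi.typep = &type;
        oi.sizep = &size;
        if (oid_object_info_extended(&oid, &oi, OBJECT_INFO_LOOKUP_REPLACE) < 0)
                die("unable to get object info for %s", oid_to_hex(&oid));
        printf("%s is a %s of %lu bytes\n",
               oid_to_hex(&oid), type_name(type), size);

        buf = read_object_file(&oid, &type, &size);
        if (!buf)
                die("unable to read %s", oid_to_hex(&oid));
        free(buf);
}
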
index 31c013ac7eb81ecc4fef85c9f47209c9826adcb9..5b93bf8da36939376b506f96624f568875397969 100644 (file)
@@ -152,31 +152,14 @@ static int match_sha(unsigned len, const unsigned char *a, const unsigned char *
 static void unique_in_pack(struct packed_git *p,
                           struct disambiguate_state *ds)
 {
-       uint32_t num, last, i, first = 0;
+       uint32_t num, i, first = 0;
        const struct object_id *current = NULL;
 
        if (open_pack_index(p) || !p->num_objects)
                return;
 
        num = p->num_objects;
-       last = num;
-       while (first < last) {
-               uint32_t mid = first + (last - first) / 2;
-               const unsigned char *current;
-               int cmp;
-
-               current = nth_packed_object_sha1(p, mid);
-               cmp = hashcmp(ds->bin_pfx.hash, current);
-               if (!cmp) {
-                       first = mid;
-                       break;
-               }
-               if (cmp > 0) {
-                       first = mid+1;
-                       continue;
-               }
-               last = mid;
-       }
+       bsearch_pack(&ds->bin_pfx, p, &first);
 
        /*
         * At this point, "first" is the location of the lowest object
@@ -240,7 +223,7 @@ static int finish_object_disambiguation(struct disambiguate_state *ds,
 
 static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
 {
-       int kind = sha1_object_info(oid->hash, NULL);
+       int kind = oid_object_info(oid, NULL);
        return kind == OBJ_COMMIT;
 }
 
@@ -249,7 +232,7 @@ static int disambiguate_committish_only(const struct object_id *oid, void *cb_da
        struct object *obj;
        int kind;
 
-       kind = sha1_object_info(oid->hash, NULL);
+       kind = oid_object_info(oid, NULL);
        if (kind == OBJ_COMMIT)
                return 1;
        if (kind != OBJ_TAG)
@@ -264,7 +247,7 @@ static int disambiguate_committish_only(const struct object_id *oid, void *cb_da
 
 static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
 {
-       int kind = sha1_object_info(oid->hash, NULL);
+       int kind = oid_object_info(oid, NULL);
        return kind == OBJ_TREE;
 }
 
@@ -273,7 +256,7 @@ static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_
        struct object *obj;
        int kind;
 
-       kind = sha1_object_info(oid->hash, NULL);
+       kind = oid_object_info(oid, NULL);
        if (kind == OBJ_TREE || kind == OBJ_COMMIT)
                return 1;
        if (kind != OBJ_TAG)
@@ -288,7 +271,7 @@ static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_
 
 static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
 {
-       int kind = sha1_object_info(oid->hash, NULL);
+       int kind = oid_object_info(oid, NULL);
        return kind == OBJ_BLOB;
 }
 
@@ -367,7 +350,7 @@ static int show_ambiguous_object(const struct object_id *oid, void *data)
        if (ds->fn && !ds->fn(oid, ds->cb_data))
                return 0;
 
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        if (type == OBJ_COMMIT) {
                struct commit *commit = lookup_commit(oid);
                if (commit) {
@@ -382,8 +365,8 @@ static int show_ambiguous_object(const struct object_id *oid, void *data)
        }
 
        advise("  %s %s%s",
-              find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
-              typename(type) ? typename(type) : "unknown type",
+              find_unique_abbrev(oid, DEFAULT_ABBREV),
+              type_name(type) ? type_name(type) : "unknown type",
               desc.buf);
 
        strbuf_release(&desc);
@@ -482,7 +465,7 @@ struct min_abbrev_data {
        unsigned int init_len;
        unsigned int cur_len;
        char *hex;
-       const unsigned char *hash;
+       const struct object_id *oid;
 };
 
 static inline char get_hex_char_from_oid(const struct object_id *oid,
@@ -514,50 +497,34 @@ static void find_abbrev_len_for_pack(struct packed_git *p,
                                     struct min_abbrev_data *mad)
 {
        int match = 0;
-       uint32_t num, last, first = 0;
+       uint32_t num, first = 0;
        struct object_id oid;
+       const struct object_id *mad_oid;
 
        if (open_pack_index(p) || !p->num_objects)
                return;
 
        num = p->num_objects;
-       last = num;
-       while (first < last) {
-               uint32_t mid = first + (last - first) / 2;
-               const unsigned char *current;
-               int cmp;
-
-               current = nth_packed_object_sha1(p, mid);
-               cmp = hashcmp(mad->hash, current);
-               if (!cmp) {
-                       match = 1;
-                       first = mid;
-                       break;
-               }
-               if (cmp > 0) {
-                       first = mid + 1;
-                       continue;
-               }
-               last = mid;
-       }
+       mad_oid = mad->oid;
+       match = bsearch_pack(mad_oid, p, &first);
 
        /*
         * first is now the position in the packfile where we would insert
         * mad->hash if it does not exist (or the position of mad->hash if
-        * it does exist). Hence, we consider a maximum of three objects
+        * it does exist). Hence, we consider a maximum of two objects
         * nearby for the abbreviation length.
         */
        mad->init_len = 0;
        if (!match) {
-               nth_packed_object_oid(&oid, p, first);
-               extend_abbrev_len(&oid, mad);
+               if (nth_packed_object_oid(&oid, p, first))
+                       extend_abbrev_len(&oid, mad);
        } else if (first < num - 1) {
-               nth_packed_object_oid(&oid, p, first + 1);
-               extend_abbrev_len(&oid, mad);
+               if (nth_packed_object_oid(&oid, p, first + 1))
+                       extend_abbrev_len(&oid, mad);
        }
        if (first > 0) {
-               nth_packed_object_oid(&oid, p, first - 1);
-               extend_abbrev_len(&oid, mad);
+               if (nth_packed_object_oid(&oid, p, first - 1))
+                       extend_abbrev_len(&oid, mad);
        }
        mad->init_len = mad->cur_len;
 }
@@ -570,7 +537,7 @@ static void find_abbrev_len_packed(struct min_abbrev_data *mad)
                find_abbrev_len_for_pack(p, mad);
 }
 
-int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
+int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
 {
        struct disambiguate_state ds;
        struct min_abbrev_data mad;
@@ -597,14 +564,14 @@ int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
                        len = FALLBACK_DEFAULT_ABBREV;
        }
 
-       sha1_to_hex_r(hex, sha1);
+       oid_to_hex_r(hex, oid);
        if (len == GIT_SHA1_HEXSZ || !len)
                return GIT_SHA1_HEXSZ;
 
        mad.init_len = len;
        mad.cur_len = len;
        mad.hex = hex;
-       mad.hash = sha1;
+       mad.oid = oid;
 
        find_abbrev_len_packed(&mad);
 
@@ -622,13 +589,13 @@ int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
        return mad.cur_len;
 }
 
-const char *find_unique_abbrev(const unsigned char *sha1, int len)
+const char *find_unique_abbrev(const struct object_id *oid, int len)
 {
        static int bufno;
        static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
        char *hex = hexbuffer[bufno];
        bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
-       find_unique_abbrev_r(hex, sha1, len);
+       find_unique_abbrev_r(hex, oid, len);
        return hex;
 }
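
find_unique_abbrev() keeps a small ring of four static buffers so that several results can appear in one statement without the caller freeing anything. A minimal standalone sketch of that idiom, with made-up names:

    #include <stdio.h>

    /*
     * Rotating-static-buffer idiom: a small ring of static buffers lets
     * up to four results be used in a single printf() call.  fmt_id()
     * is an illustrative stand-in, not a git function.
     */
    static const char *fmt_id(unsigned int id)
    {
            static char bufs[4][16];
            static int bufno;
            char *buf = bufs[bufno];

            bufno = (bufno + 1) % 4;
            snprintf(buf, sizeof(bufs[0]), "id-%u", id);
            return buf;
    }

    int main(void)
    {
            /* Two results in one statement are safe; a fifth live use
             * would start overwriting the oldest buffer. */
            printf("%s %s\n", fmt_id(1), fmt_id(2));
            return 0;
    }
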
 
@@ -902,8 +869,8 @@ struct object *peel_to_type(const char *name, int namelen,
                        if (name)
                                error("%.*s: expected %s type, but the object "
                                      "dereferences to %s type",
-                                     namelen, name, typename(expected_type),
-                                     typename(o->type));
+                                     namelen, name, type_name(expected_type),
+                                     type_name(o->type));
                        return NULL;
                }
        }
@@ -1530,8 +1497,7 @@ static void diagnose_invalid_oid_path(const char *prefix,
        if (is_missing_file_error(errno)) {
                char *fullname = xstrfmt("%s%s", prefix, filename);
 
-               if (!get_tree_entry(tree_oid->hash, fullname,
-                                   oid.hash, &mode)) {
+               if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
                        die("Path '%s' exists, but not '%s'.\n"
                            "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
                            fullname,
@@ -1723,8 +1689,8 @@ static int get_oid_with_context_1(const char *name,
                                        filename, oid->hash, &oc->symlink_path,
                                        &oc->mode);
                        } else {
-                               ret = get_tree_entry(tree_oid.hash, filename,
-                                                    oid->hash, &oc->mode);
+                               ret = get_tree_entry(&tree_oid, filename, oid,
+                                                    &oc->mode);
                                if (ret && only_to_die) {
                                        diagnose_invalid_oid_path(prefix,
                                                                   filename,
index a8c272927842901190b07098911ef785a458bcf3..41e1c3fd3f787e04d6e4fa9eb7c56b617f1c5fa5 100644 (file)
@@ -1,9 +1,9 @@
 /* Plumbing with collision-detecting SHA1 code */
 
-#ifdef DC_SHA1_SUBMODULE
-#include "sha1collisiondetection/lib/sha1.h"
-#elif defined(DC_SHA1_EXTERNAL)
+#ifdef DC_SHA1_EXTERNAL
 #include <sha1dc/sha1.h>
+#elif defined(DC_SHA1_SUBMODULE)
+#include "sha1collisiondetection/lib/sha1.h"
 #else
 #include "sha1dc/sha1.h"
 #endif
index 284d04d67f885d8905130e138c45cc4ff4fbdc30..3eb8ff1b43db284bddac643c270e2148114c4bbf 100644 (file)
@@ -305,17 +305,17 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce
 }
 
 void replace_index_entry_in_base(struct index_state *istate,
-                                struct cache_entry *old,
-                                struct cache_entry *new)
+                                struct cache_entry *old_entry,
+                                struct cache_entry *new_entry)
 {
-       if (old->index &&
+       if (old_entry->index &&
            istate->split_index &&
            istate->split_index->base &&
-           old->index <= istate->split_index->base->cache_nr) {
-               new->index = old->index;
-               if (old != istate->split_index->base->cache[new->index - 1])
-                       free(istate->split_index->base->cache[new->index - 1]);
-               istate->split_index->base->cache[new->index - 1] = new;
+           old_entry->index <= istate->split_index->base->cache_nr) {
+               new_entry->index = old_entry->index;
+               if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
+                       free(istate->split_index->base->cache[new_entry->index - 1]);
+               istate->split_index->base->cache[new_entry->index - 1] = new_entry;
        }
 }
 
index df91c1bda8117fe7d0f25a0aaedce79370801ce0..43d66826eb712b9a9b6872458527266bd9818146 100644 (file)
@@ -21,7 +21,7 @@ struct split_index *init_split_index(struct index_state *istate);
 void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce);
 void replace_index_entry_in_base(struct index_state *istate,
                                 struct cache_entry *old,
-                                struct cache_entry *new);
+                                struct cache_entry *new_entry);
 int read_link_extension(struct index_state *istate,
                        const void *data, unsigned long sz);
 int write_link_extension(struct strbuf *sb,
index 1df674e9194ee6d5cd5386f477745ff6639b7b65..83d05024e6718f4b9481e1782ed84117d5e7598a 100644 (file)
--- a/strbuf.c
+++ b/strbuf.c
@@ -1,5 +1,6 @@
 #include "cache.h"
 #include "refs.h"
+#include "string-list.h"
 #include "utf8.h"
 
 int starts_with(const char *str, const char *prefix)
@@ -95,6 +96,7 @@ void strbuf_trim(struct strbuf *sb)
        strbuf_rtrim(sb);
        strbuf_ltrim(sb);
 }
+
 void strbuf_rtrim(struct strbuf *sb)
 {
        while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1]))
@@ -102,6 +104,13 @@ void strbuf_rtrim(struct strbuf *sb)
        sb->buf[sb->len] = '\0';
 }
 
+void strbuf_trim_trailing_dir_sep(struct strbuf *sb)
+{
+       while (sb->len > 0 && is_dir_sep((unsigned char)sb->buf[sb->len - 1]))
+               sb->len--;
+       sb->buf[sb->len] = '\0';
+}
+
 void strbuf_ltrim(struct strbuf *sb)
 {
        char *b = sb->buf;
@@ -163,6 +172,21 @@ struct strbuf **strbuf_split_buf(const char *str, size_t slen,
        return ret;
 }
 
+void strbuf_add_separated_string_list(struct strbuf *str,
+                                     const char *sep,
+                                     struct string_list *slist)
+{
+       struct string_list_item *item;
+       int sep_needed = 0;
+
+       for_each_string_list_item(item, slist) {
+               if (sep_needed)
+                       strbuf_addstr(str, sep);
+               strbuf_addstr(str, item->string);
+               sep_needed = 1;
+       }
+}
+
 void strbuf_list_free(struct strbuf **sbs)
 {
        struct strbuf **s = sbs;
@@ -612,14 +636,18 @@ ssize_t strbuf_read_file(struct strbuf *sb, const char *path, size_t hint)
 {
        int fd;
        ssize_t len;
+       int saved_errno;
 
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        len = strbuf_read(sb, fd, hint);
+       saved_errno = errno;
        close(fd);
-       if (len < 0)
+       if (len < 0) {
+               errno = saved_errno;
                return -1;
+       }
 
        return len;
 }
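
The strbuf_read_file() change above saves errno before close(2) and restores it on the error path, so callers see the errno of the failed read rather than a value possibly clobbered by close(). A standalone sketch of the same pattern (read_one_byte() is a hypothetical helper, not part of git):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Preserve the read error across close(2), which may clobber errno. */
    static ssize_t read_one_byte(const char *path, char *out)
    {
            int fd = open(path, O_RDONLY);
            ssize_t len;
            int saved_errno;

            if (fd < 0)
                    return -1;
            len = read(fd, out, 1);
            saved_errno = errno;
            close(fd);              /* may overwrite errno */
            if (len < 0) {
                    errno = saved_errno;
                    return -1;
            }
            return len;
    }

    int main(void)
    {
            char c;

            if (read_one_byte("/no/such/file", &c) < 0)
                    perror("read_one_byte");
            return 0;
    }
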
@@ -869,12 +897,12 @@ void strbuf_addftime(struct strbuf *sb, const char *fmt, const struct tm *tm,
        strbuf_setlen(sb, sb->len + len);
 }
 
-void strbuf_add_unique_abbrev(struct strbuf *sb, const unsigned char *sha1,
+void strbuf_add_unique_abbrev(struct strbuf *sb, const struct object_id *oid,
                              int abbrev_len)
 {
        int r;
        strbuf_grow(sb, GIT_SHA1_HEXSZ + 1);
-       r = find_unique_abbrev_r(sb->buf + sb->len, sha1, abbrev_len);
+       r = find_unique_abbrev_r(sb->buf + sb->len, oid, abbrev_len);
        strbuf_setlen(sb, sb->len + r);
 }
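
strbuf_add_unique_abbrev() above follows the usual strbuf pattern of reserving space, letting the formatter write directly into the tail, and then committing the number of bytes actually produced. A tiny standalone sketch of that pattern, with a fixed-size stand-in instead of a real growable strbuf:

    #include <stdio.h>

    struct buf {
            char data[64];
            size_t len;
    };

    /* Write into the caller-provided tail and report bytes produced. */
    static size_t fmt_hex_into(char *dst, unsigned int value)
    {
            return (size_t)sprintf(dst, "%x", value);
    }

    int main(void)
    {
            struct buf b = { "", 0 };
            size_t n;

            /* write at the current tail ... */
            n = fmt_hex_into(b.data + b.len, 0xabcdef);
            /* ... then extend the recorded length by what was written */
            b.len += n;
            printf("%s (len=%zu)\n", b.data, b.len);
            return 0;
    }
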
 
index 14c8c10d66b9aaa2d8f0c109cf0dd668701cb2eb..c4de5e4588bd4326d363b9387599cd94e29d2f8f 100644 (file)
--- a/strbuf.h
+++ b/strbuf.h
@@ -1,6 +1,8 @@
 #ifndef STRBUF_H
 #define STRBUF_H
 
+struct string_list;
+
 /**
  * strbuf's are meant to be used with all the usual C string and memory
  * APIs. Given that the length of the buffer is known, it's often better to
@@ -70,6 +72,12 @@ struct strbuf {
 extern char strbuf_slopbuf[];
 #define STRBUF_INIT  { .alloc = 0, .len = 0, .buf = strbuf_slopbuf }
 
+/*
+ * Predeclare this here, since cache.h includes this file before it defines the
+ * struct.
+ */
+struct object_id;
+
 /**
  * Life Cycle Functions
  * --------------------
@@ -179,6 +187,9 @@ extern void strbuf_trim(struct strbuf *);
 extern void strbuf_rtrim(struct strbuf *);
 extern void strbuf_ltrim(struct strbuf *);
 
+/* Strip trailing directory separators */
+extern void strbuf_trim_trailing_dir_sep(struct strbuf *);
+
 /**
  * Replace the contents of the strbuf with a reencoded form.  Returns -1
  * on error, 0 on success.
@@ -528,6 +539,20 @@ static inline struct strbuf **strbuf_split(const struct strbuf *sb,
        return strbuf_split_max(sb, terminator, 0);
 }
 
+/*
+ * Adds all strings of a string list to the strbuf, separated by the given
+ * separator.  For example, if sep is
+ *   ', '
+ * and slist contains
+ *   ['element1', 'element2', ..., 'elementN'],
+ * then write:
+ *   'element1, element2, ..., elementN'
+ * to str.  If only one element, just write "element1" to str.
+ */
+extern void strbuf_add_separated_string_list(struct strbuf *str,
+                                            const char *sep,
+                                            struct string_list *slist);
+
 /**
  * Free a NULL-terminated list of strbufs (for example, the return
  * values of the strbuf_split*() functions).
@@ -539,7 +564,7 @@ extern void strbuf_list_free(struct strbuf **);
  * the strbuf `sb`.
  */
 extern void strbuf_add_unique_abbrev(struct strbuf *sb,
-                                    const unsigned char *sha1,
+                                    const struct object_id *oid,
                                     int abbrev_len);
 
 /**
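
As documented above, strbuf_add_separated_string_list() joins the list elements and emits the separator only between elements. A standalone C illustration of the same semantics, using plain arrays in place of struct strbuf and struct string_list:

    #include <stdio.h>
    #include <string.h>

    /* Join items with sep, emitting sep only between elements. */
    static void join(char *out, size_t outsz, const char *sep,
                     const char **items, size_t nr)
    {
            size_t i;
            int sep_needed = 0;

            out[0] = '\0';
            for (i = 0; i < nr; i++) {
                    if (sep_needed)
                            strncat(out, sep, outsz - strlen(out) - 1);
                    strncat(out, items[i], outsz - strlen(out) - 1);
                    sep_needed = 1;
            }
    }

    int main(void)
    {
            const char *elems[] = { "element1", "element2", "element3" };
            char buf[64];

            join(buf, sizeof(buf), ", ", elems, 3);
            printf("%s\n", buf);    /* element1, element2, element3 */
            return 0;
    }
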
index 22d27df55ebb0926967c1b1afe027d6c112820ae..7d55ba64c7551de0f3be1dfcf56cd3344ebd8f59 100644 (file)
@@ -16,7 +16,7 @@ enum input_source {
 
 typedef int (*open_istream_fn)(struct git_istream *,
                               struct object_info *,
-                              const unsigned char *,
+                              const struct object_id *,
                               enum object_type *);
 typedef int (*close_istream_fn)(struct git_istream *);
 typedef ssize_t (*read_istream_fn)(struct git_istream *, char *, size_t);
@@ -29,7 +29,7 @@ struct stream_vtbl {
 #define open_method_decl(name) \
        int open_istream_ ##name \
        (struct git_istream *st, struct object_info *oi, \
-        const unsigned char *sha1, \
+        const struct object_id *oid, \
         enum object_type *type)
 
 #define close_method_decl(name) \
@@ -107,7 +107,7 @@ ssize_t read_istream(struct git_istream *st, void *buf, size_t sz)
        return st->vtbl->read(st, buf, sz);
 }
 
-static enum input_source istream_source(const unsigned char *sha1,
+static enum input_source istream_source(const struct object_id *oid,
                                        enum object_type *type,
                                        struct object_info *oi)
 {
@@ -116,7 +116,7 @@ static enum input_source istream_source(const unsigned char *sha1,
 
        oi->typep = type;
        oi->sizep = &size;
-       status = sha1_object_info_extended(sha1, oi, 0);
+       status = oid_object_info_extended(oid, oi, 0);
        if (status < 0)
                return stream_error;
 
@@ -132,14 +132,14 @@ static enum input_source istream_source(const unsigned char *sha1,
        }
 }
 
-struct git_istream *open_istream(const unsigned char *sha1,
+struct git_istream *open_istream(const struct object_id *oid,
                                 enum object_type *type,
                                 unsigned long *size,
                                 struct stream_filter *filter)
 {
        struct git_istream *st;
        struct object_info oi = OBJECT_INFO_INIT;
-       const unsigned char *real = lookup_replace_object(sha1);
+       const struct object_id *real = lookup_replace_object(oid);
        enum input_source src = istream_source(real, type, &oi);
 
        if (src < 0)
@@ -338,7 +338,7 @@ static struct stream_vtbl loose_vtbl = {
 static open_method_decl(loose)
 {
        st->u.loose.mapped = map_sha1_file(the_repository,
-                                          sha1, &st->u.loose.mapsize);
+                                          oid->hash, &st->u.loose.mapsize);
        if (!st->u.loose.mapped)
                return -1;
        if ((unpack_sha1_header(&st->z,
@@ -489,7 +489,7 @@ static struct stream_vtbl incore_vtbl = {
 
 static open_method_decl(incore)
 {
-       st->u.incore.buf = read_sha1_file_extended(sha1, type, &st->size, 0);
+       st->u.incore.buf = read_object_file_extended(oid, type, &st->size, 0);
        st->u.incore.read_ptr = 0;
        st->vtbl = &incore_vtbl;
 
@@ -510,7 +510,7 @@ int stream_blob_to_fd(int fd, const struct object_id *oid, struct stream_filter
        ssize_t kept = 0;
        int result = -1;
 
-       st = open_istream(oid->hash, &type, &sz, filter);
+       st = open_istream(oid, &type, &sz, filter);
        if (!st) {
                if (filter)
                        free_stream_filter(filter);
index 73c1d156b352898c9b5661a3e480f579b80c5a00..32f46267710b4e88cd0fc90e1a5a6f6388361c61 100644 (file)
@@ -8,7 +8,7 @@
 /* opaque */
 struct git_istream;
 
-extern struct git_istream *open_istream(const unsigned char *, enum object_type *, unsigned long *, struct stream_filter *);
+extern struct git_istream *open_istream(const struct object_id *, enum object_type *, unsigned long *, struct stream_filter *);
 extern int close_istream(struct git_istream *);
 extern ssize_t read_istream(struct git_istream *, void *, size_t);
 
index 2aa8a1747f8586839aa3036fbbc59f6c716c6128..3f2075764feb53fc8c981aab39d16c6b191cb6d8 100644 (file)
@@ -9,7 +9,7 @@
 /*
  * submodule cache lookup structure
  * There is one shared set of 'struct submodule' entries which can be
- * looked up by their sha1 blob id of the .gitmodule file and either
+ * looked up by their sha1 blob id of the .gitmodules file and either
  * using path or name as key.
  * for_path stores submodule entries with path as key
  * for_name stores submodule entries with name as key
@@ -91,7 +91,7 @@ static void submodule_cache_clear(struct submodule_cache *cache)
        /*
         * We iterate over the name hash here to be symmetric with the
         * allocation of struct submodule entries. Each is allocated by
-        * their .gitmodule blob sha1 and submodule name.
+        * their .gitmodules blob sha1 and submodule name.
         */
        hashmap_iter_init(&cache->for_name, &iter);
        while ((entry = hashmap_iter_next(&iter)))
@@ -520,7 +520,7 @@ static const struct submodule *config_from(struct submodule_cache *cache,
        if (submodule)
                goto out;
 
-       config = read_sha1_file(oid.hash, &type, &config_size);
+       config = read_object_file(&oid, &type, &config_size);
        if (!config || type != OBJ_BLOB)
                goto out;
 
index b03e5f50458efdee9f26ac61982cf07e035e272e..9a50168b2375d9cac306a22c8be4559dde114d95 100644 (file)
@@ -541,9 +541,9 @@ static void show_submodule_header(struct diff_options *o, const char *path,
 
 output_header:
        strbuf_addf(&sb, "Submodule %s ", path);
-       strbuf_add_unique_abbrev(&sb, one->hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(&sb, one, DEFAULT_ABBREV);
        strbuf_addstr(&sb, (fast_backward || fast_forward) ? ".." : "...");
-       strbuf_add_unique_abbrev(&sb, two->hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(&sb, two, DEFAULT_ABBREV);
        if (message)
                strbuf_addf(&sb, " %s\n", message);
        else
@@ -591,7 +591,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
                struct object_id *one, struct object_id *two,
                unsigned dirty_submodule)
 {
-       const struct object_id *old = the_hash_algo->empty_tree, *new = the_hash_algo->empty_tree;
+       const struct object_id *old_oid = the_hash_algo->empty_tree, *new_oid = the_hash_algo->empty_tree;
        struct commit *left = NULL, *right = NULL;
        struct commit_list *merge_bases = NULL;
        struct child_process cp = CHILD_PROCESS_INIT;
@@ -606,9 +606,9 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
                goto done;
 
        if (left)
-               old = one;
+               old_oid = one;
        if (right)
-               new = two;
+               new_oid = two;
 
        cp.git_cmd = 1;
        cp.dir = path;
@@ -631,7 +631,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
                argv_array_pushf(&cp.args, "--dst-prefix=%s%s/",
                                 o->b_prefix, path);
        }
-       argv_array_push(&cp.args, oid_to_hex(old));
+       argv_array_push(&cp.args, oid_to_hex(old_oid));
        /*
         * If the submodule has modified content, we will diff against the
         * work tree, under the assumption that the user has asked for the
@@ -639,7 +639,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
         * haven't yet been committed to the submodule yet.
         */
        if (!(dirty_submodule & DIRTY_SUBMODULE_MODIFIED))
-               argv_array_push(&cp.args, oid_to_hex(new));
+               argv_array_push(&cp.args, oid_to_hex(new_oid));
 
        prepare_submodule_repo_env(&cp.env_array);
        if (start_command(&cp))
@@ -818,7 +818,7 @@ static int check_has_commit(const struct object_id *oid, void *data)
 {
        struct has_commit_data *cb = data;
 
-       enum object_type type = sha1_object_info(oid->hash, NULL);
+       enum object_type type = oid_object_info(oid, NULL);
 
        switch (type) {
        case OBJ_COMMIT:
@@ -832,7 +832,7 @@ static int check_has_commit(const struct object_id *oid, void *data)
                return 0;
        default:
                die(_("submodule entry '%s' (%s) is a %s, not a commit"),
-                   cb->path, oid_to_hex(oid), typename(type));
+                   cb->path, oid_to_hex(oid), type_name(type));
        }
 }
 
@@ -1579,8 +1579,8 @@ static void submodule_reset_index(const char *path)
  * pass NULL for old or new respectively.
  */
 int submodule_move_head(const char *path,
-                        const char *old,
-                        const char *new,
+                        const char *old_head,
+                        const char *new_head,
                         unsigned flags)
 {
        int ret = 0;
@@ -1601,7 +1601,7 @@ int submodule_move_head(const char *path,
        else
                error_code_ptr = NULL;
 
-       if (old && !is_submodule_populated_gently(path, error_code_ptr))
+       if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
                return 0;
 
        sub = submodule_from_path(&null_oid, path);
@@ -1609,14 +1609,14 @@ int submodule_move_head(const char *path,
        if (!sub)
                die("BUG: could not get submodule information for '%s'", path);
 
-       if (old && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
+       if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
                /* Check if the submodule has a dirty index. */
                if (submodule_has_dirty_index(sub))
                        return error(_("submodule '%s' has dirty index"), path);
        }
 
        if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
-               if (old) {
+               if (old_head) {
                        if (!submodule_uses_gitfile(path))
                                absorb_git_dir_into_superproject("", path,
                                        ABSORB_GITDIR_RECURSE_SUBMODULES);
@@ -1630,7 +1630,7 @@ int submodule_move_head(const char *path,
                        submodule_reset_index(path);
                }
 
-               if (old && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
+               if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
                        char *gitdir = xstrfmt("%s/modules/%s",
                                    get_git_common_dir(), sub->name);
                        connect_work_tree_and_git_dir(path, gitdir);
@@ -1659,9 +1659,9 @@ int submodule_move_head(const char *path,
                argv_array_push(&cp.args, "-m");
 
        if (!(flags & SUBMODULE_MOVE_HEAD_FORCE))
-               argv_array_push(&cp.args, old ? old : EMPTY_TREE_SHA1_HEX);
+               argv_array_push(&cp.args, old_head ? old_head : EMPTY_TREE_SHA1_HEX);
 
-       argv_array_push(&cp.args, new ? new : EMPTY_TREE_SHA1_HEX);
+       argv_array_push(&cp.args, new_head ? new_head : EMPTY_TREE_SHA1_HEX);
 
        if (run_command(&cp)) {
                ret = -1;
@@ -1669,7 +1669,7 @@ int submodule_move_head(const char *path,
        }
 
        if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
-               if (new) {
+               if (new_head) {
                        child_process_init(&cp);
                        /* also set the HEAD accordingly */
                        cp.git_cmd = 1;
@@ -1678,7 +1678,7 @@ int submodule_move_head(const char *path,
 
                        prepare_submodule_repo_env(&cp.env_array);
                        argv_array_pushl(&cp.args, "update-ref", "HEAD",
-                                        "--no-deref", new, NULL);
+                                        "--no-deref", new_head, NULL);
 
                        if (run_command(&cp)) {
                                ret = -1;
index b9b7ef0030a732bb81745641413f9f013588c91d..9589f131273d4f04605c8dbf7dcce05aaea606ad 100644 (file)
@@ -117,7 +117,7 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule);
 #define SUBMODULE_MOVE_HEAD_FORCE   (1<<1)
 extern int submodule_move_head(const char *path,
                               const char *old,
-                              const char *new,
+                              const char *new_head,
                               unsigned flags);
 
 /*
index b3f7b449c366c2d7d7afd12139a54b7a588f9747..24ddebfabf97be1251452fe5b6bba847597431fd 100644 (file)
--- a/t/README
+++ b/t/README
@@ -84,9 +84,10 @@ appropriately before running "make".
 
 -x::
        Turn on shell tracing (i.e., `set -x`) during the tests
-       themselves. Implies `--verbose`. Note that in non-bash shells,
-       this can cause failures in some tests which redirect and test
-       the output of shell functions. Use with caution.
+       themselves. Implies `--verbose`.
+       Ignored in test scripts that set the variable 'test_untraceable'
+       to a non-empty value, unless it's run with a Bash version
+       supporting BASH_XTRACEFD, i.e. v4.1 or later.
 
 -d::
 --debug::
@@ -452,6 +453,22 @@ Don't:
    causing the next test to start in an unexpected directory.  Do so
    inside a subshell if necessary.
 
+ - save and verify the standard error of compound commands, i.e. group
+   commands, subshells, and shell functions (except test helper
+   functions like 'test_must_fail') like this:
+
+     ( cd dir && git cmd ) 2>error &&
+     test_cmp expect error
+
+   When running the test with '-x' tracing, then the trace of commands
+   executed in the compound command will be included in standard error
+   as well, quite possibly throwing off the subsequent checks examining
+   the output.  Instead, save only the relevant git command's standard
+   error:
+
+     ( cd dir && git cmd 2>../error ) &&
+     test_cmp expect error
+
  - Break the TAP output
 
    The raw output from your test may be interpreted by a TAP harness. TAP
@@ -655,7 +672,7 @@ library for your script to use.
                test_expect_code 1 git merge "merge msg" B master
        '
 
- - test_must_fail <git-command>
+ - test_must_fail [<options>] <git-command>
 
    Run a git command and ensure it fails in a controlled way.  Use
    this instead of "! <git-command>".  When git-command dies due to a
@@ -663,11 +680,21 @@ library for your script to use.
    treats it as just another expected failure, which would let such a
    bug go unnoticed.
 
- - test_might_fail <git-command>
+   Accepts the following options:
+
+     ok=<signal-name>[,<...>]:
+       Don't treat an exit caused by the given signal as error.
+       Multiple signals can be specified as a comma separated list.
+       Currently recognized signal names are: sigpipe, success.
+       (Don't use 'success', use 'test_might_fail' instead.)
+
+ - test_might_fail [<options>] <git-command>
 
    Similar to test_must_fail, but tolerate success, too.  Use this
    instead of "<git-command> || :" to catch failures due to segv.
 
+   Accepts the same options as test_must_fail.
+
  - test_cmp <expected> <actual>
 
    Check whether the content of the <actual> file matches the
index e760256406fa9c2fe0f9b2cde0ffb97ff11c6cab..1790ceab510d53182225d3a28bbd4b4ef51abcdd 100644 (file)
@@ -5,28 +5,29 @@
  *
  * The mtime can be changed to an absolute value:
  *
- *     test-chmtime =<seconds> file...
+ *     test-tool chmtime =<seconds> file...
  *
  * Relative to the current time as returned by time(3):
  *
- *     test-chmtime =+<seconds> (or =-<seconds>) file...
+ *     test-tool chmtime =+<seconds> (or =-<seconds>) file...
  *
  * Or relative to the current mtime of the file:
  *
- *     test-chmtime <seconds> file...
- *     test-chmtime +<seconds> (or -<seconds>) file...
+ *     test-tool chmtime <seconds> file...
+ *     test-tool chmtime +<seconds> (or -<seconds>) file...
  *
  * Examples:
  *
  * To just print the mtime use --verbose and set the file mtime offset to 0:
  *
- *     test-chmtime -v +0 file
+ *     test-tool chmtime -v +0 file
  *
  * To set the mtime to current time:
  *
- *     test-chmtime =+0 file
+ *     test-tool chmtime =+0 file
  *
  */
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include <utime.h>
 
@@ -56,7 +57,7 @@ static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
        return 1;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__chmtime(int argc, const char **argv)
 {
        static int verbose;
 
index 1a7b8bd3d650fe1111c77115d2c68644b6ac9edb..214003d5b2f9bbe978d5aa4d9c9ab3f9b8a990da 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "config.h"
 #include "string-list.h"
@@ -32,7 +33,7 @@
  * Examples:
  *
  * To print the value with highest priority for key "foo.bAr Baz.rock":
- *     test-config get_value "foo.bAr Baz.rock"
+ *     test-tool config get_value "foo.bAr Baz.rock"
  *
  */
 
@@ -77,7 +78,7 @@ static int early_config_cb(const char *var, const char *value, void *vdata)
        return 0;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__config(int argc, const char **argv)
 {
        int i, val;
        const char *v;
index bb72c47df570d9c07ae4567fd6d31ea49fe49656..92c4c2313e78a305dd0da1e7a853e58e325d2f84 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 
 static int rc;
@@ -28,7 +29,7 @@ static int is_in(const char *s, int ch)
 #define LOWER "abcdefghijklmnopqrstuvwxyz"
 #define UPPER "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
 
-int cmd_main(int argc, const char **argv)
+int cmd__ctype(int argc, const char **argv)
 {
        TEST_CLASS(isdigit, DIGIT);
        TEST_CLASS(isspace, " \n\r\t");
index ac8368797073adfdf203f60d93df55a988443dbd..a0837371aba17956331ddcdb04847c6c9edeefd2 100644 (file)
@@ -1,13 +1,14 @@
+#include "test-tool.h"
 #include "cache.h"
 
 static const char *usage_msg = "\n"
-"  test-date relative [time_t]...\n"
-"  test-date show:<format> [time_t]...\n"
-"  test-date parse [date]...\n"
-"  test-date approxidate [date]...\n"
-"  test-date timestamp [date]...\n"
-"  test-date is64bit\n"
-"  test-date time_t-is64bit\n";
+"  test-tool date relative [time_t]...\n"
+"  test-tool date show:<format> [time_t]...\n"
+"  test-tool date parse [date]...\n"
+"  test-tool date approxidate [date]...\n"
+"  test-tool date timestamp [date]...\n"
+"  test-tool date is64bit\n"
+"  test-tool date time_t-is64bit\n";
 
 static void show_relative_dates(const char **argv, struct timeval *now)
 {
@@ -81,7 +82,7 @@ static void parse_approx_timestamp(const char **argv, struct timeval *now)
        }
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__date(int argc, const char **argv)
 {
        struct timeval now;
        const char *x;
index 591730adc4f3940940fdb4691da0afb81648e353..34c7259248760ff8bdea495ac5effd5de2b04ae3 100644 (file)
@@ -8,14 +8,15 @@
  * published by the Free Software Foundation.
  */
 
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "delta.h"
 #include "cache.h"
 
 static const char usage_str[] =
-       "test-delta (-d|-p) <from_file> <data_file> <out_file>";
+       "test-tool delta (-d|-p) <from_file> <data_file> <out_file>";
 
-int cmd_main(int argc, const char **argv)
+int cmd__delta(int argc, const char **argv)
 {
        int fd;
        struct stat st;
index bd1a857d5224a1a8d3c4a0d8e0ce54476d70372e..838760898b6f4ccfc0d7a3c4bd2c8653020a93ec 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 
 #if defined(GIT_WINDOWS_NATIVE)
@@ -157,7 +158,7 @@ static int cmd_dropcaches(void)
 
 #endif
 
-int cmd_main(int argc, const char **argv)
+int cmd__drop_caches(int argc, const char **argv)
 {
        cmd_sync();
        return cmd_dropcaches();
index ebf3aab22d6c197b99b0203dbe9881ccf417be86..98a4891f1dc936a486075703de319affdacb1c78 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "tree.h"
 #include "cache-tree.h"
@@ -54,7 +55,7 @@ static int dump_cache_tree(struct cache_tree *it,
        return errs;
 }
 
-int cmd_main(int ac, const char **av)
+int cmd__dump_cache_tree(int ac, const char **av)
 {
        struct index_state istate;
        struct cache_tree *another = cache_tree();
index e44430b699db732252afa6fcb06686ec89b5811d..4e2fdb5e30d1ae30b1f75b7e3ca636afa4c0e097 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "split-index.h"
 #include "ewah/ewok.h"
@@ -7,7 +8,7 @@ static void show_bit(size_t pos, void *data)
        printf(" %d", (int)pos);
 }
 
-int cmd_main(int ac, const char **av)
+int cmd__dump_split_index(int ac, const char **av)
 {
        struct split_index *si;
        int i;
index f752532ffbcd130c3c3cb25ae20da41d4f74ae68..d7c55c2355ec7067e15655e99df8077344ed78fa 100644 (file)
@@ -54,8 +54,8 @@ int cmd_main(int ac, const char **av)
                printf("no untracked cache\n");
                return 0;
        }
-       printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
-       printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
+       printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid));
+       printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid));
        printf("exclude_per_dir %s\n", uc->exclude_per_dir);
        printf("flags %08x\n", uc->dir_flags);
        if (uc->root)
index 90dc97a9d0444bc2dd8beaa1bfdd970f4280efb7..081115bf8eb7cb2e27c39f632af0e40001b5b6e9 100644 (file)
@@ -1,8 +1,9 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "object.h"
 #include "decorate.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__example_decorate(int argc, const char **argv)
 {
        struct decoration n;
        struct object_id one_oid = { {1} };
index 8d11d22d98649900b6d558cc174e2af1dbff9948..99b8dc1e2d9cdc3a0e0f464fdeee0f94733d00d1 100644 (file)
@@ -4,9 +4,10 @@
  * Copyright (C) 2007 by Nicolas Pitre, licensed under the GPL version 2.
  */
 
+#include "test-tool.h"
 #include "git-compat-util.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__genrandom(int argc, const char **argv)
 {
        unsigned long count, next = 0;
        unsigned char *c;
index 1145d5167115a761d319b1e205500e765cd090cd..23d2b172fe708f711a15613e906637cd948324ef 100644 (file)
@@ -1,5 +1,7 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "hashmap.h"
+#include "strbuf.h"
 
 struct test_entry
 {
@@ -29,11 +31,12 @@ static int test_entry_cmp(const void *cmp_data,
                return strcmp(e1->key, key ? key : e2->key);
 }
 
-static struct test_entry *alloc_test_entry(int hash, char *key, int klen,
-               char *value, int vlen)
+static struct test_entry *alloc_test_entry(unsigned int hash,
+                                          char *key, char *value)
 {
-       struct test_entry *entry = malloc(sizeof(struct test_entry) + klen
-                       + vlen + 2);
+       size_t klen = strlen(key);
+       size_t vlen = strlen(value);
+       struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2));
        hashmap_entry_init(entry, hash);
        memcpy(entry->key, key, klen + 1);
        memcpy(entry->key + klen + 1, value, vlen + 1);
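
The new alloc_test_entry() above sizes its allocation with st_add4() and xmalloc(), so an oversized key or value cannot wrap the size_t arithmetic, and it stores key and value back to back after the header. A standalone sketch of that overflow-checked sizing; checked_add() here is a stand-in for git's st_add() helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Die instead of letting the size wrap around SIZE_MAX. */
    static size_t checked_add(size_t a, size_t b)
    {
            if (SIZE_MAX - a < b) {
                    fprintf(stderr, "size_t overflow\n");
                    exit(1);
            }
            return a + b;
    }

    struct entry {
            unsigned int hash;
            char key[];             /* key, then value, each NUL-terminated */
    };

    static struct entry *alloc_entry(unsigned int hash,
                                     const char *key, const char *value)
    {
            size_t klen = strlen(key), vlen = strlen(value);
            size_t sz = checked_add(sizeof(struct entry),
                                    checked_add(klen, checked_add(vlen, 2)));
            struct entry *e = malloc(sz);

            if (!e)
                    exit(1);
            e->hash = hash;
            memcpy(e->key, key, klen + 1);
            memcpy(e->key + klen + 1, value, vlen + 1);
            return e;
    }

    int main(void)
    {
            struct entry *e = alloc_entry(0, "name", "value");

            printf("%s -> %s\n", e->key, e->key + strlen(e->key) + 1);
            free(e);
            return 0;
    }
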
@@ -75,7 +78,7 @@ static unsigned int hash(unsigned int method, unsigned int i, const char *key)
 
 /*
  * Test performance of hashmap.[ch]
- * Usage: time echo "perfhashmap method rounds" | test-hashmap
+ * Usage: time echo "perfhashmap method rounds" | test-tool hashmap
  */
 static void perf_hashmap(unsigned int method, unsigned int rounds)
 {
@@ -85,11 +88,11 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
        unsigned int *hashes;
        unsigned int i, j;
 
-       entries = malloc(TEST_SIZE * sizeof(struct test_entry *));
-       hashes = malloc(TEST_SIZE * sizeof(int));
+       ALLOC_ARRAY(entries, TEST_SIZE);
+       ALLOC_ARRAY(hashes, TEST_SIZE);
        for (i = 0; i < TEST_SIZE; i++) {
-               snprintf(buf, sizeof(buf), "%i", i);
-               entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0);
+               xsnprintf(buf, sizeof(buf), "%i", i);
+               entries[i] = alloc_test_entry(0, buf, "");
                hashes[i] = hash(method, i, entries[i]->key);
        }
 
@@ -142,9 +145,9 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
  *
  * perfhashmap method rounds -> test hashmap.[ch] performance
  */
-int cmd_main(int argc, const char **argv)
+int cmd__hashmap(int argc, const char **argv)
 {
-       char line[1024];
+       struct strbuf line = STRBUF_INIT;
        struct hashmap map;
        int icase;
 
@@ -153,44 +156,42 @@ int cmd_main(int argc, const char **argv)
        hashmap_init(&map, test_entry_cmp, &icase, 0);
 
        /* process commands from stdin */
-       while (fgets(line, sizeof(line), stdin)) {
+       while (strbuf_getline(&line, stdin) != EOF) {
                char *cmd, *p1 = NULL, *p2 = NULL;
-               int l1 = 0, l2 = 0, hash = 0;
+               unsigned int hash = 0;
                struct test_entry *entry;
 
                /* break line into command and up to two parameters */
-               cmd = strtok(line, DELIM);
+               cmd = strtok(line.buf, DELIM);
                /* ignore empty lines */
                if (!cmd || *cmd == '#')
                        continue;
 
                p1 = strtok(NULL, DELIM);
                if (p1) {
-                       l1 = strlen(p1);
                        hash = icase ? strihash(p1) : strhash(p1);
                        p2 = strtok(NULL, DELIM);
-                       if (p2)
-                               l2 = strlen(p2);
                }
 
-               if (!strcmp("hash", cmd) && l1) {
+               if (!strcmp("hash", cmd) && p1) {
 
                        /* print results of different hash functions */
-                       printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
-                                       strihash(p1), memihash(p1, l1));
+                       printf("%u %u %u %u\n",
+                              strhash(p1), memhash(p1, strlen(p1)),
+                              strihash(p1), memihash(p1, strlen(p1)));
 
-               } else if (!strcmp("add", cmd) && l1 && l2) {
+               } else if (!strcmp("add", cmd) && p1 && p2) {
 
                        /* create entry with key = p1, value = p2 */
-                       entry = alloc_test_entry(hash, p1, l1, p2, l2);
+                       entry = alloc_test_entry(hash, p1, p2);
 
                        /* add to hashmap */
                        hashmap_add(&map, entry);
 
-               } else if (!strcmp("put", cmd) && l1 && l2) {
+               } else if (!strcmp("put", cmd) && p1 && p2) {
 
                        /* create entry with key = p1, value = p2 */
-                       entry = alloc_test_entry(hash, p1, l1, p2, l2);
+                       entry = alloc_test_entry(hash, p1, p2);
 
                        /* add / replace entry */
                        entry = hashmap_put(&map, entry);
@@ -199,7 +200,7 @@ int cmd_main(int argc, const char **argv)
                        puts(entry ? get_value(entry) : "NULL");
                        free(entry);
 
-               } else if (!strcmp("get", cmd) && l1) {
+               } else if (!strcmp("get", cmd) && p1) {
 
                        /* lookup entry in hashmap */
                        entry = hashmap_get_from_hash(&map, hash, p1);
@@ -212,7 +213,7 @@ int cmd_main(int argc, const char **argv)
                                entry = hashmap_get_next(&map, entry);
                        }
 
-               } else if (!strcmp("remove", cmd) && l1) {
+               } else if (!strcmp("remove", cmd) && p1) {
 
                        /* setup static key */
                        struct hashmap_entry key;
@@ -238,7 +239,7 @@ int cmd_main(int argc, const char **argv)
                        printf("%u %u\n", map.tablesize,
                               hashmap_get_size(&map));
 
-               } else if (!strcmp("intern", cmd) && l1) {
+               } else if (!strcmp("intern", cmd) && p1) {
 
                        /* test that strintern works */
                        const char *i1 = strintern(p1);
@@ -252,7 +253,7 @@ int cmd_main(int argc, const char **argv)
                        else
                                printf("%s\n", i1);
 
-               } else if (!strcmp("perfhashmap", cmd) && l1 && l2) {
+               } else if (!strcmp("perfhashmap", cmd) && p1 && p2) {
 
                        perf_hashmap(atoi(p1), atoi(p2));
 
@@ -263,6 +264,7 @@ int cmd_main(int argc, const char **argv)
                }
        }
 
+       strbuf_release(&line);
        hashmap_free(&map, 1);
        return 0;
 }
index f569f6b7eff87227f82dbe6390fd31fb970a5fca..fcd10968cc10bdab8db146b8ba65080431e5ed24 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__index_version(int argc, const char **argv)
 {
        struct cache_header hdr;
        int version;
index 297fb01d61eded5d83d02175199efcd217376738..b99a37080d935fa27df4e372e0e8cddd9fd08b3c 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "parse-options.h"
 
@@ -184,14 +185,14 @@ static void analyze_run(void)
        }
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__lazy_init_name_hash(int argc, const char **argv)
 {
        const char *usage[] = {
-               "test-lazy-init-name-hash -d (-s | -m)",
-               "test-lazy-init-name-hash -p [-c c]",
-               "test-lazy-init-name-hash -a a [--step s] [-c c]",
-               "test-lazy-init-name-hash (-s | -m) [-c c]",
-               "test-lazy-init-name-hash -s -m [-c c]",
+               "test-tool lazy-init-name-hash -d (-s | -m)",
+               "test-tool lazy-init-name-hash -p [-c c]",
+               "test-tool lazy-init-name-hash -a a [--step s] [-c c]",
+               "test-tool lazy-init-name-hash (-s | -m) [-c c]",
+               "test-tool lazy-init-name-hash -s -m [-c c]",
                NULL
        };
        struct option options[] = {
index 356d8edef1d25524abaae0f0fcb3bf072d229c30..96857f26ac8540cf22e74aed72bbd30bc8147f00 100644 (file)
@@ -1,7 +1,8 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "tree.h"
 
-int cmd_main(int ac, const char **av)
+int cmd__match_trees(int ac, const char **av)
 {
        struct object_id hash1, hash2, shifted;
        struct tree *one, *two;
index 335cf6b6264cdaf9563736fbcfa40e7a3006a432..c5cffaa4b73ff52b2f166231ceb4a77b24ee8cef 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "mergesort.h"
 
@@ -22,7 +23,7 @@ static int compare_strings(const void *a, const void *b)
        return strcmp(x->text, y->text);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__mergesort(int argc, const char **argv)
 {
        struct line *line, *p = NULL, *lines = NULL;
        struct strbuf sb = STRBUF_INIT;
index 89d9b2f7bee05ff5c9fde31ba6798651ccee2947..229068894029338c52f5d0b9828c442658be67e5 100644 (file)
@@ -1,9 +1,10 @@
 /*
  * test-mktemp.c: code to exercise the creation of temporary files
  */
+#include "test-tool.h"
 #include "git-compat-util.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__mktemp(int argc, const char **argv)
 {
        if (argc != 2)
                usage("Expected 1 parameter defining the temporary file template");
index 06c09c6b886f843b2d3c6fdb8e00730afc7fe40b..8cb0d53840f3dc60d583fa1b72f95572d2148d11 100644 (file)
@@ -1,7 +1,8 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "thread-utils.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__online_cpus(int argc, const char **argv)
 {
        printf("%d\n", online_cpus());
        return 0;
index 2b3c5092a199835ea2e84339b473a9e29778178d..e115d44ac26e1d4fed48a5fd82632e5a93a487e6 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "string-list.h"
 
@@ -170,7 +171,7 @@ static struct test_data dirname_data[] = {
        { NULL,              NULL     }
 };
 
-int cmd_main(int argc, const char **argv)
+int cmd__path_utils(int argc, const char **argv)
 {
        if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) {
                char *buf = xmallocz(strlen(argv[2]));
diff --git a/t/helper/test-pkt-line.c b/t/helper/test-pkt-line.c
new file mode 100644 (file)
index 0000000..0f19e53
--- /dev/null
@@ -0,0 +1,64 @@
+#include "pkt-line.h"
+
+static void pack_line(const char *line)
+{
+       if (!strcmp(line, "0000") || !strcmp(line, "0000\n"))
+               packet_flush(1);
+       else if (!strcmp(line, "0001") || !strcmp(line, "0001\n"))
+               packet_delim(1);
+       else
+               packet_write_fmt(1, "%s", line);
+}
+
+static void pack(int argc, const char **argv)
+{
+       if (argc) { /* read from argv */
+               int i;
+               for (i = 0; i < argc; i++)
+                       pack_line(argv[i]);
+       } else { /* read from stdin */
+               char line[LARGE_PACKET_MAX];
+               while (fgets(line, sizeof(line), stdin)) {
+                       pack_line(line);
+               }
+       }
+}
+
+static void unpack(void)
+{
+       struct packet_reader reader;
+       packet_reader_init(&reader, 0, NULL, 0,
+                          PACKET_READ_GENTLE_ON_EOF |
+                          PACKET_READ_CHOMP_NEWLINE);
+
+       while (packet_reader_read(&reader) != PACKET_READ_EOF) {
+               switch (reader.status) {
+               case PACKET_READ_EOF:
+                       break;
+               case PACKET_READ_NORMAL:
+                       printf("%s\n", reader.line);
+                       break;
+               case PACKET_READ_FLUSH:
+                       printf("0000\n");
+                       break;
+               case PACKET_READ_DELIM:
+                       printf("0001\n");
+                       break;
+               }
+       }
+}
+
+int cmd_main(int argc, const char **argv)
+{
+       if (argc < 2)
+               die("too few arguments");
+
+       if (!strcmp(argv[1], "pack"))
+               pack(argc - 2, argv + 2);
+       else if (!strcmp(argv[1], "unpack"))
+               unpack();
+       else
+               die("invalid argument '%s'", argv[1]);
+
+       return 0;
+}
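
The helper above packs and unpacks pkt-lines. For reference, a pkt-line is a four-hex-digit length (counting the four length bytes plus the payload) followed by the payload, with "0000" acting as a flush-pkt and "0001" as a delim-pkt. A standalone sketch of the framing; the ls-refs payload is only an example:

    #include <stdio.h>
    #include <string.h>

    /* Emit one pkt-line: 4 hex digits of total length, then the payload. */
    static void emit_pkt(const char *payload)
    {
            size_t len = strlen(payload) + 4;

            printf("%04zx%s", len, payload);
    }

    int main(void)
    {
            emit_pkt("command=ls-refs\n");  /* prints "0014command=ls-refs" + LF */
            fputs("0001", stdout);          /* delim-pkt */
            fputs("0000", stdout);          /* flush-pkt */
            return 0;
    }
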
index ae58fff35972a09c08a47d2bc0abb67c96ba20eb..9807b649b14c0002bee6d10b200c27bb89a54812 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "prio-queue.h"
 
@@ -16,7 +17,7 @@ static void show(int *v)
        free(v);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__prio_queue(int argc, const char **argv)
 {
        struct prio_queue pq = { intcmp };
 
index 48255eef31a1e83d9a2bbb01670ba089a11c91f8..d674c88ba092d60366a14eaeccc9f4bc6f32660c 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__read_cache(int argc, const char **argv)
 {
        int i, cnt = 1;
        if (argc == 2)
index 7314b5943effac1fea8d39ee365a4b56fec723cb..7c4f43746e8792a368814664a70c51ca55ab43e1 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "refs.h"
 #include "worktree.h"
@@ -275,7 +276,7 @@ static struct command commands[] = {
        { NULL, NULL }
 };
 
-int cmd_main(int argc, const char **argv)
+int cmd__ref_store(int argc, const char **argv)
 {
        struct ref_store *refs;
        const char *func;
index b5ea8a97c54e1737d91dec894c1cc02e1baf64e5..10284cc56fa9f69703aa69f242edef113d6a40de 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "gettext.h"
 
@@ -36,7 +37,7 @@ static int test_regex_bug(void)
        return 0;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__regex(int argc, const char **argv)
 {
        const char *pat;
        const char *str;
@@ -47,8 +48,8 @@ int cmd_main(int argc, const char **argv)
        if (argc == 2 && !strcmp(argv[1], "--bug"))
                return test_regex_bug();
        else if (argc < 3)
-               usage("test-regex --bug\n"
-                     "test-regex <pattern> <string> [<options>]");
+               usage("test-tool regex --bug\n"
+                     "test-tool regex <pattern> <string> [<options>]");
 
        argv++;
        pat = *argv++;
index b8e6fe1d007449d30dd30ccd4319b26f151bbf23..4f8bc758213c47906d3dc1a4d4e02df2737c508d 100644 (file)
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 
+#include "test-tool.h"
 #include "cache.h"
 #include "commit.h"
 #include "diff.h"
@@ -45,7 +46,7 @@ static int run_revision_walk(void)
        return got_revision;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__revision_walking(int argc, const char **argv)
 {
        if (argc < 2)
                return 1;
index 153342e44dd11ae357cc299a9214f4c365614a5e..2cc93bb69c522d99491cd8a9e02e211b2c3df807 100644 (file)
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "run-command.h"
 #include "argv-array.h"
@@ -49,7 +50,7 @@ static int task_finished(int result,
        return 1;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__run_command(int argc, const char **argv)
 {
        struct child_process proc = CHILD_PROCESS_INIT;
        int jobs;
index d2a63bea4346fb76d38ba43508ee6e60599e41a9..d26d3e7c8b1a407e2cf40ce20750495f500d5170 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "lockfile.h"
 #include "tree.h"
@@ -5,7 +6,7 @@
 
 static struct lock_file index_lock;
 
-int cmd_main(int ac, const char **av)
+int cmd__scrap_cache_tree(int ac, const char **av)
 {
        setup_git_directory();
        hold_locked_index(&index_lock, LOCK_DIE_ON_ERROR);
index edfd52d82aeca0bb9ba2c5b2ce18bc39d27e5a34..ad5e69f9d3b0e03442f0d23b3b559bbfc163ee7b 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "sha1-array.h"
 
@@ -7,7 +8,7 @@ static int print_oid(const struct object_id *oid, void *data)
        return 0;
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__sha1_array(int argc, const char **argv)
 {
        struct oid_array array = OID_ARRAY_INIT;
        struct strbuf line = STRBUF_INIT;
index a1c13f54eca0db7d11a5df134d565171d70b8cce..1ba0675c75f0d2dab281d054b577272cd45c39f9 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int ac, const char **av)
+int cmd__sha1(int ac, const char **av)
 {
        git_SHA_CTX ctx;
        unsigned char sha1[20];
index 750b95a0a1c39b4d761b7f2f861f6df4c85ecb65..84594885c703887c3b09aceb51583b9895ac3b45 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 dd if=/dev/zero bs=1048576 count=100 2>/dev/null |
-/usr/bin/time t/helper/test-sha1 >/dev/null
+/usr/bin/time t/helper/test-tool sha1 >/dev/null
 
 while read expect cnt pfx
 do
@@ -11,7 +11,7 @@ do
                        test -z "$pfx" || echo "$pfx"
                        dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
                        perl -pe 'y/\000/g/'
-               } | ./t/helper/test-sha1 $cnt
+               } | ./t/helper/test-tool sha1 $cnt
        )
        if test "$expect" = "$actual"
        then
index b71edbd4429184b59b4bd1355d5cfb53970a1876..77ac5bc33f8eb635f78d8ba590c23bbbe4f29636 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "sigchain.h"
 
@@ -13,7 +14,7 @@ X(two)
 X(three)
 #undef X
 
-int cmd_main(int argc, const char **argv) {
+int cmd__sigchain(int argc, const char **argv) {
        sigchain_push(SIGTERM, one);
        sigchain_push(SIGTERM, two);
        sigchain_push(SIGTERM, three);
index e159c9a127f6854541ab497c8fd4b6efb9d4ea39..44e4a6d143e24cbd05b5d94404a5f5d4e732c880 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__strcmp_offset(int argc, const char **argv)
 {
        int result;
        size_t offset;
index 829ec3d7d2f58b9538bb3bbab47213eddb9e62ec..2123dda85bf10033dcbf0d801028b3705e73a507 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "string-list.h"
 
@@ -41,7 +42,7 @@ static int prefix_cb(struct string_list_item *item, void *cb_data)
        return starts_with(item->string, prefix);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__string_list(int argc, const char **argv)
 {
        if (argc == 5 && !strcmp(argv[1], "split")) {
                struct string_list list = STRING_LIST_INIT_DUP;
index f23db3b19a9911b554ca8eaf567cd0370d42af6e..5c6e4b010d61d2076f08e7077aa5ccbe6db665fc 100644 (file)
@@ -1,3 +1,4 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "config.h"
 #include "submodule-config.h"
@@ -10,7 +11,7 @@ static void die_usage(int argc, const char **argv, const char *msg)
        exit(1);
 }
 
-int cmd_main(int argc, const char **argv)
+int cmd__submodule_config(int argc, const char **argv)
 {
        const char **arg = argv;
        int my_argc = argc;
index 30c5765bfc3590421c21bc2350eed882752de3a0..92b69de635296d32d38d1f7d7589d5f8b6fbc296 100644 (file)
@@ -1,7 +1,8 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "run-command.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__subprocess(int argc, const char **argv)
 {
        struct child_process cp = CHILD_PROCESS_INIT;
        int nogit = 0;
diff --git a/t/helper/test-tool.c b/t/helper/test-tool.c
new file mode 100644 (file)
index 0000000..87066ce
--- /dev/null
@@ -0,0 +1,62 @@
+#include "git-compat-util.h"
+#include "test-tool.h"
+
+struct test_cmd {
+       const char *name;
+       int (*fn)(int argc, const char **argv);
+};
+
+static struct test_cmd cmds[] = {
+       { "chmtime", cmd__chmtime },
+       { "config", cmd__config },
+       { "ctype", cmd__ctype },
+       { "date", cmd__date },
+       { "delta", cmd__delta },
+       { "drop-caches", cmd__drop_caches },
+       { "dump-cache-tree", cmd__dump_cache_tree },
+       { "dump-split-index", cmd__dump_split_index },
+       { "example-decorate", cmd__example_decorate },
+       { "genrandom", cmd__genrandom },
+       { "hashmap", cmd__hashmap },
+       { "index-version", cmd__index_version },
+       { "lazy-init-name-hash", cmd__lazy_init_name_hash },
+       { "match-trees", cmd__match_trees },
+       { "mergesort", cmd__mergesort },
+       { "mktemp", cmd__mktemp },
+       { "online-cpus", cmd__online_cpus },
+       { "path-utils", cmd__path_utils },
+       { "prio-queue", cmd__prio_queue },
+       { "read-cache", cmd__read_cache },
+       { "ref-store", cmd__ref_store },
+       { "regex", cmd__regex },
+       { "revision-walking", cmd__revision_walking },
+       { "run-command", cmd__run_command },
+       { "scrap-cache-tree", cmd__scrap_cache_tree },
+       { "sha1-array", cmd__sha1_array },
+       { "sha1", cmd__sha1 },
+       { "sigchain", cmd__sigchain },
+       { "strcmp-offset", cmd__strcmp_offset },
+       { "string-list", cmd__string_list },
+       { "submodule-config", cmd__submodule_config },
+       { "subprocess", cmd__subprocess },
+       { "urlmatch-normalization", cmd__urlmatch_normalization },
+       { "wildmatch", cmd__wildmatch },
+       { "write-cache", cmd__write_cache },
+};
+
+int cmd_main(int argc, const char **argv)
+{
+       int i;
+
+       if (argc < 2)
+               die("I need a test name!");
+
+       for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+               if (!strcmp(cmds[i].name, argv[1])) {
+                       argv++;
+                       argc--;
+                       return cmds[i].fn(argc, argv);
+               }
+       }
+       die("There is no test named '%s'", argv[1]);
+}
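
test-tool.c above consolidates the former stand-alone test helpers into one multi-call binary: argv[1] selects the sub-command from a table and the argument vector is shifted by one before the handler runs. A minimal standalone sketch of that dispatch pattern, with an illustrative "hello" sub-command:

    #include <stdio.h>
    #include <string.h>

    static int cmd_hello(int argc, char **argv)
    {
            printf("hello %s\n", argc > 1 ? argv[1] : "world");
            return 0;
    }

    /* Name-to-handler table; argv[1] picks the entry. */
    static struct {
            const char *name;
            int (*fn)(int, char **);
    } cmds[] = {
            { "hello", cmd_hello },
    };

    int main(int argc, char **argv)
    {
            size_t i;

            if (argc < 2) {
                    fprintf(stderr, "usage: tool <command> [<args>]\n");
                    return 1;
            }
            for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
                    if (!strcmp(cmds[i].name, argv[1]))
                            return cmds[i].fn(argc - 1, argv + 1);
            fprintf(stderr, "no such command '%s'\n", argv[1]);
            return 1;
    }
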
diff --git a/t/helper/test-tool.h b/t/helper/test-tool.h
new file mode 100644 (file)
index 0000000..7116ddf
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __TEST_TOOL_H__
+#define __TEST_TOOL_H__
+
+int cmd__chmtime(int argc, const char **argv);
+int cmd__config(int argc, const char **argv);
+int cmd__ctype(int argc, const char **argv);
+int cmd__date(int argc, const char **argv);
+int cmd__delta(int argc, const char **argv);
+int cmd__drop_caches(int argc, const char **argv);
+int cmd__dump_cache_tree(int argc, const char **argv);
+int cmd__dump_split_index(int argc, const char **argv);
+int cmd__example_decorate(int argc, const char **argv);
+int cmd__genrandom(int argc, const char **argv);
+int cmd__hashmap(int argc, const char **argv);
+int cmd__index_version(int argc, const char **argv);
+int cmd__lazy_init_name_hash(int argc, const char **argv);
+int cmd__match_trees(int argc, const char **argv);
+int cmd__mergesort(int argc, const char **argv);
+int cmd__mktemp(int argc, const char **argv);
+int cmd__online_cpus(int argc, const char **argv);
+int cmd__path_utils(int argc, const char **argv);
+int cmd__prio_queue(int argc, const char **argv);
+int cmd__read_cache(int argc, const char **argv);
+int cmd__ref_store(int argc, const char **argv);
+int cmd__regex(int argc, const char **argv);
+int cmd__revision_walking(int argc, const char **argv);
+int cmd__run_command(int argc, const char **argv);
+int cmd__scrap_cache_tree(int argc, const char **argv);
+int cmd__sha1_array(int argc, const char **argv);
+int cmd__sha1(int argc, const char **argv);
+int cmd__sigchain(int argc, const char **argv);
+int cmd__strcmp_offset(int argc, const char **argv);
+int cmd__string_list(int argc, const char **argv);
+int cmd__submodule_config(int argc, const char **argv);
+int cmd__subprocess(int argc, const char **argv);
+int cmd__urlmatch_normalization(int argc, const char **argv);
+int cmd__wildmatch(int argc, const char **argv);
+int cmd__write_cache(int argc, const char **argv);
+
+#endif
index 49b6e836be257c0689601bf17138439cff0d61a0..8f4d67e646953c5b094b55ec8112f6ba3b9b9024 100644 (file)
@@ -1,9 +1,10 @@
+#include "test-tool.h"
 #include "git-compat-util.h"
 #include "urlmatch.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__urlmatch_normalization(int argc, const char **argv)
 {
-       const char usage[] = "test-urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
+       const char usage[] = "test-tool urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
        char *url1, *url2;
        int opt_p = 0, opt_l = 0;
 
index 921d7b3e7ea20efe85f15beb7c75b8a5263267fb..2c103d1824cfc7f035aea4a6453c9706f9b4491e 100644 (file)
@@ -1,6 +1,7 @@
+#include "test-tool.h"
 #include "cache.h"
 
-int cmd_main(int argc, const char **argv)
+int cmd__wildmatch(int argc, const char **argv)
 {
        int i;
        for (i = 2; i < argc; i++) {
@@ -16,6 +17,8 @@ int cmd_main(int argc, const char **argv)
                return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD);
        else if (!strcmp(argv[1], "pathmatch"))
                return !!wildmatch(argv[3], argv[2], 0);
+       else if (!strcmp(argv[1], "ipathmatch"))
+               return !!wildmatch(argv[3], argv[2], WM_CASEFOLD);
        else
                return 1;
 }
index b7ee0396692b6143e852af16695dd5800c0044be..017dc303800d9ba385a9836d85f81445c6b7e8b7 100644 (file)
@@ -1,9 +1,10 @@
+#include "test-tool.h"
 #include "cache.h"
 #include "lockfile.h"
 
 static struct lock_file index_lock;
 
-int cmd_main(int argc, const char **argv)
+int cmd__write_cache(int argc, const char **argv)
 {
        int i, cnt = 1, lockfd;
        if (argc == 2)
index 54fd5a6ca02757f77004d0f8babfcc6d616162c0..c27599474cf2f272b53e2e76997e5e38af0fe647 100644 (file)
@@ -39,7 +39,7 @@ native_path () {
        then
                path=$(cygpath --windows "$path")
        else
-               path=$(test-path-utils real_path "$path")
+               path=$(test-tool path-utils real_path "$path")
        fi &&
        echo "$path"
 }
index 4c1f81f1678d83ab8b9ee7704d400bdd47ea70f7..a8130f9119d629462efb6b52f91890c0352e4e85 100644 (file)
@@ -49,7 +49,7 @@ rawsvnrepo="$svnrepo"
 svnrepo="file://$svnrepo"
 
 poke() {
-       test-chmtime +1 "$1"
+       test-tool chmtime +1 "$1"
 }
 
 # We need this, because we should pass empty configuration directory to
index 75098465716512a3373e64d51a5cdf848e9f1101..501078249dc7d5d5b27dd6b75a22086767a0eddf 100644 (file)
@@ -85,7 +85,7 @@ pack_obj () {
 
 # Compute and append pack trailer to "$1"
 pack_trailer () {
-       test-sha1 -b <"$1" >trailer.tmp &&
+       test-tool sha1 -b <"$1" >trailer.tmp &&
        cat trailer.tmp >>"$1" &&
        rm -f trailer.tmp
 }
index cd220e378e201fc5bfbaf9c78b0a3183ebf12d11..e3809dcead1818400829b27721cf8197de71432b 100644 (file)
@@ -9,8 +9,8 @@ test_terminal () {
                echo >&4 "test_terminal: need to declare TTY prerequisite"
                return 127
        fi
-       perl "$TEST_DIRECTORY"/test-terminal.perl "$@"
-}
+       perl "$TEST_DIRECTORY"/test-terminal.perl "$@" 2>&7
+} 7>&2 2>&4
 
 test_lazy_prereq TTY '
        test_have_prereq PERL &&
index 3a0917fa61ef6e630ca4dc2bb28c2af671f98804..48637ef64bb658098d8bc1d0f00eba87f7daa681 100755 (executable)
@@ -36,7 +36,8 @@ sub format_times {
        return $out;
 }
 
-my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, $codespeed);
+my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
+    $codespeed, $sortby, $subsection, $reponame);
 while (scalar @ARGV) {
        my $arg = $ARGV[0];
        my $dir;
@@ -45,6 +46,36 @@ sub format_times {
                shift @ARGV;
                next;
        }
+       if ($arg =~ /--sort-by(?:=(.*))?/) {
+               shift @ARGV;
+               if (defined $1) {
+                       $sortby = $1;
+               } else {
+                       $sortby = shift @ARGV;
+                       if (! defined $sortby) {
+                               die "'--sort-by' requires an argument";
+                       }
+               }
+               next;
+       }
+       if ($arg eq "--subsection") {
+               shift @ARGV;
+               $subsection = $ARGV[0];
+               shift @ARGV;
+               if (! $subsection) {
+                       die "empty subsection";
+               }
+               next;
+       }
+       if ($arg eq "--reponame") {
+               shift @ARGV;
+               $reponame = $ARGV[0];
+               shift @ARGV;
+               if (! $reponame) {
+                       die "empty reponame";
+               }
+               next;
+       }
        last if -f $arg or $arg eq "--";
        if (! -d $arg) {
                my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
@@ -76,10 +107,15 @@ sub format_times {
 }
 
 my $resultsdir = "test-results";
-my $results_section = "";
-if (exists $ENV{GIT_PERF_SUBSECTION} and $ENV{GIT_PERF_SUBSECTION} ne "") {
-       $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION};
-       $results_section = $ENV{GIT_PERF_SUBSECTION};
+
+if (! $subsection and
+    exists $ENV{GIT_PERF_SUBSECTION} and
+    $ENV{GIT_PERF_SUBSECTION} ne "") {
+       $subsection = $ENV{GIT_PERF_SUBSECTION};
+}
+
+if ($subsection) {
+       $resultsdir .= "/" . $subsection;
 }
 
 my @subtests;
@@ -123,6 +159,11 @@ sub have_slash {
        return 0;
 }
 
+sub display_dir {
+       my ($d) = @_;
+       return exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d};
+}
+
 sub print_default_results {
        my %descrs;
        my $descrlen = 4; # "Test"
@@ -144,8 +185,7 @@ sub print_default_results {
        my %times;
        my @colwidth = ((0)x@dirs);
        for my $i (0..$#dirs) {
-               my $d = $dirs[$i];
-               my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
+               my $w = length display_dir($dirs[$i]);
                $colwidth[$i] = $w if $w > $colwidth[$i];
        }
        for my $t (@subtests) {
@@ -164,8 +204,7 @@ sub print_default_results {
 
        printf "%-${descrlen}s", "Test";
        for my $i (0..$#dirs) {
-               my $d = $dirs[$i];
-               printf "   %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
+               printf "   %-$colwidth[$i]s", display_dir($dirs[$i]);
        }
        print "\n";
        print "-"x$totalwidth, "\n";
@@ -182,20 +221,65 @@ sub print_default_results {
        }
 }
 
+sub print_sorted_results {
+       my ($sortby) = @_;
+
+       if ($sortby ne "regression") {
+               die "only 'regression' is supported as '--sort-by' argument";
+       }
+
+       my @evolutions;
+       for my $t (@subtests) {
+               my ($prevr, $prevu, $prevs, $prevrev);
+               for my $i (0..$#dirs) {
+                       my $d = $dirs[$i];
+                       my ($r, $u, $s) = get_times("$resultsdir/$prefixes{$d}$t.times");
+                       if ($i > 0 and defined $r and defined $prevr and $prevr > 0) {
+                               my $percent = 100.0 * ($r - $prevr) / $prevr;
+                               push @evolutions, { "percent"  => $percent,
+                                                   "test"     => $t,
+                                                   "prevrev"  => $prevrev,
+                                                   "rev"      => $d,
+                                                   "prevr"    => $prevr,
+                                                   "r"        => $r,
+                                                   "prevu"    => $prevu,
+                                                   "u"        => $u,
+                                                   "prevs"    => $prevs,
+                                                   "s"        => $s};
+                       }
+                       ($prevr, $prevu, $prevs, $prevrev) = ($r, $u, $s, $d);
+               }
+       }
+
+       my @sorted_evolutions = sort { $b->{percent} <=> $a->{percent} } @evolutions;
+
+       for my $e (@sorted_evolutions) {
+               printf "%+.1f%%", $e->{percent};
+               print " " . $e->{test};
+               print " " . format_times($e->{prevr}, $e->{prevu}, $e->{prevs});
+               print " " . format_times($e->{r}, $e->{u}, $e->{s});
+               print " " . display_dir($e->{prevrev});
+               print " " . display_dir($e->{rev});
+               print "\n";
+       }
+}
+
 sub print_codespeed_results {
-       my ($results_section) = @_;
+       my ($subsection) = @_;
 
        my $project = "Git";
 
        my $executable = `uname -s -m`;
        chomp $executable;
 
-       if ($results_section ne "") {
-               $executable .= ", " . $results_section;
+       if ($subsection) {
+               $executable .= ", " . $subsection;
        }
 
        my $environment;
-       if (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
+       if ($reponame) {
+               $environment = $reponame;
+       } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
                $environment = $ENV{GIT_PERF_REPO_NAME};
        } elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") {
                $environment = $ENV{GIT_TEST_INSTALLED};
@@ -227,13 +311,15 @@ sub print_codespeed_results {
                }
        }
 
-       print to_json(\@data, {utf8 => 1, pretty => 1}), "\n";
+       print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
 }
 
 binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
 
 if ($codespeed) {
-       print_codespeed_results($results_section);
+       print_codespeed_results($subsection);
+} elsif (defined $sortby) {
+       print_sorted_results($sortby);
 } else {
        print_default_results();
 }
index 9180ae9343a03824653e087aaa766b9d33e50a94..cdd105a5945239850a12479ff1138a5428e3b6d6 100755 (executable)
@@ -8,7 +8,7 @@ test_perf_default_repo
 
 count=1000
 test_perf "read_cache/discard_cache $count times" "
-       test-read-cache $count
+       test-tool read-cache $count
 "
 
 test_done
index 8de5a98cfc6c513bb35cd8355f3e831ab3a8007d..1afc08fe7f1990cdf7143f0dfd3cfaa66628ca73 100755 (executable)
@@ -7,8 +7,8 @@ test_perf_large_repo
 test_checkout_worktree
 
 test_expect_success 'verify both methods build the same hashmaps' '
-       test-lazy-init-name-hash --dump --single >out.single &&
-       if test-lazy-init-name-hash --dump --multi >out.multi
+       test-tool lazy-init-name-hash --dump --single >out.single &&
+       if test-tool lazy-init-name-hash --dump --multi >out.multi
        then
                test_set_prereq REPO_BIG_ENOUGH_FOR_MULTI &&
                sort <out.single >sorted.single &&
@@ -46,11 +46,11 @@ test_expect_success 'calibrate' '
 '
 
 test_perf "single-threaded, $desc" "
-       test-lazy-init-name-hash --single --count=$count
+       test-tool lazy-init-name-hash --single --count=$count
 "
 
 test_perf REPO_BIG_ENOUGH_FOR_MULTI "multi-threaded, $desc" "
-       test-lazy-init-name-hash --multi --count=$count
+       test-tool lazy-init-name-hash --multi --count=$count
 "
 
 test_done
index 261fe92fd93aaf65101dfe4d1b87741453985cbe..09595264f09fa4b1dc28b3e5d2a3d6482d998bcc 100755 (executable)
@@ -23,7 +23,7 @@ test_expect_success "setup repo" '
 
 count=3
 test_perf "write_locked_index $count times ($nr_files files)" "
-       test-write-cache $count
+       test-tool write-cache $count
 "
 
 test_done
index 7c9a35a646a6406e5c284a65748863a93098c2af..6e924f5fa3f900b4b82a55f282e810572a62f20b 100755 (executable)
@@ -16,7 +16,7 @@ test_perf 'sort(1)' '
 '
 
 test_perf 'string_list_sort()' '
-       test-string-list sort <unsorted >actual
+       test-tool string-list sort <unsorted >actual
 '
 
 test_expect_success 'string_list_sort() sorts like sort(1)' '
index 65e145c02d9eafc5a43d56c1fb30fef7be63d704..def7ecdbc786c6472e5b7909a97650a1034cecb2 100755 (executable)
@@ -118,7 +118,7 @@ test_expect_success "setup for fsmonitor" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -126,7 +126,7 @@ test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -134,7 +134,7 @@ test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uall (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -148,7 +148,7 @@ test_expect_success "setup without fsmonitor" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -156,7 +156,7 @@ test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
@@ -164,7 +164,7 @@ test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
 '
 
 if test -n "$GIT_PERF_7519_DROP_CACHE"; then
-       test-drop-caches
+       test-tool drop-caches
 fi
 
 test_perf "status -uall (fsmonitor=$INTEGRATION_SCRIPT)" '
index 1a100d6134722b381bba9744f3ed01af3e4d3d8f..213da5d6b9437b7db7f3e5b824afea1df465d934 100755 (executable)
@@ -106,7 +106,6 @@ get_var_from_env_or_config () {
        conf_sec="$2"
        conf_var="$3"
        conf_opts="$4" # optional
-       # $5 can be set to a default value
 
        # Do nothing if the env variable is already set
        eval "test -z \"\${$env_var+x}\"" || return
@@ -122,13 +121,12 @@ get_var_from_env_or_config () {
        fi
        var="$conf_sec.$conf_var"
        conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") &&
-       eval "$env_var=\"$conf_value\"" && return
-
-       test -n "${5+x}" && eval "$env_var=\"$5\""
+       eval "$env_var=\"$conf_value\""
 }
 
 run_subsection () {
-       get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" "--int" 3
+       get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" "--int"
+       : ${GIT_PERF_REPEAT_COUNT:=3}
        export GIT_PERF_REPEAT_COUNT
 
        get_var_from_env_or_config "GIT_PERF_DIRS_OR_REVS" "perf" "dirsOrRevs"
index 9670e8cbe6cb9a9faa3519b0f11dc16713496188..3691023d510a0d97bf1390b781afe1ac9fa270f4 100755 (executable)
@@ -10,15 +10,6 @@ objpath() {
        echo "$1" | sed -e 's|\(..\)|\1/|'
 }
 
-objck() {
-       p=$(objpath "$1")
-       if test ! -f "$REAL/objects/$p"
-       then
-               echo "Object not found: $REAL/objects/$p"
-               false
-       fi
-}
-
 test_expect_success 'initial setup' '
        REAL="$(pwd)/.real" &&
        mv .git "$REAL"
@@ -26,30 +17,14 @@ test_expect_success 'initial setup' '
 
 test_expect_success 'bad setup: invalid .git file format' '
        echo "gitdir $REAL" >.git &&
-       if git rev-parse 2>.err
-       then
-               echo "git rev-parse accepted an invalid .git file"
-               false
-       fi &&
-       if ! grep "Invalid gitfile format" .err
-       then
-               echo "git rev-parse returned wrong error"
-               false
-       fi
+       test_must_fail git rev-parse 2>.err &&
+       test_i18ngrep "invalid gitfile format" .err
 '
 
 test_expect_success 'bad setup: invalid .git file path' '
        echo "gitdir: $REAL.not" >.git &&
-       if git rev-parse 2>.err
-       then
-               echo "git rev-parse accepted an invalid .git file path"
-               false
-       fi &&
-       if ! grep "Not a git repository" .err
-       then
-               echo "git rev-parse returned wrong error"
-               false
-       fi
+       test_must_fail git rev-parse 2>.err &&
+       test_i18ngrep "not a git repository" .err
 '
 
 test_expect_success 'final setup + check rev-parse --git-dir' '
@@ -60,7 +35,7 @@ test_expect_success 'final setup + check rev-parse --git-dir' '
 test_expect_success 'check hash-object' '
        echo "foo" >bar &&
        SHA=$(cat bar | git hash-object -w --stdin) &&
-       objck $SHA
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check cat-file' '
@@ -69,29 +44,21 @@ test_expect_success 'check cat-file' '
 '
 
 test_expect_success 'check update-index' '
-       if test -f "$REAL/index"
-       then
-               echo "Hmm, $REAL/index exists?"
-               false
-       fi &&
+       test_path_is_missing "$REAL/index" &&
        rm -f "$REAL/objects/$(objpath $SHA)" &&
        git update-index --add bar &&
-       if ! test -f "$REAL/index"
-       then
-               echo "$REAL/index not found"
-               false
-       fi &&
-       objck $SHA
+       test_path_is_file "$REAL/index" &&
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check write-tree' '
        SHA=$(git write-tree) &&
-       objck $SHA
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check commit-tree' '
        SHA=$(echo "commit bar" | git commit-tree $SHA) &&
-       objck $SHA
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check rev-list' '
index 46042f1f1338f628d5256f0e932a4037e98b34ab..4c214bd11c48859f0c4e64dbbe2d783e744eb1d1 100755 (executable)
@@ -10,7 +10,7 @@ one
 EOF
 
 test_expect_success 'sigchain works' '
-       { test-sigchain >actual; ret=$?; } &&
+       { test-tool sigchain >actual; ret=$?; } &&
        {
                # Signal death by raise() on Windows acts like exit(3),
                # regardless of the signal number. So we must allow that
@@ -24,7 +24,7 @@ test_expect_success 'sigchain works' '
 test_expect_success !MINGW 'signals are propagated using shell convention' '
        # we use exec here to avoid any sub-shell interpretation
        # of the exit code
-       git config alias.sigterm "!exec test-sigchain" &&
+       git config alias.sigterm "!exec test-tool sigchain" &&
        test_expect_code 143 git sigterm
 '
 
@@ -36,7 +36,7 @@ large_git () {
 }
 
 test_expect_success 'create blob' '
-       test-genrandom foo 16384 >file &&
+       test-tool genrandom foo 16384 >file &&
        git add file
 '
 
index 7ac9466d5055e02179467fa9e41004bbc89df6dc..64ff86df8eb02aa635af1bad0b6d1f31578f222a 100755 (executable)
@@ -10,7 +10,7 @@ check_relative() {
        t=$(($TEST_DATE_NOW - $1))
        echo "$t -> $2" >expect
        test_expect_${3:-success} "relative date ($2)" "
-       test-date relative $t >actual &&
+       test-tool date relative $t >actual &&
        test_i18ncmp expect actual
        "
 }
@@ -35,7 +35,7 @@ check_show () {
        zone=$5
        test_expect_success $prereqs "show date ($format:$time)" '
                echo "$time -> $expect" >expect &&
-               TZ=${zone:-$TZ} test-date show:"$format" "$time" >actual &&
+               TZ=${zone:-$TZ} test-tool date show:"$format" "$time" >actual &&
                test_cmp expect actual
        '
 }
@@ -71,7 +71,7 @@ check_show iso-local "$FUTURE" "2152-06-19 22:24:56 +0000" TIME_IS_64BIT,TIME_T_
 check_parse() {
        echo "$1 -> $2" >expect
        test_expect_${4:-success} "parse date ($1${3:+ TZ=$3})" "
-       TZ=${3:-$TZ} test-date parse '$1' >actual &&
+       TZ=${3:-$TZ} test-tool date parse '$1' >actual &&
        test_cmp expect actual
        "
 }
@@ -92,7 +92,7 @@ check_parse '2008-02-14 20:30:45' '2008-02-14 20:30:45 -0500' EST5
 check_approxidate() {
        echo "$1 -> $2 +0000" >expect
        test_expect_${3:-success} "parse approxidate ($1)" "
-       test-date approxidate '$1' >actual &&
+       test-tool date approxidate '$1' >actual &&
        test_cmp expect actual
        "
 }
index d27f438bf410d06f37ec76a6ce040316218a8138..c03f155a357446338fa831e061dc58d8e5f4c657 100755 (executable)
@@ -307,7 +307,7 @@ test_expect_success_multi 'needs work tree' '' '
                cd .git &&
                test_check_ignore "foo" 128
        ) &&
-       stderr_contains "fatal: This operation must be run in a work tree"
+       stderr_contains "fatal: this operation must be run in a work tree"
 '
 
 ############################################################################
@@ -775,6 +775,26 @@ test_expect_success PIPE 'streaming support for --stdin' '
        echo "$response" | grep "^::    two"
 '
 
+test_expect_success 'existing file and directory' '
+       test_when_finished "rm one" &&
+       test_when_finished "rmdir top-level-dir" &&
+       >one &&
+       mkdir top-level-dir &&
+       git check-ignore one top-level-dir >actual &&
+       grep one actual &&
+       grep top-level-dir actual
+'
+
+test_expect_success 'existing directory and file' '
+       test_when_finished "rm one" &&
+       test_when_finished "rmdir top-level-dir" &&
+       >one &&
+       mkdir top-level-dir &&
+       git check-ignore top-level-dir one >actual &&
+       grep one actual &&
+       grep top-level-dir actual
+'
+
 ############################################################################
 #
 # test whitespace handling
index 94045c3fad18d94e0e83acf757594cede066134b..e56dfce6680298c133bc2b2de12d9492fda66b7a 100755 (executable)
@@ -17,7 +17,7 @@ cat >expect <<'EOF'
 10
 EOF
 test_expect_success 'basic ordering' '
-       test-prio-queue 2 6 3 10 9 5 7 4 5 8 1 dump >actual &&
+       test-tool prio-queue 2 6 3 10 9 5 7 4 5 8 1 dump >actual &&
        test_cmp expect actual
 '
 
@@ -30,7 +30,7 @@ cat >expect <<'EOF'
 6
 EOF
 test_expect_success 'mixed put and get' '
-       test-prio-queue 6 2 4 get 5 3 get get 1 dump >actual &&
+       test-tool prio-queue 6 2 4 get 5 3 get get 1 dump >actual &&
        test_cmp expect actual
 '
 
@@ -43,7 +43,7 @@ NULL
 NULL
 EOF
 test_expect_success 'notice empty queue' '
-       test-prio-queue 1 2 get get get 1 2 get get get >actual &&
+       test-tool prio-queue 1 2 get get get 1 2 get get get >actual &&
        test_cmp expect actual
 '
 
index 9c217d948c14dfd75f73e27a0911fd194eb11bce..3f1f505e8937f391666a1b7e6d9b972a5f146974 100755 (executable)
@@ -4,7 +4,7 @@ test_description='test hashmap and string hash functions'
 . ./test-lib.sh
 
 test_hashmap() {
-       echo "$1" | test-hashmap $3 > actual &&
+       echo "$1" | test-tool hashmap $3 > actual &&
        echo "$2" > expect &&
        test_cmp expect actual
 }
@@ -232,7 +232,7 @@ test_expect_success 'grow / shrink' '
        echo value40 >> expect &&
        echo size >> in &&
        echo 64 39 >> expect &&
-       cat in | test-hashmap > out &&
+       cat in | test-tool hashmap > out &&
        test_cmp expect out
 
 '
index 6d655cb161b2ed39eda97bb4acb54d1cf74a826d..419f31a8f7d4cb20041f054d25ddb25c7d43e3fb 100755 (executable)
@@ -11,7 +11,7 @@ then
 fi
 
 test_expect_success 'test-sha1 detects shattered pdf' '
-       test_must_fail test-sha1 <"$TEST_DATA/shattered-1.pdf" 2>err &&
+       test_must_fail test-tool sha1 <"$TEST_DATA/shattered-1.pdf" 2>err &&
        test_i18ngrep collision err &&
        grep 38762cf7f55934b34d179ae6a4c80cadccbb7f0a err
 '
index 46f8e583c37da7d03d715ea5cb1a4ee5bbe0ca28..9479a4aaabc1a4187fc702f864642ac493508bb9 100755 (executable)
@@ -19,7 +19,7 @@ write_script rot13-filter.pl "$PERL_PATH" \
 generate_random_characters () {
        LEN=$1
        NAME=$2
-       test-genrandom some-seed $LEN |
+       test-tool genrandom some-seed $LEN |
                perl -pe "s/./chr((ord($&) % 26) + ord('a'))/sge" >"$TEST_ROOT/$NAME"
 }
 
@@ -267,7 +267,7 @@ test_expect_success 'filtering large input to small output should use little mem
 '
 
 test_expect_success 'filter that does not read is fine' '
-       test-genrandom foo $((128 * 1024 + 1)) >big &&
+       test-tool genrandom foo $((128 * 1024 + 1)) >big &&
        echo "big filter=epipe" >.gitattributes &&
        test_config filter.epipe.clean "echo xyzzy" &&
        git add big &&
index 0c2fc81d7b0fa401db58c41cd2fed4469e80b058..04d474c84fd69121c686f5ca5adc40ce081f0e9d 100755 (executable)
@@ -291,7 +291,7 @@ test_expect_success 'OPT_CALLBACK() and OPT_BIT() work' '
 test_expect_success 'OPT_CALLBACK() and callback errors work' '
        test_must_fail test-parse-options --no-length >output 2>output.err &&
        test_i18ncmp expect output &&
-       test_i18ncmp expect.err output.err
+       test_must_be_empty output.err
 '
 
 cat >expect <<\EOF
diff --git a/t/t0041-usage.sh b/t/t0041-usage.sh
new file mode 100755 (executable)
index 0000000..5b927b7
--- /dev/null
@@ -0,0 +1,107 @@
+#!/bin/sh
+
+test_description='Test command behavior when given an invalid argument value'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+       test_commit "v1.0"
+'
+
+test_expect_success 'tag --contains <existent_tag>' '
+       git tag --contains "v1.0" >actual 2>actual.err &&
+       grep "v1.0" actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'tag --contains <inexistent_tag>' '
+       test_must_fail git tag --contains "notag" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'tag --no-contains <existent_tag>' '
+       git tag --no-contains "v1.0" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'tag --no-contains <inexistent_tag>' '
+       test_must_fail git tag --no-contains "notag" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'tag usage error' '
+       test_must_fail git tag --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_expect_success 'branch --contains <existent_commit>' '
+       git branch --contains "master" >actual 2>actual.err &&
+       test_i18ngrep "master" actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'branch --contains <inexistent_commit>' '
+       test_must_fail git branch --contains "nocommit" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'branch --no-contains <existent_commit>' '
+       git branch --no-contains "master" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'branch --no-contains <inexistent_commit>' '
+       test_must_fail git branch --no-contains "nocommit" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'branch usage error' '
+       test_must_fail git branch --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_expect_success 'for-each-ref --contains <existent_object>' '
+       git for-each-ref --contains "master" >actual 2>actual.err &&
+       test_line_count = 2 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'for-each-ref --contains <inexistent_object>' '
+       test_must_fail git for-each-ref --contains "noobject" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'for-each-ref --no-contains <existent_object>' '
+       git for-each-ref --no-contains "master" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'for-each-ref --no-contains <inexistent_object>' '
+       test_must_fail git for-each-ref --no-contains "noobject" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'for-each-ref usage error' '
+       test_must_fail git for-each-ref --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_done
index b29d749bb7b33406b2d433d96c35d252e305eed0..192c94eccd13c3b251cfc6910ad6ef175312ea13 100755 (executable)
@@ -80,7 +80,21 @@ test_expect_success 'merge (case change)' '
        git merge topic
 '
 
-
+test_expect_success CASE_INSENSITIVE_FS 'add directory (with different case)' '
+       git reset --hard initial &&
+       mkdir -p dir1/dir2 &&
+       echo >dir1/dir2/a &&
+       echo >dir1/dir2/b &&
+       git add dir1/dir2/a &&
+       git add dir1/DIR2/b &&
+       git ls-files >actual &&
+       cat >expected <<-\EOF &&
+               camelcase
+               dir1/dir2/a
+               dir1/dir2/b
+       EOF
+       test_cmp expected actual
+'
 
 test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' '
        git reset --hard initial &&
index 7ea2bb515bd80cc026a18dbfdf4a66cb77d27f20..f46e3c4995509f62f954231f53321f1d24756eff 100755 (executable)
@@ -8,15 +8,15 @@ test_description='Test various path utilities'
 . ./test-lib.sh
 
 norm_path() {
-       expected=$(test-path-utils print_path "$2")
+       expected=$(test-tool path-utils print_path "$2")
        test_expect_success $3 "normalize path: $1 => $2" \
-       "test \"\$(test-path-utils normalize_path_copy '$1')\" = '$expected'"
+       "test \"\$(test-tool path-utils normalize_path_copy '$1')\" = '$expected'"
 }
 
 relative_path() {
-       expected=$(test-path-utils print_path "$3")
+       expected=$(test-tool path-utils print_path "$3")
        test_expect_success $4 "relative path: $1 $2 => $3" \
-       "test \"\$(test-path-utils relative_path '$1' '$2')\" = '$expected'"
+       "test \"\$(test-tool path-utils relative_path '$1' '$2')\" = '$expected'"
 }
 
 test_submodule_relative_url() {
@@ -37,7 +37,7 @@ test_git_path() {
 # On Windows, we are using MSYS's bash, which mangles the paths.
 # Absolute paths are anchored at the MSYS installation directory,
 # which means that the path / accounts for this many characters:
-rootoff=$(test-path-utils normalize_path_copy / | wc -c)
+rootoff=$(test-tool path-utils normalize_path_copy / | wc -c)
 # Account for the trailing LF:
 if test $rootoff = 2; then
        rootoff=        # we are on Unix
@@ -46,7 +46,7 @@ else
        # In MSYS2, the root directory "/" is translated into a Windows
        # directory *with* trailing slash. Let's test for that and adjust
        # our expected longest ancestor length accordingly.
-       case "$(test-path-utils print_path /)" in
+       case "$(test-tool path-utils print_path /)" in
        */) rootslash=1;;
        *) rootslash=0;;
        esac
@@ -61,7 +61,7 @@ ancestor() {
                expected=$(($expected+$rootoff))
        fi
        test_expect_success "longest ancestor: $1 $2 => $expected" \
-       "actual=\$(test-path-utils longest_ancestor_length '$1' '$2') &&
+       "actual=\$(test-tool path-utils longest_ancestor_length '$1' '$2') &&
         test \"\$actual\" = '$expected'"
 }
 
@@ -77,8 +77,8 @@ case $(uname -s) in
        ;;
 esac
 
-test_expect_success basename 'test-path-utils basename'
-test_expect_success dirname 'test-path-utils dirname'
+test_expect_success basename 'test-tool path-utils basename'
+test_expect_success dirname 'test-tool path-utils dirname'
 
 norm_path "" ""
 norm_path . ""
@@ -157,48 +157,48 @@ ancestor /foo/bar /foo:/bar 4
 ancestor /foo/bar /bar -1
 
 test_expect_success 'strip_path_suffix' '
-       test c:/msysgit = $(test-path-utils strip_path_suffix \
+       test c:/msysgit = $(test-tool path-utils strip_path_suffix \
                c:/msysgit/libexec//git-core libexec/git-core)
 '
 
 test_expect_success 'absolute path rejects the empty string' '
-       test_must_fail test-path-utils absolute_path ""
+       test_must_fail test-tool path-utils absolute_path ""
 '
 
 test_expect_success 'real path rejects the empty string' '
-       test_must_fail test-path-utils real_path ""
+       test_must_fail test-tool path-utils real_path ""
 '
 
 test_expect_success POSIX 'real path works on absolute paths 1' '
        nopath="hopefully-absent-path" &&
-       test "/" = "$(test-path-utils real_path "/")" &&
-       test "/$nopath" = "$(test-path-utils real_path "/$nopath")"
+       test "/" = "$(test-tool path-utils real_path "/")" &&
+       test "/$nopath" = "$(test-tool path-utils real_path "/$nopath")"
 '
 
 test_expect_success 'real path works on absolute paths 2' '
        nopath="hopefully-absent-path" &&
        # Find an existing top-level directory for the remaining tests:
        d=$(pwd -P | sed -e "s|^\([^/]*/[^/]*\)/.*|\1|") &&
-       test "$d" = "$(test-path-utils real_path "$d")" &&
-       test "$d/$nopath" = "$(test-path-utils real_path "$d/$nopath")"
+       test "$d" = "$(test-tool path-utils real_path "$d")" &&
+       test "$d/$nopath" = "$(test-tool path-utils real_path "$d/$nopath")"
 '
 
 test_expect_success POSIX 'real path removes extra leading slashes' '
        nopath="hopefully-absent-path" &&
-       test "/" = "$(test-path-utils real_path "///")" &&
-       test "/$nopath" = "$(test-path-utils real_path "///$nopath")" &&
+       test "/" = "$(test-tool path-utils real_path "///")" &&
+       test "/$nopath" = "$(test-tool path-utils real_path "///$nopath")" &&
        # Find an existing top-level directory for the remaining tests:
        d=$(pwd -P | sed -e "s|^\([^/]*/[^/]*\)/.*|\1|") &&
-       test "$d" = "$(test-path-utils real_path "//$d")" &&
-       test "$d/$nopath" = "$(test-path-utils real_path "//$d/$nopath")"
+       test "$d" = "$(test-tool path-utils real_path "//$d")" &&
+       test "$d/$nopath" = "$(test-tool path-utils real_path "//$d/$nopath")"
 '
 
 test_expect_success 'real path removes other extra slashes' '
        nopath="hopefully-absent-path" &&
        # Find an existing top-level directory for the remaining tests:
        d=$(pwd -P | sed -e "s|^\([^/]*/[^/]*\)/.*|\1|") &&
-       test "$d" = "$(test-path-utils real_path "$d///")" &&
-       test "$d/$nopath" = "$(test-path-utils real_path "$d///$nopath")"
+       test "$d" = "$(test-tool path-utils real_path "$d///")" &&
+       test "$d/$nopath" = "$(test-tool path-utils real_path "$d///$nopath")"
 '
 
 test_expect_success SYMLINKS 'real path works on symlinks' '
@@ -209,35 +209,35 @@ test_expect_success SYMLINKS 'real path works on symlinks' '
        mkdir third &&
        dir="$(cd .git; pwd -P)" &&
        dir2=third/../second/other/.git &&
-       test "$dir" = "$(test-path-utils real_path $dir2)" &&
+       test "$dir" = "$(test-tool path-utils real_path $dir2)" &&
        file="$dir"/index &&
-       test "$file" = "$(test-path-utils real_path $dir2/index)" &&
+       test "$file" = "$(test-tool path-utils real_path $dir2/index)" &&
        basename=blub &&
-       test "$dir/$basename" = "$(cd .git && test-path-utils real_path "$basename")" &&
+       test "$dir/$basename" = "$(cd .git && test-tool path-utils real_path "$basename")" &&
        ln -s ../first/file .git/syml &&
        sym="$(cd first; pwd -P)"/file &&
-       test "$sym" = "$(test-path-utils real_path "$dir2/syml")"
+       test "$sym" = "$(test-tool path-utils real_path "$dir2/syml")"
 '
 
 test_expect_success SYMLINKS 'prefix_path works with absolute paths to work tree symlinks' '
        ln -s target symlink &&
-       test "$(test-path-utils prefix_path prefix "$(pwd)/symlink")" = "symlink"
+       test "$(test-tool path-utils prefix_path prefix "$(pwd)/symlink")" = "symlink"
 '
 
 test_expect_success 'prefix_path works with only absolute path to work tree' '
        echo "" >expected &&
-       test-path-utils prefix_path prefix "$(pwd)" >actual &&
+       test-tool path-utils prefix_path prefix "$(pwd)" >actual &&
        test_cmp expected actual
 '
 
 test_expect_success 'prefix_path rejects absolute path to dir with same beginning as work tree' '
-       test_must_fail test-path-utils prefix_path prefix "$(pwd)a"
+       test_must_fail test-tool path-utils prefix_path prefix "$(pwd)a"
 '
 
 test_expect_success SYMLINKS 'prefix_path works with absolute path to a symlink to work tree having  same beginning as work tree' '
        git init repo &&
        ln -s repo repolink &&
-       test "a" = "$(cd repo && test-path-utils prefix_path prefix "$(pwd)/../repolink/a")"
+       test "a" = "$(cd repo && test-tool path-utils prefix_path prefix "$(pwd)/../repolink/a")"
 '
 
 relative_path /foo/a/b/c/      /foo/a/b/       c/
index 24c92b6cd7b1c54eb6541a81abd7e5812b3b99b0..d03149be9f667307a181ef17a3291ecc7eeb19cb 100755 (executable)
@@ -14,13 +14,13 @@ EOF
 >empty
 
 test_expect_success 'start_command reports ENOENT' '
-       test-run-command start-command-ENOENT ./does-not-exist
+       test-tool run-command start-command-ENOENT ./does-not-exist
 '
 
 test_expect_success 'run_command can run a command' '
        cat hello-script >hello.sh &&
        chmod +x hello.sh &&
-       test-run-command run-command ./hello.sh >actual 2>err &&
+       test-tool run-command run-command ./hello.sh >actual 2>err &&
 
        test_cmp hello-script actual &&
        test_cmp empty err
@@ -31,7 +31,7 @@ test_expect_success !MINGW 'run_command can run a script without a #! line' '
        cat hello-script
        EOF
        chmod +x hello &&
-       test-run-command run-command ./hello >actual 2>err &&
+       test-tool run-command run-command ./hello >actual 2>err &&
 
        test_cmp hello-script actual &&
        test_cmp empty err
@@ -45,7 +45,7 @@ test_expect_success 'run_command does not try to execute a directory' '
        EOF
 
        PATH=$PWD/bin1:$PWD/bin2:$PATH \
-               test-run-command run-command greet >actual 2>err &&
+               test-tool run-command run-command greet >actual 2>err &&
        test_cmp bin2/greet actual &&
        test_cmp empty err
 '
@@ -62,7 +62,7 @@ test_expect_success POSIXPERM 'run_command passes over non-executable file' '
        EOF
 
        PATH=$PWD/bin1:$PWD/bin2:$PATH \
-               test-run-command run-command greet >actual 2>err &&
+               test-tool run-command run-command greet >actual 2>err &&
        test_cmp bin2/greet actual &&
        test_cmp empty err
 '
@@ -70,7 +70,7 @@ test_expect_success POSIXPERM 'run_command passes over non-executable file' '
 test_expect_success POSIXPERM 'run_command reports EACCES' '
        cat hello-script >hello.sh &&
        chmod -x hello.sh &&
-       test_must_fail test-run-command run-command ./hello.sh 2>err &&
+       test_must_fail test-tool run-command run-command ./hello.sh 2>err &&
 
        grep "fatal: cannot exec.*hello.sh" err
 '
@@ -104,17 +104,17 @@ World
 EOF
 
 test_expect_success 'run_command runs in parallel with more jobs available than tasks' '
-       test-run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'run_command runs in parallel with as many jobs as tasks' '
-       test-run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'run_command runs in parallel with more tasks than jobs available' '
-       test-run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
@@ -128,7 +128,7 @@ asking for a quick stop
 EOF
 
 test_expect_success 'run_command is asked to abort gracefully' '
-       test-run-command run-command-abort 3 false 2>actual &&
+       test-tool run-command run-command-abort 3 false 2>actual &&
        test_cmp expect actual
 '
 
@@ -137,14 +137,14 @@ no further jobs available
 EOF
 
 test_expect_success 'run_command outputs ' '
-       test-run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+       test-tool run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
        test_cmp expect actual
 '
 
 test_trace () {
        expect="$1"
        shift
-       GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \
+       GIT_TRACE=1 test-tool run-command "$@" run-command true 2>&1 >/dev/null | \
                sed 's/.* run_command: //' >actual &&
        echo "$expect true" >expect &&
        test_cmp expect actual
index 113c728e676679f8e8e8eb0fce433a84bbee8050..8e215867b8c197dedef32c569e1d1f632d22b631 100755 (executable)
@@ -26,7 +26,7 @@ test_expect_success 'setup' '
 '
 
 test_expect_success 'revision walking can be done twice' '
-       test-revision-walking run-twice >run_twice_actual &&
+       test-tool revision-walking run-twice >run_twice_actual &&
        test_cmp run_twice_expected run_twice_actual
 '
 
index dbfc05ebdc3990bf4ea5b0163afb4c6a9e698fa7..c6ee9f66b11d55f312ee673106ddf295c39bb50d 100755 (executable)
@@ -10,9 +10,9 @@ test_description='Test string list functionality'
 test_split () {
        cat >expected &&
        test_expect_success "split $1 at $2, max $3" "
-               test-string-list split '$1' '$2' '$3' >actual &&
+               test-tool string-list split '$1' '$2' '$3' >actual &&
                test_cmp expected actual &&
-               test-string-list split_in_place '$1' '$2' '$3' >actual &&
+               test-tool string-list split_in_place '$1' '$2' '$3' >actual &&
                test_cmp expected actual
        "
 }
@@ -61,31 +61,31 @@ test_split ":" ":" "-1" <<EOF
 EOF
 
 test_expect_success "test filter_string_list" '
-       test "x-" = "x$(test-string-list filter - y)" &&
-       test "x-" = "x$(test-string-list filter no y)" &&
-       test yes = "$(test-string-list filter yes y)" &&
-       test yes = "$(test-string-list filter no:yes y)" &&
-       test yes = "$(test-string-list filter yes:no y)" &&
-       test y1:y2 = "$(test-string-list filter y1:y2 y)" &&
-       test y2:y1 = "$(test-string-list filter y2:y1 y)" &&
-       test "x-" = "x$(test-string-list filter x1:x2 y)"
+       test "x-" = "x$(test-tool string-list filter - y)" &&
+       test "x-" = "x$(test-tool string-list filter no y)" &&
+       test yes = "$(test-tool string-list filter yes y)" &&
+       test yes = "$(test-tool string-list filter no:yes y)" &&
+       test yes = "$(test-tool string-list filter yes:no y)" &&
+       test y1:y2 = "$(test-tool string-list filter y1:y2 y)" &&
+       test y2:y1 = "$(test-tool string-list filter y2:y1 y)" &&
+       test "x-" = "x$(test-tool string-list filter x1:x2 y)"
 '
 
 test_expect_success "test remove_duplicates" '
-       test "x-" = "x$(test-string-list remove_duplicates -)" &&
-       test "x" = "x$(test-string-list remove_duplicates "")" &&
-       test a = "$(test-string-list remove_duplicates a)" &&
-       test a = "$(test-string-list remove_duplicates a:a)" &&
-       test a = "$(test-string-list remove_duplicates a:a:a:a:a)" &&
-       test a:b = "$(test-string-list remove_duplicates a:b)" &&
-       test a:b = "$(test-string-list remove_duplicates a:a:b)" &&
-       test a:b = "$(test-string-list remove_duplicates a:b:b)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:b:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:a:b:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:b:b:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:b:c:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:a:b:b:c:c)" &&
-       test a:b:c = "$(test-string-list remove_duplicates a:a:a:b:b:b:c:c:c)"
+       test "x-" = "x$(test-tool string-list remove_duplicates -)" &&
+       test "x" = "x$(test-tool string-list remove_duplicates "")" &&
+       test a = "$(test-tool string-list remove_duplicates a)" &&
+       test a = "$(test-tool string-list remove_duplicates a:a)" &&
+       test a = "$(test-tool string-list remove_duplicates a:a:a:a:a)" &&
+       test a:b = "$(test-tool string-list remove_duplicates a:b)" &&
+       test a:b = "$(test-tool string-list remove_duplicates a:a:b)" &&
+       test a:b = "$(test-tool string-list remove_duplicates a:b:b)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:b:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:a:b:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:b:b:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:b:c:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:a:b:b:c:c)" &&
+       test a:b:c = "$(test-tool string-list remove_duplicates a:a:a:b:b:b:c:c:c)"
 '
 
 test_done
index 50b31ffe756658dfa8ce435381ad7f48fee91462..67484502a007e3fed09fe381898bee52d21d07bb 100755 (executable)
@@ -18,7 +18,7 @@ test_expect_success 'ordered enumeration' '
        {
                echo20 append 88 44 aa 55 &&
                echo for_each_unique
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        test_cmp expect actual
 '
 
@@ -28,7 +28,7 @@ test_expect_success 'ordered enumeration with duplicate suppression' '
                echo20 append 88 44 aa 55 &&
                echo20 append 88 44 aa 55 &&
                echo for_each_unique
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        test_cmp expect actual
 '
 
@@ -36,7 +36,7 @@ test_expect_success 'lookup' '
        {
                echo20 append 88 44 aa 55 &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -eq 1
 '
@@ -45,7 +45,7 @@ test_expect_success 'lookup non-existing entry' '
        {
                echo20 append 88 44 aa 55 &&
                echo20 lookup 33
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -lt 0
 '
@@ -55,7 +55,7 @@ test_expect_success 'lookup with duplicates' '
                echo20 append 88 44 aa 55 &&
                echo20 append 88 44 aa 55 &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -ge 2 &&
        test "$n" -le 3
@@ -66,7 +66,7 @@ test_expect_success 'lookup non-existing entry with duplicates' '
                echo20 append 88 44 aa 55 &&
                echo20 append 88 44 aa 55 &&
                echo20 lookup 66
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -lt 0
 '
@@ -76,7 +76,7 @@ test_expect_success 'lookup with almost duplicate values' '
                echo "append 5555555555555555555555555555555555555555" &&
                echo "append 555555555555555555555555555555555555555f" &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -eq 0
 '
@@ -85,7 +85,7 @@ test_expect_success 'lookup with single duplicate value' '
        {
                echo20 append 55 55 &&
                echo20 lookup 55
-       } | test-sha1-array >actual &&
+       } | test-tool sha1-array >actual &&
        n=$(cat actual) &&
        test "$n" -ge 0 &&
        test "$n" -le 1
index 7d6d21425f483c412bb6d42ea551aaf5eaef4aa0..91fa639c4a74ed69ed69ff7360728b2f1ab08a66 100755 (executable)
@@ -8,7 +8,7 @@ while read s1 s2 expect
 do
        test_expect_success "strcmp_offset($s1, $s2)" '
                echo "$expect" >expect &&
-               test-strcmp-offset "$s1" "$s2" >actual &&
+               test-tool strcmp-offset "$s1" "$s2" >actual &&
                test_cmp expect actual
        '
 done <<-EOF
index 991ed2a48dbf15fb4cb794a587ad900071391e11..23fbe6434abd3f05057d4916f70e5d5b30115b68 100755 (executable)
@@ -9,11 +9,11 @@ Verify wrappers and compatibility functions.
 . ./test-lib.sh
 
 test_expect_success 'character classes (isspace, isalpha etc.)' '
-       test-ctype
+       test-tool ctype
 '
 
 test_expect_success 'mktemp to nonexistent directory prints filename' '
-       test_must_fail test-mktemp doesnotexist/testXXXXXX 2>err &&
+       test_must_fail test-tool mktemp doesnotexist/testXXXXXX 2>err &&
        grep "doesnotexist/test" err
 '
 
@@ -21,7 +21,7 @@ test_expect_success POSIXPERM,SANITY 'mktemp to unwritable directory prints file
        mkdir cannotwrite &&
        chmod -w cannotwrite &&
        test_when_finished "chmod +w cannotwrite" &&
-       test_must_fail test-mktemp cannotwrite/testXXXXXX 2>err &&
+       test_must_fail test-tool mktemp cannotwrite/testXXXXXX 2>err &&
        grep "cannotwrite/test" err
 '
 
@@ -31,7 +31,7 @@ test_expect_success 'git_mkstemps_mode does not fail if fd 0 is not open' '
 
 test_expect_success 'check for a bug in the regex routines' '
        # if this test fails, re-build git with NO_REGEX=1
-       test-regex --bug
+       test-tool regex --bug
 '
 
 test_done
index adfd4f0b5eea1c0c438647d3d6567479acc5d495..4ae0995cd9140d3030da7d975c74ff3d9e55b039 100755 (executable)
@@ -8,13 +8,13 @@ cache-tree extension.
  . ./test-lib.sh
 
 cmp_cache_tree () {
-       test-dump-cache-tree | sed -e '/#(ref)/d' >actual &&
+       test-tool dump-cache-tree | sed -e '/#(ref)/d' >actual &&
        sed "s/$_x40/SHA/" <actual >filtered &&
        test_cmp "$1" filtered
 }
 
 # We don't bother with actually checking the SHA1:
-# test-dump-cache-tree already verifies that all existing data is
+# test-tool dump-cache-tree already verifies that all existing data is
 # correct.
 generate_expected_cache_tree_rec () {
        dir="$1${1:+/}" &&
@@ -47,7 +47,7 @@ test_cache_tree () {
 
 test_invalid_cache_tree () {
        printf "invalid                                  %s ()\n" "" "$@" >expect &&
-       test-dump-cache-tree |
+       test-tool dump-cache-tree |
        sed -n -e "s/[0-9]* subtrees//" -e '/#(ref)/d' -e '/^invalid /p' >actual &&
        test_cmp expect actual
 }
@@ -115,14 +115,14 @@ test_expect_success 'update-index invalidates cache-tree' '
 '
 
 test_expect_success 'write-tree establishes cache-tree' '
-       test-scrap-cache-tree &&
+       test-tool scrap-cache-tree &&
        git write-tree &&
        test_cache_tree
 '
 
-test_expect_success 'test-scrap-cache-tree works' '
+test_expect_success 'test-tool scrap-cache-tree works' '
        git read-tree HEAD &&
-       test-scrap-cache-tree &&
+       test-tool scrap-cache-tree &&
        test_no_cache_tree
 '
 
@@ -170,7 +170,7 @@ test_expect_success 'commit in child dir has cache-tree' '
 '
 
 test_expect_success 'reset --hard gives cache-tree' '
-       test-scrap-cache-tree &&
+       test-tool scrap-cache-tree &&
        git reset --hard &&
        test_cache_tree
 '
@@ -246,9 +246,9 @@ test_expect_success 'switching trees does not invalidate shared index' '
        git update-index --split-index &&
        >split &&
        git add split &&
-       test-dump-split-index .git/index | grep -v ^own >before &&
+       test-tool dump-split-index .git/index | grep -v ^own >before &&
        git commit -m "as-is" &&
-       test-dump-split-index .git/index | grep -v ^own >after &&
+       test-tool dump-split-index .git/index | grep -v ^own >after &&
        test_cmp before after
 '
 
index 410d5768ca11b1d7e322c135cf18f60f41685b68..f99529d83853e2c468b1c948f3ec4408a58fb919 100755 (executable)
@@ -9,172 +9,172 @@ tu="$TEST_DIRECTORY/t0110/url"
 # Note that only file: URLs should be allowed without a host
 
 test_expect_success 'url scheme' '
-       ! test-urlmatch-normalization "" &&
-       ! test-urlmatch-normalization "_" &&
-       ! test-urlmatch-normalization "scheme" &&
-       ! test-urlmatch-normalization "scheme:" &&
-       ! test-urlmatch-normalization "scheme:/" &&
-       ! test-urlmatch-normalization "scheme://" &&
-       ! test-urlmatch-normalization "file" &&
-       ! test-urlmatch-normalization "file:" &&
-       ! test-urlmatch-normalization "file:/" &&
-       test-urlmatch-normalization "file://" &&
-       ! test-urlmatch-normalization "://acme.co" &&
-       ! test-urlmatch-normalization "x_test://acme.co" &&
-       ! test-urlmatch-normalization "-test://acme.co" &&
-       ! test-urlmatch-normalization "0test://acme.co" &&
-       ! test-urlmatch-normalization "+test://acme.co" &&
-       ! test-urlmatch-normalization ".test://acme.co" &&
-       ! test-urlmatch-normalization "schem%6e://" &&
-       test-urlmatch-normalization "x-Test+v1.0://acme.co" &&
-       test "$(test-urlmatch-normalization -p "AbCdeF://x.Y")" = "abcdef://x.y/"
+       ! test-tool urlmatch-normalization "" &&
+       ! test-tool urlmatch-normalization "_" &&
+       ! test-tool urlmatch-normalization "scheme" &&
+       ! test-tool urlmatch-normalization "scheme:" &&
+       ! test-tool urlmatch-normalization "scheme:/" &&
+       ! test-tool urlmatch-normalization "scheme://" &&
+       ! test-tool urlmatch-normalization "file" &&
+       ! test-tool urlmatch-normalization "file:" &&
+       ! test-tool urlmatch-normalization "file:/" &&
+       test-tool urlmatch-normalization "file://" &&
+       ! test-tool urlmatch-normalization "://acme.co" &&
+       ! test-tool urlmatch-normalization "x_test://acme.co" &&
+       ! test-tool urlmatch-normalization "-test://acme.co" &&
+       ! test-tool urlmatch-normalization "0test://acme.co" &&
+       ! test-tool urlmatch-normalization "+test://acme.co" &&
+       ! test-tool urlmatch-normalization ".test://acme.co" &&
+       ! test-tool urlmatch-normalization "schem%6e://" &&
+       test-tool urlmatch-normalization "x-Test+v1.0://acme.co" &&
+       test "$(test-tool urlmatch-normalization -p "AbCdeF://x.Y")" = "abcdef://x.y/"
 '
 
 test_expect_success 'url authority' '
-       ! test-urlmatch-normalization "scheme://user:pass@" &&
-       ! test-urlmatch-normalization "scheme://?" &&
-       ! test-urlmatch-normalization "scheme://#" &&
-       ! test-urlmatch-normalization "scheme:///" &&
-       ! test-urlmatch-normalization "scheme://:" &&
-       ! test-urlmatch-normalization "scheme://:555" &&
-       test-urlmatch-normalization "file://user:pass@" &&
-       test-urlmatch-normalization "file://?" &&
-       test-urlmatch-normalization "file://#" &&
-       test-urlmatch-normalization "file:///" &&
-       test-urlmatch-normalization "file://:" &&
-       ! test-urlmatch-normalization "file://:555" &&
-       test-urlmatch-normalization "scheme://user:pass@host" &&
-       test-urlmatch-normalization "scheme://@host" &&
-       test-urlmatch-normalization "scheme://%00@host" &&
-       ! test-urlmatch-normalization "scheme://%%@host" &&
-       ! test-urlmatch-normalization "scheme://host_" &&
-       test-urlmatch-normalization "scheme://user:pass@host/" &&
-       test-urlmatch-normalization "scheme://@host/" &&
-       test-urlmatch-normalization "scheme://host/" &&
-       test-urlmatch-normalization "scheme://host?x" &&
-       test-urlmatch-normalization "scheme://host#x" &&
-       test-urlmatch-normalization "scheme://host/@" &&
-       test-urlmatch-normalization "scheme://host?@x" &&
-       test-urlmatch-normalization "scheme://host#@x" &&
-       test-urlmatch-normalization "scheme://[::1]" &&
-       test-urlmatch-normalization "scheme://[::1]/" &&
-       ! test-urlmatch-normalization "scheme://hos%41/" &&
-       test-urlmatch-normalization "scheme://[invalid....:/" &&
-       test-urlmatch-normalization "scheme://invalid....:]/" &&
-       ! test-urlmatch-normalization "scheme://invalid....:[/" &&
-       ! test-urlmatch-normalization "scheme://invalid....:["
+       ! test-tool urlmatch-normalization "scheme://user:pass@" &&
+       ! test-tool urlmatch-normalization "scheme://?" &&
+       ! test-tool urlmatch-normalization "scheme://#" &&
+       ! test-tool urlmatch-normalization "scheme:///" &&
+       ! test-tool urlmatch-normalization "scheme://:" &&
+       ! test-tool urlmatch-normalization "scheme://:555" &&
+       test-tool urlmatch-normalization "file://user:pass@" &&
+       test-tool urlmatch-normalization "file://?" &&
+       test-tool urlmatch-normalization "file://#" &&
+       test-tool urlmatch-normalization "file:///" &&
+       test-tool urlmatch-normalization "file://:" &&
+       ! test-tool urlmatch-normalization "file://:555" &&
+       test-tool urlmatch-normalization "scheme://user:pass@host" &&
+       test-tool urlmatch-normalization "scheme://@host" &&
+       test-tool urlmatch-normalization "scheme://%00@host" &&
+       ! test-tool urlmatch-normalization "scheme://%%@host" &&
+       ! test-tool urlmatch-normalization "scheme://host_" &&
+       test-tool urlmatch-normalization "scheme://user:pass@host/" &&
+       test-tool urlmatch-normalization "scheme://@host/" &&
+       test-tool urlmatch-normalization "scheme://host/" &&
+       test-tool urlmatch-normalization "scheme://host?x" &&
+       test-tool urlmatch-normalization "scheme://host#x" &&
+       test-tool urlmatch-normalization "scheme://host/@" &&
+       test-tool urlmatch-normalization "scheme://host?@x" &&
+       test-tool urlmatch-normalization "scheme://host#@x" &&
+       test-tool urlmatch-normalization "scheme://[::1]" &&
+       test-tool urlmatch-normalization "scheme://[::1]/" &&
+       ! test-tool urlmatch-normalization "scheme://hos%41/" &&
+       test-tool urlmatch-normalization "scheme://[invalid....:/" &&
+       test-tool urlmatch-normalization "scheme://invalid....:]/" &&
+       ! test-tool urlmatch-normalization "scheme://invalid....:[/" &&
+       ! test-tool urlmatch-normalization "scheme://invalid....:["
 '
 
 test_expect_success 'url port checks' '
-       test-urlmatch-normalization "xyz://q@some.host:" &&
-       test-urlmatch-normalization "xyz://q@some.host:456/" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:0" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:0000000" &&
-       test-urlmatch-normalization "xyz://q@some.host:0000001?" &&
-       test-urlmatch-normalization "xyz://q@some.host:065535#" &&
-       test-urlmatch-normalization "xyz://q@some.host:65535" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:65536" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:99999" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:100000" &&
-       ! test-urlmatch-normalization "xyz://q@some.host:100001" &&
-       test-urlmatch-normalization "http://q@some.host:80" &&
-       test-urlmatch-normalization "https://q@some.host:443" &&
-       test-urlmatch-normalization "http://q@some.host:80/" &&
-       test-urlmatch-normalization "https://q@some.host:443?" &&
-       ! test-urlmatch-normalization "http://q@:8008" &&
-       ! test-urlmatch-normalization "http://:8080" &&
-       ! test-urlmatch-normalization "http://:" &&
-       test-urlmatch-normalization "xyz://q@some.host:456/" &&
-       test-urlmatch-normalization "xyz://[::1]:456/" &&
-       test-urlmatch-normalization "xyz://[::1]:/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:000/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:0%300/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:0x80/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:4294967297/" &&
-       ! test-urlmatch-normalization "xyz://[::1]:030f/"
+       test-tool urlmatch-normalization "xyz://q@some.host:" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:456/" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:0" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:0000000" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:0000001?" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:065535#" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:65535" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:65536" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:99999" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:100000" &&
+       ! test-tool urlmatch-normalization "xyz://q@some.host:100001" &&
+       test-tool urlmatch-normalization "http://q@some.host:80" &&
+       test-tool urlmatch-normalization "https://q@some.host:443" &&
+       test-tool urlmatch-normalization "http://q@some.host:80/" &&
+       test-tool urlmatch-normalization "https://q@some.host:443?" &&
+       ! test-tool urlmatch-normalization "http://q@:8008" &&
+       ! test-tool urlmatch-normalization "http://:8080" &&
+       ! test-tool urlmatch-normalization "http://:" &&
+       test-tool urlmatch-normalization "xyz://q@some.host:456/" &&
+       test-tool urlmatch-normalization "xyz://[::1]:456/" &&
+       test-tool urlmatch-normalization "xyz://[::1]:/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:000/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:0%300/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:0x80/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:4294967297/" &&
+       ! test-tool urlmatch-normalization "xyz://[::1]:030f/"
 '
 
 test_expect_success 'url port normalization' '
-       test "$(test-urlmatch-normalization -p "http://x:800")" = "http://x:800/" &&
-       test "$(test-urlmatch-normalization -p "http://x:0800")" = "http://x:800/" &&
-       test "$(test-urlmatch-normalization -p "http://x:00000800")" = "http://x:800/" &&
-       test "$(test-urlmatch-normalization -p "http://x:065535")" = "http://x:65535/" &&
-       test "$(test-urlmatch-normalization -p "http://x:1")" = "http://x:1/" &&
-       test "$(test-urlmatch-normalization -p "http://x:80")" = "http://x/" &&
-       test "$(test-urlmatch-normalization -p "http://x:080")" = "http://x/" &&
-       test "$(test-urlmatch-normalization -p "http://x:000000080")" = "http://x/" &&
-       test "$(test-urlmatch-normalization -p "https://x:443")" = "https://x/" &&
-       test "$(test-urlmatch-normalization -p "https://x:0443")" = "https://x/" &&
-       test "$(test-urlmatch-normalization -p "https://x:000000443")" = "https://x/"
+       test "$(test-tool urlmatch-normalization -p "http://x:800")" = "http://x:800/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:0800")" = "http://x:800/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:00000800")" = "http://x:800/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:065535")" = "http://x:65535/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:1")" = "http://x:1/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:80")" = "http://x/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:080")" = "http://x/" &&
+       test "$(test-tool urlmatch-normalization -p "http://x:000000080")" = "http://x/" &&
+       test "$(test-tool urlmatch-normalization -p "https://x:443")" = "https://x/" &&
+       test "$(test-tool urlmatch-normalization -p "https://x:0443")" = "https://x/" &&
+       test "$(test-tool urlmatch-normalization -p "https://x:000000443")" = "https://x/"
 '
 
 test_expect_success 'url general escapes' '
-       ! test-urlmatch-normalization "http://x.y?%fg" &&
-       test "$(test-urlmatch-normalization -p "X://W/%7e%41^%3a")" = "x://w/~A%5E%3A" &&
-       test "$(test-urlmatch-normalization -p "X://W/:/?#[]@")" = "x://w/:/?#[]@" &&
-       test "$(test-urlmatch-normalization -p "X://W/$&()*+,;=")" = "x://w/$&()*+,;=" &&
-       test "$(test-urlmatch-normalization -p "X://W/'\''")" = "x://w/'\''" &&
-       test "$(test-urlmatch-normalization -p "X://W?'\!'")" = "x://w/?'\!'"
+       ! test-tool urlmatch-normalization "http://x.y?%fg" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/%7e%41^%3a")" = "x://w/~A%5E%3A" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/:/?#[]@")" = "x://w/:/?#[]@" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/$&()*+,;=")" = "x://w/$&()*+,;=" &&
+       test "$(test-tool urlmatch-normalization -p "X://W/'\''")" = "x://w/'\''" &&
+       test "$(test-tool urlmatch-normalization -p "X://W?'\!'")" = "x://w/?'\!'"
 '
 
 test_expect_success !MINGW 'url high-bit escapes' '
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-1")")" = "x://q/%01%02%03%04%05%06%07%08%0E%0F%10%11%12" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-2")")" = "x://q/%13%14%15%16%17%18%19%1B%1C%1D%1E%1F%7F" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-3")")" = "x://q/%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-4")")" = "x://q/%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-5")")" = "x://q/%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-6")")" = "x://q/%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-7")")" = "x://q/%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-8")")" = "x://q/%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-9")")" = "x://q/%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF" &&
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-10")")" = "x://q/%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-1")")" = "x://q/%01%02%03%04%05%06%07%08%0E%0F%10%11%12" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-2")")" = "x://q/%13%14%15%16%17%18%19%1B%1C%1D%1E%1F%7F" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-3")")" = "x://q/%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-4")")" = "x://q/%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-5")")" = "x://q/%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-6")")" = "x://q/%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-7")")" = "x://q/%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-8")")" = "x://q/%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-9")")" = "x://q/%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF" &&
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-10")")" = "x://q/%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
 '
 
 test_expect_success 'url utf-8 escapes' '
-       test "$(test-urlmatch-normalization -p "$(cat "$tu-11")")" = "x://q/%C2%80%DF%BF%E0%A0%80%EF%BF%BD%F0%90%80%80%F0%AF%BF%BD"
+       test "$(test-tool urlmatch-normalization -p "$(cat "$tu-11")")" = "x://q/%C2%80%DF%BF%E0%A0%80%EF%BF%BD%F0%90%80%80%F0%AF%BF%BD"
 '
 
 test_expect_success 'url username/password escapes' '
-       test "$(test-urlmatch-normalization -p "x://%41%62(^):%70+d@foo")" = "x://Ab(%5E):p+d@foo/"
+       test "$(test-tool urlmatch-normalization -p "x://%41%62(^):%70+d@foo")" = "x://Ab(%5E):p+d@foo/"
 '
 
 test_expect_success 'url normalized lengths' '
-       test "$(test-urlmatch-normalization -l "Http://%4d%65:%4d^%70@The.Host")" = 25 &&
-       test "$(test-urlmatch-normalization -l "http://%41:%42@x.y/%61/")" = 17 &&
-       test "$(test-urlmatch-normalization -l "http://@x.y/^")" = 15
+       test "$(test-tool urlmatch-normalization -l "Http://%4d%65:%4d^%70@The.Host")" = 25 &&
+       test "$(test-tool urlmatch-normalization -l "http://%41:%42@x.y/%61/")" = 17 &&
+       test "$(test-tool urlmatch-normalization -l "http://@x.y/^")" = 15
 '
 
 test_expect_success 'url . and .. segments' '
-       test "$(test-urlmatch-normalization -p "x://y/.")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/./")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/.")" = "x://y/a" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./")" = "x://y/a/" &&
-       test "$(test-urlmatch-normalization -p "x://y/.?")" = "x://y/?" &&
-       test "$(test-urlmatch-normalization -p "x://y/./?")" = "x://y/?" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/.?")" = "x://y/a?" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./?")" = "x://y/a/?" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./b/.././../c")" = "x://y/c" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./b/../.././c/")" = "x://y/c/" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./b/.././../c/././.././.")" = "x://y/" &&
-       ! test-urlmatch-normalization "x://y/a/./b/.././../c/././.././.." &&
-       test "$(test-urlmatch-normalization -p "x://y/a/./?/././..")" = "x://y/a/?/././.." &&
-       test "$(test-urlmatch-normalization -p "x://y/%2e/")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/%2E/")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/a/%2e./")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/b/.%2E/")" = "x://y/" &&
-       test "$(test-urlmatch-normalization -p "x://y/c/%2e%2E/")" = "x://y/"
+       test "$(test-tool urlmatch-normalization -p "x://y/.")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/./")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/.")" = "x://y/a" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./")" = "x://y/a/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/.?")" = "x://y/?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/./?")" = "x://y/?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/.?")" = "x://y/a?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./?")" = "x://y/a/?" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./b/.././../c")" = "x://y/c" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./b/../.././c/")" = "x://y/c/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./b/.././../c/././.././.")" = "x://y/" &&
+       ! test-tool urlmatch-normalization "x://y/a/./b/.././../c/././.././.." &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/./?/././..")" = "x://y/a/?/././.." &&
+       test "$(test-tool urlmatch-normalization -p "x://y/%2e/")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/%2E/")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/a/%2e./")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/b/.%2E/")" = "x://y/" &&
+       test "$(test-tool urlmatch-normalization -p "x://y/c/%2e%2E/")" = "x://y/"
 '
 
 # http://@foo specifies an empty user name but does not specify a password
 # http://foo  specifies neither a user name nor a password
 # So they should not be equivalent
 test_expect_success 'url equivalents' '
-       test-urlmatch-normalization "httP://x" "Http://X/" &&
-       test-urlmatch-normalization "Http://%4d%65:%4d^%70@The.Host" "hTTP://Me:%4D^p@the.HOST:80/" &&
-       ! test-urlmatch-normalization "https://@x.y/^" "httpS://x.y:443/^" &&
-       test-urlmatch-normalization "https://@x.y/^" "httpS://@x.y:0443/^" &&
-       test-urlmatch-normalization "https://@x.y/^/../abc" "httpS://@x.y:0443/abc" &&
-       test-urlmatch-normalization "https://@x.y/^/.." "httpS://@x.y:0443/"
+       test-tool urlmatch-normalization "httP://x" "Http://X/" &&
+       test-tool urlmatch-normalization "Http://%4d%65:%4d^%70@The.Host" "hTTP://Me:%4D^p@the.HOST:80/" &&
+       ! test-tool urlmatch-normalization "https://@x.y/^" "httpS://x.y:443/^" &&
+       test-tool urlmatch-normalization "https://@x.y/^" "httpS://@x.y:0443/^" &&
+       test-tool urlmatch-normalization "https://@x.y/^/../abc" "httpS://@x.y:0443/abc" &&
+       test-tool urlmatch-normalization "https://@x.y/^/.." "httpS://@x.y:0443/"
 '
 
 test_done
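
A minimal sketch, not part of this patch, of the four ways the converted helper is exercised above: a bare URL argument yields only an exit status, "-p" prints the normalized form, "-l" prints the normalized length, and two URL arguments succeed only when both normalize to the same string.

test-tool urlmatch-normalization "hTTp://Example.COM:80/a/../b"      # exit status: is the URL normalizable?
test-tool urlmatch-normalization -p "hTTp://Example.COM:80/a/../b"   # print the normalized form
test-tool urlmatch-normalization -l "hTTp://Example.COM:80/a/../b"   # print the length of the normalized form
test-tool urlmatch-normalization "httP://x" "Http://X/"              # do both normalize to the same URL?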
index 2361590d54856a1a284945e46fece2716487f624..438e778d6afa086e8823b2ac3d38c2a26f171dbd 100755 (executable)
@@ -7,10 +7,6 @@ test_description='Gettext Shell poison'
 
 . ./lib-gettext.sh
 
-test_expect_success GETTEXT_POISON "sanity: \$GIT_INTERNAL_GETTEXT_SH_SCHEME is set (to $GIT_INTERNAL_GETTEXT_SH_SCHEME)" '
-    test -n "$GIT_INTERNAL_GETTEXT_SH_SCHEME"
-'
-
 test_expect_success GETTEXT_POISON 'sanity: $GIT_INTERNAL_GETTEXT_SH_SCHEME" is poison' '
     test "$GIT_INTERNAL_GETTEXT_SH_SCHEME" = "poison"
 '
index b19f3326946203409fe0e428b9fc73d34134d756..2ac3b940c611db08fd48e5782db13d2f8f1d48ec 100755 (executable)
@@ -282,7 +282,7 @@ test_expect_success "--batch-check with multiple sha1s gives correct format" '
 '
 
 test_expect_success 'setup blobs which are likely to delta' '
-       test-genrandom foo 10240 >foo &&
+       test-tool genrandom foo 10240 >foo &&
        { cat foo; echo plus; } >foo-plus &&
        git add foo foo-plus &&
        git commit -m foo &&
index c167f606ca7b8c1628ac8d00507d50f032278730..0c6f48f3024c81de765a8acb489e2d5e3ec42a56 100755 (executable)
@@ -15,8 +15,11 @@ test_description='sparse checkout tests
 . "$TEST_DIRECTORY"/lib-read-tree.sh
 
 test_expect_success 'setup' '
+       test_commit init &&
+       echo modified >>init.t &&
+
        cat >expected <<-EOF &&
-       100644 77f0ba1734ed79d12881f81b36ee134de6a3327b 0       init.t
+       100644 $(git hash-object init.t) 0      init.t
        100644 $EMPTY_BLOB 0    sub/added
        100644 $EMPTY_BLOB 0    sub/addedtoo
        100644 $EMPTY_BLOB 0    subsub/added
@@ -28,8 +31,6 @@ test_expect_success 'setup' '
        H subsub/added
        EOF
 
-       test_commit init &&
-       echo modified >>init.t &&
        mkdir sub subsub &&
        touch sub/added sub/addedtoo subsub/added &&
        git add init.t sub/added sub/addedtoo subsub/added &&
index 6fd264cff0d6de1961656c2cd1193d8ce37e9a1f..f9eb143f43420b0e2f2b864b4f83f78de7886a7b 100755 (executable)
@@ -103,9 +103,9 @@ test_expect_success 'packsize limit' '
                # mid1 and mid2 will fit within 256k limit but
                # appending mid3 will bust the limit and will
                # result in a separate packfile.
-               test-genrandom "a" $(( 66 * 1024 )) >mid1 &&
-               test-genrandom "b" $(( 80 * 1024 )) >mid2 &&
-               test-genrandom "c" $(( 128 * 1024 )) >mid3 &&
+               test-tool genrandom "a" $(( 66 * 1024 )) >mid1 &&
+               test-tool genrandom "b" $(( 80 * 1024 )) >mid2 &&
+               test-tool genrandom "c" $(( 128 * 1024 )) >mid3 &&
                git add mid1 mid2 mid3 &&
 
                count=0
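
A quick sketch, not taken from this series, of the genrandom subcommand as the test drives it: a seed word and a byte count, with that many bytes of seed-determined pseudo-random data written to stdout, so the generated blobs are reproducible between runs.

test-tool genrandom "seed" $((64 * 1024)) >blob &&
git add blob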
index cbeb9bebeea67c3654c279b2c12fed4825840d8f..e95b1e67da7089a434fc5e8d687b2e2d5e39da01 100755 (executable)
@@ -914,7 +914,7 @@ test_expect_success 'get --expiry-date' '
        invalid1 = "abc"
        EOF
        cat >expect <<-EOF &&
-       $(test-date timestamp $rel)
+       $(test-tool date timestamp $rel)
        1275666415
        1510441871
        1510348087
@@ -1206,6 +1206,29 @@ test_expect_success 'git -c is not confused by empty environment' '
        GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list
 '
 
+sq="'"
+test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' '
+       cat >expect <<-\EOF &&
+       env.one one
+       env.two two
+       EOF
+       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*" >actual &&
+       test_cmp expect actual &&
+
+       cat >expect <<-EOF &&
+       env.one one${sq}
+       env.two two
+       EOF
+       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*" >actual &&
+       test_cmp expect actual &&
+
+       test_must_fail env \
+               GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*"
+'
+
 test_expect_success 'git config --edit works' '
        git config -f tmp test.value no &&
        echo test.value=yes >expect &&
@@ -1564,10 +1587,10 @@ test_expect_success '--show-origin stdin with file include' '
 '
 
 test_expect_success !MINGW '--show-origin blob' '
-       cat >expect <<-\EOF &&
-               blob:a9d9f9e555b5c6f07cbe09d3f06fe3df11e09c08   user.custom=true
-       EOF
        blob=$(git hash-object -w "$CUSTOM_CONFIG_FILE") &&
+       cat >expect <<-EOF &&
+               blob:$blob      user.custom=true
+       EOF
        git config --blob=$blob --show-origin --list >output &&
        test_cmp expect output
 '
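
The new GIT_CONFIG_PARAMETERS test above leans on the $sq helper because a literal single quote cannot appear inside the single-quoted test body; roughly, the environment variable it builds looks like this (illustrative only):

sq="'"
# value becomes: 'env.one=one' 'env.two=two'   (each key=value single-quoted, space separated)
GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
	git config --get-regexp "env.*"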
index f5422f1d33f5eac98e6f56ec4bf05f3f8d4c8be2..335d3f3211aa874fd3a8e0d0006dd9fc53a4e589 100755 (executable)
@@ -54,7 +54,7 @@ test_expect_success SETFACL 'Setup test repo' '
 
 test_expect_success SETFACL 'Objects creation does not break ACLs with restrictive umask' '
        # SHA1 for empty blob
-       check_perms_and_acl .git/objects/e6/9de29bb2d1d6434b8b29ae775ad8c2e48c5391
+       check_perms_and_acl .git/objects/$(echo $EMPTY_BLOB | sed -e "s,^\(..\),\1/,")
 '
 
 test_expect_success SETFACL 'git gc does not break ACLs with restrictive umask' '
index d9d2f545a4ed735e02f7d5cd6ceee7d873fdec94..f035ee40a313ae25a13bdb6a838eed3b34fd5f9d 100755 (executable)
@@ -224,7 +224,7 @@ test_expect_success 'conditional include, early config reading' '
                echo "[includeIf \"gitdir:foo/\"]path=bar6" >>.git/config &&
                echo "[test]six=6" >.git/bar6 &&
                echo 6 >expect &&
-               test-config read_early_config test.six >actual &&
+               test-tool config read_early_config test.six >actual &&
                test_cmp expect actual
        )
 '
index bafed5c9b88481992ca6cf4cd6c13596c7baf16b..3e00d1af01fb771bd3fd26dcb265a3d80da37934 100755 (executable)
@@ -18,7 +18,7 @@ check_config () {
        then
                printf "%s\n" "$@"
        fi >expect &&
-       test_expect_code $expect_code test-config "$op" "$key" >actual &&
+       test_expect_code $expect_code test-tool config "$op" "$key" >actual &&
        test_cmp expect actual
 }
 
@@ -125,7 +125,7 @@ test_expect_success 'find string value for a key' '
 '
 
 test_expect_success 'check line error when NULL string is queried' '
-       test_expect_code 128 test-config get_string case.foo 2>result &&
+       test_expect_code 128 test-tool config get_string case.foo 2>result &&
        test_i18ngrep "fatal: .*case\.foo.*\.git/config.*line 7" result
 '
 
@@ -155,13 +155,13 @@ test_expect_success 'find value from a configset' '
                baz = ball
        EOF
        echo silk >expect &&
-       test-config configset_get_value my.new config2 .git/config >actual &&
+       test-tool config configset_get_value my.new config2 .git/config >actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'find value with highest priority from a configset' '
        echo hask >expect &&
-       test-config configset_get_value case.baz config2 .git/config >actual &&
+       test-tool config configset_get_value case.baz config2 .git/config >actual &&
        test_cmp expect actual
 '
 
@@ -173,20 +173,20 @@ test_expect_success 'find value_list for a key from a configset' '
        lama
        ball
        EOF
-       test-config configset_get_value case.baz config2 .git/config >actual &&
+       test-tool config configset_get_value case.baz config2 .git/config >actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'proper error on non-existent files' '
        echo "Error (-1) reading configuration file non-existent-file." >expect &&
-       test_expect_code 2 test-config configset_get_value foo.bar non-existent-file 2>actual &&
+       test_expect_code 2 test-tool config configset_get_value foo.bar non-existent-file 2>actual &&
        test_cmp expect actual
 '
 
 test_expect_success 'proper error on directory "files"' '
        echo "Error (-1) reading configuration file a-directory." >expect &&
        mkdir a-directory &&
-       test_expect_code 2 test-config configset_get_value foo.bar a-directory 2>output &&
+       test_expect_code 2 test-tool config configset_get_value foo.bar a-directory 2>output &&
        grep "^warning:" output &&
        grep "^Error" output >actual &&
        test_cmp expect actual
@@ -196,7 +196,7 @@ test_expect_success POSIXPERM,SANITY 'proper error on non-accessible files' '
        chmod -r .git/config &&
        test_when_finished "chmod +r .git/config" &&
        echo "Error (-1) reading configuration file .git/config." >expect &&
-       test_expect_code 2 test-config configset_get_value foo.bar .git/config 2>output &&
+       test_expect_code 2 test-tool config configset_get_value foo.bar .git/config 2>output &&
        grep "^warning:" output &&
        grep "^Error" output >actual &&
        test_cmp expect actual
@@ -207,14 +207,14 @@ test_expect_success 'proper error on error in default config files' '
        test_when_finished "mv .git/config.old .git/config" &&
        echo "[" >>.git/config &&
        echo "fatal: bad config line 34 in file .git/config" >expect &&
-       test_expect_code 128 test-config get_value foo.bar 2>actual &&
+       test_expect_code 128 test-tool config get_value foo.bar 2>actual &&
        test_i18ncmp expect actual
 '
 
 test_expect_success 'proper error on error in custom config files' '
        echo "[" >>syntax-error &&
        echo "fatal: bad config line 1 in file syntax-error" >expect &&
-       test_expect_code 128 test-config configset_get_value foo.bar syntax-error 2>actual &&
+       test_expect_code 128 test-tool config configset_get_value foo.bar syntax-error 2>actual &&
        test_i18ncmp expect actual
 '
 
@@ -267,7 +267,7 @@ test_expect_success 'iteration shows correct origins' '
        name=
        scope=cmdline
        EOF
-       GIT_CONFIG_PARAMETERS=$cmdline_config test-config iterate >actual &&
+       GIT_CONFIG_PARAMETERS=$cmdline_config test-tool config iterate >actual &&
        test_cmp expect actual
 '
 
index 3dda215e8e2f37c049a3169cecdb3e43ddea5dfb..413642aa5672800d1b7be448bc97d175add07ee4 100755 (executable)
@@ -6,7 +6,7 @@ test_description='Test read_early_config()'
 
 test_expect_success 'read early config' '
        test_config early.config correct &&
-       test-config read_early_config early.config >output &&
+       test-tool config read_early_config early.config >output &&
        test correct = "$(cat output)"
 '
 
@@ -15,7 +15,7 @@ test_expect_success 'in a sub-directory' '
        mkdir -p sub &&
        (
                cd sub &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        ) >output &&
        test sub = "$(cat output)"
 '
@@ -27,7 +27,7 @@ test_expect_success 'ceiling' '
                GIT_CEILING_DIRECTORIES="$PWD" &&
                export GIT_CEILING_DIRECTORIES &&
                cd sub &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        ) >output &&
        test -z "$(cat output)"
 '
@@ -42,7 +42,7 @@ test_expect_success 'ceiling #2' '
                GIT_CEILING_DIRECTORIES="$PWD" &&
                export GIT_CEILING_DIRECTORIES XDG_CONFIG_HOME &&
                cd sub &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        ) >output &&
        test xdg = "$(cat output)"
 '
@@ -54,7 +54,7 @@ test_expect_success 'read config file in right order' '
        (
                cd foo &&
                echo "[test]source = repo" >>.git/config &&
-               GIT_CONFIG_PARAMETERS=$cmdline_config test-config \
+               GIT_CONFIG_PARAMETERS=$cmdline_config test-tool config \
                        read_early_config test.source >actual &&
                cat >expected <<-\EOF &&
                home
@@ -71,7 +71,7 @@ test_with_config () {
        (
                cd throwaway &&
                echo "$*" >.git/config &&
-               test-config read_early_config early.config
+               test-tool config read_early_config early.config
        )
 }
 
index e8115df5bad8d9ef46542cfd3c96c32da0bfa4ad..a74c38b5fb22a6d274c26ed008c6bec355801e34 100755 (executable)
@@ -4,7 +4,7 @@ test_description='test main ref store api'
 
 . ./test-lib.sh
 
-RUN="test-ref-store main"
+RUN="test-tool ref-store main"
 
 test_expect_success 'pack_refs(PACK_REFS_ALL | PACK_REFS_PRUNE)' '
        test_commit one &&
@@ -45,7 +45,7 @@ test_expect_success 'rename_refs(master, new-master)' '
 '
 
 test_expect_success 'for_each_ref(refs/heads/)' '
-       $RUN for-each-ref refs/heads/ | cut -c 42- >actual &&
+       $RUN for-each-ref refs/heads/ | cut -d" " -f 2- >actual &&
        cat >expected <<-\EOF &&
        master 0x0
        new-master 0x0
@@ -71,7 +71,7 @@ test_expect_success 'verify_ref(new-master)' '
 '
 
 test_expect_success 'for_each_reflog()' '
-       $RUN for-each-reflog | sort | cut -c 42- >actual &&
+       $RUN for-each-reflog | sort -k2 | cut -c 42- >actual &&
        cat >expected <<-\EOF &&
        HEAD 0x1
        refs/heads/master 0x0
index c32d4cc4652a4496ce8fa6aa1c10c797ae7d760a..e093782cc37c495a122eb8676797b1988b828c29 100755 (executable)
@@ -4,7 +4,7 @@ test_description='test submodule ref store api'
 
 . ./test-lib.sh
 
-RUN="test-ref-store submodule:sub"
+RUN="test-tool ref-store submodule:sub"
 
 test_expect_success 'setup' '
        git init sub &&
index 8842d0329fb7947e811d4ffccff3bba9e8885d5d..2211f9831fb07f933c8c3f0c7cfd89cc5512c253 100755 (executable)
@@ -4,8 +4,8 @@ test_description='test worktree ref store api'
 
 . ./test-lib.sh
 
-RWT="test-ref-store worktree:wt"
-RMAIN="test-ref-store worktree:main"
+RWT="test-tool ref-store worktree:wt"
+RMAIN="test-tool ref-store worktree:main"
 
 test_expect_success 'setup' '
        test_commit first &&
index 553e26d9ceb83fc6529beb95ff71ffa0a899a166..8293131001ee0e816406df858dc47bc2fb9be3ce 100755 (executable)
@@ -339,8 +339,8 @@ test_expect_failure 'reflog with non-commit entries displays all entries' '
 '
 
 test_expect_success 'reflog expire operates on symref not referrent' '
-       git branch -l the_symref &&
-       git branch -l referrent &&
+       git branch --create-reflog the_symref &&
+       git branch --create-reflog referrent &&
        git update-ref referrent HEAD &&
        git symbolic-ref refs/heads/the_symref refs/heads/referrent &&
        test_when_finished "rm -f .git/refs/heads/referrent.lock" &&
index 6ac7734d79be21a82feeadff10064bb4ca7ad47b..596907758d5d47ae8026098a2ce4acd8c01df6aa 100755 (executable)
@@ -10,6 +10,7 @@ test_expect_success 'setup' '
        git commit -m one
 '
 
+commit=$(git rev-parse --short HEAD)
 cat >expect <<'EOF'
 Reflog: HEAD@{0} (C O Mitter <committer@example.com>)
 Reflog message: commit (initial): one
@@ -20,8 +21,8 @@ test_expect_success 'log -g shows reflog headers' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{0}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{0}: commit (initial): one
 EOF
 test_expect_success 'oneline reflog format' '
        git log -g -1 --oneline >actual &&
@@ -33,8 +34,8 @@ test_expect_success 'reflog default format' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-commit e46513e
+cat >expect <<EOF
+commit $commit
 Reflog: HEAD@{0} (C O Mitter <committer@example.com>)
 Reflog message: commit (initial): one
 Author: A U Thor <author@example.com>
@@ -56,8 +57,8 @@ test_expect_success 'using @{now} syntax shows reflog date (multiline)' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
 EOF
 test_expect_success 'using @{now} syntax shows reflog date (oneline)' '
        git log -g -1 --oneline HEAD@{now} >actual &&
@@ -82,8 +83,8 @@ test_expect_success 'using --date= shows reflog date (multiline)' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{Thu Apr 7 15:13:13 2005 -0700}: commit (initial): one
 EOF
 test_expect_success 'using --date= shows reflog date (oneline)' '
        git log -g -1 --oneline --date=default >actual &&
@@ -109,8 +110,8 @@ test_expect_success 'log.date does not invoke "--date" magic (multiline)' '
        test_cmp expect actual
 '
 
-cat >expect <<'EOF'
-e46513e HEAD@{0}: commit (initial): one
+cat >expect <<EOF
+$commit HEAD@{0}: commit (initial): one
 EOF
 test_expect_success 'log.date does not invoke "--date" magic (oneline)' '
        test_config log.date raw &&
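
The conversion pattern used in this file, and echoed in several scripts below, for dropping hard-coded abbreviated SHA-1s: capture the commit with rev-parse and switch the here-doc delimiter from the quoted 'EOF' to a bare EOF so the expectation interpolates it. A condensed sketch, assuming a repository whose only commit is named "one":

commit=$(git rev-parse --short HEAD)
cat >expect <<EOF
$commit HEAD@{0}: commit (initial): one
EOF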
index b06210ec5e8ffa30b59314b1c79ebc9bdb8f6476..02cf2013fc6f68cdb62739e9dff94fa08f5607be 100755 (executable)
@@ -341,7 +341,7 @@ test_expect_success 'make_relative_path handles double slashes in GIT_DIR' '
 
 test_expect_success 'relative $GIT_WORK_TREE and git subprocesses' '
        GIT_DIR=repo.git GIT_WORK_TREE=repo.git/work \
-       test-subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
+       test-tool subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
        echo "$(pwd)/repo.git/work" >expected &&
        test_cmp expected actual
 '
@@ -360,7 +360,7 @@ test_expect_success 'GIT_DIR set (1)' '
        (
                cd work &&
                GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual
        )
 '
@@ -371,7 +371,7 @@ test_expect_success 'GIT_DIR set (2)' '
        (
                cd work &&
                GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual
        )
 '
@@ -382,7 +382,7 @@ test_expect_success 'Auto discovery' '
        (
                cd work &&
                git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual &&
                echo haha >data1 &&
                git add data1 &&
@@ -400,7 +400,7 @@ test_expect_success '$GIT_DIR/common overrides core.worktree' '
        (
                cd work &&
                git rev-parse --git-common-dir >actual &&
-               test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+               test-tool path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
                test_cmp expect actual &&
                echo haha >data2 &&
                git add data2 &&
index 79a0251efa6e049be4fffa41004e73b9950d2bd7..4ee009da666f22f6ff1ddce93dc1f30924c6cc26 100755 (executable)
@@ -157,7 +157,7 @@ test_expect_success 'relative path not found' '
 test_expect_success 'relative path outside worktree' '
        test_must_fail git rev-parse HEAD:../file.txt >output 2>error &&
        test -z "$(cat output)" &&
-       grep "outside repository" error
+       test_i18ngrep "outside repository" error
 '
 
 test_expect_success 'relative path when cwd is outside worktree' '
index b23c4e3fab604f957ec6359eae75400dd1e53174..93c77eac45321cb24823431c4d89dc393049edbf 100755 (executable)
@@ -42,7 +42,7 @@ commit_subject () {
 
 error_message () {
        (cd clone &&
-        test_must_fail git rev-parse --verify "$@")
+        test_must_fail git rev-parse --verify "$@" 2>../error)
 }
 
 test_expect_success '@{upstream} resolves to correct full name' '
@@ -159,8 +159,8 @@ test_expect_success 'branch@{u} error message when no upstream' '
        cat >expect <<-EOF &&
        fatal: no upstream configured for branch ${sq}non-tracking${sq}
        EOF
-       error_message non-tracking@{u} 2>actual &&
-       test_i18ncmp expect actual
+       error_message non-tracking@{u} &&
+       test_i18ncmp expect error
 '
 
 test_expect_success '@{u} error message when no upstream' '
@@ -175,8 +175,8 @@ test_expect_success 'branch@{u} error message with misspelt branch' '
        cat >expect <<-EOF &&
        fatal: no such branch: ${sq}no-such-branch${sq}
        EOF
-       error_message no-such-branch@{u} 2>actual &&
-       test_i18ncmp expect actual
+       error_message no-such-branch@{u} &&
+       test_i18ncmp expect error
 '
 
 test_expect_success '@{u} error message when not on a branch' '
@@ -192,8 +192,8 @@ test_expect_success 'branch@{u} error message if upstream branch not fetched' '
        cat >expect <<-EOF &&
        fatal: upstream branch ${sq}refs/heads/side${sq} not stored as a remote-tracking branch
        EOF
-       error_message bad-upstream@{u} 2>actual &&
-       test_i18ncmp expect actual
+       error_message bad-upstream@{u} &&
+       test_i18ncmp expect error
 '
 
 test_expect_success 'pull works when tracking a local branch' '
@@ -209,8 +209,9 @@ test_expect_success '@{u} works when tracking a local branch' '
        test refs/heads/master = "$(full_name @{u})"
 '
 
+commit=$(git rev-parse HEAD)
 cat >expect <<EOF
-commit 8f489d01d0cc65c3b0f09504ec50b5ed02a70bd5
+commit $commit
 Reflog: master@{0} (C O Mitter <committer@example.com>)
 Reflog message: branch: Created from HEAD
 Author: A U Thor <author@example.com>
@@ -224,7 +225,7 @@ test_expect_success 'log -g other@{u}' '
 '
 
 cat >expect <<EOF
-commit 8f489d01d0cc65c3b0f09504ec50b5ed02a70bd5
+commit $commit
 Reflog: master@{Thu Apr 7 15:17:13 2005 -0700} (C O Mitter <committer@example.com>)
 Reflog message: branch: Created from HEAD
 Author: A U Thor <author@example.com>
index 13ae12dfa7d494f50fc2e728d3e7ee36ff5534bc..e6854b828e2e68ad217721eb6139970b7c5958c0 100755 (executable)
@@ -39,6 +39,10 @@ A few rules for repo setup:
 11. When user's cwd is outside worktree, cwd remains unchanged,
     prefix is NULL.
 "
+
+# This test heavily relies on the standard error of nested function calls.
+test_untraceable=UnfortunatelyYes
+
 . ./test-lib.sh
 
 here=$(pwd)
index 079d2411450ca960d3d82fb9c7d59921c4cbe60a..c4422312f4e482e38172b1ac8b87e29d8708af2e 100755 (executable)
@@ -68,7 +68,7 @@ test_expect_success 'GIT_INDEX_VERSION takes precedence over config' '
                git config --add index.version 2 &&
                git add a 2>&1 &&
                echo 4 >expect &&
-               test-index-version <.git/index >actual &&
+               test-tool index-version <.git/index >actual &&
                test_cmp expect actual
        )
 '
index a66936fe9bde2ba20ab0e57400b6286b0710212a..e4f4c4df4ee3686e5ad1621134a827b941b38bca 100755 (executable)
@@ -11,8 +11,8 @@ sane_unset GIT_FSMONITOR_TEST
 test_expect_success 'enable split index' '
        git config splitIndex.maxPercentChange 100 &&
        git update-index --split-index &&
-       test-dump-split-index .git/index >actual &&
-       indexversion=$(test-index-version <.git/index) &&
+       test-tool dump-split-index .git/index >actual &&
+       indexversion=$(test-tool index-version <.git/index) &&
        if test "$indexversion" = "4"
        then
                own=432ef4b63f32193984f339431fd50ca796493569
@@ -39,7 +39,7 @@ test_expect_success 'add one file' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        base $base
        100644 $EMPTY_BLOB 0    one
@@ -57,8 +57,8 @@ test_expect_success 'disable split index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       BASE=$(test-dump-split-index .git/index | grep "^own" | sed "s/own/base/") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^own" | sed "s/own/base/") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
        EOF
@@ -73,7 +73,7 @@ test_expect_success 'enable split index again, "one" now belongs to base index"'
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -91,7 +91,7 @@ test_expect_success 'modify original file, base index untouched' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        q_to_tab >expect <<-EOF &&
        $BASE
        100644 2e0996000b7e9019eabcad29391bf0f5c7702f0b 0Q
@@ -111,7 +111,7 @@ test_expect_success 'add another file, which stays index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        q_to_tab >expect <<-EOF &&
        $BASE
        100644 2e0996000b7e9019eabcad29391bf0f5c7702f0b 0Q
@@ -130,7 +130,7 @@ test_expect_success 'remove file not in base index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        q_to_tab >expect <<-EOF &&
        $BASE
        100644 2e0996000b7e9019eabcad29391bf0f5c7702f0b 0Q
@@ -147,7 +147,7 @@ test_expect_success 'remove file in base index' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -165,7 +165,7 @@ test_expect_success 'add original file back' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        100644 $EMPTY_BLOB 0    one
@@ -195,7 +195,7 @@ test_expect_success 'unify index, two files remain' '
        EOF
        test_cmp ls-files.expect ls-files.actual &&
 
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
        EOF
@@ -229,8 +229,8 @@ test_expect_success 'set core.splitIndex config variable to true' '
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       two
        EOF
        test_cmp ls-files.expect ls-files.actual &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -248,7 +248,7 @@ test_expect_success 'set core.splitIndex config variable to false' '
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       two
        EOF
        test_cmp ls-files.expect ls-files.actual &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        not a split index
        EOF
@@ -259,8 +259,8 @@ test_expect_success 'set core.splitIndex config variable to true' '
        git config core.splitIndex true &&
        : >three &&
        git update-index --add three &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -269,7 +269,7 @@ test_expect_success 'set core.splitIndex config variable to true' '
        test_cmp expect actual &&
        : >four &&
        git update-index --add four &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       four
@@ -283,8 +283,8 @@ test_expect_success 'check behavior with splitIndex.maxPercentChange unset' '
        git config --unset splitIndex.maxPercentChange &&
        : >five &&
        git update-index --add five &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -293,7 +293,7 @@ test_expect_success 'check behavior with splitIndex.maxPercentChange unset' '
        test_cmp expect actual &&
        : >six &&
        git update-index --add six &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       six
@@ -307,8 +307,8 @@ test_expect_success 'check splitIndex.maxPercentChange set to 0' '
        git config splitIndex.maxPercentChange 0 &&
        : >seven &&
        git update-index --add seven &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -317,8 +317,8 @@ test_expect_success 'check splitIndex.maxPercentChange set to 0' '
        test_cmp expect actual &&
        : >eight &&
        git update-index --add eight &&
-       BASE=$(test-dump-split-index .git/index | grep "^base") &&
-       test-dump-split-index .git/index | sed "/^own/d" >actual &&
+       BASE=$(test-tool dump-split-index .git/index | grep "^base") &&
+       test-tool dump-split-index .git/index | sed "/^own/d" >actual &&
        cat >expect <<-EOF &&
        $BASE
        replacements:
@@ -332,12 +332,12 @@ test_expect_success 'shared index files expire after 2 weeks by default' '
        git update-index --add ten &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        just_under_2_weeks_ago=$((5-14*86400)) &&
-       test-chmtime =$just_under_2_weeks_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_under_2_weeks_ago .git/sharedindex.* &&
        : >eleven &&
        git update-index --add eleven &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        just_over_2_weeks_ago=$((-1-14*86400)) &&
-       test-chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
        : >twelve &&
        git update-index --add twelve &&
        test $(ls .git/sharedindex.* | wc -l) -le 2
@@ -345,12 +345,12 @@ test_expect_success 'shared index files expire after 2 weeks by default' '
 
 test_expect_success 'check splitIndex.sharedIndexExpire set to 16 days' '
        git config splitIndex.sharedIndexExpire "16.days.ago" &&
-       test-chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_over_2_weeks_ago .git/sharedindex.* &&
        : >thirteen &&
        git update-index --add thirteen &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        just_over_16_days_ago=$((-1-16*86400)) &&
-       test-chmtime =$just_over_16_days_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_over_16_days_ago .git/sharedindex.* &&
        : >fourteen &&
        git update-index --add fourteen &&
        test $(ls .git/sharedindex.* | wc -l) -le 2
@@ -359,13 +359,13 @@ test_expect_success 'check splitIndex.sharedIndexExpire set to 16 days' '
 test_expect_success 'check splitIndex.sharedIndexExpire set to "never" and "now"' '
        git config splitIndex.sharedIndexExpire never &&
        just_10_years_ago=$((-365*10*86400)) &&
-       test-chmtime =$just_10_years_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_10_years_ago .git/sharedindex.* &&
        : >fifteen &&
        git update-index --add fifteen &&
        test $(ls .git/sharedindex.* | wc -l) -gt 2 &&
        git config splitIndex.sharedIndexExpire now &&
        just_1_second_ago=-1 &&
-       test-chmtime =$just_1_second_ago .git/sharedindex.* &&
+       test-tool chmtime =$just_1_second_ago .git/sharedindex.* &&
        : >sixteen &&
        git update-index --add sixteen &&
        test $(ls .git/sharedindex.* | wc -l) -le 2
@@ -435,7 +435,7 @@ test_expect_success 'writing split index with null sha1 does not write cache tre
        commit=$(git commit-tree $tree -p HEAD <msg) &&
        git update-ref HEAD "$commit" &&
        GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
-       (test-dump-cache-tree >cache-tree.out || true) &&
+       (test-tool dump-cache-tree >cache-tree.out || true) &&
        test_line_count = 0 cache-tree.out
 '
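
For readers unfamiliar with the helpers these tests now go through, a rough sketch of inspecting a split index; the dump format checked above consists of a "base" line naming the shared index, the entries kept in the split part, and the "replacements:" / "deletions:" sections (or "not a split index" when the feature is off):

git update-index --split-index
test-tool index-version <.git/index       # index format version in use
test-tool dump-split-index .git/index     # base <hash>, entries, replacements:, deletions:
git update-index --no-split-index         # fold everything back into a single index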
 
index bb4f2e0c631f1de7421f50b9fa64f11276fe9645..1fa670625c5be87294eec9c5fe86bf2defff2ce2 100755 (executable)
@@ -189,8 +189,12 @@ test_expect_success 'no advice given for explicit detached head state' '
 # Detached HEAD tests for GIT_PRINT_SHA1_ELLIPSIS (new format)
 test_expect_success 'describe_detached_head prints no SHA-1 ellipsis when not asked to' "
 
+       commit=$(git rev-parse --short=12 master^) &&
+       commit2=$(git rev-parse --short=12 master~2) &&
+       commit3=$(git rev-parse --short=12 master~3) &&
+
        # The first detach operation is more chatty than the following ones.
-       cat >1st_detach <<-'EOF' &&
+       cat >1st_detach <<-EOF &&
        Note: checking out 'HEAD^'.
 
        You are in 'detached HEAD' state. You can look around, make experimental
@@ -202,18 +206,18 @@ test_expect_success 'describe_detached_head prints no SHA-1 ellipsis when not as
 
          git checkout -b <new-branch-name>
 
-       HEAD is now at 7c7cd714e262 three
+       HEAD is now at \$commit three
        EOF
 
        # The remaining ones just show info about previous and current HEADs.
-       cat >2nd_detach <<-'EOF' &&
-       Previous HEAD position was 7c7cd714e262 three
-       HEAD is now at 139b20d8e6c5 two
+       cat >2nd_detach <<-EOF &&
+       Previous HEAD position was \$commit three
+       HEAD is now at \$commit2 two
        EOF
 
-       cat >3rd_detach <<-'EOF' &&
-       Previous HEAD position was 139b20d8e6c5 two
-       HEAD is now at d79ce1670bdc one
+       cat >3rd_detach <<-EOF &&
+       Previous HEAD position was \$commit2 two
+       HEAD is now at \$commit3 one
        EOF
 
        reset &&
@@ -261,8 +265,12 @@ test_expect_success 'describe_detached_head prints no SHA-1 ellipsis when not as
 # Detached HEAD tests for GIT_PRINT_SHA1_ELLIPSIS (old format)
 test_expect_success 'describe_detached_head does print SHA-1 ellipsis when asked to' "
 
+       commit=$(git rev-parse --short=12 master^) &&
+       commit2=$(git rev-parse --short=12 master~2) &&
+       commit3=$(git rev-parse --short=12 master~3) &&
+
        # The first detach operation is more chatty than the following ones.
-       cat >1st_detach <<-'EOF' &&
+       cat >1st_detach <<-EOF &&
        Note: checking out 'HEAD^'.
 
        You are in 'detached HEAD' state. You can look around, make experimental
@@ -274,18 +282,18 @@ test_expect_success 'describe_detached_head does print SHA-1 ellipsis when asked
 
          git checkout -b <new-branch-name>
 
-       HEAD is now at 7c7cd714e262... three
+       HEAD is now at \$commit... three
        EOF
 
        # The remaining ones just show info about previous and current HEADs.
-       cat >2nd_detach <<-'EOF' &&
-       Previous HEAD position was 7c7cd714e262... three
-       HEAD is now at 139b20d8e6c5... two
+       cat >2nd_detach <<-EOF &&
+       Previous HEAD position was \$commit... three
+       HEAD is now at \$commit2... two
        EOF
 
-       cat >3rd_detach <<-'EOF' &&
-       Previous HEAD position was 139b20d8e6c5... two
-       HEAD is now at d79ce1670bdc... one
+       cat >3rd_detach <<-EOF &&
+       Previous HEAD position was \$commit2... two
+       HEAD is now at \$commit3... one
        EOF
 
        reset &&
index f46d0499bc6ea95236f7ca05060b8f8fbbdbbc50..e74d58b9e198a4ea2195d3fc6906e651a0a1d650 100755 (executable)
@@ -68,13 +68,13 @@ test_expect_success 'do not touch files that are already up-to-date' '
        git add file1 file2 &&
        git commit -m base &&
        echo modified >file1 &&
-       test-chmtime =1000000000 file2 &&
+       test-tool chmtime =1000000000 file2 &&
        git update-index -q --refresh &&
        git checkout HEAD -- file1 file2 &&
        echo one >expect &&
        test_cmp expect file1 &&
        echo "1000000000        file2" >expect &&
-       test-chmtime -v +0 file2 >actual &&
+       test-tool chmtime -v +0 file2 >actual &&
        test_cmp expect actual
 '
 
index 2b959449730e14dd4e650ce9115a463d71eedca4..d0d2e4f7ec3310ec51da7144fa87151129f393c0 100755 (executable)
@@ -451,32 +451,68 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' '
 '
 
 post_checkout_hook () {
-       test_when_finished "rm -f .git/hooks/post-checkout" &&
-       mkdir -p .git/hooks &&
-       write_script .git/hooks/post-checkout <<-\EOF
-       echo $* >hook.actual
+       gitdir=${1:-.git}
+       test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
+       mkdir -p $gitdir/hooks &&
+       write_script $gitdir/hooks/post-checkout <<-\EOF
+       {
+               echo $*
+               git rev-parse --git-dir --show-toplevel
+       } >hook.actual
        EOF
 }
 
 test_expect_success '"add" invokes post-checkout hook (branch)' '
        post_checkout_hook &&
-       printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+       {
+               echo $_z40 $(git rev-parse HEAD) 1 &&
+               echo $(pwd)/.git/worktrees/gumby &&
+               echo $(pwd)/gumby
+       } >hook.expect &&
        git worktree add gumby &&
-       test_cmp hook.expect hook.actual
+       test_cmp hook.expect gumby/hook.actual
 '
 
 test_expect_success '"add" invokes post-checkout hook (detached)' '
        post_checkout_hook &&
-       printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+       {
+               echo $_z40 $(git rev-parse HEAD) 1 &&
+               echo $(pwd)/.git/worktrees/grumpy &&
+               echo $(pwd)/grumpy
+       } >hook.expect &&
        git worktree add --detach grumpy &&
-       test_cmp hook.expect hook.actual
+       test_cmp hook.expect grumpy/hook.actual
 '
 
 test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
        post_checkout_hook &&
        rm -f hook.actual &&
        git worktree add --no-checkout gloopy &&
-       test_path_is_missing hook.actual
+       test_path_is_missing gloopy/hook.actual
+'
+
+test_expect_success '"add" in other worktree invokes post-checkout hook' '
+       post_checkout_hook &&
+       {
+               echo $_z40 $(git rev-parse HEAD) 1 &&
+               echo $(pwd)/.git/worktrees/guppy &&
+               echo $(pwd)/guppy
+       } >hook.expect &&
+       git -C gloopy worktree add --detach ../guppy &&
+       test_cmp hook.expect guppy/hook.actual
+'
+
+test_expect_success '"add" in bare repo invokes post-checkout hook' '
+       rm -rf bare &&
+       git clone --bare . bare &&
+       {
+               echo $_z40 $(git --git-dir=bare rev-parse HEAD) 1 &&
+               echo $(pwd)/bare/worktrees/goozy &&
+               echo $(pwd)/goozy
+       } >hook.expect &&
+       post_checkout_hook bare &&
+       git -C bare worktree add --detach ../goozy &&
+       test_cmp hook.expect goozy/hook.actual
 '
 
 test_done
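
As the expectations above spell out, the post-checkout hook installed by these tests receives the usual three arguments (previous HEAD, which is the null object name for a brand-new worktree; new HEAD; and a flag that is 1 for a branch checkout), and with this change it runs inside the new worktree, whose git dir lives under .git/worktrees/<name>. A minimal stand-alone hook in the same spirit, as a sketch rather than the script written above:

#!/bin/sh
# .git/hooks/post-checkout: $1=old HEAD, $2=new HEAD, $3=1 for a branch checkout
{
	echo "$1 $2 $3"
	git rev-parse --git-dir --show-toplevel
} >hook.log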
index a0f1e3bb800ec6943648eeffb9e7ca84e328fa41..b7d6d5d45adf6067ab2f39801f658f778f9b2855 100755 (executable)
@@ -78,10 +78,9 @@ test_expect_success 'not prune locked checkout' '
 
 test_expect_success 'not prune recent checkouts' '
        test_when_finished rm -r .git/worktrees &&
-       mkdir zz &&
-       mkdir -p .git/worktrees/jlm &&
-       echo "$(pwd)"/zz >.git/worktrees/jlm/gitdir &&
-       rmdir zz &&
+       git worktree add jlm HEAD &&
+       test -d .git/worktrees/jlm &&
+       rm -rf jlm &&
        git worktree prune --verbose --expire=2.days.ago &&
        test -d .git/worktrees/jlm
 '
index 8298aaf97f706ea796a12614acafd19636aeaa70..5d5b3632ba0a7cf5364ab4db1df8ca7ea285b4d5 100755 (executable)
@@ -7,7 +7,8 @@ test_description='test git worktree move, remove, lock and unlock'
 test_expect_success 'setup' '
        test_commit init &&
        git worktree add source &&
-       git worktree list --porcelain | grep "^worktree" >actual &&
+       git worktree list --porcelain >out &&
+       grep "^worktree" out >actual &&
        cat <<-EOF >expected &&
        worktree $(pwd)
        worktree $(pwd)/source
@@ -59,4 +60,86 @@ test_expect_success 'unlock worktree twice' '
        test_path_is_missing .git/worktrees/source/locked
 '
 
+test_expect_success 'move non-worktree' '
+       mkdir abc &&
+       test_must_fail git worktree move abc def
+'
+
+test_expect_success 'move locked worktree' '
+       git worktree lock source &&
+       test_when_finished "git worktree unlock source" &&
+       test_must_fail git worktree move source destination
+'
+
+test_expect_success 'move worktree' '
+       toplevel="$(pwd)" &&
+       git worktree move source destination &&
+       test_path_is_missing source &&
+       git worktree list --porcelain >out &&
+       grep "^worktree.*/destination" out &&
+       ! grep "^worktree.*/source" out &&
+       git -C destination log --format=%s >actual2 &&
+       echo init >expected2 &&
+       test_cmp expected2 actual2
+'
+
+test_expect_success 'move main worktree' '
+       test_must_fail git worktree move . def
+'
+
+test_expect_success 'move worktree to another dir' '
+       mkdir some-dir &&
+       git worktree move destination some-dir &&
+       test_when_finished "git worktree move some-dir/destination destination" &&
+       test_path_is_missing destination &&
+       git worktree list --porcelain >out &&
+       grep "^worktree.*/some-dir/destination" out &&
+       git -C some-dir/destination log --format=%s >actual2 &&
+       echo init >expected2 &&
+       test_cmp expected2 actual2
+'
+
+test_expect_success 'remove main worktree' '
+       test_must_fail git worktree remove .
+'
+
+test_expect_success 'remove locked worktree' '
+       git worktree lock destination &&
+       test_when_finished "git worktree unlock destination" &&
+       test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with dirty tracked file' '
+       echo dirty >>destination/init.t &&
+       test_when_finished "git -C destination checkout init.t" &&
+       test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with untracked file' '
+       : >destination/untracked &&
+       test_must_fail git worktree remove destination
+'
+
+test_expect_success 'force remove worktree with untracked file' '
+       git worktree remove --force destination &&
+       test_path_is_missing destination
+'
+
+test_expect_success 'remove missing worktree' '
+       git worktree add to-be-gone &&
+       test -d .git/worktrees/to-be-gone &&
+       mv to-be-gone gone &&
+       git worktree remove to-be-gone &&
+       test_path_is_missing .git/worktrees/to-be-gone
+'
+
+test_expect_success 'NOT remove missing-but-locked worktree' '
+       git worktree add gone-but-locked &&
+       git worktree lock gone-but-locked &&
+       test -d .git/worktrees/gone-but-locked &&
+       mv gone-but-locked really-gone-now &&
+       test_must_fail git worktree remove gone-but-locked &&
+       test_path_is_dir .git/worktrees/gone-but-locked
+'
+
 test_done
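For orientation, the behaviour exercised above boils down to the following sketch, assuming a throwaway repository; the scratch/elsewhere names are illustrative:

	git worktree add scratch                # create a linked worktree
	git worktree move scratch elsewhere     # refused if scratch is locked, or is the main worktree
	git worktree remove elsewhere           # refused if it has modified or untracked files
	git worktree remove --force elsewhere   # removes it despite untracked files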
index c8bce8c2e4314aaf466019438818293102c12c9c..685ec45639a5e9a2bf929e10aa30976e9a4c456b 100755 (executable)
@@ -8,19 +8,20 @@ test_description='git update-index --again test.
 
 . ./test-lib.sh
 
-cat > expected <<\EOF
-100644 3b18e512dba79e4c8300dd08aeb37f8e728b8dad 0      file1
-100644 9db8893856a8a02eaa73470054b7c1c5a7c82e47 0      file2
-EOF
-test_expect_success 'update-index --add' \
-       'echo hello world >file1 &&
-        echo goodbye people >file2 &&
-        git update-index --add file1 file2 &&
-        git ls-files -s >current &&
-        cmp current expected'
+test_expect_success 'update-index --add' '
+       echo hello world >file1 &&
+       echo goodbye people >file2 &&
+       git update-index --add file1 file2 &&
+       git ls-files -s >current &&
+       cat >expected <<-EOF &&
+       100644 $(git hash-object file1) 0       file1
+       100644 $(git hash-object file2) 0       file2
+       EOF
+       cmp current expected
+'
 
-test_expect_success 'update-index --again' \
-       'rm -f file1 &&
+test_expect_success 'update-index --again' '
+       rm -f file1 &&
        echo hello everybody >file2 &&
        if git update-index --again
        then
@@ -29,25 +30,23 @@ test_expect_success 'update-index --again' \
        else
                echo happy - failed as expected
        fi &&
-        git ls-files -s >current &&
-        cmp current expected'
+       git ls-files -s >current &&
+       cmp current expected
+'
 
-cat > expected <<\EOF
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index --remove --again' \
-       'git update-index --remove --again &&
-        git ls-files -s >current &&
-        cmp current expected'
+test_expect_success 'update-index --remove --again' '
+       git update-index --remove --again &&
+       git ls-files -s >current &&
+       cat >expected <<-EOF &&
+       100644 $(git hash-object file2) 0       file2
+       EOF
+       cmp current expected
+'
 
 test_expect_success 'first commit' 'git commit -m initial'
 
-cat > expected <<\EOF
-100644 53ab446c3f4e42ce9bb728a0ccb283a101be4979 0      dir1/file3
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index again' \
-       'mkdir -p dir1 &&
+test_expect_success 'update-index again' '
+       mkdir -p dir1 &&
        echo hello world >dir1/file3 &&
        echo goodbye people >file2 &&
        git update-index --add file2 dir1/file3 &&
@@ -55,30 +54,38 @@ test_expect_success 'update-index again' \
        echo happy >dir1/file3 &&
        git update-index --again &&
        git ls-files -s >current &&
-       cmp current expected'
+       cat >expected <<-EOF &&
+       100644 $(git hash-object dir1/file3) 0  dir1/file3
+       100644 $(git hash-object file2) 0       file2
+       EOF
+       cmp current expected
+'
 
-cat > expected <<\EOF
-100644 d7fb3f695f06c759dbf3ab00046e7cc2da22d10f 0      dir1/file3
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index --update from subdir' \
-       'echo not so happy >file2 &&
+file2=$(git hash-object file2)
+test_expect_success 'update-index --update from subdir' '
+       echo not so happy >file2 &&
        (cd dir1 &&
        cat ../file2 >file3 &&
        git update-index --again
        ) &&
        git ls-files -s >current &&
-       cmp current expected'
+       cat >expected <<-EOF &&
+       100644 $(git hash-object dir1/file3) 0  dir1/file3
+       100644 $file2 0 file2
+       EOF
+       test_cmp current expected
+'
 
-cat > expected <<\EOF
-100644 594fb5bb1759d90998e2bf2a38261ae8e243c760 0      dir1/file3
-100644 0f1ae1422c2bf43f117d3dbd715c988a9ed2103f 0      file2
-EOF
-test_expect_success 'update-index --update with pathspec' \
-       'echo very happy >file2 &&
+test_expect_success 'update-index --update with pathspec' '
+       echo very happy >file2 &&
        cat file2 >dir1/file3 &&
        git update-index --again dir1/ &&
        git ls-files -s >current &&
-       cmp current expected'
+       cat >expected <<-EOF &&
+       100644 $(git hash-object dir1/file3) 0  dir1/file3
+       100644 $file2 0 file2
+       EOF
+       cmp current expected
+'
 
 test_done
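The rewritten expectations above no longer hard-code blob names. A sketch of the pattern, assuming a file1 that has already been added to the index:

	# "git ls-files -s" prints "<mode> <blob> <stage>\t<path>"; deriving
	# the blob name with hash-object keeps the expectation independent of
	# the exact blob names of the test content.
	cat >expected <<-EOF &&
	100644 $(git hash-object file1) 0	file1
	EOF
	git ls-files -s file1 >current &&
	test_cmp expected current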
index cc830da58d920718b7b0a990a359afa9dd783b4c..7e2e7dd4ae5842bc49ccaf5dc116deb3b3d6b993 100755 (executable)
@@ -33,7 +33,7 @@ test_expect_success 'setup' '
 '
 
 test_expect_success 'index is at version 2' '
-       test "$(test-index-version < .git/index)" = 2
+       test "$(test-tool index-version < .git/index)" = 2
 '
 
 test_expect_success 'update-index --skip-worktree' '
@@ -42,7 +42,7 @@ test_expect_success 'update-index --skip-worktree' '
 '
 
 test_expect_success 'index is at version 3 after having some skip-worktree entries' '
-       test "$(test-index-version < .git/index)" = 3
+       test "$(test-tool index-version < .git/index)" = 3
 '
 
 test_expect_success 'ls-files -t' '
@@ -55,7 +55,7 @@ test_expect_success 'update-index --no-skip-worktree' '
 '
 
 test_expect_success 'index version is back to 2 when there is no skip-worktree entry' '
-       test "$(test-index-version < .git/index)" = 2
+       test "$(test-tool index-version < .git/index)" = 2
 '
 
 test_done
index 32ac6e09bdc81acfb8de5cf887302794d20c8ece..1db7e6a1abbebb63f811fa6ecbcd1db67607298e 100755 (executable)
@@ -85,9 +85,9 @@ test_expect_success '--chmod=+x and chmod=-x in the same argument list' '
        >B &&
        git add A B &&
        git update-index --chmod=+x A --chmod=-x B &&
-       cat >expect <<-\EOF &&
-       100755 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       A
-       100644 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 0       B
+       cat >expect <<-EOF &&
+       100755 $EMPTY_BLOB 0    A
+       100644 $EMPTY_BLOB 0    B
        EOF
        git ls-files --stage A B >actual &&
        test_cmp expect actual
index bdf5198b7eff11f9a7bfb57239f5905d92fe4bf7..08af596ba6c6b032eb1696c3beaba7c1776b4fd5 100755 (executable)
@@ -4,7 +4,7 @@ test_description='Test the lazy init name hash with various folder structures'
 
 . ./test-lib.sh
 
-if test 1 -eq $($GIT_BUILD_DIR/t/helper/test-online-cpus)
+if test 1 -eq $($GIT_BUILD_DIR/t/helper/test-tool online-cpus)
 then
        skip_all='skipping lazy-init tests, single cpu'
        test_done
@@ -21,7 +21,7 @@ test_expect_success 'no buffer overflow in lazy_init_name_hash' '
        ) |
        sed "s/^/100644 $EMPTY_BLOB     /" |
        git update-index --index-info &&
-       test-lazy-init-name-hash -m
+       test-tool lazy-init-name-hash -m
 '
 
 test_done
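The helper renames in this and several later hunks follow one pattern: standalone test binaries become subcommands of a single test-tool binary. A sketch of the correspondence, assuming t/helper is on PATH and with illustrative file names:

	# Old standalone helpers...
	#   test-chmtime =+0 some-file
	#   test-index-version <.git/index
	#   test-online-cpus
	# ...are now spelled as subcommands of one binary:
	test-tool chmtime =+0 some-file
	test-tool index-version <.git/index
	test-tool online-cpus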
index cdc38fe5d1a0d171aa3cdf9ac8ee73dd2cd3b18d..3563e77b374c69a138bdfa3b274b75b6094c55a7 100755 (executable)
@@ -525,20 +525,22 @@ test_expect_success 'merge-recursive w/ empty work tree - ours has rename' '
                GIT_INDEX_FILE="$PWD/ours-has-rename-index" &&
                export GIT_INDEX_FILE &&
                mkdir "$GIT_WORK_TREE" &&
-               git read-tree -i -m $c7 &&
-               git update-index --ignore-missing --refresh &&
-               git merge-recursive $c0 -- $c7 $c3 &&
-               git ls-files -s >actual-files
-       ) 2>actual-err &&
-       >expected-err &&
+               git read-tree -i -m $c7 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git update-index --ignore-missing --refresh 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git merge-recursive $c0 -- $c7 $c3 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git ls-files -s >actual-files 2>actual-err &&
+               test_must_be_empty actual-err
+       ) &&
        cat >expected-files <<-EOF &&
        100644 $o3 0    b/c
        100644 $o0 0    c
        100644 $o0 0    d/e
        100644 $o0 0    e
        EOF
-       test_cmp expected-files actual-files &&
-       test_cmp expected-err actual-err
+       test_cmp expected-files actual-files
 '
 
 test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' '
@@ -548,20 +550,22 @@ test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' '
                GIT_INDEX_FILE="$PWD/theirs-has-rename-index" &&
                export GIT_INDEX_FILE &&
                mkdir "$GIT_WORK_TREE" &&
-               git read-tree -i -m $c3 &&
-               git update-index --ignore-missing --refresh &&
-               git merge-recursive $c0 -- $c3 $c7 &&
-               git ls-files -s >actual-files
-       ) 2>actual-err &&
-       >expected-err &&
+               git read-tree -i -m $c3 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git update-index --ignore-missing --refresh 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git merge-recursive $c0 -- $c3 $c7 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git ls-files -s >actual-files 2>actual-err &&
+               test_must_be_empty actual-err
+       ) &&
        cat >expected-files <<-EOF &&
        100644 $o3 0    b/c
        100644 $o0 0    c
        100644 $o0 0    d/e
        100644 $o0 0    e
        EOF
-       test_cmp expected-files actual-files &&
-       test_cmp expected-err actual-err
+       test_cmp expected-files actual-files
 '
 
 test_expect_success 'merge removes empty directories' '
index 163a14a1c2cb77b2a644a7342fd554940f7dd2e3..dce102130fb77ddda7cdc9aa9211507d9c2f2e09 100755 (executable)
@@ -4,266 +4,431 @@ test_description='wildmatch tests'
 
 . ./test-lib.sh
 
-match() {
-    if [ $1 = 1 ]; then
-       test_expect_success "wildmatch:     match '$3' '$4'" "
-           test-wildmatch wildmatch '$3' '$4'
-       "
-    else
-       test_expect_success "wildmatch:  no match '$3' '$4'" "
-           ! test-wildmatch wildmatch '$3' '$4'
-       "
-    fi
+should_create_test_file() {
+       file=$1
+
+       case $file in
+       # `touch .` will succeed but obviously not do what we intend
+       # here.
+       ".")
+               return 1
+               ;;
+       # We cannot create a file with an empty filename.
+       "")
+               return 1
+               ;;
+	# Tests that check e.g. that foo//bar is matched by foo/*/bar
+	# can't be exercised on the filesystem, since a path there can
+	# never contain a double slash.
+       *//*)
+               return 1
+               ;;
+       # When testing the difference between foo/bar and foo/bar/ we
+       # can't test the latter.
+       */)
+               return 1
+               ;;
+       # On Windows, \ in paths is silently converted to /, which
+       # would result in the "touch" below working, but the test
+       # itself failing. See 6fd1106aa4 ("t3700: Skip a test with
+       # backslashes in pathspec", 2009-03-13) for prior art and
+       # details.
+       *\\*)
+               if ! test_have_prereq BSLASHPSPEC
+               then
+                       return 1
+               fi
+               # NOTE: The ;;& bash extension is not portable, so
+               # this test needs to be at the end of the pattern
+               # list.
+               #
+               # If we want to add more conditional returns we either
+               # need a new case statement, or turn this whole thing
+               # into a series of "if" tests.
+               ;;
+       esac
+
+	# On Windows proper (i.e. not Cygwin) many file names that
+	# Cygwin would otherwise emulate cannot be created at all.
+       if test_have_prereq MINGW
+       then
+               case $file in
+               " ")
+                       # Files called " " are forbidden on Windows
+                       return 1
+                       ;;
+               *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**)
+                       # Files with various special characters aren't
+                       # allowed on Windows. Sourced from
+                       # https://stackoverflow.com/a/31976060
+                       return 1
+                       ;;
+               esac
+       fi
+
+       return 0
 }
 
-imatch() {
-    if [ $1 = 1 ]; then
-       test_expect_success "iwildmatch:    match '$2' '$3'" "
-           test-wildmatch iwildmatch '$2' '$3'
-       "
-    else
-       test_expect_success "iwildmatch: no match '$2' '$3'" "
-           ! test-wildmatch iwildmatch '$2' '$3'
-       "
-    fi
+match_with_function() {
+       text=$1
+       pattern=$2
+       match_expect=$3
+       match_function=$4
+
+       if test "$match_expect" = 1
+       then
+               test_expect_success "$match_function: match '$text' '$pattern'" "
+                       test-tool wildmatch $match_function '$text' '$pattern'
+               "
+       elif test "$match_expect" = 0
+       then
+               test_expect_success "$match_function: no match '$text' '$pattern'" "
+                       test_must_fail test-tool wildmatch $match_function '$text' '$pattern'
+               "
+       else
+               test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+       fi
+
+}
+
+match_with_ls_files() {
+       text=$1
+       pattern=$2
+       match_expect=$3
+       match_function=$4
+       ls_files_args=$5
+
+       match_stdout_stderr_cmp="
+               tr -d '\0' <actual.raw >actual &&
+               >expect.err &&
+               test_cmp expect.err actual.err &&
+               test_cmp expect actual"
+
+       if test "$match_expect" = 'E'
+       then
+               if test -e .git/created_test_file
+               then
+                       test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" "
+                               printf '%s' '$text' >expect &&
+                               test_must_fail git$ls_files_args ls-files -z -- '$pattern'
+                       "
+               else
+                       test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+               fi
+       elif test "$match_expect" = 1
+       then
+               if test -e .git/created_test_file
+               then
+                       test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" "
+                               printf '%s' '$text' >expect &&
+                               git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+                               $match_stdout_stderr_cmp
+                       "
+               else
+                       test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+               fi
+       elif test "$match_expect" = 0
+       then
+               if test -e .git/created_test_file
+               then
+                       test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" "
+                               >expect &&
+                               git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+                               $match_stdout_stderr_cmp
+                       "
+               else
+                       test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false'
+               fi
+       else
+               test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+       fi
 }
 
-pathmatch() {
-    if [ $1 = 1 ]; then
-       test_expect_success "pathmatch:     match '$2' '$3'" "
-           test-wildmatch pathmatch '$2' '$3'
-       "
-    else
-       test_expect_success "pathmatch:  no match '$2' '$3'" "
-           ! test-wildmatch pathmatch '$2' '$3'
-       "
-    fi
+match() {
+       if test "$#" = 6
+       then
+               # When test-tool wildmatch and git ls-files produce the same
+               # result.
+               match_glob=$1
+               match_file_glob=$match_glob
+               match_iglob=$2
+               match_file_iglob=$match_iglob
+               match_pathmatch=$3
+               match_file_pathmatch=$match_pathmatch
+               match_pathmatchi=$4
+               match_file_pathmatchi=$match_pathmatchi
+               text=$5
+               pattern=$6
+       elif test "$#" = 10
+       then
+               match_glob=$1
+               match_iglob=$2
+               match_pathmatch=$3
+               match_pathmatchi=$4
+               match_file_glob=$5
+               match_file_iglob=$6
+               match_file_pathmatch=$7
+               match_file_pathmatchi=$8
+               text=$9
+               pattern=${10}
+       fi
+
+       test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' '
+               if test -e .git/created_test_file
+               then
+                       git reset &&
+                       git clean -df
+               fi
+       '
+
+       printf '%s' "$text" >.git/expected_test_file
+
+       test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" '
+               file=$(cat .git/expected_test_file) &&
+               if should_create_test_file "$file"
+               then
+                       dirs=${file%/*}
+                       if test "$file" != "$dirs"
+                       then
+                               mkdir -p -- "$dirs" &&
+                               touch -- "./$text"
+                       else
+                               touch -- "./$file"
+                       fi &&
+                       git add -A &&
+                       printf "%s" "$file" >.git/created_test_file
+               elif test -e .git/created_test_file
+               then
+                       rm .git/created_test_file
+               fi
+       '
+
+       # $1: Case sensitive glob match: test-tool wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_glob "wildmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs"
+
+       # $2: Case insensitive glob match: test-tool wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_iglob "iwildmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs"
+
+       # $3: Case sensitive path match: test-tool wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_pathmatch "pathmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" ""
+
+       # $4: Case insensitive path match: test-tool wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs"
 }
 
-# Basic wildmat features
-match 1 1 foo foo
-match 0 0 foo bar
-match 1 1 '' ""
-match 1 1 foo '???'
-match 0 0 foo '??'
-match 1 1 foo '*'
-match 1 1 foo 'f*'
-match 0 0 foo '*f'
-match 1 1 foo '*foo*'
-match 1 1 foobar '*ob*a*r*'
-match 1 1 aaaaaaabababab '*ab'
-match 1 1 'foo*' 'foo\*'
-match 0 0 foobar 'foo\*bar'
-match 1 1 'f\oo' 'f\\oo'
-match 1 1 ball '*[al]?'
-match 0 0 ten '[ten]'
-match 0 1 ten '**[!te]'
-match 0 0 ten '**[!ten]'
-match 1 1 ten 't[a-g]n'
-match 0 0 ten 't[!a-g]n'
-match 1 1 ton 't[!a-g]n'
-match 1 1 ton 't[^a-g]n'
-match 1 x 'a]b' 'a[]]b'
-match 1 x a-b 'a[]-]b'
-match 1 x 'a]b' 'a[]-]b'
-match 0 x aab 'a[]-]b'
-match 1 x aab 'a[]a-]b'
-match 1 1 ']' ']'
+# Basic wildmatch features
+match 1 1 1 1 foo foo
+match 0 0 0 0 foo bar
+match 1 1 1 1 '' ""
+match 1 1 1 1 foo '???'
+match 0 0 0 0 foo '??'
+match 1 1 1 1 foo '*'
+match 1 1 1 1 foo 'f*'
+match 0 0 0 0 foo '*f'
+match 1 1 1 1 foo '*foo*'
+match 1 1 1 1 foobar '*ob*a*r*'
+match 1 1 1 1 aaaaaaabababab '*ab'
+match 1 1 1 1 'foo*' 'foo\*'
+match 0 0 0 0 foobar 'foo\*bar'
+match 1 1 1 1 'f\oo' 'f\\oo'
+match 1 1 1 1 ball '*[al]?'
+match 0 0 0 0 ten '[ten]'
+match 0 0 1 1 ten '**[!te]'
+match 0 0 0 0 ten '**[!ten]'
+match 1 1 1 1 ten 't[a-g]n'
+match 0 0 0 0 ten 't[!a-g]n'
+match 1 1 1 1 ton 't[!a-g]n'
+match 1 1 1 1 ton 't[^a-g]n'
+match 1 1 1 1 'a]b' 'a[]]b'
+match 1 1 1 1 a-b 'a[]-]b'
+match 1 1 1 1 'a]b' 'a[]-]b'
+match 0 0 0 0 aab 'a[]-]b'
+match 1 1 1 1 aab 'a[]a-]b'
+match 1 1 1 1 ']' ']'
 
 # Extended slash-matching features
-match 0 0 'foo/baz/bar' 'foo*bar'
-match 0 0 'foo/baz/bar' 'foo**bar'
-match 0 1 'foobazbar' 'foo**bar'
-match 1 1 'foo/baz/bar' 'foo/**/bar'
-match 1 0 'foo/baz/bar' 'foo/**/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar'
-match 1 0 'foo/bar' 'foo/**/bar'
-match 1 0 'foo/bar' 'foo/**/**/bar'
-match 0 0 'foo/bar' 'foo?bar'
-match 0 0 'foo/bar' 'foo[/]bar'
-match 0 0 'foo/bar' 'foo[^a-z]bar'
-match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 0 'foo' '**/foo'
-match 1 x 'XXX/foo' '**/foo'
-match 1 0 'bar/baz/foo' '**/foo'
-match 0 0 'bar/baz/foo' '*/foo'
-match 0 0 'foo/bar/baz' '**/bar*'
-match 1 0 'deep/foo/bar/baz' '**/bar/*'
-match 0 0 'deep/foo/bar/baz/' '**/bar/*'
-match 1 0 'deep/foo/bar/baz/' '**/bar/**'
-match 0 0 'deep/foo/bar' '**/bar/*'
-match 1 0 'deep/foo/bar/' '**/bar/**'
-match 0 0 'foo/bar/baz' '**/bar**'
-match 1 0 'foo/bar/baz/x' '*/bar/**'
-match 0 0 'deep/foo/bar/baz/x' '*/bar/**'
-match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*'
+match 0 0 1 1 'foo/baz/bar' 'foo*bar'
+match 0 0 1 1 'foo/baz/bar' 'foo**bar'
+match 0 0 1 1 'foobazbar' 'foo**bar'
+match 1 1 1 1 'foo/baz/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/**/bar'
+match 0 0 1 1 'foo/bar' 'foo?bar'
+match 0 0 1 1 'foo/bar' 'foo[/]bar'
+match 0 0 1 1 'foo/bar' 'foo[^a-z]bar'
+match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 0 0 'foo' '**/foo'
+match 1 1 1 1 'XXX/foo' '**/foo'
+match 1 1 1 1 'bar/baz/foo' '**/foo'
+match 0 0 1 1 'bar/baz/foo' '*/foo'
+match 0 0 1 1 'foo/bar/baz' '**/bar*'
+match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*'
+match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**'
+match 0 0 0 0 'deep/foo/bar' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/' '**/bar/**'
+match 0 0 1 1 'foo/bar/baz' '**/bar**'
+match 1 1 1 1 'foo/bar/baz/x' '*/bar/**'
+match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**'
+match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*'
 
 # Various additional tests
-match 0 0 'acrt' 'a[c-c]st'
-match 1 1 'acrt' 'a[c-c]rt'
-match 0 0 ']' '[!]-]'
-match 1 x 'a' '[!]-]'
-match 0 0 '' '\'
-match 0 x '\' '\'
-match 0 x 'XXX/\' '*/\'
-match 1 x 'XXX/\' '*/\\'
-match 1 1 'foo' 'foo'
-match 1 1 '@foo' '@foo'
-match 0 0 'foo' '@foo'
-match 1 1 '[ab]' '\[ab]'
-match 1 1 '[ab]' '[[]ab]'
-match 1 x '[ab]' '[[:]ab]'
-match 0 x '[ab]' '[[::]ab]'
-match 1 x '[ab]' '[[:digit]ab]'
-match 1 x '[ab]' '[\[:]ab]'
-match 1 1 '?a?b' '\??\?b'
-match 1 1 'abc' '\a\b\c'
-match 0 0 'foo' ''
-match 1 0 'foo/bar/baz/to' '**/t[o]'
+match 0 0 0 0 'acrt' 'a[c-c]st'
+match 1 1 1 1 'acrt' 'a[c-c]rt'
+match 0 0 0 0 ']' '[!]-]'
+match 1 1 1 1 'a' '[!]-]'
+match 0 0 0 0 '' '\'
+match 0 0 0 0 \
+      1 1 1 1 '\' '\'
+match 0 0 0 0 'XXX/\' '*/\'
+match 1 1 1 1 'XXX/\' '*/\\'
+match 1 1 1 1 'foo' 'foo'
+match 1 1 1 1 '@foo' '@foo'
+match 0 0 0 0 'foo' '@foo'
+match 1 1 1 1 '[ab]' '\[ab]'
+match 1 1 1 1 '[ab]' '[[]ab]'
+match 1 1 1 1 '[ab]' '[[:]ab]'
+match 0 0 0 0 '[ab]' '[[::]ab]'
+match 1 1 1 1 '[ab]' '[[:digit]ab]'
+match 1 1 1 1 '[ab]' '[\[:]ab]'
+match 1 1 1 1 '?a?b' '\??\?b'
+match 1 1 1 1 'abc' '\a\b\c'
+match 0 0 0 0 \
+      E E E E 'foo' ''
+match 1 1 1 1 'foo/bar/baz/to' '**/t[o]'
 
 # Character class tests
-match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
-match 0 x 'a' '[[:digit:][:upper:][:space:]]'
-match 1 x 'A' '[[:digit:][:upper:][:space:]]'
-match 1 x '1' '[[:digit:][:upper:][:space:]]'
-match 0 x '1' '[[:digit:][:upper:][:spaci:]]'
-match 1 x ' ' '[[:digit:][:upper:][:space:]]'
-match 0 x '.' '[[:digit:][:upper:][:space:]]'
-match 1 x '.' '[[:digit:][:punct:][:space:]]'
-match 1 x '5' '[[:xdigit:]]'
-match 1 x 'f' '[[:xdigit:]]'
-match 1 x 'D' '[[:xdigit:]]'
-match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
-match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
-match 1 x '5' '[a-c[:digit:]x-z]'
-match 1 x 'b' '[a-c[:digit:]x-z]'
-match 1 x 'y' '[a-c[:digit:]x-z]'
-match 0 x 'q' '[a-c[:digit:]x-z]'
-
-# Additional tests, including some malformed wildmats
-match 1 x ']' '[\\-^]'
-match 0 0 '[' '[\\-^]'
-match 1 x '-' '[\-_]'
-match 1 x ']' '[\]]'
-match 0 0 '\]' '[\]]'
-match 0 0 '\' '[\]]'
-match 0 0 'ab' 'a[]b'
-match 0 x 'a[]b' 'a[]b'
-match 0 x 'ab[' 'ab['
-match 0 0 'ab' '[!'
-match 0 0 'ab' '[-'
-match 1 1 '-' '[-]'
-match 0 0 '-' '[a-'
-match 0 0 '-' '[!a-'
-match 1 x '-' '[--A]'
-match 1 x '5' '[--A]'
-match 1 1 ' ' '[ --]'
-match 1 1 '$' '[ --]'
-match 1 1 '-' '[ --]'
-match 0 0 '0' '[ --]'
-match 1 x '-' '[---]'
-match 1 x '-' '[------]'
-match 0 0 'j' '[a-e-n]'
-match 1 x '-' '[a-e-n]'
-match 1 x 'a' '[!------]'
-match 0 0 '[' '[]-a]'
-match 1 x '^' '[]-a]'
-match 0 0 '^' '[!]-a]'
-match 1 x '[' '[!]-a]'
-match 1 1 '^' '[a^bc]'
-match 1 x '-b]' '[a-]b]'
-match 0 0 '\' '[\]'
-match 1 1 '\' '[\\]'
-match 0 0 '\' '[!\\]'
-match 1 1 'G' '[A-\\]'
-match 0 0 'aaabbb' 'b*a'
-match 0 0 'aabcaa' '*ba*'
-match 1 1 ',' '[,]'
-match 1 1 ',' '[\\,]'
-match 1 1 '\' '[\\,]'
-match 1 1 '-' '[,-.]'
-match 0 0 '+' '[,-.]'
-match 0 0 '-.]' '[,-.]'
-match 1 1 '2' '[\1-\3]'
-match 1 1 '3' '[\1-\3]'
-match 0 0 '4' '[\1-\3]'
-match 1 1 '\' '[[-\]]'
-match 1 1 '[' '[[-\]]'
-match 1 1 ']' '[[-\]]'
-match 0 0 '-' '[[-\]]'
+match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
+match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]'
+match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]'
+match 1 1 1 1 '5' '[[:xdigit:]]'
+match 1 1 1 1 'f' '[[:xdigit:]]'
+match 1 1 1 1 'D' '[[:xdigit:]]'
+match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '5' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'b' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'y' '[a-c[:digit:]x-z]'
+match 0 0 0 0 'q' '[a-c[:digit:]x-z]'
 
-# Test recursion and the abort code (use "wildtest -i" to see iteration counts)
-match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
-match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
-match 0 x foo '*/*/*'
-match 0 x foo/bar '*/*/*'
-match 1 x foo/bba/arr '*/*/*'
-match 0 x foo/bb/aa/rr '*/*/*'
-match 1 x foo/bb/aa/rr '**/**/**'
-match 1 x abcXdefXghi '*X*i'
-match 0 x ab/cXd/efXg/hi '*X*i'
-match 1 x ab/cXd/efXg/hi '*/*X*/*/*i'
-match 1 x ab/cXd/efXg/hi '**/*X*/**/*i'
+# Additional tests, including some malformed wildmatch patterns
+match 1 1 1 1 ']' '[\\-^]'
+match 0 0 0 0 '[' '[\\-^]'
+match 1 1 1 1 '-' '[\-_]'
+match 1 1 1 1 ']' '[\]]'
+match 0 0 0 0 '\]' '[\]]'
+match 0 0 0 0 '\' '[\]]'
+match 0 0 0 0 'ab' 'a[]b'
+match 0 0 0 0 \
+      1 1 1 1 'a[]b' 'a[]b'
+match 0 0 0 0 \
+      1 1 1 1 'ab[' 'ab['
+match 0 0 0 0 'ab' '[!'
+match 0 0 0 0 'ab' '[-'
+match 1 1 1 1 '-' '[-]'
+match 0 0 0 0 '-' '[a-'
+match 0 0 0 0 '-' '[!a-'
+match 1 1 1 1 '-' '[--A]'
+match 1 1 1 1 '5' '[--A]'
+match 1 1 1 1 ' ' '[ --]'
+match 1 1 1 1 '$' '[ --]'
+match 1 1 1 1 '-' '[ --]'
+match 0 0 0 0 '0' '[ --]'
+match 1 1 1 1 '-' '[---]'
+match 1 1 1 1 '-' '[------]'
+match 0 0 0 0 'j' '[a-e-n]'
+match 1 1 1 1 '-' '[a-e-n]'
+match 1 1 1 1 'a' '[!------]'
+match 0 0 0 0 '[' '[]-a]'
+match 1 1 1 1 '^' '[]-a]'
+match 0 0 0 0 '^' '[!]-a]'
+match 1 1 1 1 '[' '[!]-a]'
+match 1 1 1 1 '^' '[a^bc]'
+match 1 1 1 1 '-b]' '[a-]b]'
+match 0 0 0 0 '\' '[\]'
+match 1 1 1 1 '\' '[\\]'
+match 0 0 0 0 '\' '[!\\]'
+match 1 1 1 1 'G' '[A-\\]'
+match 0 0 0 0 'aaabbb' 'b*a'
+match 0 0 0 0 'aabcaa' '*ba*'
+match 1 1 1 1 ',' '[,]'
+match 1 1 1 1 ',' '[\\,]'
+match 1 1 1 1 '\' '[\\,]'
+match 1 1 1 1 '-' '[,-.]'
+match 0 0 0 0 '+' '[,-.]'
+match 0 0 0 0 '-.]' '[,-.]'
+match 1 1 1 1 '2' '[\1-\3]'
+match 1 1 1 1 '3' '[\1-\3]'
+match 0 0 0 0 '4' '[\1-\3]'
+match 1 1 1 1 '\' '[[-\]]'
+match 1 1 1 1 '[' '[[-\]]'
+match 1 1 1 1 ']' '[[-\]]'
+match 0 0 0 0 '-' '[[-\]]'
 
-pathmatch 1 foo foo
-pathmatch 0 foo fo
-pathmatch 1 foo/bar foo/bar
-pathmatch 1 foo/bar 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/**'
-pathmatch 1 foo/bba/arr 'foo*'
-pathmatch 1 foo/bba/arr 'foo**'
-pathmatch 1 foo/bba/arr 'foo/*arr'
-pathmatch 1 foo/bba/arr 'foo/**arr'
-pathmatch 0 foo/bba/arr 'foo/*z'
-pathmatch 0 foo/bba/arr 'foo/**z'
-pathmatch 1 foo/bar 'foo?bar'
-pathmatch 1 foo/bar 'foo[/]bar'
-pathmatch 1 foo/bar 'foo[^a-z]bar'
-pathmatch 0 foo '*/*/*'
-pathmatch 0 foo/bar '*/*/*'
-pathmatch 1 foo/bba/arr '*/*/*'
-pathmatch 1 foo/bb/aa/rr '*/*/*'
-pathmatch 1 abcXdefXghi '*X*i'
-pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i'
-pathmatch 1 ab/cXd/efXg/hi '*Xg*i'
+# Test recursion
+match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
+match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
+match 0 0 0 0 foo '*/*/*'
+match 0 0 0 0 foo/bar '*/*/*'
+match 1 1 1 1 foo/bba/arr '*/*/*'
+match 0 0 1 1 foo/bb/aa/rr '*/*/*'
+match 1 1 1 1 foo/bb/aa/rr '**/**/**'
+match 1 1 1 1 abcXdefXghi '*X*i'
+match 0 0 1 1 ab/cXd/efXg/hi '*X*i'
+match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i'
+match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i'
 
-# Case-sensitivity features
-match 0 x 'a' '[A-Z]'
-match 1 x 'A' '[A-Z]'
-match 0 x 'A' '[a-z]'
-match 1 x 'a' '[a-z]'
-match 0 x 'a' '[[:upper:]]'
-match 1 x 'A' '[[:upper:]]'
-match 0 x 'A' '[[:lower:]]'
-match 1 x 'a' '[[:lower:]]'
-match 0 x 'A' '[B-Za]'
-match 1 x 'a' '[B-Za]'
-match 0 x 'A' '[B-a]'
-match 1 x 'a' '[B-a]'
-match 0 x 'z' '[Z-y]'
-match 1 x 'Z' '[Z-y]'
+# Extra pathmatch tests
+match 0 0 0 0 foo fo
+match 1 1 1 1 foo/bar foo/bar
+match 1 1 1 1 foo/bar 'foo/*'
+match 0 0 1 1 foo/bba/arr 'foo/*'
+match 1 1 1 1 foo/bba/arr 'foo/**'
+match 0 0 1 1 foo/bba/arr 'foo*'
+match 0 0 1 1 \
+      1 1 1 1 foo/bba/arr 'foo**'
+match 0 0 1 1 foo/bba/arr 'foo/*arr'
+match 0 0 1 1 foo/bba/arr 'foo/**arr'
+match 0 0 0 0 foo/bba/arr 'foo/*z'
+match 0 0 0 0 foo/bba/arr 'foo/**z'
+match 0 0 1 1 foo/bar 'foo?bar'
+match 0 0 1 1 foo/bar 'foo[/]bar'
+match 0 0 1 1 foo/bar 'foo[^a-z]bar'
+match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i'
 
-imatch 1 'a' '[A-Z]'
-imatch 1 'A' '[A-Z]'
-imatch 1 'A' '[a-z]'
-imatch 1 'a' '[a-z]'
-imatch 1 'a' '[[:upper:]]'
-imatch 1 'A' '[[:upper:]]'
-imatch 1 'A' '[[:lower:]]'
-imatch 1 'a' '[[:lower:]]'
-imatch 1 'A' '[B-Za]'
-imatch 1 'a' '[B-Za]'
-imatch 1 'A' '[B-a]'
-imatch 1 'a' '[B-a]'
-imatch 1 'z' '[Z-y]'
-imatch 1 'Z' '[Z-y]'
+# Extra case-sensitivity tests
+match 0 1 0 1 'a' '[A-Z]'
+match 1 1 1 1 'A' '[A-Z]'
+match 0 1 0 1 'A' '[a-z]'
+match 1 1 1 1 'a' '[a-z]'
+match 0 1 0 1 'a' '[[:upper:]]'
+match 1 1 1 1 'A' '[[:upper:]]'
+match 0 1 0 1 'A' '[[:lower:]]'
+match 1 1 1 1 'a' '[[:lower:]]'
+match 0 1 0 1 'A' '[B-Za]'
+match 1 1 1 1 'a' '[B-Za]'
+match 0 1 0 1 'A' '[B-a]'
+match 1 1 1 1 'a' '[B-a]'
+match 0 1 0 1 'z' '[Z-y]'
+match 1 1 1 1 'Z' '[Z-y]'
 
 test_done
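As a reading aid for the tables above: the first four columns are the expected results for wildmatch, iwildmatch, pathmatch and ipathmatch, each exercised both through the test helper and through git ls-files with the corresponding pathspec magic (the ten-argument form gives the ls-files checks their own expectations). For instance, the row "match 1 1 0 0 'foo/bar' 'foo/**/bar'" corresponds roughly to:

	test-tool wildmatch wildmatch  'foo/bar' 'foo/**/bar'    # 1: must match
	test-tool wildmatch iwildmatch 'foo/bar' 'foo/**/bar'    # 1: must match
	test-tool wildmatch pathmatch  'foo/bar' 'foo/**/bar'    # 0: must not match
	test-tool wildmatch ipathmatch 'foo/bar' 'foo/**/bar'    # 0: must not match
	# plus, when "foo/bar" can actually be created on the filesystem:
	git --glob-pathspecs ls-files -z -- 'foo/**/bar'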
index 503a88d0296a2620ed0ba6e70f784fcd58dc4a9b..da97b8a62b26735fa61983e329614270677e250b 100755 (executable)
@@ -48,9 +48,9 @@ test_expect_success 'git branch HEAD should fail' '
 cat >expect <<EOF
 $_z40 $HEAD $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150200 +0000        branch: Created from master
 EOF
-test_expect_success 'git branch -l d/e/f should create a branch and a log' '
+test_expect_success 'git branch --create-reflog d/e/f should create a branch and a log' '
        GIT_COMMITTER_DATE="2005-05-26 23:30" \
-       git branch -l d/e/f &&
+       git -c core.logallrefupdates=false branch --create-reflog d/e/f &&
        test_path_is_file .git/refs/heads/d/e/f &&
        test_path_is_file .git/logs/refs/heads/d/e/f &&
        test_cmp expect .git/logs/refs/heads/d/e/f
@@ -81,7 +81,7 @@ test_expect_success 'git branch -m dumps usage' '
 
 test_expect_success 'git branch -m m broken_symref should work' '
        test_when_finished "git branch -D broken_symref" &&
-       git branch -l m &&
+       git branch --create-reflog m &&
        git symbolic-ref refs/heads/broken_symref refs/heads/i_am_broken &&
        git branch -m m broken_symref &&
        git reflog exists refs/heads/broken_symref &&
@@ -89,13 +89,13 @@ test_expect_success 'git branch -m m broken_symref should work' '
 '
 
 test_expect_success 'git branch -m m m/m should work' '
-       git branch -l m &&
+       git branch --create-reflog m &&
        git branch -m m m/m &&
        git reflog exists refs/heads/m/m
 '
 
 test_expect_success 'git branch -m n/n n should work' '
-       git branch -l n/n &&
+       git branch --create-reflog n/n &&
        git branch -m n/n n &&
        git reflog exists refs/heads/n
 '
@@ -377,9 +377,9 @@ mv .git/config-saved .git/config
 git config branch.s/s.dummy Hello
 
 test_expect_success 'git branch -m s/s s should work when s/t is deleted' '
-       git branch -l s/s &&
+       git branch --create-reflog s/s &&
        git reflog exists refs/heads/s/s &&
-       git branch -l s/t &&
+       git branch --create-reflog s/t &&
        git reflog exists refs/heads/s/t &&
        git branch -d s/t &&
        git branch -m s/s s &&
@@ -443,7 +443,7 @@ test_expect_success 'git branch --copy dumps usage' '
 '
 
 test_expect_success 'git branch -c d e should work' '
-       git branch -l d &&
+       git branch --create-reflog d &&
        git reflog exists refs/heads/d &&
        git config branch.d.dummy Hello &&
        git branch -c d e &&
@@ -458,7 +458,7 @@ test_expect_success 'git branch -c d e should work' '
 '
 
 test_expect_success 'git branch --copy is a synonym for -c' '
-       git branch -l copy &&
+       git branch --create-reflog copy &&
        git reflog exists refs/heads/copy &&
        git config branch.copy.dummy Hello &&
        git branch --copy copy copy-to &&
@@ -485,7 +485,7 @@ test_expect_success 'git branch -c ee ef should copy ee to create branch ef' '
 '
 
 test_expect_success 'git branch -c f/f g/g should work' '
-       git branch -l f/f &&
+       git branch --create-reflog f/f &&
        git reflog exists refs/heads/f/f &&
        git config branch.f/f.dummy Hello &&
        git branch -c f/f g/g &&
@@ -496,7 +496,7 @@ test_expect_success 'git branch -c f/f g/g should work' '
 '
 
 test_expect_success 'git branch -c m2 m2 should work' '
-       git branch -l m2 &&
+       git branch --create-reflog m2 &&
        git reflog exists refs/heads/m2 &&
        git config branch.m2.dummy Hello &&
        git branch -c m2 m2 &&
@@ -505,18 +505,18 @@ test_expect_success 'git branch -c m2 m2 should work' '
 '
 
 test_expect_success 'git branch -c zz zz/zz should fail' '
-       git branch -l zz &&
+       git branch --create-reflog zz &&
        git reflog exists refs/heads/zz &&
        test_must_fail git branch -c zz zz/zz
 '
 
 test_expect_success 'git branch -c b/b b should fail' '
-       git branch -l b/b &&
+       git branch --create-reflog b/b &&
        test_must_fail git branch -c b/b b
 '
 
 test_expect_success 'git branch -C o/q o/p should work when o/p exists' '
-       git branch -l o/q &&
+       git branch --create-reflog o/q &&
        git reflog exists refs/heads/o/q &&
        git reflog exists refs/heads/o/p &&
        git branch -C o/q o/p
@@ -528,7 +528,7 @@ test_expect_success 'git branch -c -f o/q o/p should work when o/p exists' '
        git branch -c -f o/q o/p
 '
 
-test_expect_success 'git branch -c qq rr/qq should fail when r exists' '
+test_expect_success 'git branch -c qq rr/qq should fail when rr exists' '
        git branch qq &&
        git branch rr &&
        test_must_fail git branch -c qq rr/qq
@@ -569,10 +569,10 @@ test_expect_success 'git branch -C master5 master5 should work when master is ch
 '
 
 test_expect_success 'git branch -C ab cd should overwrite existing config for cd' '
-       git branch -l cd &&
+       git branch --create-reflog cd &&
        git reflog exists refs/heads/cd &&
        git config branch.cd.dummy CD &&
-       git branch -l ab &&
+       git branch --create-reflog ab &&
        git reflog exists refs/heads/ab &&
        git config branch.ab.dummy AB &&
        git branch -C ab cd &&
@@ -684,7 +684,7 @@ test_expect_success 'renaming a symref is not allowed' '
 '
 
 test_expect_success SYMLINKS 'git branch -m u v should fail when the reflog for u is a symlink' '
-       git branch -l u &&
+       git branch --create-reflog u &&
        mv .git/logs/refs/heads/u real-u &&
        ln -s real-u .git/logs/refs/heads/u &&
        test_must_fail git branch -m u v
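The substitutions above replace the old reflog-creating spelling of "-l". A sketch of the new idiom, with an illustrative branch name:

	# Force a reflog for the new branch even when reflogs are otherwise
	# disabled, so the test does not depend on core.logallrefupdates:
	git -c core.logallrefupdates=false branch --create-reflog topic &&
	git reflog exists refs/heads/topic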
index 86bf909ee3dfca78f678fdefe961772dcd78d6b1..61748088ebcbed700a570bc0e297f48435844340 100755 (executable)
@@ -22,7 +22,7 @@ test_expect_success 'setup: create a few commits with notes' '
        git commit -m 3rd &&
        COMMIT_FILE=.git/objects/5e/e1c35e83ea47cd3cc4f8cbee0568915fbbbd29 &&
        test -f $COMMIT_FILE &&
-       test-chmtime =+0 $COMMIT_FILE &&
+       test-tool chmtime =+0 $COMMIT_FILE &&
        git notes add -m "Note #3"
 '
 
index 8ac58d5ea5e4b8b75deaa74f3d6bca29f37dbcb6..72d9564747adf2d37ea2e61a6d2e479096fe6508 100755 (executable)
@@ -277,4 +277,38 @@ EOF
        test_cmp From_.msg out
 '
 
+test_expect_success 'rebase--am.sh and --show-current-patch' '
+       test_create_repo conflict-apply &&
+       (
+               cd conflict-apply &&
+               test_commit init &&
+               echo one >>init.t &&
+               git commit -a -m one &&
+               echo two >>init.t &&
+               git commit -a -m two &&
+               git tag two &&
+               test_must_fail git rebase --onto init HEAD^ &&
+               GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+               grep "show.*$(git rev-parse two)" stderr
+       )
+'
+
+test_expect_success 'rebase--merge.sh and --show-current-patch' '
+       test_create_repo conflict-merge &&
+       (
+               cd conflict-merge &&
+               test_commit init &&
+               echo one >>init.t &&
+               git commit -a -m one &&
+               echo two >>init.t &&
+               git commit -a -m two &&
+               git tag two &&
+               test_must_fail git rebase --merge --onto init HEAD^ &&
+               git rebase --show-current-patch >actual.patch &&
+               GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+               grep "show.*REBASE_HEAD" stderr &&
+               test "$(git rev-parse REBASE_HEAD)" = "$(git rev-parse two)"
+       )
+'
+
 test_done
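The two tests above exercise the new --show-current-patch option for both rebase backends. In the style of those tests, a stopped rebase can be inspected roughly like this (branch names illustrative):

	test_must_fail git rebase --merge --onto base HEAD^ &&  # stops on conflict
	git rebase --show-current-patch >current.patch &&       # the commit being replayed
	git rev-parse REBASE_HEAD                                # merge backend also exposes this ref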
index ef2887bd852b15a0d1c1e6fb1ea9eff09664f27e..756de26c19c90429237456d3fb0bbbfd80ccfda9 100755 (executable)
@@ -225,6 +225,14 @@ test_expect_success 'stop on conflicting pick' '
        test 0 = $(grep -c "^[^#]" < .git/rebase-merge/git-rebase-todo)
 '
 
+test_expect_success 'show conflicted patch' '
+       GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+       grep "show.*REBASE_HEAD" stderr &&
+       # the original stopped-sha1 is abbreviated
+       stopped_sha1="$(git rev-parse $(cat ".git/rebase-merge/stopped-sha"))" &&
+       test "$(git rev-parse REBASE_HEAD)" = "$stopped_sha1"
+'
+
 test_expect_success 'abort' '
        git rebase --abort &&
        test $(git rev-parse new-branch1) = $(git rev-parse HEAD) &&
@@ -703,13 +711,13 @@ test_expect_success 'rebase -i continue with unstaged submodule' '
 test_expect_success 'avoid unnecessary reset' '
        git checkout master &&
        git reset --hard &&
-       test-chmtime =123456789 file3 &&
+       test-tool chmtime =123456789 file3 &&
        git update-index --refresh &&
        HEAD=$(git rev-parse HEAD) &&
        set_fake_editor &&
        git rebase -i HEAD~4 &&
        test $HEAD = $(git rev-parse HEAD) &&
-       MTIME=$(test-chmtime -v +0 file3 | sed 's/[^0-9].*$//') &&
+       MTIME=$(test-tool chmtime -v +0 file3 | sed 's/[^0-9].*$//') &&
        test 123456789 = $MTIME
 '
 
@@ -919,10 +927,8 @@ test_expect_success 'rebase --exec works without -i ' '
 test_expect_success 'rebase -i --exec without <CMD>' '
        git reset --hard execute &&
        set_fake_editor &&
-       test_must_fail git rebase -i --exec 2>tmp &&
-       sed -e "1d" tmp >actual &&
-       test_must_fail git rebase -h >expected &&
-       test_cmp expected actual &&
+       test_must_fail git rebase -i --exec 2>actual &&
+       test_i18ngrep "requires a value" actual &&
        git checkout master
 '
 
index ff8c360cd58bc11366ac2cf9e7c0b141e4ac3a39..cb7c6de84abf88bf90ac9716bb24ac91b8f64bf9 100755 (executable)
@@ -3,6 +3,7 @@
 test_description='rebase should handle arbitrary git message'
 
 . ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
 
 cat >F <<\EOF
 This is an example of a commit log message
@@ -25,6 +26,7 @@ test_expect_success setup '
        test_tick &&
        git commit -m "Initial commit" &&
        git branch diff-in-message &&
+       git branch empty-message-merge &&
 
        git checkout -b multi-line-subject &&
        cat F >file2 &&
@@ -45,6 +47,11 @@ test_expect_success setup '
 
        git cat-file commit HEAD | sed -e "1,/^\$/d" >G0 &&
 
+       git checkout empty-message-merge &&
+       echo file3 >file3 &&
+       git add file3 &&
+       git commit --allow-empty-message -m "" &&
+
        git checkout master &&
 
        echo One >file1 &&
@@ -69,4 +76,20 @@ test_expect_success 'rebase commit with diff in message' '
        test_cmp G G0
 '
 
+test_expect_success 'rebase -m commit with empty message' '
+       test_must_fail git rebase -m master empty-message-merge &&
+       git rebase --abort &&
+       git rebase -m --allow-empty-message master empty-message-merge
+'
+
+test_expect_success 'rebase -i commit with empty message' '
+       git checkout diff-in-message &&
+       set_fake_editor &&
+       test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+               git rebase -i HEAD^ &&
+       git rebase --abort &&
+       FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+               git rebase -i --allow-empty-message HEAD^
+'
+
 test_done
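A condensed sketch of the behaviour the two new tests pin down, with an illustrative branch name: rebasing a commit whose message is empty stops unless --allow-empty-message is given.

	test_must_fail git rebase -m master has-empty-message &&
	git rebase --abort &&
	git rebase -m --allow-empty-message master has-empty-message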
index 6b84e6042a6fcc9cf850a53ad2a885597fb178fc..e7292f5b9b938018f515f858f2dadc29162261ad 100755 (executable)
@@ -24,8 +24,23 @@ But otherwise with a sane description." &&
        >elif &&
        git add elif &&
        test_tick &&
-       git commit -m second
+       git commit -m second &&
 
+       git checkout -b side2 &&
+       >afile &&
+       git add afile &&
+       test_tick &&
+       git commit -m third &&
+       echo hello >afile &&
+       test_tick &&
+       git commit -a -m fourth &&
+       git checkout -b side-merge &&
+       git reset --hard HEAD^^ &&
+       git merge --no-ff -m "A merge commit log message that has a long
+summary that spills over multiple lines.
+
+But otherwise with a sane description." side2 &&
+       git branch side-merge-original
 '
 
 test_expect_success rebase '
@@ -36,6 +51,15 @@ test_expect_success rebase '
        git cat-file commit side@{1} | sed -e "1,/^\$/d" >expect &&
        test_cmp expect actual
 
+'
+test_expect_success rebasep '
+
+       git checkout side-merge &&
+       git rebase -p side &&
+       git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+       git cat-file commit side-merge-original | sed -e "1,/^\$/d" >expect &&
+       test_cmp expect actual
+
 '
 
 test_done
index 7c91a85f43a7a11295819adc5da5f8e5fca9e4ea..9214d0bb511e24f401188790858dc1e0ca1b92fd 100755 (executable)
@@ -24,7 +24,7 @@ test_expect_success 'interactive rebase --continue works with touched file' '
        git checkout master &&
 
        FAKE_LINES="edit 1" git rebase -i HEAD^ &&
-       test-chmtime =-60 F1 &&
+       test-tool chmtime =-60 F1 &&
        git rebase --continue
 '
 
@@ -36,7 +36,7 @@ test_expect_success 'non-interactive rebase --continue works with touched file'
        test_must_fail git rebase --onto master master topic &&
        echo "Resolved" >F2 &&
        git add F2 &&
-       test-chmtime =-60 F1 &&
+       test-tool chmtime =-60 F1 &&
        git rebase --continue
 '
 
index 4f2a263b63e14348032959059c15b160fecba39c..c9a1f783f5471b3b5b1fa7327ad3cbf0fd0f965d 100755 (executable)
@@ -86,7 +86,7 @@ test_expect_success 'cherry-pick on stat-dirty working tree' '
        (
                cd copy &&
                git checkout initial &&
-               test-chmtime +40 oops &&
+               test-tool chmtime +40 oops &&
                git cherry-pick added
        )
 '
@@ -150,7 +150,10 @@ test_expect_success 'cherry-pick works with dirty renamed file' '
        test_tick &&
        git commit -m renamed &&
        echo modified >renamed &&
-       git cherry-pick refs/heads/unrelated
+       test_must_fail git cherry-pick refs/heads/unrelated >out &&
+       test_i18ngrep "Refusing to lose dirty file at renamed" out &&
+       test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) &&
+       grep -q "^modified$" renamed
 '
 
 test_done
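The amended test above documents that cherry-pick now refuses to clobber local changes to a renamed file instead of silently losing them. The essential check, as a sketch:

	echo modified >renamed &&                                   # dirty file, renamed in HEAD
	test_must_fail git cherry-pick refs/heads/unrelated >out &&
	test_i18ngrep "Refusing to lose dirty file at renamed" out  # index and worktree left alone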
index 0acf4b14614c750d3c2324d8a941aa77d5c7ad56..9f93445f1e496b930f4f7cb53146344315bc1c8e 100755 (executable)
@@ -247,9 +247,9 @@ test_expect_success '--abort after last commit in sequence' '
 test_expect_success 'cherry-pick does not implicitly stomp an existing operation' '
        pristine_detach initial &&
        test_expect_code 1 git cherry-pick base..anotherpick &&
-       test-chmtime -v +0 .git/sequencer >expect &&
+       test-tool chmtime -v +0 .git/sequencer >expect &&
        test_expect_code 128 git cherry-pick unrelatedpick &&
-       test-chmtime -v +0 .git/sequencer >actual &&
+       test-tool chmtime -v +0 .git/sequencer >actual &&
        test_cmp expect actual
 '
 
index 46f15169f55c03742717f36f8d0e99241391b2ea..b8fbdefcdc34ffa6fb54fc1ad375a583a194fb38 100755 (executable)
@@ -232,7 +232,7 @@ test_expect_success 'Call "rm" from outside the work tree' '
 test_expect_success 'refresh index before checking if it is up-to-date' '
 
        git reset --hard &&
-       test-chmtime -86400 frotz/nitfol &&
+       test-tool chmtime -86400 frotz/nitfol &&
        git rm frotz/nitfol &&
        test ! -f frotz/nitfol
 
index 2748805642201d7c514792bab8d8b3940fb4086c..07af05d7aee65c51245431292a21ec38eef19f53 100755 (executable)
@@ -187,7 +187,7 @@ test_expect_success 'git add --refresh with pathspec' '
        echo >foo && echo >bar && echo >baz &&
        git add foo bar baz && H=$(git rev-parse :foo) && git rm -f foo &&
        echo "100644 $H 3       foo" | git update-index --index-info &&
-       test-chmtime -60 bar baz &&
+       test-tool chmtime -60 bar baz &&
        >expect &&
        git add --refresh bar >actual &&
        test_cmp expect actual &&
index 058698df6a4a9811b9db84fb5900472c47c61798..8954481995129292c99208b59b368e231d819605 100755 (executable)
@@ -10,6 +10,19 @@ then
        test_done
 fi
 
+diff_cmp () {
+       for x
+       do
+               sed  -e '/^index/s/[0-9a-f]*[1-9a-f][0-9a-f]*\.\./1234567../' \
+                    -e '/^index/s/\.\.[0-9a-f]*[1-9a-f][0-9a-f]*/..9abcdef/' \
+                    -e '/^index/s/ 00*\.\./ 0000000../' \
+                    -e '/^index/s/\.\.00*$/..0000000/' \
+                    -e '/^index/s/\.\.00* /..0000000 /' \
+                    "$x" >"$x.filtered"
+       done
+       test_cmp "$1.filtered" "$2.filtered"
+}
+
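The diff_cmp helper introduced above masks the abbreviated blob names on "index" header lines before comparing, so the expected diffs below need not track exact object names. Usage is a drop-in for test_cmp:

	# e.g. "index 180b47c..b6f2c08 100644" in the expectation and
	# "index <anything>..<anything> 100644" in the actual output both
	# normalize to "index 1234567..9abcdef 100644" before comparison.
	git diff >diff &&
	diff_cmp expected diff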
 test_expect_success 'setup (initial)' '
        echo content >file &&
        git add file &&
@@ -22,20 +35,20 @@ test_expect_success 'status works (initial)' '
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-new file mode 100644
-index 0000000..d95f3ad
---- /dev/null
-+++ b/file
-@@ -0,0 +1 @@
-+content
-EOF
+       cat >expected <<-\EOF
+       new file mode 100644
+       index 0000000..d95f3ad
+       --- /dev/null
+       +++ b/file
+       @@ -0,0 +1 @@
+       +content
+       EOF
 '
 
 test_expect_success 'diff works (initial)' '
        (echo d; echo 1) | git add -i >output &&
        sed -ne "/new file/,/content/p" <output >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 test_expect_success 'revert works (initial)' '
        git add file &&
@@ -59,20 +72,20 @@ test_expect_success 'status works (commit)' '
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-index 180b47c..b6f2c08 100644
---- a/file
-+++ b/file
-@@ -1 +1,2 @@
- baseline
-+content
-EOF
+       cat >expected <<-\EOF
+       index 180b47c..b6f2c08 100644
+       --- a/file
+       +++ b/file
+       @@ -1 +1,2 @@
       baseline
+       +content
+       EOF
 '
 
 test_expect_success 'diff works (commit)' '
        (echo d; echo 1) | git add -i >output &&
        sed -ne "/^index/,/content/p" <output >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 test_expect_success 'revert works (commit)' '
        git add file &&
@@ -83,39 +96,32 @@ test_expect_success 'revert works (commit)' '
 
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-EOF
-'
-
-test_expect_success 'setup fake editor' '
-       >fake_editor.sh &&
-       chmod a+x fake_editor.sh &&
-       test_set_editor "$(pwd)/fake_editor.sh"
+       cat >expected <<-\EOF
+       EOF
 '
 
 test_expect_success 'dummy edit works' '
+       test_set_editor : &&
        (echo e; echo a) | git add -p &&
        git diff > diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-@@ -1,1 +1,4 @@
- this
-+patch
--does not
- apply
-EOF
+       cat >patch <<-\EOF
+       @@ -1,1 +1,4 @@
       this
+       +patch
+       -does not
       apply
+       EOF
 '
 
 test_expect_success 'setup fake editor' '
-       echo "#!$SHELL_PATH" >fake_editor.sh &&
-       cat >>fake_editor.sh <<\EOF &&
-mv -f "$1" oldpatch &&
-mv -f patch "$1"
-EOF
-       chmod a+x fake_editor.sh &&
+       write_script "fake_editor.sh" <<-\EOF &&
+       mv -f "$1" oldpatch &&
+       mv -f patch "$1"
+       EOF
        test_set_editor "$(pwd)/fake_editor.sh"
 '
 
@@ -126,10 +132,10 @@ test_expect_success 'bad edit rejected' '
 '
 
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-this patch
-is garbage
-EOF
+       cat >patch <<-\EOF
+       this patch
+       is garbage
+       EOF
 '
 
 test_expect_success 'garbage edit rejected' '
@@ -139,34 +145,34 @@ test_expect_success 'garbage edit rejected' '
 '
 
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-@@ -1,0 +1,0 @@
- baseline
-+content
-+newcontent
-+lines
-EOF
+       cat >patch <<-\EOF
+       @@ -1,0 +1,0 @@
       baseline
+       +content
+       +newcontent
+       +lines
+       EOF
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/file b/file
-index b5dd6c9..f910ae9 100644
---- a/file
-+++ b/file
-@@ -1,4 +1,4 @@
- baseline
- content
--newcontent
-+more
- lines
-EOF
+       cat >expected <<-\EOF
+       diff --git a/file b/file
+       index b5dd6c9..f910ae9 100644
+       --- a/file
+       +++ b/file
+       @@ -1,4 +1,4 @@
       baseline
       content
+       -newcontent
+       +more
       lines
+       EOF
 '
 
 test_expect_success 'real edit works' '
        (echo e; echo n; echo d) | git add -p &&
        git diff >output &&
-       test_cmp expected output
+       diff_cmp expected output
 '
 
 test_expect_success 'skip files similarly as commit -a' '
@@ -178,7 +184,7 @@ test_expect_success 'skip files similarly as commit -a' '
        git reset &&
        git commit -am commit &&
        git diff >expected &&
-       test_cmp expected output &&
+       diff_cmp expected output &&
        git reset --hard HEAD^
 '
 rm -f .gitignore
@@ -222,52 +228,67 @@ test_expect_success 'setup again' '
 
 # Write the patch file with a new line at the top and bottom
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-index 180b47c..b6f2c08 100644
---- a/file
-+++ b/file
-@@ -1,2 +1,4 @@
-+firstline
- baseline
- content
-+lastline
-EOF
-'
-
-# Expected output, similar to the patch but w/ diff at the top
+       cat >patch <<-\EOF
+       index 180b47c..b6f2c08 100644
+       --- a/file
+       +++ b/file
+       @@ -1,2 +1,4 @@
+       +firstline
+        baseline
+        content
+       +lastline
+       \ No newline at end of file
+       EOF
+'
+
+# Expected output, diff is similar to the patch but w/ diff at the top
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/file b/file
-index b6f2c08..61b9053 100755
---- a/file
-+++ b/file
-@@ -1,2 +1,4 @@
-+firstline
- baseline
- content
-+lastline
-EOF
+       echo diff --git a/file b/file >expected &&
+       cat patch |sed "/^index/s/ 100644/ 100755/" >>expected &&
+       cat >expected-output <<-\EOF
+       --- a/file
+       +++ b/file
+       @@ -1,2 +1,4 @@
+       +firstline
+        baseline
+        content
+       +lastline
+       \ No newline at end of file
+       @@ -1,2 +1,3 @@
+       +firstline
+        baseline
+        content
+       @@ -1,2 +2,3 @@
+        baseline
+        content
+       +lastline
+       \ No newline at end of file
+       EOF
 '
 
 # Test splitting the first patch, then adding both
-test_expect_success 'add first line works' '
+test_expect_success C_LOCALE_OUTPUT 'add first line works' '
        git commit -am "clear local changes" &&
        git apply patch &&
-       (echo s; echo y; echo y) | git add -p file &&
-       git diff --cached > diff &&
-       test_cmp expected diff
+       printf "%s\n" s y y | git add -p file 2>error |
+               sed -n -e "s/^Stage this hunk[^@]*\(@@ .*\)/\1/" \
+                      -e "/^[-+@ \\\\]/p" >output &&
+       test_must_be_empty error &&
+       git diff --cached >diff &&
+       diff_cmp expected diff &&
+       test_cmp expected-output output
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/non-empty b/non-empty
-deleted file mode 100644
-index d95f3ad..0000000
---- a/non-empty
-+++ /dev/null
-@@ -1 +0,0 @@
--content
-EOF
+       cat >expected <<-\EOF
+       diff --git a/non-empty b/non-empty
+       deleted file mode 100644
+       index d95f3ad..0000000
+       --- a/non-empty
+       +++ /dev/null
+       @@ -1 +0,0 @@
+       -content
+       EOF
 '
 
 test_expect_success 'deleting a non-empty file' '
@@ -278,15 +299,15 @@ test_expect_success 'deleting a non-empty file' '
        rm non-empty &&
        echo y | git add -p non-empty &&
        git diff --cached >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/empty b/empty
-deleted file mode 100644
-index e69de29..0000000
-EOF
+       cat >expected <<-\EOF
+       diff --git a/empty b/empty
+       deleted file mode 100644
+       index e69de29..0000000
+       EOF
 '
 
 test_expect_success 'deleting an empty file' '
@@ -297,23 +318,17 @@ test_expect_success 'deleting an empty file' '
        rm empty &&
        echo y | git add -p empty &&
        git diff --cached >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success 'split hunk setup' '
        git reset --hard &&
-       for i in 10 20 30 40 50 60
-       do
-               echo $i
-       done >test &&
+       test_write_lines 10 20 30 40 50 60 >test &&
        git add test &&
        test_tick &&
        git commit -m test &&
 
-       for i in 10 15 20 21 22 23 24 30 40 50 60
-       do
-               echo $i
-       done >test
+       test_write_lines 10 15 20 21 22 23 24 30 40 50 60 >test
 '
 
 test_expect_success 'split hunk "add -p (edit)"' '
@@ -334,17 +349,7 @@ test_expect_success 'split hunk "add -p (edit)"' '
 '
 
 test_expect_failure 'split hunk "add -p (no, yes, edit)"' '
-       cat >test <<-\EOF &&
-       5
-       10
-       20
-       21
-       30
-       31
-       40
-       50
-       60
-       EOF
+       test_write_lines 5 10 20 21 30 31 40 50 60 >test &&
        git reset &&
        # test sequence is s(plit), n(o), y(es), e(dit)
        # q n q q is there to make sure we exit at the end.
@@ -355,6 +360,63 @@ test_expect_failure 'split hunk "add -p (no, yes, edit)"' '
        ! grep "^+31" actual
 '
 
+test_expect_success 'setup expected diff' '
+       cat >expected <<-\EOF
+       diff --git a/test b/test
+       index 0889435..341cc6b 100644
+       --- a/test
+       +++ b/test
+       @@ -1,6 +1,9 @@
+       +5
+        10
+        20
+       +21
+        30
+        40
+        50
+        60
+       +61
+       \ No newline at end of file
+       EOF
+'
+
+test_expect_success 'can stage individual lines of patch' '
+       git reset &&
+       printf 61 >>test &&
+       printf "%s\n" l "1,2 4-" |
+       EDITOR=: git add -p 2>error &&
+       test_must_be_empty error &&
+       git diff --cached HEAD >actual &&
+       diff_cmp expected actual
+'
+
+test_expect_success 'setup expected diff' '
+       cat >expected <<-\EOF
+       diff --git a/test b/test
+       index 0889435..cc6163b 100644
+       --- a/test
+       +++ b/test
+       @@ -1,6 +1,8 @@
+       +5
+        10
+        20
+        30
+        40
+        50
+        60
+       +61
+       \ No newline at end of file
+       EOF
+'
+
+test_expect_success 'can reset individual lines of patch' '
+       printf "%s\n" l -13 |
+       EDITOR=: git reset -p 2>error &&
+       test_must_be_empty error &&
+       git diff --cached HEAD >actual &&
+       diff_cmp expected actual
+'
+
 test_expect_success 'patch mode ignores unmerged entries' '
        git reset --hard &&
        test_commit conflict &&
@@ -378,7 +440,7 @@ test_expect_success 'patch mode ignores unmerged entries' '
        +changed
        EOF
        git diff --cached >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success TTY 'diffs can be colorized' '
@@ -392,6 +454,26 @@ test_expect_success TTY 'diffs can be colorized' '
        grep "$(printf "\\033")" output
 '
 
+test_expect_success TTY 'diffFilter filters diff' '
+       git reset --hard &&
+
+       echo content >test &&
+       test_config interactive.diffFilter "sed s/^/foo:/" &&
+       printf y | test_terminal git add -p >output 2>&1 &&
+
+       # avoid depending on the exact coloring or content of the prompts,
+       # and just make sure we saw our diff prefixed
+       grep foo:.*content output
+'
+
+test_expect_success TTY 'detect bogus diffFilter output' '
+       git reset --hard &&
+
+       echo content >test &&
+       test_config interactive.diffFilter "echo too-short" &&
+       printf y | test_must_fail test_terminal git add -p
+'
+
 test_expect_success 'patch-mode via -i prompts for files' '
        git reset --hard &&
 
@@ -407,7 +489,7 @@ test_expect_success 'patch-mode via -i prompts for files' '
 
        echo test >expect &&
        git diff --cached --name-only >actual &&
-       test_cmp expect actual
+       diff_cmp expect actual
 '
 
 test_expect_success 'add -p handles globs' '
@@ -541,4 +623,42 @@ test_expect_success 'status ignores dirty submodules (except HEAD)' '
        ! grep dirty-otherwise output
 '
 
+test_expect_success 'set up pathological context' '
+       git reset --hard &&
+       test_write_lines a a a a a a a a a a a >a &&
+       git add a &&
+       git commit -m a &&
+       test_write_lines c b a a a a a a a b a a a a >a &&
+       test_write_lines     a a a a a a a b a a a a >expected-1 &&
+       test_write_lines   b a a a a a a a b a a a a >expected-2 &&
+       # check editing can cope with missing header and deleted context lines
+       # as well as changes to other lines
+       test_write_lines +b " a" >patch
+'
+
+test_expect_success 'add -p works with pathological context lines' '
+       git reset &&
+       printf "%s\n" n y |
+       git add -p &&
+       git cat-file blob :a >actual &&
+       test_cmp expected-1 actual
+'
+
+test_expect_success 'add -p patch editing works with pathological context lines' '
+       git reset &&
+       # n q q below is in case edit fails
+       printf "%s\n" e y    n q q |
+       git add -p &&
+       git cat-file blob :a >actual &&
+       test_cmp expected-2 actual
+'
+
+test_expect_success 'add -p selecting lines works with pathological context lines' '
+       git reset &&
+       printf "%s\n" l 2 y |
+       GIT_EDITOR=./editor git add -p &&
+       git cat-file blob :a >actual &&
+       test_cmp expected-2 actual
+'
+
 test_done
index bfde4057ad2afcdd3bd38cbb43ffa2ce241aaa67..3ea5b9bb3ff0a4e439b1fc6cd8f9df86386f4126 100755 (executable)
@@ -228,4 +228,56 @@ test_expect_success 'stash previously ignored file' '
        test_path_is_file ignored.d/foo
 '
 
+test_expect_success 'stash -u -- <untracked> does not print error' '
+       >untracked &&
+       git stash push -u -- untracked 2>actual &&
+       test_path_is_missing untracked &&
+       test_line_count = 0 actual
+'
+
+test_expect_success 'stash -u -- <untracked> leaves rest of working tree in place' '
+       >tracked &&
+       git add tracked &&
+       >untracked &&
+       git stash push -u -- untracked &&
+       test_path_is_missing untracked &&
+       test_path_is_file tracked
+'
+
+test_expect_success 'stash -u -- <tracked> <untracked> clears changes in both' '
+       >tracked &&
+       git add tracked &&
+       >untracked &&
+       git stash push -u -- tracked untracked &&
+       test_path_is_missing tracked &&
+       test_path_is_missing untracked
+'
+
+test_expect_success 'stash --all -- <ignored> stashes ignored file' '
+       >ignored.d/bar &&
+       git stash push --all -- ignored.d/bar &&
+       test_path_is_missing ignored.d/bar
+'
+
+test_expect_success 'stash --all -- <tracked> <ignored> clears changes in both' '
+       >tracked &&
+       git add tracked &&
+       >ignored.d/bar &&
+       git stash push --all -- tracked ignored.d/bar &&
+       test_path_is_missing tracked &&
+       test_path_is_missing ignored.d/bar
+'
+
+test_expect_success 'stash -u -- <ignored> leaves ignored file alone' '
+       >ignored.d/bar &&
+       git stash push -u -- ignored.d/bar &&
+       test_path_is_file ignored.d/bar
+'
+
+test_expect_success 'stash -u -- <non-existent> shows no changes when there are none' '
+       git stash push -u -- non-existent >actual &&
+       echo "No local changes to save" >expect &&
+       test_i18ncmp expect actual
+'
+
 test_done
index eadf4f62444350c711a313d2161c9fa17c7aa405..a07816d5605f30ccb7f7ce6fe83288fff535538c 100755 (executable)
@@ -134,11 +134,15 @@ test_expect_success 'favour same basenames over different ones' '
        git rm path1 &&
        mkdir subdir &&
        git mv another-path subdir/path1 &&
-       git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+       git status >out &&
+       test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
 
 test_expect_success 'favour same basenames even with minor differences' '
        git show HEAD:path1 | sed "s/15/16/" > subdir/path1 &&
-       git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+       git status >out &&
+       test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
 
 test_expect_success 'two files with same basename and same content' '
        git reset --hard &&
@@ -148,7 +152,8 @@ test_expect_success 'two files with same basename and same content' '
        git add dir &&
        git commit -m 2 &&
        git mv dir other-dir &&
-       git status | test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file"
+       git status >out &&
+       test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file" out
 '
 
 test_expect_success 'setup for many rename source candidates' '
index 13e7f621ab79f95cc7c3057d9de5710813049102..cf0f3a1ee75dd28c6b5ce2a5db7c3ad6afafd21f 100755 (executable)
@@ -73,7 +73,7 @@ test_expect_success 'diff identical, but newly created symlink and file' '
        >expected &&
        rm -f frotz nitfol &&
        echo xyzzy >nitfol &&
-       test-chmtime +10 nitfol &&
+       test-tool chmtime +10 nitfol &&
        if test_have_prereq SYMLINKS
        then
                ln -s xyzzy frotz
index f10798b2dff35df131fabb351240ebf75952b446..f8d853595b99bfff64521a502ee062b7e5f35e5f 100755 (executable)
@@ -76,7 +76,7 @@ test_expect_success setup '
 
        mkdir dir3 &&
        cp dir/sub dir3/sub &&
-       test-chmtime +1 dir3/sub &&
+       test-tool chmtime +1 dir3/sub &&
 
        git config log.showroot false &&
        git commit --amend &&
@@ -361,6 +361,11 @@ diff --no-index --raw dir2 dir
 diff --no-index --raw --abbrev=4 dir2 dir
 :noellipses diff --no-index --raw --abbrev=4 dir2 dir
 diff --no-index --raw --no-abbrev dir2 dir
+
+diff-tree --pretty --root --stat --compact-summary initial
+diff-tree --pretty -R --root --stat --compact-summary initial
+diff-tree --stat --compact-summary initial mode
+diff-tree -R --stat --compact-summary initial mode
 EOF
 
 test_expect_success 'log -S requires an argument' '
diff --git a/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial b/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial
new file mode 100644 (file)
index 0000000..d6451ff
--- /dev/null
@@ -0,0 +1,12 @@
+$ git diff-tree --pretty --root --stat --compact-summary initial
+commit 444ac553ac7612cc88969031b02b3767fb8a353a
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+
+ dir/sub (new) | 2 ++
+ file0 (new)   | 3 +++
+ file2 (new)   | 3 +++
+ 3 files changed, 8 insertions(+)
+$
diff --git a/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial b/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial
new file mode 100644 (file)
index 0000000..1989e55
--- /dev/null
@@ -0,0 +1,12 @@
+$ git diff-tree --pretty -R --root --stat --compact-summary initial
+commit 444ac553ac7612cc88969031b02b3767fb8a353a
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+
+ dir/sub (gone) | 2 --
+ file0 (gone)   | 3 ---
+ file2 (gone)   | 3 ---
+ 3 files changed, 8 deletions(-)
+$
diff --git a/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode b/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode
new file mode 100644 (file)
index 0000000..9c7c8f6
--- /dev/null
@@ -0,0 +1,4 @@
+$ git diff-tree --stat --compact-summary initial mode
+ file0 (mode +x) | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
diff --git a/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode b/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode
new file mode 100644 (file)
index 0000000..e38f3d3
--- /dev/null
@@ -0,0 +1,4 @@
+$ git diff-tree -R --stat --compact-summary initial mode
+ file0 (mode -x) | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
index 1795ffc3aaf3008f3ef3adc566803a66603975dd..22f9f88f0afc54f1dfeebbea623a4c41fde709f6 100755 (executable)
@@ -33,6 +33,7 @@ diffpatterns="
        css
        fortran
        fountain
+       golang
        html
        java
        matlab
diff --git a/t/t4018/golang-complex-function b/t/t4018/golang-complex-function
new file mode 100644 (file)
index 0000000..e057dce
--- /dev/null
@@ -0,0 +1,8 @@
+type Test struct {
+       a Type
+}
+
+func (t *Test) RIGHT(a Type) (Type, error) {
+       t.a = a
+       return ChangeMe, nil
+}
diff --git a/t/t4018/golang-func b/t/t4018/golang-func
new file mode 100644 (file)
index 0000000..8e9c9ac
--- /dev/null
@@ -0,0 +1,4 @@
+func RIGHT() {
+       a := 5
+       b := ChangeMe
+}
diff --git a/t/t4018/golang-interface b/t/t4018/golang-interface
new file mode 100644 (file)
index 0000000..553bede
--- /dev/null
@@ -0,0 +1,4 @@
+type RIGHT interface {
+       a() Type
+       b() ChangeMe
+}
diff --git a/t/t4018/golang-long-func b/t/t4018/golang-long-func
new file mode 100644 (file)
index 0000000..ac3a77b
--- /dev/null
@@ -0,0 +1,5 @@
+func RIGHT(aVeryVeryVeryLongVariableName AVeryVeryVeryLongType,
+       anotherLongVariableName AnotherLongType) {
+       a := 5
+       b := ChangeMe
+}
diff --git a/t/t4018/golang-struct b/t/t4018/golang-struct
new file mode 100644 (file)
index 0000000..5deda77
--- /dev/null
@@ -0,0 +1,4 @@
+type RIGHT struct {
+       a Type
+       b ChangeMe
+}
index 2f1737fcef185486dc626d616c37fdadc11deba1..0352bf81a90a38adf14fb7a980c98600e1f650b2 100755 (executable)
@@ -147,7 +147,7 @@ test_expect_success 'git diff --ignore-all-space, both files outside repo' '
 '
 
 test_expect_success 'git diff --quiet ignores stat-change only entries' '
-       test-chmtime +10 a &&
+       test-tool chmtime +10 a &&
        echo modified >>b &&
        test_expect_code 1 git diff --quiet
 '
index 9f563db20a867156a825dfc0ce810c4f59109ac8..6e2cf933f761683781525b36330e38c758aaabd6 100755 (executable)
@@ -19,17 +19,33 @@ test_expect_success 'preparation' '
        git commit -m message "$name"
 '
 
+cat >expect72 <<-'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
+EOF
+test_expect_success "format-patch: small change with long name gives more space to the name" '
+       git format-patch -1 --stdout >output &&
+       grep " | " output >actual &&
+       test_cmp expect72 actual
+'
+
 while read cmd args
 do
-       cat >expect <<-'EOF'
+       cat >expect80 <<-'EOF'
         ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
        EOF
        test_expect_success "$cmd: small change with long name gives more space to the name" '
                git $cmd $args >output &&
                grep " | " output >actual &&
-               test_cmp expect actual
+               test_cmp expect80 actual
        '
+done <<\EOF
+diff HEAD^ HEAD --stat
+show --stat
+log -1 --stat
+EOF
 
+while read cmd args
+do
        cat >expect <<-'EOF'
         ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
        EOF
@@ -79,11 +95,11 @@ test_expect_success 'preparation for big change tests' '
        git commit -m message abcd
 '
 
-cat >expect80 <<'EOF'
- abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72 <<'EOF'
+ abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 EOF
-cat >expect80-graph <<'EOF'
-|  abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+|  abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 EOF
 cat >expect200 <<'EOF'
  abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -107,7 +123,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect200 diff HEAD^ HEAD --stat
 respects expect200 show --stat
 respects expect200 log -1 --stat
@@ -135,7 +151,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect40 diff HEAD^ HEAD --stat
 respects expect40 show --stat
 respects expect40 log -1 --stat
@@ -163,7 +179,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect40 diff HEAD^ HEAD --stat
 respects expect40 show --stat
 respects expect40 log -1 --stat
@@ -250,11 +266,11 @@ show --stat
 log -1 --stat
 EOF
 
-cat >expect80 <<'EOF'
- ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72 <<'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
 EOF
-cat >expect80-graph <<'EOF'
-|  ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+|  ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
 EOF
 cat >expect200 <<'EOF'
  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -278,7 +294,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect200 diff HEAD^ HEAD --stat
 respects expect200 show --stat
 respects expect200 log -1 --stat
@@ -308,7 +324,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect1 diff HEAD^ HEAD --stat
 respects expect1 show --stat
 respects expect1 log -1 --stat
index 27cb0009fb1ed52b749785c31081b3097fef0022..c7c688fcc4bbdfe97a5c595fddc96e2590e021a2 100755 (executable)
@@ -89,4 +89,21 @@ test_expect_success 'traditional, whitespace-damaged, colon in timezone' '
        test_cmp expected "post image.txt"
 '
 
+cat >diff-from-svn <<\EOF
+Index: Makefile
+===================================================================
+diff --git a/branches/Makefile
+deleted file mode 100644
+--- a/branches/Makefile        (revision 13)
++++ /dev/null  (nonexistent)
+@@ -1 +0,0 @@
+-
+EOF
+
+test_expect_success 'apply handles a diff generated by Subversion' '
+       >Makefile &&
+       git apply -p2 diff-from-svn &&
+       test_path_is_missing Makefile
+'
+
 test_done
index 73b67b4280b99e0328e201e6b69c3d88b766ea84..1eccfb71d0c8e26f88708459f065dea622baf9d3 100755 (executable)
@@ -662,6 +662,11 @@ test_expect_success 'am pauses on conflict' '
        test -d .git/rebase-apply
 '
 
+test_expect_success 'am --show-current-patch' '
+       git am --show-current-patch >actual.patch &&
+       test_cmp .git/rebase-apply/0001 actual.patch
+'
+
 test_expect_success 'am --skip works' '
        echo goodbye >expected &&
        git am --skip &&
@@ -1045,4 +1050,16 @@ test_expect_success 'am works with multi-line in-body headers' '
        git cat-file commit HEAD | grep "^$LONG$"
 '
 
+test_expect_success 'am --quit keeps HEAD where it is' '
+       mkdir .git/rebase-apply &&
+       >.git/rebase-apply/last &&
+       >.git/rebase-apply/next &&
+       git rev-parse HEAD^ >.git/ORIG_HEAD &&
+       git rev-parse HEAD >expected &&
+       git am --quit &&
+       test_path_is_missing .git/rebase-apply &&
+       git rev-parse HEAD >actual &&
+       test_cmp expected actual
+'
+
 test_done
index 9473c2779ef0df109f6943a949583676ea5ceabf..9d8d3c72e7efe68b8e0011c1ec9b77d9a0820b30 100755 (executable)
@@ -46,9 +46,8 @@ do
 
        test_expect_success "am$with3 --skip continue after failed am$with3" '
                test_must_fail git am$with3 --skip >output &&
-               test_i18ngrep "^Applying" output >output.applying &&
-               test_i18ngrep "^Applying: 6$" output.applying &&
-               test_i18ncmp file-2-expect file-2 &&
+               test_i18ngrep "^Applying: 6$" output &&
+               test_cmp file-2-expect file-2 &&
                test ! -f .git/MERGE_RR
        '
 
@@ -172,7 +171,7 @@ test_expect_success 'am --skip leaves index stat info alone' '
        git checkout -f --orphan skip-stat-info &&
        git reset &&
        test_commit skip-should-be-untouched &&
-       test-chmtime =0 skip-should-be-untouched.t &&
+       test-tool chmtime =0 skip-should-be-untouched.t &&
        git update-index --refresh &&
        git diff-files --exit-code --quiet &&
        test_must_fail git am 0001-*.patch &&
@@ -184,7 +183,7 @@ test_expect_success 'am --abort leaves index stat info alone' '
        git checkout -f --orphan abort-stat-info &&
        git reset &&
        test_commit abort-should-be-untouched &&
-       test-chmtime =0 abort-should-be-untouched.t &&
+       test-tool chmtime =0 abort-should-be-untouched.t &&
        git update-index --refresh &&
        git diff-files --exit-code --quiet &&
        test_must_fail git am 0001-*.patch &&
index d97d2bebc9850c8ba96a8263ed081fced2ecc153..deafaa3e07546cea3d0bcf3aa3c00e2ba7e41406 100755 (executable)
@@ -166,7 +166,7 @@ test_expect_success 'first postimage wins' '
        git commit -q -a -m "prefer first over second" &&
        test -f $rr/postimage &&
 
-       oldmtimepost=$(test-chmtime -v -60 $rr/postimage | cut -f 1) &&
+       oldmtimepost=$(test-tool chmtime -v -60 $rr/postimage | cut -f 1) &&
 
        git checkout -b third master &&
        git show second^:a1 | sed "s/To die: t/To die! T/" >a1 &&
@@ -179,7 +179,7 @@ test_expect_success 'first postimage wins' '
 '
 
 test_expect_success 'rerere updates postimage timestamp' '
-       newmtimepost=$(test-chmtime -v +0 $rr/postimage | cut -f 1) &&
+       newmtimepost=$(test-tool chmtime -v +0 $rr/postimage | cut -f 1) &&
        test $oldmtimepost -lt $newmtimepost
 '
 
@@ -220,9 +220,9 @@ test_expect_success 'set up for garbage collection tests' '
        almost_60_days_ago=$((60-60*86400)) &&
        just_over_60_days_ago=$((-1-60*86400)) &&
 
-       test-chmtime =$just_over_60_days_ago $rr/preimage &&
-       test-chmtime =$almost_60_days_ago $rr/postimage &&
-       test-chmtime =$almost_15_days_ago $rr2/preimage
+       test-tool chmtime =$just_over_60_days_ago $rr/preimage &&
+       test-tool chmtime =$almost_60_days_ago $rr/postimage &&
+       test-tool chmtime =$almost_15_days_ago $rr2/preimage
 '
 
 test_expect_success 'gc preserves young or recently used records' '
@@ -232,8 +232,8 @@ test_expect_success 'gc preserves young or recently used records' '
 '
 
 test_expect_success 'old records rest in peace' '
-       test-chmtime =$just_over_60_days_ago $rr/postimage &&
-       test-chmtime =$just_over_15_days_ago $rr2/preimage &&
+       test-tool chmtime =$just_over_60_days_ago $rr/postimage &&
+       test-tool chmtime =$just_over_15_days_ago $rr2/preimage &&
        git rerere gc &&
        ! test -f $rr/preimage &&
        ! test -f $rr2/preimage
@@ -249,8 +249,8 @@ rerere_gc_custom_expiry_test () {
                >"$rr/postimage" &&
 
                two_days_ago=$((-2*86400)) &&
-               test-chmtime =$two_days_ago "$rr/preimage" &&
-               test-chmtime =$two_days_ago "$rr/postimage" &&
+               test-tool chmtime =$two_days_ago "$rr/preimage" &&
+               test-tool chmtime =$two_days_ago "$rr/postimage" &&
 
                find .git/rr-cache -type f | sort >original &&
 
@@ -512,7 +512,7 @@ test_expect_success 'multiple identical conflicts' '
        count_pre_post 2 0 &&
 
        # Pretend that the conflicts were made quite some time ago
-       find .git/rr-cache/ -type f | xargs test-chmtime -172800 &&
+       find .git/rr-cache/ -type f | xargs test-tool chmtime -172800 &&
 
        # Unresolved entries have not expired yet
        git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
@@ -568,7 +568,7 @@ test_expect_success 'multiple identical conflicts' '
        git rerere &&
 
        # Pretend that the resolutions are old again
-       find .git/rr-cache/ -type f | xargs test-chmtime -172800 &&
+       find .git/rr-cache/ -type f | xargs test-tool chmtime -172800 &&
 
        # Resolved entries have not expired yet
        git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
index da10478f59da1a301edf7def229d37fbc964dce9..ff6649ed9a70721523da3c55142a9622b152243a 100755 (executable)
@@ -127,6 +127,11 @@ test_expect_success !MINGW 'shortlog can read --format=raw output' '
        test_cmp expect out
 '
 
+test_expect_success 'shortlog from non-git directory refuses extra arguments' '
+       test_must_fail env GIT_DIR=non-existing git shortlog foo 2>out &&
+       test_i18ngrep "too many arguments" out
+'
+
 test_expect_success 'shortlog should add newline when input line matches wraplen' '
        cat >expect <<\EOF &&
 A U Thor (2):
index fe2d4f15a73f082c516a03b1877c4cf82982138a..af4d9b88762889fb5d2b390f2251bbb39a52d266 100755 (executable)
@@ -101,7 +101,7 @@ test_expect_success \
      ten=0123456789 && hundred=$ten$ten$ten$ten$ten$ten$ten$ten$ten$ten &&
      echo long filename >a/four$hundred &&
      mkdir a/bin &&
-     test-genrandom "frotz" 500000 >a/bin/sh &&
+     test-tool genrandom "frotz" 500000 >a/bin/sh &&
      printf "A\$Format:%s\$O" "$SUBSTFORMAT" >a/substfile1 &&
      printf "A not substituted O" >a/substfile2 &&
      if test_have_prereq SYMLINKS; then
@@ -192,7 +192,7 @@ test_expect_success \
     'validate file modification time' \
     'mkdir extract &&
      "$TAR" xf b.tar -C extract a/a &&
-     test-chmtime -v +0 extract/a/a |cut -f 1 >b.mtime &&
+     test-tool chmtime -v +0 extract/a/a |cut -f 1 >b.mtime &&
      echo "1117231200" >expected.mtime &&
      test_cmp expected.mtime b.mtime'
 
index 9c68b992511b7098df0570ca37e0add468a8a093..65ff60f2ee9b4af61f3f2d7d0f889fc4529e2b60 100755 (executable)
@@ -16,8 +16,8 @@ test_expect_success \
      perl -e "print \"a\" x 4096;" > a &&
      perl -e "print \"b\" x 4096;" > b &&
      perl -e "print \"c\" x 4096;" > c &&
-     test-genrandom "seed a" 2097152 > a_big &&
-     test-genrandom "seed b" 2097152 > b_big &&
+     test-tool genrandom "seed a" 2097152 > a_big &&
+     test-tool genrandom "seed b" 2097152 > b_big &&
      git update-index --add a a_big b b_big c &&
      cat c >d && echo foo >>d && git update-index --add d &&
      tree=$(git write-tree) &&
@@ -311,8 +311,8 @@ test_expect_success 'unpacking with --strict' '
        rm -f .git/index &&
        tail -n 10 LIST | git update-index --index-info &&
        ST=$(git write-tree) &&
-       PACK5=$( git rev-list --objects "$LIST" "$LI" "$ST" | \
-               git pack-objects test-5 ) &&
+       git rev-list --objects "$LIST" "$LI" "$ST" >actual &&
+       PACK5=$( git pack-objects test-5 <actual ) &&
        PACK6=$( (
                        echo "$LIST"
                        echo "$LI"
@@ -358,8 +358,8 @@ test_expect_success 'index-pack with --strict' '
        rm -f .git/index &&
        tail -n 10 LIST | git update-index --index-info &&
        ST=$(git write-tree) &&
-       PACK5=$( git rev-list --objects "$LIST" "$LI" "$ST" | \
-               git pack-objects test-5 ) &&
+       git rev-list --objects "$LIST" "$LI" "$ST" >actual &&
+       PACK5=$( git pack-objects test-5 <actual ) &&
        PACK6=$( (
                        echo "$LIST"
                        echo "$LI"
index cae8c2e8822ccc1e464e3f5b71c99c1f6b1c2323..76f9798ab958cae2414cbcc496bbb22af85f0ac5 100755 (executable)
@@ -12,7 +12,7 @@ test_expect_success \
      for i in a b c
      do
          echo $i >$i &&
-         test-genrandom "$i" 32768 >>$i &&
+        test-tool genrandom "$i" 32768 >>$i &&
          git update-index --add $i || return 1
      done &&
      echo d >d && cat c >>d && git update-index --add d &&
index c2fc584dac3d7e96748866dd0a4ae31f7cae3fc2..bb9b8bb3097c05f6e28c37fa24fa27d5bb5b805d 100755 (executable)
@@ -15,17 +15,17 @@ test_expect_success \
      while test $i -le 100
      do
          iii=$(printf '%03i' $i)
-         test-genrandom "bar" 200 > wide_delta_$iii &&
-         test-genrandom "baz $iii" 50 >> wide_delta_$iii &&
-         test-genrandom "foo"$i 100 > deep_delta_$iii &&
-         test-genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
-         test-genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
+        test-tool genrandom "bar" 200 > wide_delta_$iii &&
+        test-tool genrandom "baz $iii" 50 >> wide_delta_$iii &&
+        test-tool genrandom "foo"$i 100 > deep_delta_$iii &&
+        test-tool genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
+        test-tool genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
          echo $iii >file_$iii &&
-         test-genrandom "$iii" 8192 >>file_$iii &&
+        test-tool genrandom "$iii" 8192 >>file_$iii &&
          git update-index --add file_$iii deep_delta_$iii wide_delta_$iii &&
          i=$(expr $i + 1) || return 1
      done &&
-     { echo 101 && test-genrandom 100 8192; } >file_101 &&
+     { echo 101 && test-tool genrandom 100 8192; } >file_101 &&
      git update-index --add file_101 &&
      tree=$(git write-tree) &&
      commit=$(git commit-tree $tree </dev/null) && {
@@ -262,4 +262,9 @@ EOF
     grep "^warning:.* expected .tagger. line" err
 '
 
+test_expect_success 'index-pack --fsck-objects also warns upon missing tagger in tag' '
+    git index-pack --fsck-objects tag-test-${pack1}.pack 2>err &&
+    grep "^warning:.* expected .tagger. line" err
+'
+
 test_done
index 5940ce2084a6e9cc935a0e5784ff8b6bff8e035d..3634e258f8bf66c2c1917c1598f6f21486c08684 100755 (executable)
@@ -19,14 +19,14 @@ test_description='resilience to pack corruptions with redundant objects'
 # 3) object header is always 2 bytes.
 
 create_test_files() {
-    test-genrandom "foo" 2000 > file_1 &&
-    test-genrandom "foo" 1800 > file_2 &&
-    test-genrandom "foo" 1800 > file_3 &&
+    test-tool genrandom "foo" 2000 > file_1 &&
+    test-tool genrandom "foo" 1800 > file_2 &&
+    test-tool genrandom "foo" 1800 > file_3 &&
     echo " base " >> file_1 &&
     echo " delta1 " >> file_2 &&
     echo " delta delta2 " >> file_3 &&
-    test-genrandom "bar" 150 >> file_2 &&
-    test-genrandom "baz" 100 >> file_3
+    test-tool genrandom "bar" 150 >> file_2 &&
+    test-tool genrandom "baz" 100 >> file_3
 }
 
 create_new_pack() {
index 6694c19a1eecf10117b843bbacbc5bb47924c9c8..f0f6e2a5f3d015cb8d753c3d965c1c42e27b5784 100755 (executable)
@@ -15,7 +15,7 @@ add_blob() {
        BLOB_FILE=.git/objects/$(echo $BLOB | sed "s/^../&\//") &&
        verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
        test_path_is_file $BLOB_FILE &&
-       test-chmtime =+0 $BLOB_FILE
+       test-tool chmtime =+0 $BLOB_FILE
 }
 
 test_expect_success setup '
@@ -33,7 +33,7 @@ test_expect_success 'prune stale packs' '
        orig_pack=$(echo .git/objects/pack/*.pack) &&
        : > .git/objects/tmp_1.pack &&
        : > .git/objects/tmp_2.pack &&
-       test-chmtime =-86501 .git/objects/tmp_1.pack &&
+       test-tool chmtime =-86501 .git/objects/tmp_1.pack &&
        git prune --expire 1.day &&
        test_path_is_file $orig_pack &&
        test_path_is_file .git/objects/tmp_2.pack &&
@@ -47,7 +47,7 @@ test_expect_success 'prune --expire' '
        git prune --expire=1.hour.ago &&
        verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
        test_path_is_file $BLOB_FILE &&
-       test-chmtime =-86500 $BLOB_FILE &&
+       test-tool chmtime =-86500 $BLOB_FILE &&
        git prune --expire 1.day &&
        verbose test $before = $(git count-objects | sed "s/ .*//") &&
        test_path_is_missing $BLOB_FILE
@@ -57,11 +57,11 @@ test_expect_success 'prune --expire' '
 test_expect_success 'gc: implicit prune --expire' '
 
        add_blob &&
-       test-chmtime =-$((2*$week-30)) $BLOB_FILE &&
+       test-tool chmtime =-$((2*$week-30)) $BLOB_FILE &&
        git gc &&
        verbose test $((1 + $before)) = $(git count-objects | sed "s/ .*//") &&
        test_path_is_file $BLOB_FILE &&
-       test-chmtime =-$((2*$week+1)) $BLOB_FILE &&
+       test-tool chmtime =-$((2*$week+1)) $BLOB_FILE &&
        git gc &&
        verbose test $before = $(git count-objects | sed "s/ .*//") &&
        test_path_is_missing $BLOB_FILE
@@ -141,7 +141,7 @@ test_expect_success 'prune: do not prune heads listed as an argument' '
 test_expect_success 'gc --no-prune' '
 
        add_blob &&
-       test-chmtime =-$((5001*$day)) $BLOB_FILE &&
+       test-tool chmtime =-$((5001*$day)) $BLOB_FILE &&
        git config gc.pruneExpire 2.days.ago &&
        git gc --no-prune &&
        verbose test 1 = $(git count-objects | sed "s/ .*//") &&
@@ -163,7 +163,7 @@ test_expect_success 'gc respects gc.pruneExpire' '
 test_expect_success 'gc --prune=<date>' '
 
        add_blob &&
-       test-chmtime =-$((5001*$day)) $BLOB_FILE &&
+       test-tool chmtime =-$((5001*$day)) $BLOB_FILE &&
        git gc --prune=5002.days.ago &&
        test_path_is_file $BLOB_FILE &&
        git gc --prune=5000.days.ago &&
@@ -205,7 +205,7 @@ test_expect_success 'prune --expire=never' '
 
 test_expect_success 'gc: prune old objects after local clone' '
        add_blob &&
-       test-chmtime =-$((2*$week+1)) $BLOB_FILE &&
+       test-tool chmtime =-$((2*$week+1)) $BLOB_FILE &&
        git clone --no-hardlinks . aclone &&
        (
                cd aclone &&
index 20e2473a03b645d690b25598bc0cb2e421034b6c..f6d600fd82fd499a80f67f0a4ff1a60b4a1db1b5 100755 (executable)
@@ -284,7 +284,7 @@ test_expect_success JGIT 'jgit can read our bitmaps' '
 '
 
 test_expect_success 'splitting packs does not generate bogus bitmaps' '
-       test-genrandom foo $((1024 * 1024)) >rand &&
+       test-tool genrandom foo $((1024 * 1024)) >rand &&
        git add rand &&
        git commit -m "commit with big file" &&
        git -c pack.packSizeLimit=500k repack -adb &&
index 9372508c993e72ad99da004ca2400df81c721006..4fe4ad9d6166d9a82cb944fadfc0317b5052b973 100755 (executable)
@@ -163,8 +163,8 @@ test_expect_success 'bogus offset inside v2 extended table' '
 
 test_expect_success 'bogus OFS_DELTA in packfile' '
        # Generate a pack with a delta in it.
-       base=$(test-genrandom foo 3000 | git hash-object --stdin -w) &&
-       delta=$(test-genrandom foo 2000 | git hash-object --stdin -w) &&
+       base=$(test-tool genrandom foo 3000 | git hash-object --stdin -w) &&
+       delta=$(test-tool genrandom foo 2000 | git hash-object --stdin -w) &&
        do_pack "$base $delta" --delta-base-offset &&
        rm -f .git/objects/??/* &&
 
index f7dbdfb412f3ee139ce88e56d1cd8be166d3bbd2..f31995d3d28d9fcd590c91aed7686aef87e332af 100755 (executable)
@@ -73,7 +73,7 @@ make_pack () {
 }
 
 test_expect_success 'setup' '
-       test-genrandom base 4096 >base &&
+       test-tool genrandom base 4096 >base &&
        for i in one two
        do
                # we want shared content here to encourage deltas...
index 2ed479b712aed7a8f11c8495c0eba72788eb4f26..0f06c40eb13f31d8f06962dd1c88c5ff5810618f 100755 (executable)
@@ -47,7 +47,7 @@ test_description='pack-objects breaks long cross-pack delta chains'
 # repeatedly-modified file to generate the delta chain).
 
 test_expect_success 'create series of packs' '
-       test-genrandom foo 4096 >content &&
+       test-tool genrandom foo 4096 >content &&
        prev= &&
        for i in $(test_seq 1 10)
        do
index d375d7110d102d6b3ea194e4f09a9b5f391ed496..911eae1bf7518485ace0fbd417e3ea0bbb18a9cf 100755 (executable)
@@ -180,7 +180,7 @@ test_expect_success 'receive-pack runs auto-gc in remote repo' '
            # And create a file that follows the temporary object naming
            # convention for the auto-gc to remove
            : >.git/objects/tmp_test_object &&
-           test-chmtime =-1209601 .git/objects/tmp_test_object
+           test-tool chmtime =-1209601 .git/objects/tmp_test_object
        ) &&
        (
            cd parent &&
index ec9ba9bf6e7c709a0a1cc25fcfdec92c4c5c9b55..0680dec808574fb334612163ebd43d31418a37d5 100755 (executable)
@@ -482,24 +482,24 @@ test_expect_success 'set up tests of missing reference' '
 test_expect_success 'test lonely missing ref' '
        (
                cd client &&
-               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy
-       ) >/dev/null 2>error-m &&
+               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy 2>../error-m
+       ) &&
        test_i18ncmp expect-error error-m
 '
 
 test_expect_success 'test missing ref after existing' '
        (
                cd client &&
-               test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy
-       ) >/dev/null 2>error-em &&
+               test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy 2>../error-em
+       ) &&
        test_i18ncmp expect-error error-em
 '
 
 test_expect_success 'test missing ref before existing' '
        (
                cd client &&
-               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A
-       ) >/dev/null 2>error-me &&
+               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A 2>../error-me
+       ) &&
        test_i18ncmp expect-error error-me
 '
 
index 668c54be41eb4fda17e9b43000081c7b374990c2..ae5a530a2dc61a7926abfd471e3325d1638ca457 100755 (executable)
@@ -222,12 +222,9 @@ test_expect_success 'fetch uses remote ref names to describe new refs' '
        (
                cd descriptive &&
                git fetch o 2>actual &&
-               grep " -> refs/crazyheads/descriptive-branch$" actual |
-               test_i18ngrep "new branch" &&
-               grep " -> descriptive-tag$" actual |
-               test_i18ngrep "new tag" &&
-               grep " -> crazy$" actual |
-               test_i18ngrep "new ref"
+               test_i18ngrep "new branch.* -> refs/crazyheads/descriptive-branch$" actual &&
+               test_i18ngrep "new tag.* -> descriptive-tag$" actual &&
+               test_i18ngrep "new ref.* -> crazy$" actual
        ) &&
        git checkout master
 '
@@ -543,82 +540,232 @@ test_expect_success "should be able to fetch with duplicate refspecs" '
 set_config_tristate () {
        # var=$1 val=$2
        case "$2" in
-       unset)  test_unconfig "$1" ;;
-       *)      git config "$1" "$2" ;;
+       unset)
+               test_unconfig "$1"
+               ;;
+       *)
+               git config "$1" "$2"
+               key=$(echo $1 | sed -e 's/^remote\.origin/fetch/')
+               git_fetch_c="$git_fetch_c -c $key=$2"
+               ;;
        esac
 }
 
 test_configured_prune () {
-       fetch_prune=$1 remote_origin_prune=$2 cmdline=$3 expected=$4
+       test_configured_prune_type "$@" "name"
+       test_configured_prune_type "$@" "link"
+}
 
-       test_expect_success "prune fetch.prune=$1 remote.origin.prune=$2${3:+ $3}; $4" '
+test_configured_prune_type () {
+       fetch_prune=$1
+       remote_origin_prune=$2
+       fetch_prune_tags=$3
+       remote_origin_prune_tags=$4
+       expected_branch=$5
+       expected_tag=$6
+       cmdline=$7
+       mode=$8
+
+       if test -z "$cmdline_setup"
+       then
+               test_expect_success 'setup cmdline_setup variable for subsequent test' '
+                       remote_url="file://$(git -C one config remote.origin.url)" &&
+                       remote_fetch="$(git -C one config remote.origin.fetch)" &&
+                       cmdline_setup="\"$remote_url\" \"$remote_fetch\""
+               '
+       fi
+
+       if test "$mode" = 'link'
+       then
+               new_cmdline=""
+
+               if test "$cmdline" = ""
+               then
+                       new_cmdline=$cmdline_setup
+               else
+                       new_cmdline=$(printf "%s" "$cmdline" | perl -pe 's[origin(?!/)]["'"$remote_url"'"]g')
+               fi
+
+               if test "$fetch_prune_tags" = 'true' ||
+                  test "$remote_origin_prune_tags" = 'true'
+               then
+                       if ! printf '%s' "$cmdline\n" | grep -q refs/remotes/origin/
+                       then
+                               new_cmdline="$new_cmdline refs/tags/*:refs/tags/*"
+                       fi
+               fi
+
+               cmdline="$new_cmdline"
+       fi
+
+       test_expect_success "$mode prune fetch.prune=$1 remote.origin.prune=$2 fetch.pruneTags=$3 remote.origin.pruneTags=$4${7:+ $7}; branch:$5 tag:$6" '
                # make sure a newbranch is there in . and also in one
                git branch -f newbranch &&
+               git tag -f newtag &&
                (
                        cd one &&
                        test_unconfig fetch.prune &&
+                       test_unconfig fetch.pruneTags &&
                        test_unconfig remote.origin.prune &&
-                       git fetch &&
-                       git rev-parse --verify refs/remotes/origin/newbranch
+                       test_unconfig remote.origin.pruneTags &&
+                       git fetch '"$cmdline_setup"' &&
+                       git rev-parse --verify refs/remotes/origin/newbranch &&
+                       git rev-parse --verify refs/tags/newtag
                ) &&
 
                # now remove it
                git branch -d newbranch &&
+               git tag -d newtag &&
 
                # then test
                (
                        cd one &&
+                       git_fetch_c="" &&
                        set_config_tristate fetch.prune $fetch_prune &&
+                       set_config_tristate fetch.pruneTags $fetch_prune_tags &&
                        set_config_tristate remote.origin.prune $remote_origin_prune &&
-
-                       git fetch $cmdline &&
-                       case "$expected" in
+                       set_config_tristate remote.origin.pruneTags $remote_origin_prune_tags &&
+
+                       if test "$mode" != "link"
+                       then
+                               git_fetch_c=""
+                       fi &&
+                       git$git_fetch_c fetch '"$cmdline"' &&
+                       case "$expected_branch" in
                        pruned)
                                test_must_fail git rev-parse --verify refs/remotes/origin/newbranch
                                ;;
                        kept)
                                git rev-parse --verify refs/remotes/origin/newbranch
                                ;;
+                       esac &&
+                       case "$expected_tag" in
+                       pruned)
+                               test_must_fail git rev-parse --verify refs/tags/newtag
+                               ;;
+                       kept)
+                               git rev-parse --verify refs/tags/newtag
+                               ;;
                        esac
                )
        '
 }
 
-test_configured_prune unset unset ""           kept
-test_configured_prune unset unset "--no-prune" kept
-test_configured_prune unset unset "--prune"    pruned
-
-test_configured_prune false unset ""           kept
-test_configured_prune false unset "--no-prune" kept
-test_configured_prune false unset "--prune"    pruned
-
-test_configured_prune true  unset ""           pruned
-test_configured_prune true  unset "--prune"    pruned
-test_configured_prune true  unset "--no-prune" kept
-
-test_configured_prune unset false ""           kept
-test_configured_prune unset false "--no-prune" kept
-test_configured_prune unset false "--prune"    pruned
-
-test_configured_prune false false ""           kept
-test_configured_prune false false "--no-prune" kept
-test_configured_prune false false "--prune"    pruned
-
-test_configured_prune true  false ""           kept
-test_configured_prune true  false "--prune"    pruned
-test_configured_prune true  false "--no-prune" kept
-
-test_configured_prune unset true  ""           pruned
-test_configured_prune unset true  "--no-prune" kept
-test_configured_prune unset true  "--prune"    pruned
-
-test_configured_prune false true  ""           pruned
-test_configured_prune false true  "--no-prune" kept
-test_configured_prune false true  "--prune"    pruned
-
-test_configured_prune true  true  ""           pruned
-test_configured_prune true  true  "--prune"    pruned
-test_configured_prune true  true  "--no-prune" kept
+# $1 config: fetch.prune
+# $2 config: remote.<name>.prune
+# $3 config: fetch.pruneTags
+# $4 config: remote.<name>.pruneTags
+# $5 expect: branch to be pruned?
+# $6 expect: tag to be pruned?
+# $7 git-fetch $cmdline:
+#
+#                     $1    $2    $3    $4    $5     $6     $7
+test_configured_prune unset unset unset unset kept   kept   ""
+test_configured_prune unset unset unset unset kept   kept   "--no-prune"
+test_configured_prune unset unset unset unset pruned kept   "--prune"
+test_configured_prune unset unset unset unset kept   pruned \
+       "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune unset unset unset unset pruned pruned \
+       "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune false unset unset unset kept   kept   ""
+test_configured_prune false unset unset unset kept   kept   "--no-prune"
+test_configured_prune false unset unset unset pruned kept   "--prune"
+
+test_configured_prune true  unset unset unset pruned kept   ""
+test_configured_prune true  unset unset unset pruned kept   "--prune"
+test_configured_prune true  unset unset unset kept   kept   "--no-prune"
+
+test_configured_prune unset false unset unset kept   kept   ""
+test_configured_prune unset false unset unset kept   kept   "--no-prune"
+test_configured_prune unset false unset unset pruned kept   "--prune"
+
+test_configured_prune false false unset unset kept   kept   ""
+test_configured_prune false false unset unset kept   kept   "--no-prune"
+test_configured_prune false false unset unset pruned kept   "--prune"
+test_configured_prune false false unset unset kept   pruned \
+       "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune false false unset unset pruned pruned \
+       "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune true  false unset unset kept   kept   ""
+test_configured_prune true  false unset unset pruned kept   "--prune"
+test_configured_prune true  false unset unset kept   kept   "--no-prune"
+
+test_configured_prune unset true  unset unset pruned kept   ""
+test_configured_prune unset true  unset unset kept   kept   "--no-prune"
+test_configured_prune unset true  unset unset pruned kept   "--prune"
+
+test_configured_prune false true  unset unset pruned kept   ""
+test_configured_prune false true  unset unset kept   kept   "--no-prune"
+test_configured_prune false true  unset unset pruned kept   "--prune"
+
+test_configured_prune true  true  unset unset pruned kept   ""
+test_configured_prune true  true  unset unset pruned kept   "--prune"
+test_configured_prune true  true  unset unset kept   kept   "--no-prune"
+test_configured_prune true  true  unset unset kept   pruned \
+       "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune true  true  unset unset pruned pruned \
+       "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+# --prune-tags on its own does nothing, needs --prune as well, same
+# for fetch.pruneTags without fetch.prune
+test_configured_prune unset unset unset unset kept kept     "--prune-tags"
+test_configured_prune unset unset true unset  kept kept     ""
+test_configured_prune unset unset unset true  kept kept     ""
+
+# These will prune the tags
+test_configured_prune unset unset unset unset pruned pruned "--prune --prune-tags"
+test_configured_prune true  unset true  unset pruned pruned ""
+test_configured_prune unset true  unset true  pruned pruned ""
+
+# remote.<name>.pruneTags overrides fetch.pruneTags, just like
+# remote.<name>.prune overrides fetch.prune if set.
+test_configured_prune true  unset true unset pruned pruned  ""
+test_configured_prune false true  false true  pruned pruned ""
+test_configured_prune true  false true  false kept   kept   ""
+
+# When --prune-tags is supplied it's ignored if an explicit refspec is
+# given, same for the configuration options.
+test_configured_prune unset unset unset unset pruned kept \
+       "--prune --prune-tags origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset true  unset pruned kept \
+       "--prune origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset unset true pruned  kept \
+       "--prune origin +refs/heads/*:refs/remotes/origin/*"
+
+# Pruning also takes place if a file:// url replaces a named
+# remote. However, because there's no implicit
+# +refs/heads/*:refs/remotes/origin/* refspec and supplying it on the
+# command-line negates --prune-tags, the branches will not be pruned.
+test_configured_prune_type unset unset unset unset kept   kept   "origin --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept   kept   "origin --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept   pruned "origin --prune --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "--prune --prune-tags origin" "name"
+test_configured_prune_type unset unset unset unset kept   pruned "--prune --prune-tags origin" "link"
+test_configured_prune_type unset unset true  unset pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset true  unset kept   pruned "--prune origin" "link"
+test_configured_prune_type unset unset unset true  pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset unset true  kept   pruned "--prune origin" "link"
+test_configured_prune_type true  unset true  unset pruned pruned "origin" "name"
+test_configured_prune_type true  unset true  unset kept   pruned "origin" "link"
+test_configured_prune_type unset  true true  unset pruned pruned "origin" "name"
+test_configured_prune_type unset  true true  unset kept   pruned "origin" "link"
+test_configured_prune_type unset  true unset true  pruned pruned "origin" "name"
+test_configured_prune_type unset  true unset true  kept   pruned "origin" "link"
+
+# When all remote.origin.fetch settings are deleted a --prune
+# --prune-tags still implicitly supplies refs/tags/*:refs/tags/* so
+# tags, but not tracking branches, will be deleted.
+test_expect_success 'remove remote.origin.fetch "one"' '
+       (
+               cd one &&
+               git config --unset-all remote.origin.fetch
+       )
+'
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
 
 test_expect_success 'all boundary commits are excluded' '
        test_commit base &&
@@ -693,8 +840,8 @@ test_expect_success C_LOCALE_OUTPUT 'fetch aligned output' '
        test_commit looooooooooooong-tag &&
        (
                cd full-output &&
-               git -c fetch.output=full fetch origin 2>&1 | \
-                       grep -e "->" | cut -c 22- >../actual
+               git -c fetch.output=full fetch origin >actual 2>&1 &&
+               grep -e "->" actual | cut -c 22- >../actual
        ) &&
        cat >expect <<-\EOF &&
        master               -> origin/master
@@ -708,8 +855,8 @@ test_expect_success C_LOCALE_OUTPUT 'fetch compact output' '
        test_commit extraaa &&
        (
                cd compact &&
-               git -c fetch.output=compact fetch origin 2>&1 | \
-                       grep -e "->" | cut -c 22- >../actual
+               git -c fetch.output=compact fetch origin >actual 2>&1 &&
+               grep -e "->" actual | cut -c 22- >../actual
        ) &&
        cat >expect <<-\EOF &&
        master     -> origin/*
index 177897ea0b1e00cc4ec0f0e43510411ab19f1681..82239138d585bb859a1f2f81394cb6b6d8313bf4 100755 (executable)
@@ -1418,7 +1418,7 @@ test_expect_success 'receive.denyCurrentBranch = updateInstead' '
                cd testrepo &&
                git reset --hard HEAD^ &&
                test $(git -C .. rev-parse HEAD^) = $(git rev-parse HEAD) &&
-               test-chmtime +100 path1
+               test-tool chmtime +100 path1
        ) &&
        git push testrepo master &&
        (
index a552ad4ead899fbb431c68bef558811d25cd8c63..9cc4b569c0566da2f5c2778f404e67e20b29ed06 100755 (executable)
@@ -85,7 +85,7 @@ test_expect_success "fetch --recurse-submodules -j2 has the same output behaviou
        add_upstream_commit &&
        (
                cd downstream &&
-               GIT_TRACE=$(pwd)/../trace.out git fetch --recurse-submodules -j2 2>../actual.err
+               GIT_TRACE="$TRASH_DIRECTORY/trace.out" git fetch --recurse-submodules -j2 2>../actual.err
        ) &&
        test_must_be_empty actual.out &&
        test_i18ncmp expect.err actual.err &&
@@ -485,7 +485,7 @@ test_expect_success "don't fetch submodule when newly recorded commits are alrea
        )
 '
 
-test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodule entry" '
+test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodules entry" '
        (
                cd downstream &&
                git fetch --recurse-submodules
index 2e42cf331648a8ba57b3806443e70b94b09eb382..91f28c2f783df7391d22bd893e2488e56658515b 100755 (executable)
@@ -18,14 +18,6 @@ setup_repository () {
        )
 }
 
-verify_stderr () {
-       cat >expected &&
-       # We're not interested in the error
-       # "fatal: The remote end hung up unexpectedly":
-       test_i18ngrep -E '^(fatal|warning):' <error | grep -v 'hung up' >actual | sort &&
-       test_i18ncmp expected actual
-}
-
 test_expect_success 'setup' '
        git commit --allow-empty -m "Initial" &&
        git branch branch1 &&
@@ -48,9 +40,7 @@ test_expect_success 'fetch conflict: config vs. config' '
                "+refs/heads/branch2:refs/remotes/origin/branch1" && (
                cd ccc &&
                test_must_fail git fetch origin 2>error &&
-               verify_stderr <<-\EOF
-               fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1
-               EOF
+               test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error
        )
 '
 
@@ -77,9 +67,7 @@ test_expect_success 'fetch conflict: arg vs. arg' '
                test_must_fail git fetch origin \
                        refs/heads/*:refs/remotes/origin/* \
                        refs/heads/branch2:refs/remotes/origin/branch1 2>error &&
-               verify_stderr <<-\EOF
-               fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1
-               EOF
+               test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error
        )
 '
 
@@ -90,10 +78,8 @@ test_expect_success 'fetch conflict: criss-cross args' '
                git fetch origin \
                        refs/heads/branch1:refs/remotes/origin/branch2 \
                        refs/heads/branch2:refs/remotes/origin/branch1 2>error &&
-               verify_stderr <<-\EOF
-               warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2
-               warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1
-               EOF
+               test_i18ngrep "warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2" error &&
+               test_i18ngrep "warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1" error
        )
 '
 
index d38bf32470c592f89cbd31aa08a3c1de6fffe540..21340e89c9650e43fda9a6176c6fe814360edb8b 100755 (executable)
@@ -234,7 +234,7 @@ test_expect_success TTY 'push --no-progress silences progress but not status' '
        test_commit no-progress &&
        test_terminal git push --no-progress >output 2>&1 &&
        test_i18ngrep "^To http" output &&
-       test_i18ngrep ! "^Writing objects"
+       test_i18ngrep ! "^Writing objects" output
 '
 
 test_expect_success 'push --progress shows progress to non-tty' '
index 463783789c8ccda6a483197f6626afbfca7d8ccf..b47a95871cac3dd8593b3b9262d238be3914ac62 100755 (executable)
@@ -217,17 +217,32 @@ test_expect_success 'invalid push option in config' '
        test_refs master HEAD@{1}
 '
 
+test_expect_success 'push options keep quoted characters intact (direct)' '
+       mk_repo_pair &&
+       git -C upstream config receive.advertisePushOptions true &&
+       test_commit -C workbench one &&
+       git -C workbench push --push-option="\"embedded quotes\"" up master &&
+       echo "\"embedded quotes\"" >expect &&
+       test_cmp expect upstream/.git/hooks/pre-receive.push_options
+'
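+
+# (Aside, stated as an assumption rather than fact about the helpers defined
+# earlier in this file: the pre-receive.push_options file checked above is
+# presumably written by a pre-receive hook installed by mk_repo_pair, along
+# the lines of the sketch below, using the GIT_PUSH_OPTION_* variables git
+# exposes to hooks:
+#
+#   i=0
+#   while test "$i" -lt "${GIT_PUSH_OPTION_COUNT:-0}"
+#   do
+#       eval "echo \"\$GIT_PUSH_OPTION_$i\""
+#       i=$((i+1))
+#   done >pre-receive.push_options
+# )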
+
 . "$TEST_DIRECTORY"/lib-httpd.sh
 start_httpd
 
-test_expect_success 'push option denied properly by http server' '
+# set up an http repository for fetching/pushing, with the
+# receive.advertisePushOptions bool set to $1
+mk_http_pair () {
        test_when_finished "rm -rf test_http_clone" &&
-       test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
+       test_when_finished 'rm -rf "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git' &&
        mk_repo_pair &&
-       git -C upstream config receive.advertisePushOptions false &&
+       git -C upstream config receive.advertisePushOptions "$1" &&
        git -C upstream config http.receivepack true &&
        cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
-       git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+       git clone "$HTTPD_URL"/smart/upstream test_http_clone
+}
+
+test_expect_success 'push option denied properly by http server' '
+       mk_http_pair false &&
        test_commit -C test_http_clone one &&
        test_must_fail git -C test_http_clone push --push-option=asdf origin master 2>actual &&
        test_i18ngrep "the receiving end does not support push options" actual &&
@@ -235,13 +250,7 @@ test_expect_success 'push option denied properly by http server' '
 '
 
 test_expect_success 'push options work properly across http' '
-       test_when_finished "rm -rf test_http_clone" &&
-       test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
-       mk_repo_pair &&
-       git -C upstream config receive.advertisePushOptions true &&
-       git -C upstream config http.receivepack true &&
-       cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
-       git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+       mk_http_pair true &&
 
        test_commit -C test_http_clone one &&
        git -C test_http_clone push origin master &&
@@ -260,6 +269,15 @@ test_expect_success 'push options work properly across http' '
        test_cmp expect actual
 '
 
+test_expect_success 'push options keep quoted characters intact (http)' '
+       mk_http_pair true &&
+
+       test_commit -C test_http_clone one &&
+       git -C test_http_clone push --push-option="\"embedded quotes\"" origin master &&
+       echo "\"embedded quotes\"" >expect &&
+       test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options
+'
+
 stop_httpd
 
 test_done
index 10cb0be2b7ea42e5a1767edcd3515f933e6ef4af..0b0e987fdb73fcd7d8f54c393ee7deb15a4b5b8c 100755 (executable)
@@ -44,7 +44,7 @@ test_pack_input_limit () {
 }
 
 test_expect_success "create known-size (1024 bytes) commit" '
-       test-genrandom foo 1024 >one-k &&
+       test-tool genrandom foo 1024 >one-k &&
        git add one-k &&
        test_commit one-k
 '
index 113c87007f31abced0d93b3702f0b54f50ff4679..faaa51ccc562545c18180410e68483019a80832d 100755 (executable)
@@ -39,7 +39,7 @@ test_expect_success 'push to repo path with path separator (colon)' '
        # so make it likely for us to generate a delta by having
        # a non-trivial file with multiple versions.
 
-       test-genrandom foo 4096 >file.bin &&
+       test-tool genrandom foo 4096 >file.bin &&
        git add file.bin &&
        git commit -m bin &&
 
index 755b05a8ae9de940be8d9ad4c89473d836dcdf20..0d4c52016b2b3651fa638fcc416f22d236c87fc2 100755 (executable)
@@ -50,7 +50,7 @@ test_expect_success 'no-op fetch -v stderr is as expected' '
 '
 
 test_expect_success 'no-op fetch without "-v" is quiet' '
-       (cd clone && git fetch) 2>stderr &&
+       (cd clone && git fetch 2>../stderr) &&
        ! test -s stderr
 '
 
index 191d6d3a780325b6f3e294b6b42540569ef129d1..df822d9a3e9e7c7b4b7031ffa75716cd7ba6103a 100755 (executable)
@@ -21,7 +21,7 @@ test_expect_success CLONE_2GB 'setup' '
         do
                printf "Generating blob $i/$blobcount\r" >&2 &&
                printf "blob\nmark :$i\ndata $blobsize\n" &&
-               #test-genrandom $i $blobsize &&
+               #test-tool genrandom $i $blobsize &&
                printf "%-${blobsize}s" $i &&
                echo "M 100644 :$i $i" >> commit
                i=$(($i+1)) ||
index 29d8631184320cfb11b034df29a12c9260d2ea66..cee556536757128861e166ea8e2cab7f975282c6 100755 (executable)
@@ -143,4 +143,15 @@ test_expect_success 'manual prefetch of missing objects' '
        test_line_count = 0 observed.oids
 '
 
+test_expect_success 'partial clone with transfer.fsckobjects=1 uses index-pack --fsck-objects' '
+       git init src &&
+       test_commit -C src x &&
+       test_config -C src uploadpack.allowfilter 1 &&
+       test_config -C src uploadpack.allowanysha1inwant 1 &&
+
+       GIT_TRACE="$(pwd)/trace" git -c transfer.fsckobjects=1 \
+               clone --filter="blob:none" "file://$(pwd)/src" dst &&
+       grep "git index-pack.*--fsck-objects" trace
+'
+
 test_done
diff --git a/t/t5701-git-serve.sh b/t/t5701-git-serve.sh
new file mode 100755 (executable)
index 0000000..72d7bc5
--- /dev/null
@@ -0,0 +1,176 @@
+#!/bin/sh
+
+test_description='test git-serve and server commands'
+
+. ./test-lib.sh
+
+test_expect_success 'test capability advertisement' '
+       cat >expect <<-EOF &&
+       version 2
+       agent=git/$(git version | cut -d" " -f3)
+       ls-refs
+       fetch=shallow
+       0000
+       EOF
+
+       git serve --advertise-capabilities >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'stateless-rpc flag does not list capabilities' '
+       # Empty request
+       test-pkt-line pack >in <<-EOF &&
+       0000
+       EOF
+       git serve --stateless-rpc >out <in &&
+       test_must_be_empty out &&
+
+       # EOF
+       git serve --stateless-rpc >out &&
+       test_must_be_empty out
+'
+
+test_expect_success 'request invalid capability' '
+       test-pkt-line pack >in <<-EOF &&
+       foobar
+       0000
+       EOF
+       test_must_fail git serve --stateless-rpc 2>err <in &&
+       test_i18ngrep "unknown capability" err
+'
+
+test_expect_success 'request with no command' '
+       test-pkt-line pack >in <<-EOF &&
+       agent=git/test
+       0000
+       EOF
+       test_must_fail git serve --stateless-rpc 2>err <in &&
+       test_i18ngrep "no command requested" err
+'
+
+test_expect_success 'request invalid command' '
+       test-pkt-line pack >in <<-EOF &&
+       command=foo
+       agent=git/test
+       0000
+       EOF
+       test_must_fail git serve --stateless-rpc 2>err <in &&
+       test_i18ngrep "invalid command" err
+'
+
+# Test the basics of ls-refs
+#
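+# A quick, informal reminder (my reading, not normative) of the pkt-line
+# framing test-pkt-line produces for these requests: each input line becomes
+# a length-prefixed packet, "0001" is the delimiter packet separating the
+# capability section from the command arguments, and "0000" is the flush
+# packet terminating the request.  For example, "command=ls-refs\n" followed
+# by a flush would go over the wire roughly as:
+#
+#   0014command=ls-refs
+#   0000
+#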
+test_expect_success 'setup some refs and tags' '
+       test_commit one &&
+       git branch dev master &&
+       test_commit two &&
+       git symbolic-ref refs/heads/release refs/heads/master &&
+       git tag -a -m "annotated tag" annotated-tag
+'
+
+test_expect_success 'basics of ls-refs' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse HEAD) HEAD
+       $(git rev-parse refs/heads/dev) refs/heads/dev
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/heads/release) refs/heads/release
+       $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag
+       $(git rev-parse refs/tags/one) refs/tags/one
+       $(git rev-parse refs/tags/two) refs/tags/two
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'basic ref-prefixes' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       ref-prefix refs/heads/master
+       ref-prefix refs/tags/one
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/tags/one) refs/tags/one
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'refs/heads prefix' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       ref-prefix refs/heads/
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/heads/dev) refs/heads/dev
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/heads/release) refs/heads/release
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'peel parameter' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       peel
+       ref-prefix refs/tags/
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag peeled:$(git rev-parse refs/tags/annotated-tag^{})
+       $(git rev-parse refs/tags/one) refs/tags/one
+       $(git rev-parse refs/tags/two) refs/tags/two
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_expect_success 'symrefs parameter' '
+       test-pkt-line pack >in <<-EOF &&
+       command=ls-refs
+       0001
+       symrefs
+       ref-prefix refs/heads/
+       0000
+       EOF
+
+       cat >expect <<-EOF &&
+       $(git rev-parse refs/heads/dev) refs/heads/dev
+       $(git rev-parse refs/heads/master) refs/heads/master
+       $(git rev-parse refs/heads/release) refs/heads/release symref-target:refs/heads/master
+       0000
+       EOF
+
+       git serve --stateless-rpc <in >out &&
+       test-pkt-line unpack <out >actual &&
+       test_cmp actual expect
+'
+
+test_done
diff --git a/t/t5702-protocol-v2.sh b/t/t5702-protocol-v2.sh
new file mode 100755 (executable)
index 0000000..56f7c3c
--- /dev/null
@@ -0,0 +1,273 @@
+#!/bin/sh
+
+test_description='test git wire-protocol version 2'
+
+TEST_NO_CREATE_REPO=1
+
+. ./test-lib.sh
+
+# Test protocol v2 with 'git://' transport
+#
+. "$TEST_DIRECTORY"/lib-git-daemon.sh
+start_git_daemon --export-all --enable=receive-pack
+daemon_parent=$GIT_DAEMON_DOCUMENT_ROOT_PATH/parent
+
+test_expect_success 'create repo to be served by git-daemon' '
+       git init "$daemon_parent" &&
+       test_commit -C "$daemon_parent" one
+'
+
+test_expect_success 'list refs with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote --symref "$GIT_DAEMON_URL/parent" >actual &&
+
+       # Client requested to use protocol v2
+       grep "git> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "git< version 2" log &&
+
+       git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect &&
+       test_cmp actual expect
+'
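+
+# Note on the grep patterns above (my understanding of the transport, offered
+# as an aside): over git:// the version request travels as an "extra
+# parameter" after a second NUL byte in the initial request, so the raw
+# request looks roughly like
+#
+#   git-upload-pack /parent\0host=<host>\0\0version=2\0
+#
+# which is what the "\\0\\0version=2\\0" pattern in the packet trace matches.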
+
+test_expect_success 'ref advertisement is filtered with ls-remote using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote "$GIT_DAEMON_URL/parent" master >actual &&
+
+       cat >expect <<-EOF &&
+       $(git -C "$daemon_parent" rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+       EOF
+
+       test_cmp actual expect
+'
+
+test_expect_success 'clone with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               clone "$GIT_DAEMON_URL/parent" daemon_child &&
+
+       git -C daemon_child log -1 --format=%s >actual &&
+       git -C "$daemon_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "clone> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "clone< version 2" log
+'
+
+test_expect_success 'fetch with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C "$daemon_parent" two &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+               fetch &&
+
+       git -C daemon_child log -1 --format=%s origin/master >actual &&
+       git -C "$daemon_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "fetch> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "fetch< version 2" log
+'
+
+test_expect_success 'pull with git:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+               pull &&
+
+       git -C daemon_child log -1 --format=%s >actual &&
+       git -C "$daemon_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "fetch> .*\\\0\\\0version=2\\\0$" log &&
+       # Server responded using protocol v2
+       grep "fetch< version 2" log
+'
+
+test_expect_success 'push with git:// and a config of v2 does not request v2' '
+       test_when_finished "rm -f log" &&
+
+       # Until v2 for push is designed, make sure that if a client has
+       # protocol.version configured to use v2, the client instead falls
+       # back and uses v0.
+
+       test_commit -C daemon_child three &&
+
+       # Push to another branch, as the target repository has the
+       # master branch checked out and we cannot push into it.
+       GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+               push origin HEAD:client_branch &&
+
+       git -C daemon_child log -1 --format=%s >actual &&
+       git -C "$daemon_parent" log -1 --format=%s client_branch >expect &&
+       test_cmp expect actual &&
+
+       # Client did not request to use protocol v2
+       ! grep "push> .*\\\0\\\0version=2\\\0$" log &&
+       # Server did not respond using protocol v2
+       ! grep "push< version 2" log
+'
+
+stop_git_daemon
+
+# Test protocol v2 with 'file://' transport
+#
+test_expect_success 'create repo to be served by file:// transport' '
+       git init file_parent &&
+       test_commit -C file_parent one
+'
+
+test_expect_success 'list refs with file:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote --symref "file://$(pwd)/file_parent" >actual &&
+
+       # Server responded using protocol v2
+       grep "git< version 2" log &&
+
+       git ls-remote --symref "file://$(pwd)/file_parent" >expect &&
+       test_cmp actual expect
+'
+
+test_expect_success 'ref advertisement is filtered with ls-remote using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               ls-remote "file://$(pwd)/file_parent" master >actual &&
+
+       cat >expect <<-EOF &&
+       $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+       EOF
+
+       test_cmp actual expect
+'
+
+test_expect_success 'clone with file:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+               clone "file://$(pwd)/file_parent" file_child &&
+
+       git -C file_child log -1 --format=%s >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Server responded using protocol v2
+       grep "clone< version 2" log
+'
+
+test_expect_success 'fetch with file:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C file_parent two &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+               fetch origin &&
+
+       git -C file_child log -1 --format=%s origin/master >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Server responded using protocol v2
+       grep "fetch< version 2" log
+'
+
+test_expect_success 'ref advertisement is filtered during fetch using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C file_parent three &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+               fetch origin master &&
+
+       git -C file_child log -1 --format=%s origin/master >actual &&
+       git -C file_parent log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       ! grep "refs/tags/one" log &&
+       ! grep "refs/tags/two" log &&
+       ! grep "refs/tags/three" log
+'
+
+# Test protocol v2 with 'http://' transport
+#
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'create repo to be served by http:// transport' '
+       git init "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" config http.receivepack true &&
+       test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" one
+'
+
+test_expect_success 'clone with http:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE_CURL="$(pwd)/log" git -c protocol.version=2 \
+               clone "$HTTPD_URL/smart/http_parent" http_child &&
+
+       git -C http_child log -1 --format=%s >actual &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Client requested to use protocol v2
+       grep "Git-Protocol: version=2" log &&
+       # Server responded using protocol v2
+       grep "git< version 2" log
+'
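+
+# Note on the "Git-Protocol: version=2" check above (an informal aside): over
+# HTTP the version request is carried as a request header rather than inside
+# the pkt-line stream, so the initial request is roughly
+#
+#   GET /smart/http_parent/info/refs?service=git-upload-pack HTTP/1.1
+#   Git-Protocol: version=2
+#
+# which is why GIT_TRACE_CURL is also written to the log here.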
+
+test_expect_success 'fetch with http:// using protocol v2' '
+       test_when_finished "rm -f log" &&
+
+       test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" two &&
+
+       GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \
+               fetch &&
+
+       git -C http_child log -1 --format=%s origin/master >actual &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s >expect &&
+       test_cmp expect actual &&
+
+       # Server responded using protocol v2
+       grep "git< version 2" log
+'
+
+test_expect_success 'push with http:// and a config of v2 does not request v2' '
+       test_when_finished "rm -f log" &&
+       # Until v2 for push is designed, make sure that if a client has
+       # protocol.version configured to use v2, the client instead falls
+       # back and uses v0.
+
+       test_commit -C http_child three &&
+
+       # Push to another branch, as the target repository has the
+       # master branch checked out and we cannot push into it.
+       GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \
+               push origin HEAD:client_branch &&
+
+       git -C http_child log -1 --format=%s >actual &&
+       git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s client_branch >expect &&
+       test_cmp expect actual &&
+
+       # Client did not request to use protocol v2
+       ! grep "Git-Protocol: version=2" log &&
+       # Server did not respond using protocol v2
+       ! grep "git< version 2" log
+'
+
+
+stop_httpd
+
+test_done
index d911afd24cd7d43fc412adb34144b90b66082b73..872788ac8ca4ecc0a54c14a04e3f72661306f1d3 100755 (executable)
@@ -20,10 +20,7 @@ test_expect_success 'curl redirects respect whitelist' '
        test_must_fail env GIT_ALLOW_PROTOCOL=http:https \
                           GIT_SMART_HTTP=0 \
                git clone "$HTTPD_URL/ftp-redir/repo.git" 2>stderr &&
-       {
-               test_i18ngrep "ftp.*disabled" stderr ||
-               test_i18ngrep "your curl version is too old"
-       }
+       test_i18ngrep -E "(ftp.*disabled|your curl version is too old)" stderr
 '
 
 test_expect_success 'curl limits redirects' '
diff --git a/t/t6015-rev-list-show-all-parents.sh b/t/t6015-rev-list-show-all-parents.sh
deleted file mode 100755 (executable)
index 3c73c93..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-test_description='--show-all --parents does not rewrite TREESAME commits'
-
-. ./test-lib.sh
-
-test_expect_success 'set up --show-all --parents test' '
-       test_commit one foo.txt &&
-       commit1=$(git rev-list -1 HEAD) &&
-       test_commit two bar.txt &&
-       commit2=$(git rev-list -1 HEAD) &&
-       test_commit three foo.txt &&
-       commit3=$(git rev-list -1 HEAD)
-       '
-
-test_expect_success '--parents rewrites TREESAME parents correctly' '
-       echo $commit3 $commit1 > expected &&
-       echo $commit1 >> expected &&
-       git rev-list --parents HEAD -- foo.txt > actual &&
-       test_cmp expected actual
-       '
-
-test_expect_success '--parents --show-all does not rewrites TREESAME parents' '
-       echo $commit3 $commit2 > expected &&
-       echo $commit2 $commit1 >> expected &&
-       echo $commit1 >> expected &&
-       git rev-list --parents --show-all HEAD -- foo.txt > actual &&
-       test_cmp expected actual
-       '
-
-test_done
index 05ebba7afa29977196370d178af728ec1d0d9a81..a1fad6980b936152d599fee8cac82610404a0673 100755 (executable)
@@ -242,10 +242,12 @@ test_expect_success 'merge of identical changes in a renamed file' '
        rm -f A M N &&
        git reset --hard &&
        git checkout change+rename &&
-       GIT_MERGE_VERBOSITY=3 git merge change | test_i18ngrep "^Skipped B" &&
+       GIT_MERGE_VERBOSITY=3 git merge change >out &&
+       test_i18ngrep "^Skipped B" out &&
        git reset --hard HEAD^ &&
        git checkout change &&
-       GIT_MERGE_VERBOSITY=3 git merge change+rename | test_i18ngrep "^Skipped B"
+       GIT_MERGE_VERBOSITY=3 git merge change+rename >out &&
+       test_i18ngrep "^Skipped B" out
 '
 
 test_expect_success 'setup for rename + d/f conflicts' '
@@ -633,10 +635,10 @@ test_expect_success 'setup avoid unnecessary update, normal rename' '
 
 test_expect_success 'avoid unnecessary update, normal rename' '
        git checkout -q avoid-unnecessary-update-1^0 &&
-       test-chmtime =1000000000 rename &&
-       test-chmtime -v +0 rename >expect &&
+       test-tool chmtime =1000000000 rename &&
+       test-tool chmtime -v +0 rename >expect &&
        git merge merge-branch-1 &&
-       test-chmtime -v +0 rename >actual &&
+       test-tool chmtime -v +0 rename >actual &&
        test_cmp expect actual # "rename" should have stayed intact
 '
 
@@ -666,10 +668,10 @@ test_expect_success 'setup to test avoiding unnecessary update, with D/F conflic
 
 test_expect_success 'avoid unnecessary update, with D/F conflict' '
        git checkout -q avoid-unnecessary-update-2^0 &&
-       test-chmtime =1000000000 df &&
-       test-chmtime -v +0 df >expect &&
+       test-tool chmtime =1000000000 df &&
+       test-tool chmtime -v +0 df >expect &&
        git merge merge-branch-2 &&
-       test-chmtime -v +0 df >actual &&
+       test-tool chmtime -v +0 df >actual &&
        test_cmp expect actual # "df" should have stayed intact
 '
 
@@ -698,10 +700,10 @@ test_expect_success 'setup avoid unnecessary update, dir->(file,nothing)' '
 
 test_expect_success 'avoid unnecessary update, dir->(file,nothing)' '
        git checkout -q master^0 &&
-       test-chmtime =1000000000 df &&
-       test-chmtime -v +0 df >expect &&
+       test-tool chmtime =1000000000 df &&
+       test-tool chmtime -v +0 df >expect &&
        git merge side &&
-       test-chmtime -v +0 df >actual &&
+       test-tool chmtime -v +0 df >actual &&
        test_cmp expect actual # "df" should have stayed intact
 '
 
@@ -728,10 +730,10 @@ test_expect_success 'setup avoid unnecessary update, modify/delete' '
 
 test_expect_success 'avoid unnecessary update, modify/delete' '
        git checkout -q master^0 &&
-       test-chmtime =1000000000 file &&
-       test-chmtime -v +0 file >expect &&
+       test-tool chmtime =1000000000 file &&
+       test-tool chmtime -v +0 file >expect &&
        test_must_fail git merge side &&
-       test-chmtime -v +0 file >actual &&
+       test-tool chmtime -v +0 file >actual &&
        test_cmp expect actual # "file" should have stayed intact
 '
 
@@ -757,10 +759,10 @@ test_expect_success 'setup avoid unnecessary update, rename/add-dest' '
 
 test_expect_success 'avoid unnecessary update, rename/add-dest' '
        git checkout -q master^0 &&
-       test-chmtime =1000000000 newfile &&
-       test-chmtime -v +0 newfile >expect &&
+       test-tool chmtime =1000000000 newfile &&
+       test-tool chmtime -v +0 newfile >expect &&
        git merge side &&
-       test-chmtime -v +0 newfile >actual &&
+       test-tool chmtime -v +0 newfile >actual &&
        test_cmp expect actual # "file" should have stayed intact
 '
 
index 8f17fd9da8ef6b54ae0a97500f2154b1c9d968b3..716283b274677dc6c954044c2f11b902ace79527 100755 (executable)
@@ -146,6 +146,48 @@ test_expect_success 'status -s -b (diverged from upstream)' '
        test_i18ncmp expect actual
 '
 
+cat >expect <<\EOF
+## b1...origin/master [different]
+EOF
+
+test_expect_success 'status -s -b --no-ahead-behind (diverged from upstream)' '
+       (
+               cd test &&
+               git checkout b1 >/dev/null &&
+               git status -s -b --no-ahead-behind | head -1
+       ) >actual &&
+       test_i18ncmp expect actual
+'
+
+cat >expect <<\EOF
+On branch b1
+Your branch and 'origin/master' have diverged,
+and have 1 and 1 different commits each, respectively.
+EOF
+
+test_expect_success 'status --long --branch' '
+       (
+               cd test &&
+               git checkout b1 >/dev/null &&
+               git status --long -b | head -3
+       ) >actual &&
+       test_i18ncmp expect actual
+'
+
+cat >expect <<\EOF
+On branch b1
+Your branch and 'origin/master' refer to different commits.
+EOF
+
+test_expect_success 'status --long --branch --no-ahead-behind' '
+       (
+               cd test &&
+               git checkout b1 >/dev/null &&
+               git status --long -b --no-ahead-behind | head -2
+       ) >actual &&
+       test_i18ncmp expect actual
+'
+
 cat >expect <<\EOF
 ## b5...brokenbase [gone]
 EOF
diff --git a/t/t6043-merge-rename-directories.sh b/t/t6043-merge-rename-directories.sh
new file mode 100755 (executable)
index 0000000..2e28f29
--- /dev/null
@@ -0,0 +1,3998 @@
+#!/bin/sh
+
+test_description="recursive merge with directory renames"
+# includes checking of many corner cases, with a similar methodology to:
+#   t6042: corner cases with renames but not criss-cross merges
+#   t6036: corner cases with both renames and criss-cross merges
+#
+# The setup for all of them, pictorially, is:
+#
+#      A
+#      o
+#     / \
+#  O o   ?
+#     \ /
+#      o
+#      B
+#
+# To help make it easier to follow the flow of tests, they have been
+# divided into sections and each test will start with a quick explanation
+# of what commits O, A, and B contain.
+#
+# Notation:
+#    z/{b,c}   means  files z/b and z/c both exist
+#    x/d_1     means  file x/d exists with content d1.  (Purpose of the
+#                     underscore notation is to differentiate different
+#                     files that might be renamed into each other's paths.)
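+#
+#    As a worked example of this notation (illustrative only):
+#       Commit A: y/{b,c}, x/d_1
+#    would mean commit A contains exactly the files y/b, y/c, and x/d,
+#    where x/d has the content "d1".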
+
+. ./test-lib.sh
+
+
+###########################################################################
+# SECTION 1: Basic cases we should be able to handle
+###########################################################################
+
+# Testcase 1a, Basic directory rename.
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d,e/f}
+#   Expected: y/{b,c,d,e/f}
+
+test_expect_success '1a-setup: Simple directory rename detection' '
+       test_create_repo 1a &&
+       (
+               cd 1a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               mkdir z/e &&
+               echo f >z/e/f &&
+               git add z/d z/e/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1a-check: Simple directory rename detection' '
+       (
+               cd 1a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e/f &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d    B:z/e/f &&
+               test_cmp expect actual &&
+
+               git hash-object y/d >actual &&
+               git rev-parse B:z/d >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_must_fail git rev-parse HEAD:z/e/f &&
+               test_path_is_missing z/d &&
+               test_path_is_missing z/e/f
+       )
+'
+
+# Testcase 1b, Merge a directory with another
+#   Commit O: z/{b,c},   y/d
+#   Commit A: z/{b,c,e}, y/d
+#   Commit B: y/{b,c,d}
+#   Expected: y/{b,c,d,e}
+
+test_expect_success '1b-setup: Merge a directory with another' '
+       test_create_repo 1b &&
+       (
+               cd 1b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/b y &&
+               git mv z/c y &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1b-check: Merge a directory with another' '
+       (
+               cd 1b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:y/d    A:z/e &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:z/e
+       )
+'
+
+# Testcase 1c, Transitive renaming
+#   (Related to testcases 3a and 6d -- when should a transitive rename apply?)
+#   (Related to testcases 9c and 9d -- can transitivity repeat?)
+#   (Related to testcase 12b -- joint-transitivity?)
+#   Commit O: z/{b,c},   x/d
+#   Commit A: y/{b,c},   x/d
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c,d}  (because x/d -> z/d -> y/d)
+
+test_expect_success '1c-setup: Transitive renaming' '
+       test_create_repo 1c &&
+       (
+               cd 1c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1c-check: Transitive renaming' '
+       (
+               cd 1c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:x/d &&
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_path_is_missing z/d
+       )
+'
+
+# Testcase 1d, Directory renames (merging two directories into one new one)
+#              cause a rename/rename(2to1) conflict
+#   (Related to testcases 1c and 7b)
+#   Commit O: z/{b,c},        y/{d,e}
+#   Commit A: x/{b,c},        y/{d,e,m,wham_1}
+#   Commit B: z/{b,c,n,wham_2}, x/{d,e}
+#   Expected: x/{b,c,d,e,m,n}, CONFLICT:(y/wham_1 & z/wham_2 -> x/wham)
+#   Note: y/m & z/n should definitely move into x.  By the same token, both
+#         y/wham_1 & z/wham_2 should too...giving us a conflict.
+
+test_expect_success '1d-setup: Directory renames cause a rename/rename(2to1) conflict' '
+       test_create_repo 1d &&
+       (
+               cd 1d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               echo e >y/e &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z x &&
+               echo m >y/m &&
+               echo wham1 >y/wham &&
+               git add y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y x &&
+               echo n >z/n &&
+               echo wham2 >z/wham &&
+               git add z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1d-check: Directory renames cause a rename/rename(2to1) conflict' '
+       (
+               cd 1d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 8 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:x/c :0:x/d :0:x/e :0:x/m :0:x/n &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:y/d  O:y/e  A:y/m  B:z/n &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :0:x/wham &&
+               git rev-parse >actual \
+                       :2:x/wham :3:x/wham &&
+               git rev-parse >expect \
+                        A:y/wham  B:z/wham &&
+               test_cmp expect actual &&
+
+               test_path_is_missing x/wham &&
+               test_path_is_file x/wham~HEAD &&
+               test_path_is_file x/wham~B^0 &&
+
+               git hash-object >actual \
+                       x/wham~HEAD x/wham~B^0 &&
+               git rev-parse >expect \
+                       A:y/wham    B:z/wham &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 1e, Renamed directory, with all filenames being renamed too
+#   (Related to testcases 9f & 9g)
+#   Commit O: z/{oldb,oldc}
+#   Commit A: y/{newb,newc}
+#   Commit B: z/{oldb,oldc,d}
+#   Expected: y/{newb,newc,d}
+
+test_expect_success '1e-setup: Renamed directory, with all files being renamed too' '
+       test_create_repo 1e &&
+       (
+               cd 1e &&
+
+               mkdir z &&
+               echo b >z/oldb &&
+               echo c >z/oldc &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               git mv z/oldb y/newb &&
+               git mv z/oldc y/newc &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1e-check: Renamed directory, with all files being renamed too' '
+       (
+               cd 1e &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/newb HEAD:y/newc HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/oldb    O:z/oldc    B:z/d &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:z/d
+       )
+'
+
+# Testcase 1f, Split a directory into two other directories
+#   (Related to testcases 3a, all of section 2, and all of section 4)
+#   Commit O: z/{b,c,d,e,f}
+#   Commit A: z/{b,c,d,e,f,g}
+#   Commit B: y/{b,c}, x/{d,e,f}
+#   Expected: y/{b,c}, x/{d,e,f,g}
+
+test_expect_success '1f-setup: Split a directory into two other directories' '
+       test_create_repo 1f &&
+       (
+               cd 1f &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               echo e >z/e &&
+               echo f >z/f &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo g >z/g &&
+               git add z/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               git mv z/e x/ &&
+               git mv z/f x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1f-check: Split a directory into two other directories' '
+       (
+               cd 1f &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d HEAD:x/e HEAD:x/f HEAD:x/g &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d    O:z/e    O:z/f    A:z/g &&
+               test_cmp expect actual &&
+               test_path_is_missing z/g &&
+               test_must_fail git rev-parse HEAD:z/g
+       )
+'
+
+###########################################################################
+# Rules suggested by testcases in section 1:
+#
+#   We should still detect the directory rename even if it wasn't just
+#   the directory renamed, but the files within it. (see 1b)
+#
+#   If renames split a directory into two or more others, the directory
+#   with the most renames "wins" (see 1c).  However, see the testcases
+#   in section 2, plus testcases 3a and 4a.
+###########################################################################
+
+
+###########################################################################
+# SECTION 2: Split into multiple directories, with equal number of paths
+#
+# Explore the splitting-a-directory rules a bit; what happens in the
+# edge cases?
+#
+# Note that there is a closely related case of a directory not being
+# split on either side of history, but being renamed differently on
+# each side.  See testcase 8e for that.
+###########################################################################
+
+# Testcase 2a, Directory split into two on one side, with equal numbers of paths
+#   Commit O: z/{b,c}
+#   Commit A: y/b, w/c
+#   Commit B: z/{b,c,d}
+#   Expected: y/b, w/c, z/d, with warning about z/ -> (y/ vs. w/) conflict
+test_expect_success '2a-setup: Directory split into two on one side, with equal numbers of paths' '
+       test_create_repo 2a &&
+       (
+               cd 2a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir w &&
+               git mv z/b y/ &&
+               git mv z/c w/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2a-check: Directory split into two on one side, with equal numbers of paths' '
+       (
+               cd 2a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT.*directory rename split" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:w/c :0:z/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 2b, Directory split into two on one side, with equal numbers of paths
+#   Commit O: z/{b,c}
+#   Commit A: y/b, w/c
+#   Commit B: z/{b,c}, x/d
+#   Expected: y/b, w/c, x/d; No warning about z/ -> (y/ vs. w/) conflict
+test_expect_success '2b-setup: Directory split into two on one side, with equal numbers of paths' '
+       test_create_repo 2b &&
+       (
+               cd 2b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir w &&
+               git mv z/b y/ &&
+               git mv z/c w/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir x &&
+               echo d >x/d &&
+               git add x/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2b-check: Directory split into two on one side, with equal numbers of paths' '
+       (
+               cd 2b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:w/c :0:x/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:x/d &&
+               test_cmp expect actual &&
+               test_i18ngrep ! "CONFLICT.*directory rename split" out
+       )
+'
+
+###########################################################################
+# Rules suggested by section 2:
+#
+#   None; the rule was already covered in section 1.  These testcases are
+#   here just to make sure the conflict resolution and necessary warning
+#   messages are handled correctly.
+###########################################################################
+
+
+###########################################################################
+# SECTION 3: Path in question is the source path for some rename already
+#
+# Combining cases from Section 1 and trying to handle them could lead to
+# directory renaming detection being over-applied.  So, this section
+# provides some good testcases to check that the implementation doesn't go
+# too far.
+###########################################################################
+
+# Testcase 3a, Avoid implicit rename if involved as source on other side
+#   (Related to testcases 1c, 1f, and 9h)
+#   Commit O: z/{b,c,d}
+#   Commit A: z/{b,c,d} (no change)
+#   Commit B: y/{b,c}, x/d
+#   Expected: y/{b,c}, x/d
+test_expect_success '3a-setup: Avoid implicit rename if involved as source on other side' '
+       test_create_repo 3a &&
+       (
+               cd 3a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3a-check: Avoid implicit rename if involved as source on other side' '
+       (
+               cd 3a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 3b, Avoid implicit rename if involved as source on other side
+#   (Related to testcases 5c and 7c, also kind of 1e and 1f)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}, x/d
+#   Commit B: z/{b,c}, w/d
+#   Expected: y/{b,c}, CONFLICT:(z/d -> x/d vs. w/d)
+#   NOTE: We're particularly checking that since z/d is already involved as
+#         a source in a file rename on the same side of history, we don't
+#         get it involved in directory rename detection.  If it were, we might
+#         end up with CONFLICT:(z/d -> y/d vs. x/d vs. w/d), i.e. a
+#         rename/rename/rename(1to3) conflict, which is just weird.
+test_expect_success '3b-setup: Avoid implicit rename if involved as source on current side' '
+       test_create_repo 3b &&
+       (
+               cd 3b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir w &&
+               git mv z/d w/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3b-check: Avoid implicit rename if involved as source on current side' '
+       (
+               cd 3b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/d.*x/d.*w/d out &&
+               test_i18ngrep ! CONFLICT.*rename/rename.*y/d out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :1:z/d :2:x/d :3:w/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:z/d  O:z/d  O:z/d &&
+               test_cmp expect actual &&
+
+               test_path_is_missing z/d &&
+               git hash-object >actual \
+                       x/d   w/d &&
+               git rev-parse >expect \
+                       O:z/d O:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 3:
+#
+#   Avoid directory-rename-detection for a path, if that path is the source
+#   of a rename on either side of a merge.
+###########################################################################
+
+
+###########################################################################
+# SECTION 4: Partially renamed directory; still exists on both sides of merge
+#
+# What if we were to attempt to do directory rename detection when someone
+# "mostly" moved a directory but still left some files around, or,
+# equivalently, fully renamed a directory in one commit and then recreated
+# that directory in a later commit adding some new files and then tried to
+# merge?
+#
+# It's hard to divine user intent in these cases, because you can make an
+# argument that, depending on the intermediate history of the side being
+# merged, some users will want files in that directory to
+# automatically be detected and renamed, while users with a different
+# intermediate history wouldn't want that rename to happen.
+#
+# I think that it is best to simply not have directory rename detection
+# apply to such cases.  My reasoning for this is four-fold: (1) it's
+# easiest for users in general to figure out what happened if we don't
+# apply directory rename detection in any such case, (2) it's an easy rule
+# to explain ["We don't do directory rename detection if the directory
+# still exists on both sides of the merge"], (3) we can get some hairy
+# edge/corner cases that would be really confusing and possibly not even
+# representable in the index if we were to even try, and [related to 3] (4)
+# attempting to resolve this issue of divining user intent by examining
+# intermediate history goes against the spirit of three-way merges and is a
+# path towards crazy corner cases that are far more complex than what we're
+# already dealing with.
+#
+# Note that the wording of the rule ("We don't do directory rename
+# detection if the directory still exists on both sides of the merge.")
+# also excludes "renaming" of a directory into a subdirectory of itself
+# (e.g. /some/dir/* -> /some/dir/subdir/*).  It may be possible to carve
+# out an exception for "renaming"-beneath-itself cases without opening
+# weird edge/corner cases for other partial directory renames, but for now
+# we are keeping the rule simple.
+#
+# This section contains a test for a partially-renamed-directory case.
+###########################################################################
+
+# Testcase 4a, Directory split, with original directory still present
+#   (Related to testcase 1f)
+#   Commit O: z/{b,c,d,e}
+#   Commit A: y/{b,c,d}, z/e
+#   Commit B: z/{b,c,d,e,f}
+#   Expected: y/{b,c,d}, z/{e,f}
+#   NOTE: Even though most files from z moved to y, we don't want f to follow.
+
+test_expect_success '4a-setup: Directory split, with original directory still present' '
+       test_create_repo 4a &&
+       (
+               cd 4a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               echo e >z/e &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo f >z/f &&
+               git add z/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '4a-check: Directory split, with original directory still present' '
+       (
+               cd 4a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/e HEAD:z/f &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d    O:z/e    B:z/f &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 4:
+#
+#   Directory-rename-detection should be turned off for any directories (as
+#   a source for renames) that exist on both sides of the merge.  (The "as
+#   a source for renames" clarification is due to cases like 1c where
+#   the target directory exists on both sides and we do want the rename
+#   detection.)  But, sadly, see testcase 8b.
+###########################################################################
+
+
+###########################################################################
+# SECTION 5: Files/directories in the way of subset of to-be-renamed paths
+#
+# Implicitly renaming files due to a detected directory rename could run
+# into problems if there are files or directories in the way of the paths
+# we want to rename.  Explore such cases in this section.
+###########################################################################
+
+# Testcase 5a, Merge directories, other side adds files to original and target
+#   Commit O: z/{b,c},       y/d
+#   Commit A: z/{b,c,e_1,f}, y/{d,e_2}
+#   Commit B: y/{b,c,d}
+#   Expected: z/e_1, y/{b,c,d,e_2,f} + CONFLICT warning
+#   NOTE: While directory rename detection is active here, causing z/f to
+#         become y/f, we did not apply this for z/e_1 because that would
+#         give us an add/add conflict for y/e_1 vs y/e_2.  The problem with
+#         this add/add is that both versions of y/e are from the same side
+#         of history, giving us no way to represent this conflict in the
+#         index.
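+#
+#         [Illustrative aside, not part of the test: the index can record at
+#          most three conflict stages per path -- stage 1 for the merge base,
+#          stage 2 for "ours" (HEAD), stage 3 for "theirs" -- which is why the
+#          checks in this file inspect entries with "git rev-parse
+#          :<stage>:<path>", e.g. a hypothetical
+#              git rev-parse :2:y/e
+#          would show "our" version of a conflicted y/e.  Since y/e_1 and
+#          y/e_2 both come from the same ("our") side, there is no stage left
+#          over to hold the second version.]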
+
+test_expect_success '5a-setup: Merge directories, other side adds files to original and target' '
+       test_create_repo 5a &&
+       (
+               cd 5a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e1 >z/e &&
+               echo f >z/f &&
+               echo e2 >y/e &&
+               git add z/e z/f y/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5a-check: Merge directories, other side adds files to original and target' '
+       (
+               cd 5a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT.*implicit dir rename" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/d :0:y/e :0:z/e :0:y/f &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:y/d  A:y/e  A:z/e  A:z/f &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 5b, Rename/delete in order to get add/add/add conflict
+#   (Related to testcase 8d; these may appear slightly inconsistent to users;
+#    Also related to testcases 7d and 7e)
+#   Commit O: z/{b,c,d_1}
+#   Commit A: y/{b,c,d_2}
+#   Commit B: z/{b,c,d_1,e}, y/d_3
+#   Expected: y/{b,c,e}, CONFLICT(add/add: y/d_2 vs. y/d_3)
+#   NOTE: If z/d_1 in commit B were to be involved in dir rename detection, as
+#         we normally would since z/ is being renamed to y/, then this would be
+#         a rename/delete (z/d_1 -> y/d_1 vs. deleted) AND an add/add/add
+#         conflict of y/d_1 vs. y/d_2 vs. y/d_3.  Add/add/add is not
+#         representable in the index, so the existence of y/d_3 needs to
+#         cause us to bail on directory rename detection for that path, falling
+#         back to git behavior without the directory rename detection.
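+#
+#         [Illustrative aside: falling back for that path means z/d_1 is seen
+#          as deleted by A and unmodified by B, so it simply goes away,
+#          leaving only the y/d_2 vs. y/d_3 add/add -- which is what the
+#          check below verifies via the stage 2 and stage 3 entries for y/d.]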
+
+test_expect_success '5b-setup: Rename/delete in order to get add/add/add conflict' '
+       test_create_repo 5b &&
+       (
+               cd 5b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d1 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               echo d2 >y/d &&
+               git add y/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               echo d3 >y/d &&
+               echo e >z/e &&
+               git add y/d z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5b-check: Rename/delete in order to get add/add/add conflict' '
+       (
+               cd 5b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (add/add).* y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e :2:y/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e  A:y/d  B:y/d &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/d &&
+               test_path_is_file y/d
+       )
+'
+
+# Testcase 5c, Transitive rename would cause rename/rename/rename/add/add/add
+#   (Directory rename detection would result in transitive rename vs.
+#    rename/rename(1to2) and turn it into a rename/rename(1to3).  Further,
+#    rename paths conflict with separate adds on the other side)
+#   (Related to testcases 3b and 7c)
+#   Commit O: z/{b,c}, x/d_1
+#   Commit A: y/{b,c,d_2}, w/d_1
+#   Commit B: z/{b,c,d_1,e}, w/d_3, y/d_4
+#   Expected: A mess, but only a rename/rename(1to2)/add/add mess.  Use the
+#             presence of y/d_4 in B to avoid doing transitive rename of
+#             x/d_1 -> z/d_1 -> y/d_1, so that the only paths we have at
+#             y/d are y/d_2 and y/d_4.  We still do the move from z/e to y/e,
+#             though, because it doesn't have anything in the way.
+
+test_expect_success '5c-setup: Transitive rename would cause rename/rename/rename/add/add/add' '
+       test_create_repo 5c &&
+       (
+               cd 5c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d1 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               echo d2 >y/d &&
+               git add y/d &&
+               git mv x w &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               mkdir w &&
+               mkdir y &&
+               echo d3 >w/d &&
+               echo d4 >y/d &&
+               echo e >z/e &&
+               git add w/ y/ z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5c-check: Transitive rename would cause rename/rename/rename/add/add/add' '
+       (
+               cd 5c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*z/d" out &&
+               test_i18ngrep "CONFLICT (add/add).* y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 9 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/d &&
+               git rev-parse >actual \
+                       :2:w/d :3:w/d :1:x/d :2:y/d :3:y/d :3:z/d &&
+               git rev-parse >expect \
+                        O:x/d  B:w/d  O:x/d  A:y/d  B:y/d  O:x/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       w/d~HEAD w/d~B^0 z/d &&
+               git rev-parse >expect \
+                       O:x/d    B:w/d   O:x/d &&
+               test_cmp expect actual &&
+               test_path_is_missing x/d &&
+               test_path_is_file y/d &&
+               grep -q "<<<<" y/d  # conflict markers should be present
+       )
+'
+
+# Testcase 5d, Directory/file/file conflict due to directory rename
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c,d_1}
+#   Commit B: z/{b,c,d_2,f}, y/d/e
+#   Expected: y/{b,c,d/e,f}, z/d_2, CONFLICT(file/directory), y/d_1~HEAD
+#   Note: The fact that y/d/ exists in B makes us bail on directory rename
+#         detection for z/d_2, but that doesn't prevent us from applying the
+#         directory rename detection for z/f -> y/f.
+
+test_expect_success '5d-setup: Directory/file/file conflict due to directory rename' '
+       test_create_repo 5d &&
+       (
+               cd 5d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               echo d1 >y/d &&
+               git add y/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir -p y/d &&
+               echo e >y/d/e &&
+               echo d2 >z/d &&
+               echo f >z/f &&
+               git add y/d/e z/d z/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5d-check: Directory/file/file conflict due to directory rename' '
+       (
+               cd 5d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (file/directory).*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:z/d :0:y/f :2:y/d :0:y/d/e &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/d  B:z/f  A:y/d  B:y/d/e &&
+               test_cmp expect actual &&
+
+               git hash-object y/d~HEAD >actual &&
+               git rev-parse A:y/d >expect &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 5:
+#
+#   If a subset of to-be-renamed files have a file or directory in the way,
+#   "turn off" the directory rename for those specific sub-paths, falling
+#   back to old handling.  But, sadly, see testcases 8a and 8b.
+###########################################################################
+
+
+###########################################################################
+# SECTION 6: Same side of the merge was the one that did the rename
+#
+# It may sound obvious that you only want to apply implicit directory
+# renames to directories if the _other_ side of history did the renaming.
+# If you did make an implementation that didn't explicitly enforce this
+# rule, the majority of cases that would fall under this section would
+# also be solved by following the rules from the above sections.  But
+# there are still a few that stick out, so this section covers them just
+# to make sure we also get them right.
+###########################################################################
+
+# Testcase 6a, Tricky rename/delete
+#   Commit O: z/{b,c,d}
+#   Commit A: z/b
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/b, CONFLICT(rename/delete, z/c -> y/c vs. NULL)
+#   Note: We're just checking here that the rename of z/b and z/c to put
+#         them under y/ doesn't accidentally catch z/d and make it look like
+#         it is also involved in a rename/delete conflict.
+
+test_expect_success '6a-setup: Tricky rename/delete' '
+       test_create_repo 6a &&
+       (
+               cd 6a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               git rm z/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6a-check: Tricky rename/delete' '
+       (
+               cd 6a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*z/c.*y/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 2 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :3:y/c &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6b, Same rename done on both sides
+#   (Related to testcases 6c and 8e)
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   Note: If we did directory rename detection here, we'd move z/d into y/,
+#         but B did that rename and still decided to put the file into z/,
+#         so we probably shouldn't apply directory rename detection for it.
+
+test_expect_success '6b-setup: Same rename done on both sides' '
+       test_create_repo 6b &&
+       (
+               cd 6b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6b-check: Same rename done on both sides' '
+       (
+               cd 6b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6c, Rename only done on same side
+#   (Related to testcases 6b and 8e)
+#   Commit O: z/{b,c}
+#   Commit A: z/{b,c} (no change)
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   NOTE: Seems obvious, but just checking that the implementation doesn't
+#         "accidentally detect a rename" and give us y/{b,c,d}.
+
+test_expect_success '6c-setup: Rename only done on same side' '
+       test_create_repo 6c &&
+       (
+               cd 6c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6c-check: Rename only done on same side' '
+       (
+               cd 6c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6d, We don't always want transitive renaming
+#   (Related to testcase 1c)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: z/{b,c}, x/d (no change)
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   NOTE: Again, this seems obvious but just checking that the implementation
+#         doesn't "accidentally detect a rename" and give us y/{b,c,d}.
+
+test_expect_success '6d-setup: We do not always want transitive renaming' '
+       test_create_repo 6d &&
+       (
+               cd 6d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               git mv x z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6d-check: We do not always want transitive renaming' '
+       (
+               cd 6d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6e, Add/add from one-side
+#   Commit O: z/{b,c}
+#   Commit A: z/{b,c} (no change)
+#   Commit B: y/{b,c,d_1}, z/d_2
+#   Expected: y/{b,c,d_1}, z/d_2
+#   NOTE: Again, this seems obvious but just checking that the implementation
+#         doesn't "accidentally detect a rename" and give us y/{b,c} +
+#         add/add conflict on y/d_1 vs y/d_2.
+
+test_expect_success '6e-setup: Add/add from one side' '
+       test_create_repo 6e &&
+       (
+               cd 6e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               echo d1 > y/d &&
+               mkdir z &&
+               echo d2 > z/d &&
+               git add y/d z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6e-check: Add/add from one side' '
+       (
+               cd 6e &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:y/d    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 6:
+#
+#   Only apply implicit directory renames to directories if the other
+#   side of history is the one doing the renaming.
+###########################################################################
+
+
+###########################################################################
+# SECTION 7: More involved Edge/Corner cases
+#
+# The ruleset we have generated in the above sections seems to provide
+# well-defined merges.  But can we find edge/corner cases that either (a)
+# are harder for users to understand, or (b) have a resolution that is
+# non-intuitive or suboptimal?
+#
+# The testcases in this section dive into cases that I've tried to craft in
+# a way to find some that might be surprising to users or difficult for
+# them to understand (the next section will look at non-intuitive or
+# suboptimal merge results).  Some of the testcases are similar to ones
+# from past sections, but have been simplified to try to highlight error
+# messages using a "modified" path (due to the directory rename).  Are
+# users okay with these?
+#
+# In my opinion, any difficulty in understanding the testcases from this
+# section is due to the testcases themselves rather than the directory
+# renaming (similar to how t6042 and t6036 have difficult resolutions due
+# to the problem setup itself being complex).  And I don't think the
+# error messages are a problem.
+#
+# On the other hand, the testcases in section 8 worry me slightly more...
+###########################################################################
+
+# Testcase 7a, rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: w/b, x/c, z/d
+#   Expected: y/d, CONFLICT(rename/rename for both z/b and z/c)
+#   NOTE: z/ is renamed on both sides, but y/ received the most of its
+#         paths, so the new file z/d -> y/d.
+
+test_expect_success '7a-setup: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
+       test_create_repo 7a &&
+       (
+               cd 7a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir w &&
+               mkdir x &&
+               git mv z/b w/ &&
+               git mv z/c x/ &&
+               echo d > z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7a-check: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
+       (
+               cd 7a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*z/b.*y/b.*w/b" out &&
+               test_i18ngrep "CONFLICT (rename/rename).*z/c.*y/c.*x/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:x/c :0:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/b   w/b   y/c   x/c &&
+               git rev-parse >expect \
+                       O:z/b O:z/b O:z/c O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7b, rename/rename(2to1), but only due to transitive rename
+#   (Related to testcase 1d)
+#   Commit O: z/{b,c},     x/d_1, w/d_2
+#   Commit A: y/{b,c,d_2}, x/d_1
+#   Commit B: z/{b,c,d_1},        w/d_2
+#   Expected: y/{b,c}, CONFLICT(rename/rename(2to1): x/d_1, w/d_2 -> y/d)
+
+test_expect_success '7b-setup: rename/rename(2to1), but only due to transitive rename' '
+       test_create_repo 7b &&
+       (
+               cd 7b &&
+
+               mkdir z &&
+               mkdir x &&
+               mkdir w &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d1 > x/d &&
+               echo d2 > w/d &&
+               git add z x w &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv w/d y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7b-check: rename/rename(2to1), but only due to transitive rename' '
+       (
+               cd 7b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :2:y/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:w/d  O:x/d &&
+               test_cmp expect actual &&
+
+               test_path_is_missing y/d &&
+               test_path_is_file y/d~HEAD &&
+               test_path_is_file y/d~B^0 &&
+
+               git hash-object >actual \
+                       y/d~HEAD y/d~B^0 &&
+               git rev-parse >expect \
+                       O:w/d    O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7c, rename/rename(1to...2or3); transitive rename may add complexity
+#   (Related to testcases 3b and 5c)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: y/{b,c}, w/d
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c}, CONFLICT(x/d -> w/d vs. y/d)
+#   NOTE: z/ was renamed to y/, so we want to report
+#         neither CONFLICT(x/d -> w/d vs. z/d)
+#         nor CONFLICT(x/d -> w/d vs. y/d vs. z/d)
+
+test_expect_success '7c-setup: rename/rename(1to...2or3); transitive rename may add complexity' '
+       test_create_repo 7c &&
+       (
+               cd 7c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv x w &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7c-check: rename/rename(1to...2or3); transitive rename may add complexity' '
+       (
+               cd 7c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :1:x/d :2:w/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:x/d  O:x/d  O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7d, transitive rename involved in rename/delete; how is it reported?
+#   (Related somewhat to testcases 5b and 8d)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c}, CONFLICT(delete x/d vs rename to y/d)
+#   NOTE: z->y so NOT CONFLICT(delete x/d vs rename to z/d)
+
+test_expect_success '7d-setup: transitive rename involved in rename/delete; how is it reported?' '
+       test_create_repo 7d &&
+       (
+               cd 7d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git rm -rf x &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7d-check: transitive rename involved in rename/delete; how is it reported?' '
+       (
+               cd 7d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7e, transitive rename in rename/delete AND dirs in the way
+#   (Very similar to 'both rename source and destination involved in D/F conflict' from t6022-merge-rename.sh)
+#   (Also related to testcases 9c and 9d)
+#   Commit O: z/{b,c},     x/d_1
+#   Commit A: y/{b,c,d/g}, x/d/f
+#   Commit B: z/{b,c,d_1}
+#   Expected: rename/delete(x/d_1->y/d_1 vs. None) + D/F conflict on y/d
+#             y/{b,c,d/g}, y/d_1~B^0, x/d/f
+
+#   NOTE: The main path of interest here is d_1 and where it ends up, but
+#         this is actually a case that has two potential directory renames
+#         involved and D/F conflict(s), so it makes sense to walk through
+#         each step.
+#
+#         Commit A renames z/ -> y/.  Thus everything that B adds to z/
+#         should be instead moved to y/.  This gives us the D/F conflict on
+#         y/d because x/d_1 -> z/d_1 -> y/d_1 conflicts with y/d/g.
+#
+#         Further, commit B renames x/ -> z/, thus everything A adds to x/
+#         should instead be moved to z/...BUT we removed z/ and renamed it
+#         to y/, so maybe everything should move not from x/ to z/, but
+#         from x/ to z/ to y/.  Doing so might make sense from the logic so
+#         far, but note that commit A had both an x/ and a y/; it did the
+#         renaming of z/ to y/ and created x/d/f and it clearly made these
+#         things separate, so it doesn't make much sense to push these
+#         together.  Doing so is what I'd call a doubly transitive rename;
+#         see testcases 9c and 9d for further discussion of this issue and
+#         how it's resolved.
+
+test_expect_success '7e-setup: transitive rename in rename/delete AND dirs in the way' '
+       test_create_repo 7e &&
+       (
+               cd 7e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d1 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git rm x/d &&
+               mkdir -p x/d &&
+               mkdir -p y/d &&
+               echo f >x/d/f &&
+               echo g >y/d/g &&
+               git add x/d/f y/d/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7e-check: transitive rename in rename/delete AND dirs in the way' '
+       (
+               cd 7e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:x/d/f :0:y/d/g :0:y/b :0:y/c :3:y/d &&
+               git rev-parse >expect \
+                        A:x/d/f  A:y/d/g  O:z/b  O:z/c  O:x/d &&
+               test_cmp expect actual &&
+
+               git hash-object y/d~B^0 >actual &&
+               git rev-parse O:x/d >expect &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# SECTION 8: Suboptimal merges
+#
+# As alluded to in the last section, the ruleset we have built up for
+# detecting directory renames unfortunately has some special cases where it
+# results in slightly suboptimal or non-intuitive behavior.  This section
+# explores these cases.
+#
+# To be fair, we already had non-intuitive or suboptimal behavior for most
+# of these cases in git before introducing implicit directory rename
+# detection, but it'd be nice if there was a modified ruleset out there
+# that handled these cases a bit better.
+###########################################################################
+
+# Testcase 8a, Dual-directory rename, one into the others' way
+#   Commit O. x/{a,b},   y/{c,d}
+#   Commit A. x/{a,b,e}, y/{c,d,f}
+#   Commit B. y/{a,b},   z/{c,d}
+#
+# Possible Resolutions:
+#   w/o dir-rename detection: y/{a,b,f},   z/{c,d},   x/e
+#   Currently expected:       y/{a,b,e,f}, z/{c,d}
+#   Optimal:                  y/{a,b,e},   z/{c,d,f}
+#
+# Note: Both x and y got renamed and it'd be nice to detect both, and we do
+# better with directory rename detection than git did without, but the
+# simple rule from section 5 prevents me from handling this as optimally as
+# we potentially could.
+
+test_expect_success '8a-setup: Dual-directory rename, one into the others way' '
+       test_create_repo 8a &&
+       (
+               cd 8a &&
+
+               mkdir x &&
+               mkdir y &&
+               echo a >x/a &&
+               echo b >x/b &&
+               echo c >y/c &&
+               echo d >y/d &&
+               git add x y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e >x/e &&
+               echo f >y/f &&
+               git add x/e y/f &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y z &&
+               git mv x y &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8a-check: Dual-directory rename, one into the others way' '
+       (
+               cd 8a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/a HEAD:y/b HEAD:y/e HEAD:y/f HEAD:z/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:x/a    O:x/b    A:x/e    A:y/f    O:y/c    O:y/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8b, Dual-directory rename, one into the others' way, with conflicting filenames
+#   Commit O. x/{a_1,b_1},     y/{a_2,b_2}
+#   Commit A. x/{a_1,b_1,e_1}, y/{a_2,b_2,e_2}
+#   Commit B. y/{a_1,b_1},     z/{a_2,b_2}
+#
+#   w/o dir-rename detection: y/{a_1,b_1,e_2}, z/{a_2,b_2}, x/e_1
+#   Currently expected:       <same>
+#   Scary:                    y/{a_1,b_1},     z/{a_2,b_2}, CONFLICT(add/add, e_1 vs. e_2)
+#   Optimal:                  y/{a_1,b_1,e_1}, z/{a_2,b_2,e_2}
+#
+# Note: Very similar to 8a, except instead of 'e' and 'f' in directories x and
+# y, both are named 'e'.  Without directory rename detection, neither file
+# moves directories.  Implement directory rename detection suboptimally, and
+# you get an add/add conflict, but both files were added in commit A, so this
+# is an add/add conflict where one side of history added both files --
+# something we can't represent in the index.  Obviously, we'd prefer the last
+# resolution, but our previous rules are too coarse to allow it.  Using both
+# the rules from section 4 and section 5 saves us from the Scary resolution,
+# making us fall back to pre-directory-rename-detection behavior for both
+# e_1 and e_2.
+
+test_expect_success '8b-setup: Dual-directory rename, one into the others way, with conflicting filenames' '
+       test_create_repo 8b &&
+       (
+               cd 8b &&
+
+               mkdir x &&
+               mkdir y &&
+               echo a1 >x/a &&
+               echo b1 >x/b &&
+               echo a2 >y/a &&
+               echo b2 >y/b &&
+               git add x y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e1 >x/e &&
+               echo e2 >y/e &&
+               git add x/e y/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y z &&
+               git mv x y &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8b-check: Dual-directory rename, one into the others way, with conflicting filenames' '
+       (
+               cd 8b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/a HEAD:y/b HEAD:z/a HEAD:z/b HEAD:x/e HEAD:y/e &&
+               git rev-parse >expect \
+                       O:x/a    O:x/b    O:y/a    O:y/b    A:x/e    A:y/e &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8c, modify/delete or rename+modify/delete?
+#   (Related to testcases 5b, 8d, and 9h)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d_modified,e}
+#   Expected: y/{b,c,e}, CONFLICT(modify/delete: on z/d)
+#
+#   Note: It could easily be argued that the correct resolution here is
+#         y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d vs deleted)
+#         and that the modified version of d should be present in y/ after
+#         the merge, just marked as conflicted.  Indeed, I previously did
+#         argue that.  But applying directory renames to the side of
+#         history where a file is merely modified results in spurious
+#         rename/rename(1to2) conflicts -- see testcase 9h.  See also
+#         notes in 8d.
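+#
+#         [Illustrative aside: a modify/delete conflict is recorded with the
+#          base version at stage 1 and the surviving modified version at the
+#          stage of the side that kept it (here stage 3), with no entry for
+#          the deleting side -- which is what the check below verifies with
+#          :1:z/d, :3:z/d, and the absence of :2:z/d.]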
+
+test_expect_success '8c-setup: modify/delete or rename+modify/delete?' '
+       test_create_repo 8c &&
+       (
+               cd 8c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               test_seq 1 10 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 11 >z/d &&
+               test_chmod +x z/d &&
+               echo e >z/e &&
+               git add z/d z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8c-check: modify/delete or rename+modify/delete' '
+       (
+               cd 8c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (modify/delete).* z/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e :1:z/d :3:z/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e  O:z/d  B:z/d &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :2:z/d &&
+               git ls-files -s z/d | grep ^100755 &&
+               test_path_is_file z/d &&
+               test_path_is_missing y/d
+       )
+'
+
+# Testcase 8d, rename/delete...or not?
+#   (Related to testcase 5b; these may appear slightly inconsistent to users;
+#    Also related to testcases 7d and 7e)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d,e}
+#   Expected: y/{b,c,e}
+#
+#   Note: It would also be somewhat reasonable to resolve this as
+#             y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d or deleted)
+#
+#   In this case, I'm leaning towards: commit A was the one that deleted z/d
+#   and it did the rename of z to y, so the two "conflicts" (rename vs.
+#   delete) are both coming from commit A, which is illogical.  Conflicts
+#   during merging are supposed to be about opposite sides doing things
+#   differently.
+
+test_expect_success '8d-setup: rename/delete...or not?' '
+       test_create_repo 8d &&
+       (
+               cd 8d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               test_seq 1 10 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8d-check: rename/delete...or not?' '
+       (
+               cd 8d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/e &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/e &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8e, Both sides rename, one side adds to original directory
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: w/{b,c}, z/d
+#
+# Possible Resolutions:
+#   w/o dir-rename detection: z/d, CONFLICT(z/b -> y/b vs. w/b),
+#                                  CONFLICT(z/c -> y/c vs. w/c)
+#   Currently expected:       y/d, CONFLICT(z/b -> y/b vs. w/b),
+#                                  CONFLICT(z/c -> y/c vs. w/c)
+#   Optimal:                  ??
+#
+# Notes: In commit A, directory z got renamed to y.  In commit B, directory z
+#        did NOT get renamed; the directory is still present; instead, commit
+#        B is merely considered to have renamed a subset of the paths in z/
+#        elsewhere.  Therefore, the directory rename done in commit A to z/
+#        applies to z/d and maps it to y/d.
+#
+#        It's possible that users would get confused about this, but what
+#        should we do instead?  Silently leaving the file at z/d seems just
+#        as bad or maybe even worse.  Perhaps we could print a big warning
+#        about z/d and how we're moving it to y/d in this case, but when I
+#        started thinking about the ramifications of doing that, I couldn't
+#        rule out that it would open up other weird edge and corner cases, so
+#        I just punted.
+
+test_expect_success '8e-setup: Both sides rename, one side adds to original directory' '
+       test_create_repo 8e &&
+       (
+               cd 8e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z w &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8e-check: Both sides rename, one side adds to original directory' '
+       (
+               cd 8e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/c.*y/c.*w/c out &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/b.*y/b.*w/b out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:w/c :0:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/b   w/b   y/c   w/c &&
+               git rev-parse >expect \
+                       O:z/b O:z/b O:z/c O:z/c &&
+               test_cmp expect actual &&
+
+               test_path_is_missing z/b &&
+               test_path_is_missing z/c
+       )
+'
+
+###########################################################################
+# SECTION 9: Other testcases
+#
+# This section consists of miscellaneous testcases I thought of during
+# the implementation which round out the testing.
+###########################################################################
+
+# Testcase 9a, Inner renamed directory within outer renamed directory
+#   (Related to testcase 1f)
+#   Commit O: z/{b,c,d/{e,f,g}}
+#   Commit A: y/{b,c}, x/w/{e,f,g}
+#   Commit B: z/{b,c,d/{e,f,g,h},i}
+#   Expected: y/{b,c,i}, x/w/{e,f,g,h}
+#   NOTE: The only reason this one is interesting is because when a directory
+#         is split into multiple other directories, we pick the destination by
+#         weight, i.e. by which directory received the most of its paths.  A
+#         naive implementation of that could send the new file z/i in commit B
+#         to x/w/i or x/i instead of y/i.
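+#
+#         [Rough sketch of the weighting idea, not the actual implementation:
+#          each detected file rename casts one vote for a source-directory ->
+#          target-directory pair (after stripping the unchanged trailing path
+#          components), and a directory's new location is the target with the
+#          most votes.  Here z/b and z/c vote for z/ -> y/, while z/d/{e,f,g}
+#          vote for z/d/ -> x/w/, so the new z/i follows z/ -> y/ and the new
+#          z/d/h follows z/d/ -> x/w/.]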
+
+test_expect_success '9a-setup: Inner renamed directory within outer renamed directory' '
+       test_create_repo 9a &&
+       (
+               cd 9a &&
+
+               mkdir -p z/d &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo e >z/d/e &&
+               echo f >z/d/f &&
+               echo g >z/d/g &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir x &&
+               git mv z/d x/w &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo h >z/d/h &&
+               echo i >z/i &&
+               git add z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9a-check: Inner renamed directory within outer renamed directory' '
+       (
+               cd 9a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/i &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/i &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       HEAD:x/w/e HEAD:x/w/f HEAD:x/w/g HEAD:x/w/h &&
+               git rev-parse >expect \
+                       O:z/d/e    O:z/d/f    O:z/d/g    B:z/d/h &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9b, Transitive rename with content merge
+#   (Related to testcase 1c)
+#   Commit O: z/{b,c},   x/d_1
+#   Commit A: y/{b,c},   x/d_2
+#   Commit B: z/{b,c,d_3}
+#   Expected: y/{b,c,d_merged}
+
+test_expect_success '9b-setup: Transitive rename with content merge' '
+       test_create_repo 9b &&
+       (
+               cd 9b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               test_seq 1 10 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_seq 1 11 >x/d &&
+               git add x/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_seq 0 10 >x/d &&
+               git mv x/d z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9b-check: Transitive rename with content merge' '
+       (
+               cd 9b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               test_seq 0 11 >expected &&
+               test_cmp expected y/d &&
+               git add expected &&
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    :0:expected &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:x/d &&
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_path_is_missing z/d &&
+
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse O:x/d) &&
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse A:x/d) &&
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse B:z/d)
+       )
+'
+
+# Testcase 9c, Doubly transitive rename?
+#   (Related to testcase 1c, 7e, and 9d)
+#   Commit O: z/{b,c},     x/{d,e},    w/f
+#   Commit A: y/{b,c},     x/{d,e,f,g}
+#   Commit B: z/{b,c,d,e},             w/f
+#   Expected: y/{b,c,d,e}, x/{f,g}
+#
+#   NOTE: x/f and x/g may be slightly confusing here.  The rename from w/f to
+#         x/f is clear.  Let's look beyond that.  Here's the logic:
+#            Commit B renamed x/ -> z/
+#            Commit A renamed z/ -> y/
+#         So, we could possibly further rename x/f to z/f to y/f, a doubly
+#         transitive rename.  However, where does it end?  We can chain these
+#         indefinitely (see testcase 9d).  What if there is a D/F conflict
+#         at z/f/ or y/f/?  Or just another file conflict at one of those
+#         paths?  In the case of an N-long chain of transitive renamings,
+#         where do we "abort" the rename?  Can the user make sense of
+#         the resulting conflict and resolve it?
+#
+#         To avoid this confusion I use the simple rule that if the other side
+#         of history did a directory rename to a path that your side renamed
+#         away, then ignore that particular rename from the other side of
+#         history for any implicit directory renames.
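+#
+#         [Roughly, as a sketch rather than the literal implementation:
+#              for each directory rename X/ -> Y/ done by the other side:
+#                  if our side renamed Y/ away to somewhere else:
+#                      do not use X/ -> Y/ for implicit directory renames
+#          Here commit A renamed z/ away (to y/), so B's x/ -> z/ rename is
+#          not applied to A's new paths x/f and x/g.]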
+
+test_expect_success '9c-setup: Doubly transitive rename?' '
+       test_create_repo 9c &&
+       (
+               cd 9c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               echo e >x/e &&
+               mkdir w &&
+               echo f >w/f &&
+               git add z x w &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv w/f x/ &&
+               echo g >x/g &&
+               git add x/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/d &&
+               git mv x/e z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9c-check: Doubly transitive rename?' '
+       (
+               cd 9c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+               test_i18ngrep "WARNING: Avoiding applying x -> z rename to x/f" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e HEAD:x/f HEAD:x/g &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d    O:x/e    O:w/f    A:x/g &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9d, N-fold transitive rename?
+#   (Related to testcase 9c...and 1c and 7e)
+#   Commit O: z/a, y/b, x/c, w/d, v/e, u/f
+#   Commit A:  y/{a,b},  w/{c,d},  u/{e,f}
+#   Commit B: z/{a,t}, x/{b,c}, v/{d,e}, u/f
+#   Expected: <see NOTE first>
+#
+#   NOTE: z/ -> y/ (in commit A)
+#         y/ -> x/ (in commit B)
+#         x/ -> w/ (in commit A)
+#         w/ -> v/ (in commit B)
+#         v/ -> u/ (in commit A)
+#         So, if we add a file to z, say z/t, where should it end up?  In u?
+#         What if there's another file or directory named 't' in one of the
+#         intervening directories and/or in u itself?  Also, shouldn't the
+#         same logic that places 't' in u/ also move ALL other files to u/?
+#         What if there are file or directory conflicts in any of them?  If
+#         we attempted to do N-way (N-fold? N-ary? N-uple?) transitive renames
+#         like this, would the user have any hope of understanding any
+#         conflicts or how their working tree ended up?  I think not, so I'm
+#         ruling out N-ary transitive renames for N>1.
+#
+#   Therefore our expected result is:
+#     z/t, y/a, x/b, w/c, u/d, u/e, u/f
+#   The reason that v/d DOES get transitively renamed to u/d is that u/ isn't
+#   renamed anywhere.  A slightly sub-optimal result, but it uses fairly
+#   simple rules that are consistent with what we need for all the other
+#   testcases and simplifies things for the user.
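+#
+#   Spelled out per path (a sketch of the mapping, not part of the test):
+#     z/t (new on B): z/ -> y/ not applied (B renamed y/ away); stays z/t
+#     y/a (new on A): y/ -> x/ not applied (A renamed x/ away); stays y/a
+#     x/b (new on B): x/ -> w/ not applied (B renamed w/ away); stays x/b
+#     w/c (new on A): w/ -> v/ not applied (A renamed v/ away); stays w/c
+#     v/d (new on B): v/ -> u/ applied, since u/ is not renamed; lands at u/d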
+
+test_expect_success '9d-setup: N-way transitive rename?' '
+       test_create_repo 9d &&
+       (
+               cd 9d &&
+
+               mkdir z y x w v u &&
+               echo a >z/a &&
+               echo b >y/b &&
+               echo c >x/c &&
+               echo d >w/d &&
+               echo e >v/e &&
+               echo f >u/f &&
+               git add z y x w v u &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/a y/ &&
+               git mv x/c w/ &&
+               git mv v/e u/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo t >z/t &&
+               git mv y/b x/ &&
+               git mv w/d v/ &&
+               git add z/t &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9d-check: N-way transitive rename?' '
+       (
+               cd 9d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+               test_i18ngrep "WARNING: Avoiding applying z -> y rename to z/t" out &&
+               test_i18ngrep "WARNING: Avoiding applying y -> x rename to y/a" out &&
+               test_i18ngrep "WARNING: Avoiding applying x -> w rename to x/b" out &&
+               test_i18ngrep "WARNING: Avoiding applying w -> v rename to w/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:z/t \
+                       HEAD:y/a HEAD:x/b HEAD:w/c \
+                       HEAD:u/d HEAD:u/e HEAD:u/f &&
+               git rev-parse >expect \
+                       B:z/t    \
+                       O:z/a    O:y/b    O:x/c    \
+                       O:w/d    O:v/e    A:u/f &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9e, N-to-1 whammo
+#   (Related to testcase 9c...and 1c and 7e)
+#   Commit O: dir1/{a,b}, dir2/{d,e}, dir3/{g,h}, dirN/{j,k}
+#   Commit A: dir1/{a,b,c,yo}, dir2/{d,e,f,yo}, dir3/{g,h,i,yo}, dirN/{j,k,l,yo}
+#   Commit B: combined/{a,b,d,e,g,h,j,k}
+#   Expected: combined/{a,b,c,d,e,f,g,h,i,j,k,l}, CONFLICT(Nto1) warnings,
+#             dir1/yo, dir2/yo, dir3/yo, dirN/yo
+
+test_expect_success '9e-setup: N-to-1 whammo' '
+       test_create_repo 9e &&
+       (
+               cd 9e &&
+
+               mkdir dir1 dir2 dir3 dirN &&
+               echo a >dir1/a &&
+               echo b >dir1/b &&
+               echo d >dir2/d &&
+               echo e >dir2/e &&
+               echo g >dir3/g &&
+               echo h >dir3/h &&
+               echo j >dirN/j &&
+               echo k >dirN/k &&
+               git add dir* &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo c  >dir1/c &&
+               echo yo >dir1/yo &&
+               echo f  >dir2/f &&
+               echo yo >dir2/yo &&
+               echo i  >dir3/i &&
+               echo yo >dir3/yo &&
+               echo l  >dirN/l &&
+               echo yo >dirN/yo &&
+               git add dir* &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv dir1 combined &&
+               git mv dir2/* combined/ &&
+               git mv dir3/* combined/ &&
+               git mv dirN/* combined/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success C_LOCALE_OUTPUT '9e-check: N-to-1 whammo' '
+       (
+               cd 9e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               grep "CONFLICT (implicit dir rename): Cannot map more than one path to combined/yo" out >error_line &&
+               grep -q dir1/yo error_line &&
+               grep -q dir2/yo error_line &&
+               grep -q dir3/yo error_line &&
+               grep -q dirN/yo error_line &&
+
+               git ls-files -s >out &&
+               test_line_count = 16 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:combined/a :0:combined/b :0:combined/c \
+                       :0:combined/d :0:combined/e :0:combined/f \
+                       :0:combined/g :0:combined/h :0:combined/i \
+                       :0:combined/j :0:combined/k :0:combined/l &&
+               git rev-parse >expect \
+                        O:dir1/a      O:dir1/b      A:dir1/c \
+                        O:dir2/d      O:dir2/e      A:dir2/f \
+                        O:dir3/g      O:dir3/h      A:dir3/i \
+                        O:dirN/j      O:dirN/k      A:dirN/l &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       :0:dir1/yo :0:dir2/yo :0:dir3/yo :0:dirN/yo &&
+               git rev-parse >expect \
+                        A:dir1/yo  A:dir2/yo  A:dir3/yo  A:dirN/yo &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9f, Renamed directory that only contained immediate subdirs
+#   (Related to testcases 1e & 9g)
+#   Commit O: goal/{a,b}/$more_files
+#   Commit A: priority/{a,b}/$more_files
+#   Commit B: goal/{a,b}/$more_files, goal/c
+#   Expected: priority/{a,b}/$more_files, priority/c
+
+test_expect_success '9f-setup: Renamed directory that only contained immediate subdirs' '
+       test_create_repo 9f &&
+       (
+               cd 9f &&
+
+               mkdir -p goal/a &&
+               mkdir -p goal/b &&
+               echo foo >goal/a/foo &&
+               echo bar >goal/b/bar &&
+               echo baz >goal/b/baz &&
+               git add goal &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv goal/ priority &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >goal/c &&
+               git add goal/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9f-check: Renamed directory that only contained immediate subdirs' '
+       (
+               cd 9f &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:priority/a/foo \
+                       HEAD:priority/b/bar \
+                       HEAD:priority/b/baz \
+                       HEAD:priority/c &&
+               git rev-parse >expect \
+                       O:goal/a/foo \
+                       O:goal/b/bar \
+                       O:goal/b/baz \
+                       B:goal/c &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:goal/c
+       )
+'
+
+# Testcase 9g, Renamed directory that only contained immediate subdirs, immediate subdirs renamed
+#   (Related to testcases 1e & 9f)
+#   Commit O: goal/{a,b}/$more_files
+#   Commit A: priority/{alpha,beta}/$more_files
+#   Commit B: goal/{a,b}/$more_files, goal/c
+#   Expected: priority/{alpha,beta}/$more_files, priority/c
+
+test_expect_success '9g-setup: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' '
+       test_create_repo 9g &&
+       (
+               cd 9g &&
+
+               mkdir -p goal/a &&
+               mkdir -p goal/b &&
+               echo foo >goal/a/foo &&
+               echo bar >goal/b/bar &&
+               echo baz >goal/b/baz &&
+               git add goal &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir priority &&
+               git mv goal/a/ priority/alpha &&
+               git mv goal/b/ priority/beta &&
+               rmdir goal/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >goal/c &&
+               git add goal/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_failure '9g-check: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' '
+       (
+               cd 9g &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:priority/alpha/foo \
+                       HEAD:priority/beta/bar  \
+                       HEAD:priority/beta/baz  \
+                       HEAD:priority/c &&
+               git rev-parse >expect \
+                       O:goal/a/foo \
+                       O:goal/b/bar \
+                       O:goal/b/baz \
+                       B:goal/c &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:goal/c
+       )
+'
+
+# Testcase 9h, Avoid implicit rename if involved as source on other side
+#   (Extremely closely related to testcase 3a)
+#   Commit O: z/{b,c,d_1}
+#   Commit A: z/{b,c,d_2}
+#   Commit B: y/{b,c}, x/d_1
+#   Expected: y/{b,c}, x/d_2
+#   NOTE: If we applied the z/ -> y/ rename to z/d, then we'd end up with
+#         a rename/rename(1to2) conflict (z/d -> y/d vs. x/d)
+test_expect_success '9h-setup: Avoid dir rename on merely modified path' '
+       test_create_repo 9h &&
+       (
+               cd 9h &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nd\n" >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               echo more >>z/d &&
+               git add z/d &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9h-check: Avoid dir rename on merely modified path' '
+       (
+               cd 9h &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    A:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 9:
+#
+#   If the other side of history did a directory rename to a path that your
+#   side renamed away, then ignore that particular rename from the other
+#   side of history for any implicit directory renames.
+###########################################################################
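+
+# Concretely (a sketch, not an extra test), this rule is what keeps x/f at
+# x/f in testcase 9c above: B renamed x/ -> z/, which would map A's new x/f
+# to z/f, but A renamed z/ away (z/ -> y/), so that particular rename is
+# ignored and
+#
+#     git checkout A^0 && git merge -s recursive B^0
+#
+# merely warns "Avoiding applying x -> z rename to x/f" instead of chaining
+# x/f -> z/f -> y/f.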
+
+###########################################################################
+# SECTION 10: Handling untracked files
+#
+# unpack_trees(), upon which the recursive merge algorithm is based, aborts
+# the operation if untracked or dirty files would be deleted or overwritten
+# by the merge.  Unfortunately, unpack_trees() does not understand renames,
+# and if it doesn't abort, then it muddies up the working directory before
+# we even get to the point of detecting renames, so we need some special
+# handling, at least in the case of directory renames.
+###########################################################################
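+
+# A minimal sketch of that check (mirroring testcase 10a below, not a
+# separate test): with an untracked file sitting where the merged result
+# wants to write a path,
+#
+#     git checkout A^0
+#     echo important >z/d          # untracked; B renamed z/c to z/d
+#     git merge -s recursive B^0   # must fail
+#
+# the merge aborts with "untracked working tree files would be overwritten
+# by merge" and leaves the untracked file untouched.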
+
+# Testcase 10a, Overwrite untracked: normal rename/delete
+#   Commit O: z/{b,c_1}
+#   Commit A: z/b + untracked z/c + untracked z/d
+#   Commit B: z/{b,d_1}
+#   Expected: Aborted Merge +
+#       ERROR_MSG(untracked working tree files would be overwritten by merge)
+
+test_expect_success '10a-setup: Overwrite untracked with normal rename/delete' '
+       test_create_repo 10a &&
+       (
+               cd 10a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/c z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10a-check: Overwrite untracked with normal rename/delete' '
+       (
+               cd 10a &&
+
+               git checkout A^0 &&
+               echo very >z/c &&
+               echo important >z/d &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "The following untracked working tree files would be overwritten by merge" err &&
+
+               git ls-files -s >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               echo very >expect &&
+               test_cmp expect z/c &&
+
+               echo important >expect &&
+               test_cmp expect z/d &&
+
+               git rev-parse HEAD:z/b >actual &&
+               git rev-parse O:z/b >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 10b, Overwrite untracked: dir rename + delete
+#   Commit O: z/{b,c_1}
+#   Commit A: y/b + untracked y/{c,d,e}
+#   Commit B: z/{b,d_1,e}
+#   Expected: Failed Merge; y/b + untracked y/c + untracked y/d on disk +
+#             z/c_1 -> z/d_1 rename recorded at stage 3 for y/d +
+#       ERROR_MSG(refusing to lose untracked file at 'y/d')
+
+test_expect_success '10b-setup: Overwrite untracked with dir rename + delete' '
+       test_create_repo 10b &&
+       (
+               cd 10b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/c z/d &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10b-check: Overwrite untracked with dir rename + delete' '
+       (
+               cd 10b &&
+
+               git checkout A^0 &&
+               echo very >y/c &&
+               echo important >y/d &&
+               echo contents >y/e &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/delete).*Version B\^0 of y/d left in tree at y/d~B\^0" out &&
+               test_i18ngrep "Error: Refusing to lose untracked file at y/e; writing to y/e~B\^0 instead" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 5 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :3:y/d :3:y/e &&
+               git rev-parse >expect \
+                       O:z/b  O:z/c  B:z/e &&
+               test_cmp expect actual &&
+
+               echo very >expect &&
+               test_cmp expect y/c &&
+
+               echo important >expect &&
+               test_cmp expect y/d &&
+
+               echo contents >expect &&
+               test_cmp expect y/e
+       )
+'
+
+# Testcase 10c, Overwrite untracked: dir rename/rename(1to2)
+#   Commit O: z/{a,b}, x/{c,d}
+#   Commit A: y/{a,b}, w/c, x/d + different untracked y/c
+#   Commit B: z/{a,b,c}, x/d
+#   Expected: Failed Merge; y/{a,b} + x/d + untracked y/c +
+#             CONFLICT(rename/rename) x/c -> w/c vs y/c +
+#             y/c~B^0 +
+#             ERROR_MSG(Refusing to lose untracked file at y/c)
+
+test_expect_success '10c-setup: Overwrite untracked with dir rename/rename(1to2)' '
+       test_create_repo 10c &&
+       (
+               cd 10c &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir w &&
+               git mv x/c w/c &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/c z/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10c-check: Overwrite untracked with dir rename/rename(1to2)' '
+       (
+               cd 10c &&
+
+               git checkout A^0 &&
+               echo important >y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose untracked file at y/c; adding as y/c~B\^0 instead" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:x/d :1:x/c :2:w/c :3:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/c  O:x/c  O:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c~B^0 >actual &&
+               git rev-parse O:x/c >expect &&
+               test_cmp expect actual &&
+
+               echo important >expect &&
+               test_cmp expect y/c
+       )
+'
+
+# Testcase 10d, Delete untracked w/ dir rename/rename(2to1)
+#   Commit O: z/{a,b,c_1},        x/{d,e,f_2}
+#   Commit A: y/{a,b},            x/{d,e,f_2,wham_1} + untracked y/wham
+#   Commit B: z/{a,b,c_1,wham_2}, y/{d,e}
+#   Expected: Failed Merge; y/{a,b,d,e} + untracked y/{wham,wham~B^0,wham~HEAD} +
+#             CONFLICT(rename/rename) z/c_1 vs x/f_2 -> y/wham
+#             ERROR_MSG(Refusing to lose untracked file at y/wham)
+
+test_expect_success '10d-setup: Delete untracked with dir rename/rename(2to1)' '
+       test_create_repo 10d &&
+       (
+               cd 10d &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >x/d &&
+               echo e >x/e &&
+               echo f >x/f &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/c x/wham &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/f z/wham &&
+               git mv x/ y/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10d-check: Delete untracked with dir rename/rename(2to1)' '
+       (
+               cd 10d &&
+
+               git checkout A^0 &&
+               echo important >y/wham &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose untracked file at y/wham" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:y/d :0:y/e :2:y/wham :3:y/wham &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/e  O:z/c     O:x/f &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/wham &&
+
+               echo important >expect &&
+               test_cmp expect y/wham &&
+
+               git hash-object >actual \
+                       y/wham~B^0 y/wham~HEAD &&
+               git rev-parse >expect \
+                       O:x/f      O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 10e, Does git complain about untracked file that's not in the way?
+#   Commit O: z/{a,b}
+#   Commit A: y/{a,b} + untracked z/c
+#   Commit B: z/{a,b,c}
+#   Expected: y/{a,b,c} + untracked z/c
+
+test_expect_success '10e-setup: Does git complain about untracked file that is not really in the way?' '
+       test_create_repo 10e &&
+       (
+               cd 10e &&
+
+               mkdir z &&
+               echo a >z/a &&
+               echo b >z/b &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >z/c &&
+               git add z/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_failure '10e-check: Does git complain about untracked file that is not really in the way?' '
+       (
+               cd 10e &&
+
+               git checkout A^0 &&
+               mkdir z &&
+               echo random >z/c &&
+
+               git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep ! "following untracked working tree files would be overwritten by merge" err &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  B:z/c &&
+               test_cmp expect actual &&
+
+               echo random >expect &&
+               test_cmp expect z/c
+       )
+'
+
+###########################################################################
+# SECTION 11: Handling dirty (not up-to-date) files
+#
+# unpack_trees(), upon which the recursive merge algorithm is based, aborts
+# the operation if untracked or dirty files would be deleted or overwritten
+# by the merge.  Unfortunately, unpack_trees() does not understand renames,
+# and if it doesn't abort, then it muddies up the working directory before
+# we even get to the point of detecting renames, so we need some special
+# handling.  This was true even of normal renames, but there are additional
+# codepaths that need special handling with directory renames.  Add
+# testcases for both renamed-by-directory-rename-detection and standard
+# rename cases.
+###########################################################################
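+
+# A minimal sketch of the dirty-file check (mirroring testcase 11a below,
+# not a separate test): if the working copy of a renamed path carries
+# uncommitted modifications,
+#
+#     git checkout A^0             # A renamed z/b to z/c
+#     echo stuff >>z/c             # dirty, uncommitted modification
+#     git merge -s recursive B^0   # B modified z/b; must fail
+#
+# the merge reports "Refusing to lose dirty file at z/c", records B's version
+# at z/c~HEAD, and leaves the dirty z/c in place.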
+
+# Testcase 11a, Avoid losing dirty contents with simple rename
+#   Commit O: z/{a,b_v1},
+#   Commit A: z/{a,c_v1}, and z/c_v1 has uncommitted mods
+#   Commit B: z/{a,b_v2}
+#   Expected: ERROR_MSG(Refusing to lose dirty file at z/c) +
+#             z/a, staged version of z/c has sha1sum matching B:z/b_v2,
+#             z/c~HEAD with contents of B:z/b_v2,
+#             z/c with uncommitted mods on top of A:z/c_v1
+
+test_expect_success '11a-setup: Avoid losing dirty contents with simple rename' '
+       test_create_repo 11a &&
+       (
+               cd 11a &&
+
+               mkdir z &&
+               echo a >z/a &&
+               test_seq 1 10 >z/b &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/b z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 11 >>z/b &&
+               git add z/b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11a-check: Avoid losing dirty contents with simple rename' '
+       (
+               cd 11a &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 2 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:z/a :2:z/c &&
+               git rev-parse >expect \
+                        O:z/a  B:z/b &&
+               test_cmp expect actual &&
+
+               git hash-object z/c~HEAD >actual &&
+               git rev-parse B:z/b >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11b, Avoid losing dirty file involved in directory rename
+#   Commit O: z/a,         x/{b,c_v1}
+#   Commit A: z/{a,c_v1},  x/b,       and z/c_v1 has uncommitted mods
+#   Commit B: y/a,         x/{b,c_v2}
+#   Expected: y/{a,c_v2}, x/b, z/c_v1 with uncommitted mods untracked,
+#             ERROR_MSG(Refusing to lose dirty file at z/c)
+
+test_expect_success '11b-setup: Avoid losing dirty file involved in directory rename' '
+       test_create_repo 11b &&
+       (
+               cd 11b &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               echo 11 >>x/c &&
+               git add x/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11b-check: Avoid losing dirty file involved in directory rename' '
+       (
+               cd 11b &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               grep -q stuff z/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -m >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:y/a :0:y/c &&
+               git rev-parse >expect \
+                        O:x/b  O:z/a  B:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c >actual &&
+               git rev-parse B:x/c >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11c, Avoid losing not-up-to-date with rename + D/F conflict
+#   Commit O: y/a,         x/{b,c_v1}
+#   Commit A: y/{a,c_v1},  x/b,       and y/c_v1 has uncommitted mods
+#   Commit B: y/{a,c/d},   x/{b,c_v2}
+#   Expected: Abort_msg("following files would be overwritten by merge") +
+#             y/c left untouched (still has uncommitted mods)
+
+test_expect_success '11c-setup: Avoid losing not-uptodate with rename + D/F conflict' '
+       test_create_repo 11c &&
+       (
+               cd 11c &&
+
+               mkdir y x &&
+               echo a >y/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add y x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c y/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y/c &&
+               echo d >y/c/d &&
+               echo 11 >>x/c &&
+               git add x/c y/c/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11c-check: Avoid losing not-uptodate with rename + D/F conflict' '
+       (
+               cd 11c &&
+
+               git checkout A^0 &&
+               echo stuff >>y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "following files would be overwritten by merge" err &&
+
+               grep -q stuff y/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected y/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -m >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out
+       )
+'
+
+# Testcase 11d, Avoid losing not-up-to-date with rename + D/F conflict
+#   Commit O: z/a,         x/{b,c_v1}
+#   Commit A: z/{a,c_v1},  x/b,       and z/c_v1 has uncommitted mods
+#   Commit B: y/{a,c/d},   x/{b,c_v2}
+#   Expected: D/F: (y/c_v2 vs y/c/d) +
+#             Warning_Msg("Refusing to lose dirty file at z/c") +
+#             y/{a,c~HEAD,c/d}, x/b, now-untracked z/c_v1 with uncommitted mods
+
+test_expect_success '11d-setup: Avoid losing not-uptodate with rename + D/F conflict' '
+       test_create_repo 11d &&
+       (
+               cd 11d &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir y/c &&
+               echo d >y/c/d &&
+               echo 11 >>x/c &&
+               git add x/c y/c/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11d-check: Avoid losing not-uptodate with rename + D/F conflict' '
+       (
+               cd 11d &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               grep -q stuff z/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 5 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:y/a :0:y/c/d :3:y/c &&
+               git rev-parse >expect \
+                        O:x/b  O:z/a  B:y/c/d  B:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c~HEAD >actual &&
+               git rev-parse B:x/c >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11e, Avoid deleting not-up-to-date with dir rename/rename(1to2)/add
+#   Commit O: z/{a,b},      x/{c_1,d}
+#   Commit A: y/{a,b,c_2},  x/d, w/c_1, and y/c_2 has uncommitted mods
+#   Commit B: z/{a,b,c_1},  x/d
+#   Expected: Failed Merge; y/{a,b} + x/d +
+#             CONFLICT(rename/rename) x/c_1 -> w/c_1 vs y/c_1 +
+#             ERROR_MSG(Refusing to lose dirty file at y/c)
+#             y/c~B^0 has O:x/c_1 contents
+#             y/c~HEAD has A:y/c_2 contents
+#             y/c has dirty file from before merge
+
+test_expect_success '11e-setup: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' '
+       test_create_repo 11e &&
+       (
+               cd 11e &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               echo different >y/c &&
+               mkdir w &&
+               git mv x/c w/ &&
+               git add y/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/c z/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11e-check: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' '
+       (
+               cd 11e &&
+
+               git checkout A^0 &&
+               echo mods >>y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose dirty file at y/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 4 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               echo different >expected &&
+               echo mods >>expected &&
+               test_cmp expected y/c &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:x/d :1:x/c :2:w/c :2:y/c :3:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/c  O:x/c  A:y/c  O:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/c~B^0 y/c~HEAD &&
+               git rev-parse >expect \
+                       O:x/c   A:y/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11f, Avoid deleting not-up-to-date w/ dir rename/rename(2to1)
+#   Commit O: z/{a,b},        x/{c_1,d_2}
+#   Commit A: y/{a,b,wham_1}, x/d_2, except y/wham has uncommitted mods
+#   Commit B: z/{a,b,wham_2}, x/c_1
+#   Expected: Failed Merge; y/{a,b} + untracked y/{wham~B^0,wham~HEAD} +
+#             y/wham with dirty changes from before merge +
+#             CONFLICT(rename/rename) x/c vs x/d -> y/wham
+#             ERROR_MSG(Refusing to lose dirty file at y/wham)
+
+test_expect_success '11f-setup: Avoid deleting not-uptodate with dir rename/rename(2to1)' '
+       test_create_repo 11f &&
+       (
+               cd 11f &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               test_seq 1 10 >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               git mv x/c y/wham &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/wham &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11f-check: Avoid deleting not-uptodate with dir rename/rename(2to1)' '
+       (
+               cd 11f &&
+
+               git checkout A^0 &&
+               echo important >>y/wham &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose dirty file at y/wham" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               test_seq 1 10 >expected &&
+               echo important >>expected &&
+               test_cmp expected y/wham &&
+
+               test_must_fail git rev-parse :1:y/wham &&
+               git hash-object >actual \
+                       y/wham~B^0 y/wham~HEAD &&
+               git rev-parse >expect \
+                       O:x/d      O:x/c &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :2:y/wham :3:y/wham &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/c     O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# SECTION 12: Everything else
+#
+# Tests suggested by others.  Tests added after implementation completed
+# and submitted.  Grab bag.
+###########################################################################
+
+# Testcase 12a, Moving one directory hierarchy into another
+#   (Related to testcase 9a)
+#   Commit O: node1/{leaf1,leaf2}, node2/{leaf3,leaf4}
+#   Commit A: node1/{leaf1,leaf2,node2/{leaf3,leaf4}}
+#   Commit B: node1/{leaf1,leaf2,leaf5}, node2/{leaf3,leaf4,leaf6}
+#   Expected: node1/{leaf1,leaf2,leaf5,node2/{leaf3,leaf4,leaf6}}
+
+test_expect_success '12a-setup: Moving one directory hierarchy into another' '
+       test_create_repo 12a &&
+       (
+               cd 12a &&
+
+               mkdir -p node1 node2 &&
+               echo leaf1 >node1/leaf1 &&
+               echo leaf2 >node1/leaf2 &&
+               echo leaf3 >node2/leaf3 &&
+               echo leaf4 >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo leaf5 >node1/leaf5 &&
+               echo leaf6 >node2/leaf6 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12a-check: Moving one directory hierarchy into another' '
+       (
+               cd 12a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+
+               git rev-parse >actual \
+                       HEAD:node1/leaf1 HEAD:node1/leaf2 HEAD:node1/leaf5 \
+                       HEAD:node1/node2/leaf3 \
+                       HEAD:node1/node2/leaf4 \
+                       HEAD:node1/node2/leaf6 &&
+               git rev-parse >expect \
+                       O:node1/leaf1    O:node1/leaf2    B:node1/leaf5 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 \
+                       B:node2/leaf6 &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 12b, Moving two directory hierarchies into each other
+#   (Related to testcases 1c and 12c)
+#   Commit O: node1/{leaf1, leaf2}, node2/{leaf3, leaf4}
+#   Commit A: node1/{leaf1, leaf2, node2/{leaf3, leaf4}}
+#   Commit B: node2/{leaf3, leaf4, node1/{leaf1, leaf2}}
+#   Expected: node1/node2/node1/{leaf1, leaf2},
+#             node2/node1/node2/{leaf3, leaf4}
+#   NOTE: Without directory renames, we would expect
+#                   node2/node1/{leaf1, leaf2},
+#                   node1/node2/{leaf3, leaf4}
+#         with directory rename detection, we note that
+#             commit A renames node2/ -> node1/node2/
+#             commit B renames node1/ -> node2/node1/
+#         therefore, applying those directory renames to the initial result
+#         (making all four paths experience a transitive renaming), yields
+#         the expected result.
+#
+#         You may ask, is it weird to have two directories rename each other?
+#         To which, I can do no more than shrug my shoulders and say that
+#         even simple rules give weird results when given weird inputs.
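+#
+#         Spelled out for one path from each side (a sketch of the mapping,
+#         not an extra test):
+#           node1/leaf1: B moves it to node2/node1/leaf1; A's rename
+#                        node2/ -> node1/node2/ then applies to that new
+#                        path, giving node1/node2/node1/leaf1
+#           node2/leaf3: A moves it to node1/node2/leaf3; B's rename
+#                        node1/ -> node2/node1/ then applies, giving
+#                        node2/node1/node2/leaf3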
+
+test_expect_success '12b-setup: Moving two directory hierarchies into each other' '
+       test_create_repo 12b &&
+       (
+               cd 12b &&
+
+               mkdir -p node1 node2 &&
+               echo leaf1 >node1/leaf1 &&
+               echo leaf2 >node1/leaf2 &&
+               echo leaf3 >node2/leaf3 &&
+               echo leaf4 >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv node1/ node2/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12b-check: Moving two directory hierarchies into each other' '
+       (
+               cd 12b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:node1/node2/node1/leaf1 \
+                       HEAD:node1/node2/node1/leaf2 \
+                       HEAD:node2/node1/node2/leaf3 \
+                       HEAD:node2/node1/node2/leaf4 &&
+               git rev-parse >expect \
+                       O:node1/leaf1 \
+                       O:node1/leaf2 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 12c, Moving two directory hierarchies into each other w/ content merge
+#   (Related to testcase 12b)
+#   Commit O: node1/{       leaf1_1, leaf2_1}, node2/{leaf3_1, leaf4_1}
+#   Commit A: node1/{       leaf1_2, leaf2_2,  node2/{leaf3_2, leaf4_2}}
+#   Commit B: node2/{node1/{leaf1_3, leaf2_3},        leaf3_3, leaf4_3}
+#   Expected: Content merge conflicts for each of:
+#               node1/node2/node1/{leaf1, leaf2},
+#               node2/node1/node2/{leaf3, leaf4}
+#   NOTE: This is *exactly* like 12b, except that every path is modified on
+#         each side of the merge.
+
+test_expect_success '12c-setup: Moving two directory hierarchies into each other w/ content merge' '
+       test_create_repo 12c &&
+       (
+               cd 12c &&
+
+               mkdir -p node1 node2 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf1\n" >node1/leaf1 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf2\n" >node1/leaf2 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf3\n" >node2/leaf3 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf4\n" >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               for i in $(git ls-files); do echo side A >>$i; done &&
+               git add -u &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv node1/ node2/ &&
+               for i in $(git ls-files); do echo side B >>$i; done &&
+               git add -u &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12c-check: Moving two directory hierarchies into each other w/ content merge' '
+       (
+               cd 12c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 &&
+
+               git ls-files -u >out &&
+               test_line_count = 12 out &&
+
+               git rev-parse >actual \
+                       :1:node1/node2/node1/leaf1 \
+                       :1:node1/node2/node1/leaf2 \
+                       :1:node2/node1/node2/leaf3 \
+                       :1:node2/node1/node2/leaf4 \
+                       :2:node1/node2/node1/leaf1 \
+                       :2:node1/node2/node1/leaf2 \
+                       :2:node2/node1/node2/leaf3 \
+                       :2:node2/node1/node2/leaf4 \
+                       :3:node1/node2/node1/leaf1 \
+                       :3:node1/node2/node1/leaf2 \
+                       :3:node2/node1/node2/leaf3 \
+                       :3:node2/node1/node2/leaf4 &&
+               git rev-parse >expect \
+                       O:node1/leaf1 \
+                       O:node1/leaf2 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 \
+                       A:node1/leaf1 \
+                       A:node1/leaf2 \
+                       A:node1/node2/leaf3 \
+                       A:node1/node2/leaf4 \
+                       B:node2/node1/leaf1 \
+                       B:node2/node1/leaf2 \
+                       B:node2/leaf3 \
+                       B:node2/leaf4 &&
+               test_cmp expect actual
+       )
+'
+
+test_done
index a5d901502414f25616a474152ee0f5816465bd37..bae78c4e89e2402d5ab37e3b7cd094dde41d69b3 100755 (executable)
@@ -378,4 +378,12 @@ check_describe tags/A --all A
 check_describe tags/c --all c
 check_describe heads/branch_A --all --match='branch_*' branch_A
 
+test_expect_success 'describe complains about tree object' '
+       test_must_fail git describe HEAD^{tree}
+'
+
+test_expect_success 'describe complains about missing object' '
+       test_must_fail git describe $_z40
+'
+
 test_done
index 2e2fb0e9572f3d570311470aebf47da37974b8f8..a54a52aaa4e680bdbc97750c4ae4855a45dcdb80 100755 (executable)
@@ -512,7 +512,7 @@ test_expect_success 'merge-msg with "merging" an annotated tag' '
 
        test_when_finished "git reset --hard" &&
        annote=$(git rev-parse annote) &&
-       git merge --no-commit $annote &&
+       git merge --no-commit --no-ff $annote &&
        {
                cat <<-EOF
                Merge tag '\''$annote'\''
index c128dfc5790790de9edf1b4d2cfa8b028c1036bc..295d1475bde0151df65e0098e6e4a169193906ed 100755 (executable)
@@ -373,11 +373,8 @@ test_expect_success 'Quoting style: tcl' '
 
 for i in "--perl --shell" "-s --python" "--python --tcl" "--tcl --perl"; do
        test_expect_success "more than one quoting style: $i" "
-               git for-each-ref $i 2>&1 | (read line &&
-               case \$line in
-               \"error: more than one quoting style\"*) : happy;;
-               *) false
-               esac)
+               test_must_fail git for-each-ref $i 2>err &&
+               grep '^error: more than one quoting style' err
        "
 done
 
index 41b0be575d523071152c19a224a7ef914ce2ee1a..d5255dd5760389fbbabf1c2ffc787500e4184758 100755 (executable)
@@ -87,7 +87,7 @@ test_expect_success 'background auto gc does not run if gc.log is present and re
        test_must_fail git gc --auto 2>err &&
        test_i18ngrep "^error:" err &&
        test_config gc.logexpiry 5.days &&
-       test-chmtime =-345600 .git/gc.log &&
+       test-tool chmtime =-345600 .git/gc.log &&
        test_must_fail git gc --auto &&
        test_config gc.logexpiry 2.days &&
        run_and_wait_for_auto_gc &&
index 394b169eada7968875e931c5a43f25a808cfc18f..765cced60b1055e1187e1fdc1c1c881814cebbf6 100755 (executable)
@@ -73,7 +73,7 @@ for repack in '' true; do
 
        test_expect_success "simulate time passing ($title)" '
                find .git/objects -type f |
-               xargs test-chmtime -v -86400
+               xargs test-tool chmtime -v -86400
        '
 
        test_expect_success "start writing new commit with old blob ($title)" '
@@ -104,7 +104,7 @@ for repack in '' true; do
        test_expect_success "abandon objects again ($title)" '
                git reset --hard HEAD^ &&
                find .git/objects -type f |
-               xargs test-chmtime -v -86400
+               xargs test-tool chmtime -v -86400
        '
 
        test_expect_success "start writing new commit with same tree ($title)" '
index 6e5031f56fb4f211e0ba43911d4495b1bb0b5b62..e96cbdb105824bf7edaf5106113b8c534765abff 100755 (executable)
@@ -21,8 +21,8 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-    grep "^R100..*path0/COPYING..*path1/COPYING"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+    grep "^R100..*path0/COPYING..*path1/COPYING" actual'
 
 test_expect_success \
     'moving the file back into subdirectory' \
@@ -35,8 +35,14 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-    grep "^R100..*path1/COPYING..*path0/COPYING"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+    grep "^R100..*path1/COPYING..*path0/COPYING" actual'
+
+test_expect_success \
+    'mv --dry-run does not move file' \
+    'git mv -n path0/COPYING MOVED &&
+     test -f path0/COPYING &&
+     test ! -f MOVED'
 
 test_expect_success \
     'checking -k on non-existing file' \
@@ -116,10 +122,9 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path0/COPYING..*path2/COPYING" &&
-     git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path0/README..*path2/README"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+     grep "^R100..*path0/COPYING..*path2/COPYING" actual &&
+     grep "^R100..*path0/README..*path2/README" actual'
 
 test_expect_success \
     'succeed when source is a prefix of destination' \
@@ -135,10 +140,9 @@ test_expect_success \
 
 test_expect_success \
     'checking the commit' \
-    'git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path2/COPYING..*path1/path2/COPYING" &&
-     git diff-tree -r -M --name-status  HEAD^ HEAD | \
-     grep "^R100..*path2/README..*path1/path2/README"'
+    'git diff-tree -r -M --name-status  HEAD^ HEAD >actual &&
+     grep "^R100..*path2/COPYING..*path1/path2/COPYING" actual &&
+     grep "^R100..*path2/README..*path1/path2/README" actual'
 
 test_expect_success \
     'do not move directory over existing directory' \
index 7cb60799be1a109e2210350137c8754a5bc4bb7d..ec4b160ddb9f966044e729f35cc1edfcc79eed14 100755 (executable)
@@ -187,7 +187,8 @@ test_expect_success 'author information is preserved' '
                        test \$GIT_COMMIT != $(git rev-parse master) || \
                        echo Hallo" \
                preserved-author) &&
-       test 1 = $(git rev-list --author="B V Uips" preserved-author | wc -l)
+       git rev-list --author="B V Uips" preserved-author >actual &&
+       test_line_count = 1 actual
 '
 
 test_expect_success "remove a certain author's commits" '
@@ -205,7 +206,8 @@ test_expect_success "remove a certain author's commits" '
        cnt1=$(git rev-list master | wc -l) &&
        cnt2=$(git rev-list removed-author | wc -l) &&
        test $cnt1 -eq $(($cnt2 + 1)) &&
-       test 0 = $(git rev-list --author="B V Uips" removed-author | wc -l)
+       git rev-list --author="B V Uips" removed-author >actual &&
+       test_line_count = 0 actual
 '
 
 test_expect_success 'barf on invalid name' '
@@ -258,7 +260,8 @@ test_expect_success 'Subdirectory filter with disappearing trees' '
        git commit -m "Re-adding foo" &&
 
        git filter-branch -f --subdirectory-filter foo &&
-       test $(git rev-list master | wc -l) = 3
+       git rev-list master >actual &&
+       test_line_count = 3 actual
 '
 
 test_expect_success 'Tag name filtering retains tag message' '
@@ -470,4 +473,18 @@ test_expect_success 'tree-filter deals with object name vs pathname ambiguity' '
        git show HEAD:$ambiguous
 '
 
+test_expect_success 'rewrite repository including refs that point at non-commit object' '
+       test_when_finished "git reset --hard original" &&
+       tree=$(git rev-parse HEAD^{tree}) &&
+       test_when_finished "git replace -d $tree" &&
+       echo A >new &&
+       git add new &&
+       new_tree=$(git write-tree) &&
+       git replace $tree $new_tree &&
+       git tag -a -m "tag to a tree" treetag $new_tree &&
+       git reset --hard HEAD &&
+       git filter-branch -f -- --all >filter-output 2>&1 &&
+       ! fgrep fatal filter-output
+'
+
 test_done
index a9af2de9960b345878ac0f85c33b1efd3e038d28..2aac77af701989dc16980268155d6e40500354bb 100755 (executable)
@@ -452,6 +452,21 @@ test_expect_success \
        test_cmp expect actual
 '
 
+get_tag_header annotated-tag-edit $commit commit $time >expect
+echo "An edited message" >>expect
+test_expect_success 'set up editor' '
+       write_script fakeeditor <<-\EOF
+       sed -e "s/A message/An edited message/g" <"$1" >"$1-"
+       mv "$1-" "$1"
+       EOF
+'
+test_expect_success \
+       'creating an annotated tag with -m message --edit should succeed' '
+       GIT_EDITOR=./fakeeditor git tag -m "A message" --edit annotated-tag-edit &&
+       get_tag_msg annotated-tag-edit >actual &&
+       test_cmp expect actual
+'
+
 cat >msgfile <<EOF
 Another message
 in a file.
@@ -465,6 +480,21 @@ test_expect_success \
        test_cmp expect actual
 '
 
+get_tag_header file-annotated-tag-edit $commit commit $time >expect
+sed -e "s/Another message/Another edited message/g" msgfile >>expect
+test_expect_success 'set up editor' '
+       write_script fakeeditor <<-\EOF
+       sed -e "s/Another message/Another edited message/g" <"$1" >"$1-"
+       mv "$1-" "$1"
+       EOF
+'
+test_expect_success \
+       'creating an annotated tag with -F messagefile --edit should succeed' '
+       GIT_EDITOR=./fakeeditor git tag -F msgfile --edit file-annotated-tag-edit &&
+       get_tag_msg file-annotated-tag-edit >actual &&
+       test_cmp expect actual
+'
+
 cat >inputmsg <<EOF
 A message from the
 standard input
index f5f46a95b4be9bca4ca30e70cbe75051a191c96a..7541ba5edbcae1f1555c367b7f8bfc795913ff60 100755 (executable)
@@ -110,13 +110,6 @@ test_expect_success TTY 'configuration can disable pager' '
        ! test -e paginated.out
 '
 
-test_expect_success TTY 'git config uses a pager if configured to' '
-       rm -f paginated.out &&
-       test_config pager.config true &&
-       test_terminal git config --list &&
-       test -e paginated.out
-'
-
 test_expect_success TTY 'configuration can enable pager (from subdir)' '
        rm -f paginated.out &&
        mkdir -p subdir &&
@@ -252,6 +245,48 @@ test_expect_success TTY 'git branch --set-upstream-to ignores pager.branch' '
        ! test -e paginated.out
 '
 
+test_expect_success TTY 'git config ignores pager.config when setting' '
+       rm -f paginated.out &&
+       test_terminal git -c pager.config config foo.bar bar &&
+       ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --edit ignores pager.config' '
+       rm -f paginated.out editor.used &&
+       write_script editor <<-\EOF &&
+               touch editor.used
+       EOF
+       EDITOR=./editor test_terminal git -c pager.config config --edit &&
+       ! test -e paginated.out &&
+       test -e editor.used
+'
+
+test_expect_success TTY 'git config --get ignores pager.config' '
+       rm -f paginated.out &&
+       test_terminal git -c pager.config config --get foo.bar &&
+       ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --get-urlmatch defaults to paging' '
+       rm -f paginated.out &&
+       test_terminal git -c http."https://foo.com/".bar=foo \
+                         config --get-urlmatch http https://foo.com &&
+       test -e paginated.out
+'
+
+test_expect_success TTY 'git config --get-all respects pager.config' '
+       rm -f paginated.out &&
+       test_terminal git -c pager.config=false config --get-all foo.bar &&
+       ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --list defaults to paging' '
+       rm -f paginated.out &&
+       test_terminal git config --list &&
+       test -e paginated.out
+'
+
+
 # A colored commit log will begin with an appropriate ANSI escape
 # for the first color; the text "commit" comes later.
 colorful() {
index e5fb892f9575fda4baf0b2a0e6b31cf13a0d6c0b..c61e304e97376b09de51299a797dfb388335a365 100755 (executable)
@@ -14,6 +14,9 @@ test_description='test untracked cache'
 # See <20160803174522.5571-1-pclouds@gmail.com> if you want to know
 # more.
 
+GIT_FORCE_UNTRACKED_CACHE=true
+export GIT_FORCE_UNTRACKED_CACHE
+
 sync_mtime () {
        find . -type d -ls >/dev/null
 }
@@ -22,6 +25,12 @@ avoid_racy() {
        sleep 1
 }
 
+status_is_clean() {
+       >../status.expect &&
+       git status --porcelain >../status.actual &&
+       test_cmp ../status.expect ../status.actual
+}
+
 test_lazy_prereq UNTRACKED_CACHE '
        { git update-index --test-untracked-cache; ret=$?; } &&
        test $ret -ne 1
@@ -683,4 +692,85 @@ test_expect_success 'untracked cache survives a commit' '
        test_cmp ../before ../after
 '
 
+test_expect_success 'teardown worktree' '
+       cd ..
+'
+
+test_expect_success SYMLINKS 'setup worktree for symlink test' '
+       git init worktree-symlink &&
+       cd worktree-symlink &&
+       git config core.untrackedCache true &&
+       mkdir one two &&
+       touch one/file two/file &&
+       git add one/file two/file &&
+       git commit -m"first commit" &&
+       git rm -rf one &&
+       ln -s two one &&
+       git add one &&
+       git commit -m"second commit"
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=true' '
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       status_is_clean
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=false' '
+       git config core.untrackedCache false &&
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       status_is_clean
+'
+
+test_expect_success 'setup worktree for non-symlink test' '
+       git init worktree-non-symlink &&
+       cd worktree-non-symlink &&
+       git config core.untrackedCache true &&
+       mkdir one two &&
+       touch one/file two/file &&
+       git add one/file two/file &&
+       git commit -m"first commit" &&
+       git rm -rf one &&
+       cp two/file one &&
+       git add one &&
+       git commit -m"second commit"
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=true' '
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       test-dump-untracked-cache >../actual &&
+       grep -F "recurse valid" ../actual >../actual.grep &&
+       cat >../expect.grep <<EOF &&
+/ 0000000000000000000000000000000000000000 recurse valid
+/two/ 0000000000000000000000000000000000000000 recurse valid
+EOF
+       status_is_clean &&
+       test_cmp ../expect.grep ../actual.grep
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=false' '
+       git config core.untrackedCache false &&
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       status_is_clean
+'
+
 test_done
index e319fa2e8470791340a479989a81d934b79405da..8f795327a00f6c1b751b82e7e395c8387543d388 100755 (executable)
@@ -390,6 +390,68 @@ test_expect_success 'verify upstream fields in branch header' '
        )
 '
 
+test_expect_success 'verify --[no-]ahead-behind with V2 format' '
+       git checkout master &&
+       test_when_finished "rm -rf sub_repo" &&
+       git clone . sub_repo &&
+       (
+               ## Confirm local master tracks remote master.
+               cd sub_repo &&
+               HUF=$(git rev-parse HEAD) &&
+
+               # Confirm --no-ahead-behind reports traditional branch.ab with 0/0 for equal branches.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +0 -0
+               EOF
+
+               git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual &&
+
+               # Confirm --ahead-behind reports traditional branch.ab with 0/0.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +0 -0
+               EOF
+
+               git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual &&
+
+               ## Test non-equal ahead/behind.
+               echo xyz >file_xyz &&
+               git add file_xyz &&
+               git commit -m xyz &&
+
+               HUF=$(git rev-parse HEAD) &&
+
+               # Confirm --no-ahead-behind reports branch.ab with ?/? for non-equal branches.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +? -?
+               EOF
+
+               git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual &&
+
+               # Confirm --ahead-behind reports traditional branch.ab with 1/0.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +1 -0
+               EOF
+
+               git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual
+       )
+'
+
 test_expect_success 'create and add submodule, submodule appears clean (A. S...)' '
        git checkout master &&
        git clone . sub_repo &&
index a39e69a3ebd1c39ddf8feb861f8a39c8315aa7f8..152104412f212190c18279b64e80bb9f58242d47 100755 (executable)
@@ -821,6 +821,21 @@ test_expect_success 'moving the superproject does not break submodules' '
        )
 '
 
+test_expect_success 'moving the submodule does not break the superproject' '
+       (
+               cd addtest2 &&
+               git submodule status
+       ) >actual &&
+       sed -e "s/^ \([^ ]* repo\) .*/-\1/" <actual >expect &&
+       mv addtest2/repo addtest2/repo.bak &&
+       test_when_finished "mv addtest2/repo.bak addtest2/repo" &&
+       (
+               cd addtest2 &&
+               git submodule status
+       ) >actual &&
+       test_cmp expect actual
+'
+
 test_expect_success 'submodule add --name allows to replace a submodule with another at the same path' '
        (
                cd addtest2 &&
index c20717181e95fc74738bdf53aa3e0b0797d77d6a..fc018e3638a8bebebf0f1c689567e9867bfbbee8 100755 (executable)
@@ -6,7 +6,7 @@
 test_description='Test submodules on detached working tree
 
 This test verifies that "git submodule" initialization, update and addition works
-on detahced working trees
+on detached working trees
 '
 
 TEST_NO_CREATE_REPO=1
index 46c09c77654597b2ee501271b4688cc6137df8f1..0bde5850ac547c90dadd9e21341ebad80a1471e6 100755 (executable)
@@ -41,7 +41,7 @@ test_expect_success 'configuration parsing with error' '
        EOF
        (
                cd repo &&
-               test_must_fail test-submodule-config "" s 2>actual &&
+               test_must_fail test-tool submodule-config "" s 2>actual &&
                test_i18ngrep "bad config" actual
        )
 '
@@ -55,7 +55,7 @@ EOF
 
 test_expect_success 'test parsing and lookup of submodule config by path' '
        (cd super &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD^ a \
                        HEAD b \
                        HEAD^ submodule \
@@ -67,7 +67,7 @@ test_expect_success 'test parsing and lookup of submodule config by path' '
 
 test_expect_success 'test parsing and lookup of submodule config by name' '
        (cd super &&
-               test-submodule-config --name \
+               test-tool submodule-config --name \
                        HEAD^ a \
                        HEAD a \
                        HEAD^ submodule \
@@ -89,7 +89,7 @@ test_expect_success 'error in one submodule config lets continue' '
                git add .gitmodules &&
                mv .gitmodules.bak .gitmodules &&
                git commit -m "add error" &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD b \
                        HEAD submodule \
                                >actual &&
@@ -100,7 +100,7 @@ test_expect_success 'error in one submodule config lets continue' '
 test_expect_success 'error message contains blob reference' '
        (cd super &&
                sha1=$(git rev-parse HEAD) &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD b \
                        HEAD submodule \
                                2>actual_err &&
@@ -114,9 +114,9 @@ test_expect_success 'using different treeishs works' '
                git tag new_tag &&
                tree=$(git rev-parse HEAD^{tree}) &&
                commit=$(git rev-parse HEAD^{commit}) &&
-               test-submodule-config $commit b >expect &&
-               test-submodule-config $tree b >actual.1 &&
-               test-submodule-config new_tag b >actual.2 &&
+               test-tool submodule-config $commit b >expect &&
+               test-tool submodule-config $tree b >actual.1 &&
+               test-tool submodule-config new_tag b >actual.2 &&
                test_cmp expect actual.1 &&
                test_cmp expect actual.2
        )
@@ -130,7 +130,7 @@ test_expect_success 'error in history in fetchrecursesubmodule lets continue' '
                git config --unset -f .gitmodules \
                        submodule.submodule.fetchrecursesubmodules &&
                git commit -m "add error in fetchrecursesubmodules" &&
-               test-submodule-config \
+               test-tool submodule-config \
                        HEAD b \
                        HEAD submodule \
                                >actual &&
index 50052e28727dab74037a115003b6083256d7f344..7afadb175a64c629214b7d6961ac345edb14ed84 100755 (executable)
@@ -1672,12 +1672,12 @@ test_expect_success '"Initial commit" should not be noted in commit template' '
 '
 
 test_expect_success '--no-optional-locks prevents index update' '
-       test-chmtime =1234567890 .git/index &&
+       test-tool chmtime =1234567890 .git/index &&
        git --no-optional-locks status &&
-       test-chmtime -v +0 .git/index >out &&
+       test-tool chmtime -v +0 .git/index >out &&
        grep ^1234567890 out &&
        git status &&
-       test-chmtime -v +0 .git/index >out &&
+       test-tool chmtime -v +0 .git/index >out &&
        ! grep ^1234567890 out
 '
 
index eb2d13bbcf8abefd7af2be6f9cb3bf97e389ab15..756beb0d8eb466d78b235af363b6a36dde37c79e 100755 (executable)
@@ -314,4 +314,43 @@ test_expect_success 'splitting the index results in the same state' '
        test_cmp expect actual
 '
 
+test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' '
+       test_create_repo dot-git &&
+       (
+               cd dot-git &&
+               mkdir -p .git/hooks &&
+               : >tracked &&
+               : >modified &&
+               mkdir dir1 &&
+               : >dir1/tracked &&
+               : >dir1/modified &&
+               mkdir dir2 &&
+               : >dir2/tracked &&
+               : >dir2/modified &&
+               write_integration_script &&
+               git config core.fsmonitor .git/hooks/fsmonitor-test &&
+               git update-index --untracked-cache &&
+               git update-index --fsmonitor &&
+               GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \
+               git status &&
+               test-dump-untracked-cache >../before
+       ) &&
+       cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF &&
+       printf ".git\0"
+       printf ".git/index\0"
+       printf "dir1/.git\0"
+       printf "dir1/.git/index\0"
+       EOF
+       (
+               cd dot-git &&
+               GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \
+               git status &&
+               test-dump-untracked-cache >../after
+       ) &&
+       grep "directory invalidation" trace-before >>before &&
+       grep "directory invalidation" trace-after >>after &&
+       # UNTR extension unchanged, dir invalidation count unchanged
+       test_cmp before after
+'
+
 test_done
index dfde6a675a8cd28297324627923a861bc7410ff2..6736d8d13139c946b07165d5bbad6c8d14617cbd 100755 (executable)
@@ -700,6 +700,42 @@ test_expect_success 'merge --no-ff --edit' '
        test_cmp expected actual
 '
 
+test_expect_success 'merge annotated/signed tag w/o tracking' '
+       test_when_finished "rm -rf dst; git tag -d anno1" &&
+       git tag -a -m "anno c1" anno1 c1 &&
+       git init dst &&
+       git rev-parse c1 >dst/expect &&
+       (
+               # c0 fast-forwards to c1 but because this repository
+               # is not a "downstream" whose refs/tags follows along
+               # tag from the "upstream", this pull defaults to --no-ff
+               cd dst &&
+               git pull .. c0 &&
+               git pull .. anno1 &&
+               git rev-parse HEAD^2 >actual &&
+               test_cmp expect actual
+       )
+'
+
+test_expect_success 'merge annotated/signed tag w/ tracking' '
+       test_when_finished "rm -rf dst; git tag -d anno1" &&
+       git tag -a -m "anno c1" anno1 c1 &&
+       git init dst &&
+       git rev-parse c1 >dst/expect &&
+       (
+               # c0 fast-forwards to c1 and because this repository
+               # is a "downstream" whose refs/tags follows along
+               # tag from the "upstream", this pull defaults to --ff
+               cd dst &&
+               git remote add origin .. &&
+               git pull origin c0 &&
+               git fetch origin &&
+               git merge anno1 &&
+               git rev-parse HEAD >actual &&
+               test_cmp expect actual
+       )
+'
+
 test_expect_success GPG 'merge --ff-only tag' '
        git reset --hard c0 &&
        git commit --allow-empty -m "A newer commit" &&
@@ -718,7 +754,7 @@ test_expect_success GPG 'merge --no-edit tag should skip editor' '
        git tag -f -s -m "A newer commit" signed &&
        git reset --hard c0 &&
 
-       EDITOR=false git merge --no-edit signed &&
+       EDITOR=false git merge --no-edit --no-ff signed &&
        git rev-parse signed^0 >expect &&
        git rev-parse HEAD^2 >actual &&
        test_cmp expect actual
index 9444d6a9b9026748028178c4b66ee8bbbc1d89fa..dd8ab7ede182fc3c3da840fee083bf23a94ce13c 100755 (executable)
@@ -92,12 +92,15 @@ test_expect_success 'will not overwrite removed file with staged changes' '
        test_cmp important c1.c
 '
 
-test_expect_failure 'will not overwrite unstaged changes in renamed file' '
+test_expect_success 'will not overwrite unstaged changes in renamed file' '
        git reset --hard c1 &&
        git mv c1.c other.c &&
        git commit -m rename &&
        cp important other.c &&
-       git merge c1a &&
+       test_must_fail git merge c1a >out &&
+       test_i18ngrep "Refusing to lose dirty file at other.c" out &&
+       test_path_is_file other.c~HEAD &&
+       test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) &&
        test_cmp important other.c
 '
 
index 987573c41fcd4aee3d5f8fd8aa587c97551c1872..8a586ab0210bf6a7fcff7729d46bb882d50fe330 100755 (executable)
@@ -90,7 +90,7 @@ test_expect_success 'unpacked objects receive timestamp of pack file' '
        tmppack=".git/objects/pack/tmp_pack" &&
        ln "$packfile" "$tmppack" &&
        git repack -A -l -d &&
-       test-chmtime -v +0 "$tmppack" "$fsha1path" "$csha1path" "$tsha1path" \
+       test-tool chmtime -v +0 "$tmppack" "$fsha1path" "$csha1path" "$tsha1path" \
                > mtimes &&
        compare_mtimes < mtimes
 '
@@ -103,7 +103,7 @@ test_expect_success 'do not bother loosening old objects' '
        git prune-packed &&
        git cat-file -p $obj1 &&
        git cat-file -p $obj2 &&
-       test-chmtime =-86400 .git/objects/pack/pack-$pack2.pack &&
+       test-tool chmtime =-86400 .git/objects/pack/pack-$pack2.pack &&
        git repack -A -d --unpack-unreachable=1.hour.ago &&
        git cat-file -p $obj1 &&
        test_must_fail git cat-file -p $obj2
@@ -117,7 +117,7 @@ test_expect_success 'keep packed objects found only in index' '
        git reset HEAD^ &&
        git reflog expire --expire=now --all &&
        git add file &&
-       test-chmtime =-86400 .git/objects/pack/* &&
+       test-tool chmtime =-86400 .git/objects/pack/* &&
        git gc --prune=1.hour.ago &&
        git cat-file blob :file
 '
index 0059a1f837882c504e717135c9c92bc5b1d7f62c..0c685d35986eebf4dd72861b650c82fb6b515b51 100755 (executable)
@@ -12,7 +12,7 @@ test_expect_success GETTEXT_LOCALE 'setup' '
 '
 
 test_have_prereq GETTEXT_LOCALE &&
-test-regex "HALLÓ" "Halló" ICASE &&
+test-tool regex "HALLÓ" "Halló" ICASE &&
 test_set_prereq REGEX_LOCALE
 
 test_expect_success REGEX_LOCALE 'grep literal string, no -F' '
index 19601fb546d5a329e4889b352d817fe6f7fb7fb3..e80eacbb1b81a319232c0b5f28aab3c82d795a93 100755 (executable)
@@ -224,6 +224,7 @@ Message-Id: MESSAGE-ID-STRING
 X-Mailer: X-MAILER-STRING
 In-Reply-To: <unique-message-id@example.com>
 References: <unique-message-id@example.com>
+Reply-To: Reply <reply@example.com>
 
 Result: OK
 EOF
@@ -316,6 +317,7 @@ test_expect_success $PREREQ 'Show all headers' '
                --dry-run \
                --suppress-cc=sob \
                --from="Example <from@example.com>" \
+               --reply-to="Reply <reply@example.com>" \
                --to=to@example.com \
                --cc=cc@example.com \
                --bcc=bcc@example.com \
index b28a028f5503508a4a09c6a07950027d253a1846..7e8894a4a70648fd12d3ab4425f1beac2c3e4641 100755 (executable)
@@ -4,7 +4,7 @@ test_description='check that example code compiles and runs'
 . ./test-lib.sh
 
 test_expect_success 'decorate' '
-       test-example-decorate
+       test-tool example-decorate
 '
 
 test_done
index 8a8ba65a2ae583aa5d0b0526e604a8f9613b5a5e..c937330a5f3a7f6b5c4d8e406564bd88dd0c889b 100755 (executable)
@@ -288,12 +288,12 @@ test_expect_success 'able to dcommit to a subdirectory' '
 
 test_expect_success 'dcommit should not fail with a touched file' '
        test_commit "commit-new-file-foo2" foo2 &&
-       test-chmtime =-60 foo &&
+       test-tool chmtime =-60 foo &&
        git svn dcommit
 '
 
 test_expect_success 'rebase should not fail with a touched file' '
-       test-chmtime =-60 foo &&
+       test-tool chmtime =-60 foo &&
        git svn rebase
 '
 
index cd480edf1606fda8d973ee3decb8e18e03356153..a735fa37170fca27b48fba218a408cb2ea6bac8a 100755 (executable)
@@ -33,8 +33,8 @@ test_expect_success 'init and fetch a moved directory' '
        git svn fetch -i thunk &&
        test "$(git rev-parse --verify refs/remotes/thunk@2)" \
           = "$(git rev-parse --verify refs/remotes/thunk~1)" &&
-       test "$(git cat-file blob refs/remotes/thunk:readme |\
-                sed -n -e "3p")" = goodbye &&
+       git cat-file blob refs/remotes/thunk:readme >actual &&
+       test "$(sed -n -e "3p" actual)" = goodbye &&
        test -z "$(git config --get svn-remote.svn.fetch \
                 "^trunk:refs/remotes/thunk@2$")"
        '
@@ -48,8 +48,8 @@ test_expect_success 'init and fetch from one svn-remote' '
         git svn fetch -i svn/thunk &&
        test "$(git rev-parse --verify refs/remotes/svn/trunk)" \
           = "$(git rev-parse --verify refs/remotes/svn/thunk~1)" &&
-       test "$(git cat-file blob refs/remotes/svn/thunk:readme |\
-                sed -n -e "3p")" = goodbye
+       git cat-file blob refs/remotes/svn/thunk:readme >actual &&
+       test "$(sed -n -e "3p" actual)" = goodbye
         '
 
 test_expect_success 'follow deleted parent' '
@@ -107,7 +107,8 @@ test_expect_success 'follow deleted directory' '
        git svn init --minimize-url -i glob "$svnrepo"/glob &&
        git svn fetch -i glob &&
        test "$(git cat-file blob refs/remotes/glob:blob/bye)" = hi &&
-       test "$(git ls-tree refs/remotes/glob | wc -l )" -eq 1
+       git ls-tree refs/remotes/glob >actual &&
+       test_line_count = 1 actual
        '
 
 # ref: r9270 of the Subversion repository: (http://svn.collab.net/repos/svn)
@@ -204,8 +205,9 @@ test_expect_success "follow-parent is atomic" '
 test_expect_success "track multi-parent paths" '
        svn_cmd cp -m "resurrect /glob" "$svnrepo"/r9270 "$svnrepo"/glob &&
        git svn multi-fetch &&
-       test $(git cat-file commit refs/remotes/glob | \
-              grep "^parent " | wc -l) -eq 2
+       git cat-file commit refs/remotes/glob >actual &&
+       grep "^parent " actual >actual2 &&
+       test_line_count = 2 actual2
        '
 
 test_expect_success "multi-fetch continues to work" "
index a94286c8ec89823805989f4363072417e9c20165..6990f64364200c75f46d9f6ee3d67ebea3549d5b 100755 (executable)
@@ -47,8 +47,8 @@ test_expect_success 'test refspec globbing' '
        git config --add svn-remote.svn.tags\
                         "tags/*/src/a:refs/remotes/tags/*" &&
        git svn multi-fetch &&
-       git log --pretty=oneline refs/remotes/tags/end | \
-           sed -e "s/^.\{41\}//" > output.end &&
+       git log --pretty=oneline refs/remotes/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.end &&
        test_cmp expect.end output.end &&
        test "$(git rev-parse refs/remotes/tags/end~1)" = \
                "$(git rev-parse refs/remotes/branches/start)" &&
@@ -75,14 +75,16 @@ test_expect_success 'test left-hand-side only globbing' '
                svn_cmd commit -m "try to try"
        ) &&
        git svn fetch two &&
-       test $(git rev-list refs/remotes/two/tags/end | wc -l) -eq 6 &&
-       test $(git rev-list refs/remotes/two/branches/start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/two/tags/end >actual &&
+       test_line_count = 6 actual &&
+       git rev-list refs/remotes/two/branches/start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/two/branches/start~2) = \
             $(git rev-parse refs/remotes/two/trunk) &&
        test $(git rev-parse refs/remotes/two/tags/end~3) = \
             $(git rev-parse refs/remotes/two/branches/start) &&
-       git log --pretty=oneline refs/remotes/two/tags/end | \
-           sed -e "s/^.\{41\}//" > output.two &&
+       git log --pretty=oneline refs/remotes/two/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.two &&
        test_cmp expect.two output.two
        '
 
index 8d99e848d47634ea340885fde48bc798a2c0b7a2..c1e7542a371330ecd6b27899cf3f6d4d07d78a29 100755 (executable)
@@ -47,8 +47,8 @@ test_expect_success 'test refspec globbing' '
        git config --add svn-remote.svn.tags\
                         "tags/*/src/a:refs/remotes/tags/*" &&
        git svn multi-fetch &&
-       git log --pretty=oneline refs/remotes/tags/end | \
-           sed -e "s/^.\{41\}//" > output.end &&
+       git log --pretty=oneline refs/remotes/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.end &&
        test_cmp expect.end output.end &&
        test "$(git rev-parse refs/remotes/tags/end~1)" = \
                "$(git rev-parse refs/remotes/branches/v1/start)" &&
@@ -75,14 +75,16 @@ test_expect_success 'test left-hand-side only globbing' '
                svn_cmd commit -m "try to try"
        ) &&
        git svn fetch two &&
-       test $(git rev-list refs/remotes/two/tags/end | wc -l) -eq 6 &&
-       test $(git rev-list refs/remotes/two/branches/v1/start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/two/tags/end >actual &&
+       test_line_count = 6 actual &&
+       git rev-list refs/remotes/two/branches/v1/start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/two/branches/v1/start~2) = \
             $(git rev-parse refs/remotes/two/trunk) &&
        test $(git rev-parse refs/remotes/two/tags/end~3) = \
             $(git rev-parse refs/remotes/two/branches/v1/start) &&
-       git log --pretty=oneline refs/remotes/two/tags/end | \
-           sed -e "s/^.\{41\}//" > output.two &&
+       git log --pretty=oneline refs/remotes/two/tags/end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.two &&
        test_cmp expect.two output.two
        '
 cat > expect.four <<EOF
@@ -124,14 +126,16 @@ test_expect_success 'test another branch' '
        git config --add svn-remote.four.tags \
                         "tags/*:refs/remotes/four/tags/*" &&
        git svn fetch four &&
-       test $(git rev-list refs/remotes/four/tags/next | wc -l) -eq 5 &&
-       test $(git rev-list refs/remotes/four/branches/v2/start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/four/tags/next >actual &&
+       test_line_count = 5 actual &&
+       git rev-list refs/remotes/four/branches/v2/start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/four/branches/v2/start~2) = \
             $(git rev-parse refs/remotes/four/trunk) &&
        test $(git rev-parse refs/remotes/four/tags/next~2) = \
             $(git rev-parse refs/remotes/four/branches/v2/start) &&
-       git log --pretty=oneline refs/remotes/four/tags/next | \
-           sed -e "s/^.\{41\}//" > output.four &&
+       git log --pretty=oneline refs/remotes/four/tags/next >actual &&
+       sed -e "s/^.\{41\}//" actual >output.four &&
        test_cmp expect.four output.four
        '
 
index dde0a3c2229abab27d1592e740db35ebfed544aa..ad37d980c91dd303ba975bc501748e05fd82efe5 100755 (executable)
@@ -21,37 +21,37 @@ uuid=161ce429-a9dd-4828-af4a-52023f968c89
 
 bar_url=http://mayonaise/svnrepo/bar
 test_expect_success 'verify metadata for /bar' "
-       git cat-file commit refs/remotes/bar | \
-          grep '^git-svn-id: $bar_url@12 $uuid$' &&
-       git cat-file commit refs/remotes/bar~1 | \
-          grep '^git-svn-id: $bar_url@11 $uuid$' &&
-       git cat-file commit refs/remotes/bar~2 | \
-          grep '^git-svn-id: $bar_url@10 $uuid$' &&
-       git cat-file commit refs/remotes/bar~3 | \
-          grep '^git-svn-id: $bar_url@9 $uuid$' &&
-       git cat-file commit refs/remotes/bar~4 | \
-          grep '^git-svn-id: $bar_url@6 $uuid$' &&
-       git cat-file commit refs/remotes/bar~5 | \
-          grep '^git-svn-id: $bar_url@1 $uuid$'
+       git cat-file commit refs/remotes/bar >actual &&
+       grep '^git-svn-id: $bar_url@12 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~1 >actual &&
+       grep '^git-svn-id: $bar_url@11 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~2 >actual &&
+       grep '^git-svn-id: $bar_url@10 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~3 >actual &&
+       grep '^git-svn-id: $bar_url@9 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~4 >actual &&
+       grep '^git-svn-id: $bar_url@6 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~5 >actual &&
+       grep '^git-svn-id: $bar_url@1 $uuid$' actual
        "
 
 e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
 test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
-       git cat-file commit refs/remotes/e | \
-          grep '^git-svn-id: $e_url@1 $uuid$'
+       git cat-file commit refs/remotes/e >actual &&
+       grep '^git-svn-id: $e_url@1 $uuid$' actual
        "
 
 dir_url=http://mayonaise/svnrepo/dir
 test_expect_success 'verify metadata for /dir' "
-       git cat-file commit refs/remotes/dir | \
-          grep '^git-svn-id: $dir_url@2 $uuid$' &&
-       git cat-file commit refs/remotes/dir~1 | \
-          grep '^git-svn-id: $dir_url@1 $uuid$'
+       git cat-file commit refs/remotes/dir >actual &&
+       grep '^git-svn-id: $dir_url@2 $uuid$' actual &&
+       git cat-file commit refs/remotes/dir~1 >actual &&
+       grep '^git-svn-id: $dir_url@1 $uuid$' actual
        "
 
 test_expect_success 'find commit based on SVN revision number' "
-        git svn find-rev r12 |
-           grep $(git rev-parse HEAD)
+       git svn find-rev r12 >actual &&
+       grep $(git rev-parse HEAD) actual
         "
 
 test_expect_success 'empty rebase' "
index 22b6e5ee7d8c274b7fe60c9f10de45eaf4d3c385..6c9307355137fe86ed16552113f0d60b007e3eca 100755 (executable)
@@ -20,32 +20,32 @@ uuid=161ce429-a9dd-4828-af4a-52023f968c89
 
 bar_url=http://mayonaise/svnrepo/bar
 test_expect_success 'verify metadata for /bar' "
-       git cat-file commit refs/remotes/bar | \
-          grep '^git-svn-id: $bar_url@12 $uuid$' &&
-       git cat-file commit refs/remotes/bar~1 | \
-          grep '^git-svn-id: $bar_url@11 $uuid$' &&
-       git cat-file commit refs/remotes/bar~2 | \
-          grep '^git-svn-id: $bar_url@10 $uuid$' &&
-       git cat-file commit refs/remotes/bar~3 | \
-          grep '^git-svn-id: $bar_url@9 $uuid$' &&
-       git cat-file commit refs/remotes/bar~4 | \
-          grep '^git-svn-id: $bar_url@6 $uuid$' &&
-       git cat-file commit refs/remotes/bar~5 | \
-          grep '^git-svn-id: $bar_url@1 $uuid$'
+       git cat-file commit refs/remotes/bar >actual &&
+       grep '^git-svn-id: $bar_url@12 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~1 >actual &&
+       grep '^git-svn-id: $bar_url@11 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~2 >actual &&
+       grep '^git-svn-id: $bar_url@10 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~3 >actual &&
+       grep '^git-svn-id: $bar_url@9 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~4 >actual &&
+       grep '^git-svn-id: $bar_url@6 $uuid$' actual &&
+       git cat-file commit refs/remotes/bar~5 >actual &&
+       grep '^git-svn-id: $bar_url@1 $uuid$' actual
        "
 
 e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
 test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
-       git cat-file commit refs/remotes/e | \
-          grep '^git-svn-id: $e_url@1 $uuid$'
+       git cat-file commit refs/remotes/e >actual &&
+       grep '^git-svn-id: $e_url@1 $uuid$' actual
        "
 
 dir_url=http://mayonaise/svnrepo/dir
 test_expect_success 'verify metadata for /dir' "
-       git cat-file commit refs/remotes/dir | \
-          grep '^git-svn-id: $dir_url@2 $uuid$' &&
-       git cat-file commit refs/remotes/dir~1 | \
-          grep '^git-svn-id: $dir_url@1 $uuid$'
+       git cat-file commit refs/remotes/dir >actual &&
+       grep '^git-svn-id: $dir_url@2 $uuid$' actual &&
+       git cat-file commit refs/remotes/dir~1 >actual &&
+       grep '^git-svn-id: $dir_url@1 $uuid$' actual
        "
 
 test_done
index 50bca62def6b0632cd89d1c0cfb2d98b76e6aa1d..32317d6bca5f45314a46082ecd9aa68061853904 100755 (executable)
@@ -68,7 +68,8 @@ test_debug 'gitk --all & sleep 1'
 test_expect_success 'verify pre-merge ancestry' "
        test x\$(git rev-parse --verify refs/heads/svn^2) = \
             x\$(git rev-parse --verify refs/heads/merge) &&
-       git cat-file commit refs/heads/svn^ | grep '^friend$'
+       git cat-file commit refs/heads/svn^ >actual &&
+       grep '^friend$' actual
        "
 
 test_expect_success 'git svn dcommit merges' "
@@ -82,12 +83,13 @@ test_expect_success 'verify post-merge ancestry' "
             x\$(git rev-parse --verify refs/remotes/origin/trunk) &&
        test x\$(git rev-parse --verify refs/heads/svn^2) = \
             x\$(git rev-parse --verify refs/heads/merge) &&
-       git cat-file commit refs/heads/svn^ | grep '^friend$'
+       git cat-file commit refs/heads/svn^ >actual &&
+       grep '^friend$' actual
        "
 
 test_expect_success 'verify merge commit message' "
-       git rev-list --pretty=raw -1 refs/heads/svn | \
-         grep \"    Merge branch 'merge' into svn\"
+       git rev-list --pretty=raw -1 refs/heads/svn >actual &&
+       grep \"    Merge branch 'merge' into svn\" actual
        "
 
 test_done
index 41264818ccdd85abb4b0a17c8a508d4bcbfe57f5..7752a1faeb0cc9e96b318af37d24b79a545a9b3c 100755 (executable)
@@ -26,11 +26,12 @@ test_expect_success 'start import with incomplete authors file' '
 test_expect_success 'imported 2 revisions successfully' '
        (
                cd x
-               test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 2 &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
-                 grep "^author BBBBBBB BBBBBBB <bb@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
-                 grep "^author AAAAAAA AAAAAAA <aa@example\.com> "
+               git rev-list refs/remotes/git-svn >actual &&
+               test_line_count = 2 actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author BBBBBBB BBBBBBB <bb@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
+               grep "^author AAAAAAA AAAAAAA <aa@example\.com> " actual
        )
        '
 
@@ -43,11 +44,12 @@ test_expect_success 'continues to import once authors have been added' '
        (
                cd x
                git svn fetch --authors-file=../svn-authors &&
-               test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 4 &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
-                 grep "^author DDDDDDD DDDDDDD <dd@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
-                 grep "^author CCCCCCC CCCCCCC <cc@example\.com> "
+               git rev-list refs/remotes/git-svn >actual &&
+               test_line_count = 4 actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author DDDDDDD DDDDDDD <dd@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
+               grep "^author CCCCCCC CCCCCCC <cc@example\.com> " actual
        )
        '
 
@@ -102,8 +104,10 @@ test_expect_success !MINGW 'fresh clone with svn.authors-file in config' '
                test x"$HOME"/svn-authors = x"$(git config svn.authorsfile)" &&
                git svn clone "$svnrepo" gitconfig.clone &&
                cd gitconfig.clone &&
-               nr_ex=$(git log | grep "^Author:.*example.com" | wc -l) &&
-               nr_rev=$(git rev-list HEAD | wc -l) &&
+               git log >actual &&
+               nr_ex=$(grep "^Author:.*example.com" actual | wc -l) &&
+               git rev-list HEAD >actual &&
+               nr_rev=$(wc -l <actual) &&
                test $nr_rev -eq $nr_ex
        )
 '
index 7d7e9d46bc6bf40f52efffede6b369f052c46843..48109f949f27082ddb1acf8255d70c72d3eea521 100755 (executable)
@@ -37,31 +37,32 @@ test_expect_success 'import authors with prog and file' '
 test_expect_success 'imported 6 revisions successfully' '
        (
                cd x
-               test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 6
+               git rev-list refs/remotes/git-svn >actual &&
+               test_line_count = 6 actual
        )
 '
 
 test_expect_success 'authors-prog ran correctly' '
        (
                cd x
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
-                 grep "^author ee-foo <ee-foo@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~2 | \
-                 grep "^author dd <dd@sub\.example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~3 | \
-                 grep "^author cc <cc@sub\.example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~4 | \
-                 grep "^author bb <bb@example\.com> " &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn~5 | \
-                 grep "^author aa <aa@example\.com> "
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
+               grep "^author ee-foo <ee-foo@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~2 >actual &&
+               grep "^author dd <dd@sub\.example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~3 >actual &&
+               grep "^author cc <cc@sub\.example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~4 >actual &&
+               grep "^author bb <bb@example\.com> " actual &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn~5 >actual &&
+               grep "^author aa <aa@example\.com> " actual
        )
 '
 
 test_expect_success 'authors-file overrode authors-prog' '
        (
                cd x
-               git rev-list -1 --pretty=raw refs/remotes/git-svn | \
-                 grep "^author FFFFFFF FFFFFFF <fFf@other\.example\.com> "
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author FFFFFFF FFFFFFF <fFf@other\.example\.com> " actual
        )
 '
 
@@ -73,8 +74,8 @@ test_expect_success 'authors-prog handled special characters in username' '
        (
                cd x &&
                git svn --authors-prog=../svn-authors-prog fetch &&
-               git rev-list -1 --pretty=raw refs/remotes/git-svn |
-               grep "^author xyz; touch evil <xyz; touch evil@example\.com> " &&
+               git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
+               grep "^author xyz; touch evil <xyz; touch evil@example\.com> " actual &&
                ! test -f evil
        )
 '
index 372ef156850098928ac1ed402ab2228460af3c00..8cb2b5c69cfc89c4613fc3f7e4f2153a251433c7 100755 (executable)
@@ -16,10 +16,10 @@ test_expect_success 'load svn repo' "
        "
 
 test_expect_success 'verify uuid' "
-       git cat-file commit refs/remotes/git-svn~0 | \
-          grep '^git-svn-id: .*@2 $uuid$' &&
-       git cat-file commit refs/remotes/git-svn~1 | \
-          grep '^git-svn-id: .*@1 $uuid$'
+       git cat-file commit refs/remotes/git-svn~0 >actual &&
+       grep '^git-svn-id: .*@2 $uuid$' actual &&
+       git cat-file commit refs/remotes/git-svn~1 >actual &&
+       grep '^git-svn-id: .*@1 $uuid$' actual
        "
 
 test_done
index 8b22f2272cca47cd9187accdaed3ce1f12b6c9e1..bdf6e849993bff79a5e04c5b8a1d7df77118091f 100755 (executable)
@@ -48,8 +48,8 @@ test_expect_success 'test refspec prefixed globbing' '
        git config --add svn-remote.svn.tags\
                         "tags/t_*/src/a:refs/remotes/tags/t_*" &&
        git svn multi-fetch &&
-       git log --pretty=oneline refs/remotes/tags/t_end | \
-           sed -e "s/^.\{41\}//" >output.end &&
+       git log --pretty=oneline refs/remotes/tags/t_end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.end &&
        test_cmp expect.end output.end &&
        test "$(git rev-parse refs/remotes/tags/t_end~1)" = \
                "$(git rev-parse refs/remotes/branches/b_start)" &&
@@ -78,14 +78,16 @@ test_expect_success 'test left-hand-side only prefixed globbing' '
                svn_cmd commit -m "try to try"
        ) &&
        git svn fetch two &&
-       test $(git rev-list refs/remotes/two/tags/t_end | wc -l) -eq 6 &&
-       test $(git rev-list refs/remotes/two/branches/b_start | wc -l) -eq 3 &&
+       git rev-list refs/remotes/two/tags/t_end >actual &&
+       test_line_count = 6 actual &&
+       git rev-list refs/remotes/two/branches/b_start >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/two/branches/b_start~2) = \
             $(git rev-parse refs/remotes/two/trunk) &&
        test $(git rev-parse refs/remotes/two/tags/t_end~3) = \
             $(git rev-parse refs/remotes/two/branches/b_start) &&
-       git log --pretty=oneline refs/remotes/two/tags/t_end | \
-           sed -e "s/^.\{41\}//" >output.two &&
+       git log --pretty=oneline refs/remotes/two/tags/t_end >actual &&
+       sed -e "s/^.\{41\}//" actual >output.two &&
        test_cmp expect.two output.two
        '
 
@@ -118,14 +120,16 @@ test_expect_success 'test prefixed globs match just prefix' '
                svn_cmd up
        ) &&
        git svn fetch three &&
-       test $(git rev-list refs/remotes/three/branches/b_ | wc -l) -eq 2 &&
-       test $(git rev-list refs/remotes/three/tags/t_ | wc -l) -eq 3 &&
+       git rev-list refs/remotes/three/branches/b_ >actual &&
+       test_line_count = 2 actual &&
+       git rev-list refs/remotes/three/tags/t_ >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/three/branches/b_~1) = \
             $(git rev-parse refs/remotes/three/trunk) &&
        test $(git rev-parse refs/remotes/three/tags/t_~1) = \
             $(git rev-parse refs/remotes/three/branches/b_) &&
-       git log --pretty=oneline refs/remotes/three/tags/t_ | \
-           sed -e "s/^.\{41\}//" >output.three &&
+       git log --pretty=oneline refs/remotes/three/tags/t_ >actual &&
+       sed -e "s/^.\{41\}//" actual >output.three &&
        test_cmp expect.three output.three
        '
 
@@ -186,14 +190,16 @@ test_expect_success 'test globbing in the middle of the word' '
                svn_cmd up
        ) &&
        git svn fetch five &&
-       test $(git rev-list refs/remotes/five/branches/abcde | wc -l) -eq 2 &&
-       test $(git rev-list refs/remotes/five/tags/fghij | wc -l) -eq 3 &&
+       git rev-list refs/remotes/five/branches/abcde >actual &&
+       test_line_count = 2 actual &&
+       git rev-list refs/remotes/five/tags/fghij >actual &&
+       test_line_count = 3 actual &&
        test $(git rev-parse refs/remotes/five/branches/abcde~1) = \
             $(git rev-parse refs/remotes/five/trunk) &&
        test $(git rev-parse refs/remotes/five/tags/fghij~1) = \
             $(git rev-parse refs/remotes/five/branches/abcde) &&
-       git log --pretty=oneline refs/remotes/five/tags/fghij | \
-           sed -e "s/^.\{41\}//" >output.five &&
+       git log --pretty=oneline refs/remotes/five/tags/fghij >actual &&
+       sed -e "s/^.\{41\}//" actual >output.five &&
        test_cmp expect.five output.five
        '
 
index e4d06accc458191d0d52fe325b3118ac019e96c2..dc79df7b042b75ca192ff1a860eb0c22a9059a8e 100755 (executable)
@@ -2654,7 +2654,7 @@ test_expect_success 'R: corrupt lines do not mess marks file' '
 ##
 test_expect_success 'R: blob bigger than threshold' '
        blobsize=$((2*1024*1024 + 53)) &&
-       test-genrandom bar $blobsize >expect &&
+       test-tool genrandom bar $blobsize >expect &&
        cat >input <<-INPUT_END &&
        commit refs/heads/big-file
        committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
index 866ddf60581e3fea1afdbe28e71e7f3137da5722..d5679ffb873b4657cd953f7b318a231f56671f20 100755 (executable)
@@ -43,20 +43,20 @@ test_expect_success 'fast-export | fast-import' '
        MUSS=$(git rev-parse --verify muss) &&
        mkdir new &&
        git --git-dir=new/.git init &&
-       git fast-export --all |
+       git fast-export --all >actual &&
        (cd new &&
         git fast-import &&
         test $MASTER = $(git rev-parse --verify refs/heads/master) &&
         test $REIN = $(git rev-parse --verify refs/tags/rein) &&
         test $WER = $(git rev-parse --verify refs/heads/wer) &&
-        test $MUSS = $(git rev-parse --verify refs/tags/muss))
+        test $MUSS = $(git rev-parse --verify refs/tags/muss)) <actual
 
 '
 
 test_expect_success 'fast-export master~2..master' '
 
-       git fast-export master~2..master |
-               sed "s/master/partial/" |
+       git fast-export master~2..master >actual &&
+       sed "s/master/partial/" actual |
                (cd new &&
                 git fast-import &&
                 test $MASTER != $(git rev-parse --verify refs/heads/partial) &&
@@ -74,11 +74,12 @@ test_expect_success 'iso-8859-1' '
        test_tick &&
        echo rosten >file &&
        git commit -s -m den file &&
-       git fast-export wer^..wer |
-               sed "s/wer/i18n/" |
+       git fast-export wer^..wer >iso8859-1.fi &&
+       sed "s/wer/i18n/" iso8859-1.fi |
                (cd new &&
                 git fast-import &&
-                git cat-file commit i18n | grep "Áéí óú")
+                git cat-file commit i18n >actual &&
+                grep "Áéí óú" actual)
 
 '
 test_expect_success 'import/export-marks' '
@@ -87,20 +88,14 @@ test_expect_success 'import/export-marks' '
        git fast-export --export-marks=tmp-marks HEAD &&
        test -s tmp-marks &&
        test_line_count = 3 tmp-marks &&
-       test $(
-               git fast-export --import-marks=tmp-marks\
-               --export-marks=tmp-marks HEAD |
-               grep ^commit |
-               wc -l) \
-       -eq 0 &&
+       git fast-export --import-marks=tmp-marks \
+               --export-marks=tmp-marks HEAD >actual &&
+       test $(grep ^commit actual | wc -l) -eq 0 &&
        echo change > file &&
        git commit -m "last commit" file &&
-       test $(
-               git fast-export --import-marks=tmp-marks \
-               --export-marks=tmp-marks HEAD |
-               grep ^commit\  |
-               wc -l) \
-       -eq 1 &&
+       git fast-export --import-marks=tmp-marks \
+               --export-marks=tmp-marks HEAD >actual &&
+       test $(grep ^commit\  actual | wc -l) -eq 1 &&
        test_line_count = 4 tmp-marks
 
 '
@@ -184,7 +179,7 @@ test_expect_success 'submodule fast-export | fast-import' '
        rm -rf new &&
        mkdir new &&
        git --git-dir=new/.git init &&
-       git fast-export --signed-tags=strip --all |
+       git fast-export --signed-tags=strip --all >actual &&
        (cd new &&
         git fast-import &&
         test "$SUBENT1" = "$(git ls-tree refs/heads/master^ sub)" &&
@@ -192,7 +187,7 @@ test_expect_success 'submodule fast-export | fast-import' '
         git checkout master &&
         git submodule init &&
         git submodule update &&
-        cmp sub/file ../sub/file)
+        cmp sub/file ../sub/file) <actual
 
 '
 
@@ -367,12 +362,14 @@ test_expect_success 'path limiting with import-marks does not lose unmodified fi
        echo more content >> file &&
        test_tick &&
        git commit -mnext file &&
-       git fast-export --import-marks=marks simple -- file file0 | grep file0
+       git fast-export --import-marks=marks simple -- file file0 >actual &&
+       grep file0 actual
 '
 
 test_expect_success 'full-tree re-shows unmodified files'        '
        git checkout -f simple &&
-       test $(git fast-export --full-tree simple | grep -c file0) -eq 3
+       git fast-export --full-tree simple >actual &&
+       test $(grep -c file0 actual) -eq 3
 '
 
 test_expect_success 'set-up a few more tags for tag export tests' '
@@ -505,8 +502,8 @@ test_expect_success 'refs are updated even if no commits need to be exported' '
 '
 
 test_expect_success 'use refspec' '
-       git fast-export --refspec refs/heads/master:refs/heads/foobar master | \
-               grep "^commit " | sort | uniq > actual &&
+       git fast-export --refspec refs/heads/master:refs/heads/foobar master >actual2 &&
+       grep "^commit " actual2 | sort | uniq >actual &&
        echo "commit refs/heads/foobar" > expected &&
        test_cmp expected actual
 '
@@ -534,7 +531,8 @@ test_expect_success 'when using -C, do not declare copy when source of copy is a
        git -C src commit -m 2nd_commit &&
 
        test_create_repo dst &&
-       git -C src fast-export --all -C | git -C dst fast-import &&
+       git -C src fast-export --all -C >actual &&
+       git -C dst fast-import <actual &&
        git -C src show >expected &&
        git -C dst show >actual &&
        test_cmp expected actual
index c30660d60626c886dfa5993acddaebf2d3364de9..06742748e99fda241ec629e34c0700ae4050692e 100755 (executable)
@@ -447,12 +447,10 @@ test_expect_success 'cvs update (-p)' '
     git push gitcvs.git >/dev/null &&
     cd cvswork &&
     GIT_CONFIG="$git_config" cvs update &&
-    rm -f failures &&
     for i in merge no-lf empty really-empty; do
-        GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out
-       test_cmp $i.out ../$i >>failures 2>&1
-    done &&
-    test -z "$(cat failures)"
+       GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out &&
+       test_cmp $i.out ../$i || return 1
+    done
 '
 
 cd "$WORKDIR"
index 6d2d3c8739cbd6978d823e7947d9f95c758ff241..cf31ace66763741a7f53e3b9a1cb3c0019255f56 100755 (executable)
@@ -455,20 +455,20 @@ test_expect_success 'cvs up -r $(git rev-parse v1)' '
 '
 
 test_expect_success 'cvs diff -r v1 -u' '
-       ( cd cvswork && cvs -f diff -r v1 -u ) >cvsDiff.out 2>cvs.log &&
+       ( cd cvswork && cvs -f diff -r v1 -u >../cvsDiff.out 2>../cvs.log ) &&
        test_must_be_empty cvsDiff.out &&
        test_must_be_empty cvs.log
 '
 
 test_expect_success 'cvs diff -N -r v2 -u' '
-       ( cd cvswork && ! cvs -f diff -N -r v2 -u ) >cvsDiff.out 2>cvs.log &&
+       ( cd cvswork && ! cvs -f diff -N -r v2 -u >../cvsDiff.out 2>../cvs.log ) &&
        test_must_be_empty cvs.log &&
        test -s cvsDiff.out &&
        check_diff cvsDiff.out v2 v1 >check_diff.out 2>&1
 '
 
 test_expect_success 'cvs diff -N -r v2 -r v1.2' '
-       ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u ) >cvsDiff.out 2>cvs.log &&
+       ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u >../cvsDiff.out 2>../cvs.log ) &&
        test_must_be_empty cvs.log &&
        test -s cvsDiff.out &&
        check_diff cvsDiff.out v2 v1.2 >check_diff.out 2>&1
@@ -487,7 +487,7 @@ test_expect_success 'apply early [cvswork3] diff to b3' '
 '
 
 test_expect_success 'check [cvswork3] diff' '
-       ( cd cvswork3 && ! cvs -f diff -N -u ) >"$WORKDIR/cvsDiff.out" 2>cvs.log &&
+       ( cd cvswork3 && ! cvs -f diff -N -u >"$WORKDIR/cvsDiff.out" 2>../cvs.log ) &&
        test_must_be_empty cvs.log &&
        test -s cvsDiff.out &&
        test $(grep Index: cvsDiff.out | wc -l) = 3 &&
index eb9a8ed197b4c6383a4c688f5e5c3a3fcb102b12..1fc9b33aeb5f095816fba72e85c94f0455994418 100755 (executable)
@@ -237,7 +237,7 @@ test_expect_success 'ignore apple' '
        build_gendouble &&
        (
                cd "$cli" &&
-               test-genrandom apple 1024 >double.png &&
+               test-tool genrandom apple 1024 >double.png &&
                "$PYTHON_PATH" "$TRASH_DIRECTORY/gendouble.py" >%double.png &&
                p4 add -t apple double.png &&
                p4 submit -d appledouble
index d950c7d665498c484f83d127cd2363def5169a89..d5c367510049607ce33db73ffc1869a0652fe663 100755 (executable)
@@ -28,7 +28,7 @@ test_expect_success 'shell metachars in filenames' '
                echo f2 >"file with spaces" &&
                git add "file with spaces" &&
                git commit -m "add files" &&
-               P4EDITOR="test-chmtime +5" git p4 submit
+               P4EDITOR="test-tool chmtime +5" git p4 submit
        ) &&
        (
                cd "$cli" &&
@@ -47,7 +47,7 @@ test_expect_success 'deleting with shell metachars' '
                git rm foo\$bar &&
                git rm file\ with\ spaces &&
                git commit -m "remove files" &&
-               P4EDITOR="test-chmtime +5" git p4 submit
+               P4EDITOR="test-tool chmtime +5" git p4 submit
        ) &&
        (
                cd "$cli" &&
index bda222aa0270f3a93fa494b308aa174ebc942eca..783c6ad1653142d174e4ddc6d49e1f631121be51 100755 (executable)
@@ -53,7 +53,7 @@ test_expect_success 'preserve users' '
                git commit --author "Alice <alice@example.com>" -m "a change by alice" file1 &&
                git commit --author "Bob <bob@example.com>" -m "a change by bob" file2 &&
                git config git-p4.skipSubmitEditCheck true &&
-               P4EDITOR="test-chmtime +5" P4USER=alice P4PASSWD=secret &&
+               P4EDITOR="test-tool chmtime +5" P4USER=alice P4PASSWD=secret &&
                export P4EDITOR P4USER P4PASSWD &&
                git p4 commit --preserve-user &&
                p4_check_commit_author file1 alice &&
@@ -71,7 +71,7 @@ test_expect_success 'refuse to preserve users without perms' '
                git config git-p4.skipSubmitEditCheck true &&
                echo "username-noperms: a change by alice" >>file1 &&
                git commit --author "Alice <alice@example.com>" -m "perms: a change by alice" file1 &&
-               P4EDITOR="test-chmtime +5" P4USER=bob P4PASSWD=secret &&
+               P4EDITOR="test-tool chmtime +5" P4USER=bob P4PASSWD=secret &&
                export P4EDITOR P4USER P4PASSWD &&
                test_must_fail git p4 commit --preserve-user &&
                ! git diff --exit-code HEAD..p4/master
@@ -89,7 +89,7 @@ test_expect_success 'preserve user where author is unknown to p4' '
                git commit --author "Bob <bob@example.com>" -m "preserve: a change by bob" file1 &&
                echo "username-unknown: a change by charlie" >>file1 &&
                git commit --author "Charlie <charlie@example.com>" -m "preserve: a change by charlie" file1 &&
-               P4EDITOR="test-chmtime +5" P4USER=alice P4PASSWD=secret &&
+               P4EDITOR="test-tool chmtime +5" P4USER=alice P4PASSWD=secret &&
                export P4EDITOR P4USER P4PASSWD &&
                test_must_fail git p4 commit --preserve-user &&
                ! git diff --exit-code HEAD..p4/master &&
index 6dc6df032ec0c15b39dccd24ade52580fc115add..3c22f74bd436b7c6d94d5c29d5b2e3e3510d58e6 100755 (executable)
@@ -26,7 +26,7 @@ test_expect_success 'EDITOR with options' '
                cd "$git" &&
                echo change >file1 &&
                git commit -m "change" file1 &&
-               P4EDITOR=": >\"$git/touched\" && test-chmtime +5" git p4 submit &&
+               P4EDITOR=": >\"$git/touched\" && test-tool chmtime +5" git p4 submit &&
                test_path_is_file "$git/touched"
        )
 '
index fc614dcbfa74c39e120ff2fc80cdec07ea16a338..1b34caa1e1a5e86cd2edccbb1559009b35022e10 100755 (executable)
@@ -1237,17 +1237,19 @@ test_expect_success 'double dash "git" itself' '
 test_expect_success 'double dash "git checkout"' '
        test_completion "git checkout --" <<-\EOF
        --quiet Z
+       --detach Z
+       --track Z
+       --orphan=Z
        --ours Z
        --theirs Z
-       --track Z
-       --no-track Z
        --merge Z
-       --conflict=
-       --orphan Z
+       --conflict=Z
        --patch Z
-       --detach Z
        --ignore-skip-worktree-bits Z
+       --ignore-other-worktrees Z
        --recurse-submodules Z
+       --progress Z
+       --no-track Z
        --no-recurse-submodules Z
        EOF
 '
@@ -1452,6 +1454,12 @@ test_expect_success 'completion used <cmd> completion for alias: !f() { : git <c
        EOF
 '
 
+test_expect_success 'completion without explicit _git_xxx function' '
+       test_completion "git version --" <<-\EOF
+       --build-options Z
+       EOF
+'
+
 test_expect_failure 'complete with tilde expansion' '
        git init tmp && cd tmp &&
        test_when_finished "cd .. && rm -rf tmp" &&
@@ -1495,4 +1503,35 @@ do
        '
 done
 
+test_expect_success 'sourcing the completion script clears cached commands' '
+       __git_compute_all_commands &&
+       verbose test -n "$__git_all_commands" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__git_all_commands"
+'
+
+test_expect_success 'sourcing the completion script clears cached porcelain commands' '
+       __git_compute_porcelain_commands &&
+       verbose test -n "$__git_porcelain_commands" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__git_porcelain_commands"
+'
+
+test_expect_success !GETTEXT_POISON 'sourcing the completion script clears cached merge strategies' '
+       __git_compute_merge_strategies &&
+       verbose test -n "$__git_merge_strategies" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__git_merge_strategies"
+'
+
+test_expect_success 'sourcing the completion script clears cached --options' '
+       __gitcomp_builtin checkout &&
+       verbose test -n "$__gitcomp_builtin_checkout" &&
+       __gitcomp_builtin notes_edit &&
+       verbose test -n "$__gitcomp_builtin_notes_edit" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__gitcomp_builtin_checkout" &&
+       verbose test -z "$__gitcomp_builtin_notes_edit"
+'
+
 test_done
index 97c9b32c2ecfa608f1e9c37c1c6402a1772e4187..8f5c811dd7d627ea5a0611be5161cea98deedb95 100755 (executable)
@@ -735,22 +735,12 @@ test_expect_success 'prompt - hide if pwd ignored - env var set, config unset, p
        test_cmp expected "$actual"
 '
 
-test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stdout)' '
+test_expect_success 'prompt - hide if pwd ignored - inside gitdir' '
        printf " (GIT_DIR!)" >expected &&
        (
                GIT_PS1_HIDE_IF_PWD_IGNORED=y &&
                cd .git &&
-               __git_ps1 >"$actual" 2>/dev/null
-       ) &&
-       test_cmp expected "$actual"
-'
-
-test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stderr)' '
-       printf "" >expected &&
-       (
-               GIT_PS1_HIDE_IF_PWD_IGNORED=y &&
-               cd .git &&
-               __git_ps1 >/dev/null 2>"$actual"
+               __git_ps1 >"$actual"
        ) &&
        test_cmp expected "$actual"
 '
index a679b02a1c3bd181aff94d36e6e2f3347cf2c34f..7d620bf2a9a26c325de035c8d9d85323d5088357 100644 (file)
@@ -610,6 +610,14 @@ list_contains () {
 #
 # Writing this as "! git checkout ../outerspace" is wrong, because
 # the failure could be due to a segv.  We want a controlled failure.
+#
+# Accepts the following options:
+#
+#   ok=<signal-name>[,<...>]:
+#     Don't treat an exit caused by the given signal as error.
+#     Multiple signals can be specified as a comma separated list.
+#     Currently recognized signal names are: sigpipe, success.
+#     (Don't use 'success', use 'test_might_fail' instead.)
 
 test_must_fail () {
        case "$1" in
@@ -621,30 +629,30 @@ test_must_fail () {
                _test_ok=
                ;;
        esac
-       "$@"
+       "$@" 2>&7
        exit_code=$?
        if test $exit_code -eq 0 && ! list_contains "$_test_ok" success
        then
-               echo >&2 "test_must_fail: command succeeded: $*"
+               echo >&4 "test_must_fail: command succeeded: $*"
                return 1
        elif test_match_signal 13 $exit_code && list_contains "$_test_ok" sigpipe
        then
                return 0
        elif test $exit_code -gt 129 && test $exit_code -le 192
        then
-               echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*"
+               echo >&4 "test_must_fail: died by signal $(($exit_code - 128)): $*"
                return 1
        elif test $exit_code -eq 127
        then
-               echo >&2 "test_must_fail: command not found: $*"
+               echo >&4 "test_must_fail: command not found: $*"
                return 1
        elif test $exit_code -eq 126
        then
-               echo >&2 "test_must_fail: valgrind error: $*"
+               echo >&4 "test_must_fail: valgrind error: $*"
                return 1
        fi
        return 0
-}
+} 7>&2 2>&4
 
 # Similar to test_must_fail, but tolerates success, too.  This is
 # meant to be used in contexts like:
@@ -656,10 +664,12 @@ test_must_fail () {
 #
 # Writing "git config --unset all.configuration || :" would be wrong,
 # because we want to notice if it fails due to segv.
+#
+# Accepts the same options as test_must_fail.
 
 test_might_fail () {
-       test_must_fail ok=success "$@"
-}
+       test_must_fail ok=success "$@" 2>&7
+} 7>&2 2>&4
 
 # Similar to test_must_fail and test_might_fail, but check that a
 # given command exited with a given exit code. Meant to be used as:
@@ -671,16 +681,16 @@ test_might_fail () {
 test_expect_code () {
        want_code=$1
        shift
-       "$@"
+       "$@" 2>&7
        exit_code=$?
        if test $exit_code = $want_code
        then
                return 0
        fi
 
-       echo >&2 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
+       echo >&4 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
        return 1
-}
+} 7>&2 2>&4
 
 # test_cmp is a helper function to compare actual and expected output.
 # You can use it like:
@@ -705,12 +715,66 @@ test_cmp_bin() {
        cmp "$@"
 }
 
+# Use this instead of test_cmp to compare files that contain expected and
+# actual output from git commands that can be translated.  When running
+# under GETTEXT_POISON this pretends that the command produced expected
+# results.
+test_i18ncmp () {
+       test -n "$GETTEXT_POISON" || test_cmp "$@"
+}
+
+# Use this instead of "grep expected-string actual" to see if the
+# output from a git command that can be translated either contains an
+# expected string, or does not contain an unwanted one.  When running
+# under GETTEXT_POISON this pretends that the command produced expected
+# results.
+test_i18ngrep () {
+       eval "last_arg=\${$#}"
+
+       test -f "$last_arg" ||
+       error "bug in the test script: test_i18ngrep requires a file" \
+             "to read as the last parameter"
+
+       if test $# -lt 2 ||
+          { test "x!" = "x$1" && test $# -lt 3 ; }
+       then
+               error "bug in the test script: too few parameters to test_i18ngrep"
+       fi
+
+       if test -n "$GETTEXT_POISON"
+       then
+               # pretend success
+               return 0
+       fi
+
+       if test "x!" = "x$1"
+       then
+               shift
+               ! grep "$@" && return 0
+
+               echo >&4 "error: '! grep $@' did find a match in:"
+       else
+               grep "$@" && return 0
+
+               echo >&4 "error: 'grep $@' didn't find a match in:"
+       fi
+
+       if test -s "$last_arg"
+       then
+               cat >&4 "$last_arg"
+       else
+               echo >&4 "<File '$last_arg' is empty>"
+       fi
+
+       return 1
+}
+
 # Call any command "$@" but be more verbose about its
 # failure. This is handy for commands like "test" which do
 # not output anything when they fail.
 verbose () {
        "$@" && return 0
-       echo >&2 "command failed: $(git rev-parse --sq-quote "$@")"
+       echo >&4 "command failed: $(git rev-parse --sq-quote "$@")"
        return 1
 }
 
@@ -718,6 +782,7 @@ verbose () {
 # otherwise.
 
 test_must_be_empty () {
+       test_path_is_file "$1" &&
        if test -s "$1"
        then
                echo "'$1' is not empty, it contains:"
@@ -828,8 +893,8 @@ test_write_lines () {
 }
 
 perl () {
-       command "$PERL_PATH" "$@"
-}
+       command "$PERL_PATH" "$@" 2>&7
+} 7>&2 2>&4
 
 # Is the value one of the various ways to spell a boolean true/false?
 test_normalize_bool () {
@@ -969,13 +1034,13 @@ test_env () {
                                shift
                                ;;
                        *)
-                               "$@"
+                               "$@" 2>&7
                                exit
                                ;;
                        esac
                done
        )
-}
+} 7>&2 2>&4
 
 # Returns true if the numeric exit code in "$2" represents the expected signal
 # in "$1". Signals should be given numerically.
@@ -1017,9 +1082,9 @@ nongit () {
                GIT_CEILING_DIRECTORIES=$(pwd) &&
                export GIT_CEILING_DIRECTORIES &&
                cd non-repo &&
-               "$@"
+               "$@" 2>&7
        )
-}
+} 7>&2 2>&4
 
 # convert stdin to pktline representation; note that empty input becomes an
 # empty packet, not a flush packet (for that you can just print 0000 yourself).
index 9af19055b307e2c8306efbd41a89338ee408b838..483c8d6d7cd9734c04552043b5ff0cb7d1b0af3a 100644 (file)
@@ -116,6 +116,7 @@ unset VISUAL EMAIL LANGUAGE COLUMNS $("$PERL_PATH" -e '
        my @vars = grep(/^GIT_/ && !/^GIT_($ok)/o, @env);
        print join("\n", @vars);
 ')
+unset XDG_CACHE_HOME
 unset XDG_CONFIG_HOME
 unset GITPERLLIB
 GIT_AUTHOR_EMAIL=author@example.com
@@ -263,7 +264,24 @@ do
                GIT_TEST_CHAIN_LINT=0
                shift ;;
        -x)
-               trace=t
+               # Some test scripts can't be reliably traced  with '-x',
+               # unless the test is run with a Bash version supporting
+               # BASH_XTRACEFD (introduced in Bash v4.1).  Check whether
+               # this test is marked as such, and ignore '-x' if it
+               # isn't executed with a suitable Bash version.
+               if test -z "$test_untraceable" || {
+                    test -n "$BASH_VERSION" && {
+                      test ${BASH_VERSINFO[0]} -gt 4 || {
+                        test ${BASH_VERSINFO[0]} -eq 4 &&
+                        test ${BASH_VERSINFO[1]} -ge 1
+                      }
+                    }
+                  }
+               then
+                       trace=t
+               else
+                       echo >&2 "warning: ignoring -x; '$0' is untraceable without BASH_XTRACEFD"
+               fi
                shift ;;
        --verbose-log)
                verbose_log=t
@@ -945,10 +963,10 @@ test -d "$GIT_BUILD_DIR"/templates/blt || {
        error "You haven't built things yet, have you?"
 }
 
-if ! test -x "$GIT_BUILD_DIR"/t/helper/test-chmtime
+if ! test -x "$GIT_BUILD_DIR"/t/helper/test-tool
 then
-       echo >&2 'You need to build test-chmtime:'
-       echo >&2 'Run "make t/helper/test-chmtime" in the source (toplevel) directory'
+       echo >&2 'You need to build test-tool:'
+       echo >&2 'Run "make t/helper/test-tool" in the source (toplevel) directory'
        exit 1
 fi
 
@@ -1062,32 +1080,6 @@ else
        test_set_prereq C_LOCALE_OUTPUT
 fi
 
-# Use this instead of test_cmp to compare files that contain expected and
-# actual output from git commands that can be translated.  When running
-# under GETTEXT_POISON this pretends that the command produced expected
-# results.
-test_i18ncmp () {
-       test -n "$GETTEXT_POISON" || test_cmp "$@"
-}
-
-# Use this instead of "grep expected-string actual" to see if the
-# output from a git command that can be translated either contains an
-# expected string, or does not contain an unwanted one.  When running
-# under GETTEXT_POISON this pretends that the command produced expected
-# results.
-test_i18ngrep () {
-       if test -n "$GETTEXT_POISON"
-       then
-           : # pretend success
-       elif test "x!" = "x$1"
-       then
-               shift
-               ! grep "$@"
-       else
-               grep "$@"
-       fi
-}
-
 test_lazy_prereq PIPE '
        # test whether the filesystem supports FIFOs
        test_have_prereq !MINGW,!CYGWIN &&
@@ -1132,6 +1124,10 @@ test_lazy_prereq EXPENSIVE '
        test -n "$GIT_TEST_LONG"
 '
 
+test_lazy_prereq EXPENSIVE_ON_WINDOWS '
+       test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN
+'
+
 test_lazy_prereq USR_BIN_TIME '
        test -x /usr/bin/time
 '
@@ -1210,5 +1206,5 @@ test_lazy_prereq LONG_IS_64BIT '
        test 8 -le "$(build_option sizeof-long)"
 '
 
-test_lazy_prereq TIME_IS_64BIT 'test-date is64bit'
-test_lazy_prereq TIME_T_IS_64BIT 'test-date time_t-is64bit'
+test_lazy_prereq TIME_IS_64BIT 'test-tool date is64bit'
+test_lazy_prereq TIME_T_IS_64BIT 'test-tool date time_t-is64bit'
diff --git a/tag.c b/tag.c
index fcbe012f7a2203e198dac9eac03aa9dab999f334..86b1dcbb8270e944ac643a39835d490bbaf35b20 100644 (file)
--- a/tag.c
+++ b/tag.c
@@ -41,20 +41,20 @@ int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
        unsigned long size;
        int ret;
 
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        if (type != OBJ_TAG)
                return error("%s: cannot verify a non-tag object of type %s.",
                                name_to_report ?
                                name_to_report :
-                               find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
-                               typename(type));
+                               find_unique_abbrev(oid, DEFAULT_ABBREV),
+                               type_name(type));
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return error("%s: unable to read file.",
                                name_to_report ?
                                name_to_report :
-                               find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+                               find_unique_abbrev(oid, DEFAULT_ABBREV));
 
        ret = run_gpg_verify(buf, size, flags);
 
@@ -182,7 +182,7 @@ int parse_tag(struct tag *item)
 
        if (item->object.parsed)
                return 0;
-       data = read_sha1_file(item->object.oid.hash, &type, &size);
+       data = read_object_file(&item->object.oid, &type, &size);
        if (!data)
                return error("Could not read %s",
                             oid_to_hex(&item->object.oid));
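
The tag.c hunks above move from the sha1-based helpers (sha1_object_info, read_sha1_file, typename) to their object_id counterparts (oid_object_info, read_object_file, type_name, and the oid-taking find_unique_abbrev). The following is a minimal caller-side sketch of the new API, for illustration only and not part of the patch (error handling trimmed, git's internal headers assumed):

    #include "cache.h"

    /* Illustration only: inspect an object through the oid-based API. */
    static int show_object_type(const struct object_id *oid)
    {
            enum object_type type;
            unsigned long size;
            void *buf;

            type = oid_object_info(oid, &size);
            if (type < 0)
                    return error("%s: unable to get object info",
                                 find_unique_abbrev(oid, DEFAULT_ABBREV));

            buf = read_object_file(oid, &type, &size);
            if (!buf)
                    return error("%s: unable to read object",
                                 find_unique_abbrev(oid, DEFAULT_ABBREV));

            printf("%s %s %lu\n", find_unique_abbrev(oid, DEFAULT_ABBREV),
                   type_name(type), size);
            free(buf);
            return 0;
    }
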
index 5fdafdd2d2d72390ee9fe3c2afd501ad222fac8e..139ecd97f8eb88b597aab50c2eb2b171a11ef3ef 100644 (file)
@@ -165,11 +165,11 @@ struct tempfile *register_tempfile(const char *path)
        return tempfile;
 }
 
-struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode)
+struct tempfile *mks_tempfile_sm(const char *filename_template, int suffixlen, int mode)
 {
        struct tempfile *tempfile = new_tempfile();
 
-       strbuf_add_absolute_path(&tempfile->filename, template);
+       strbuf_add_absolute_path(&tempfile->filename, filename_template);
        tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode);
        if (tempfile->fd < 0) {
                deactivate_tempfile(tempfile);
@@ -179,7 +179,7 @@ struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode)
        return tempfile;
 }
 
-struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
+struct tempfile *mks_tempfile_tsm(const char *filename_template, int suffixlen, int mode)
 {
        struct tempfile *tempfile = new_tempfile();
        const char *tmpdir;
@@ -188,7 +188,7 @@ struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
        if (!tmpdir)
                tmpdir = "/tmp";
 
-       strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, template);
+       strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, filename_template);
        tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode);
        if (tempfile->fd < 0) {
                deactivate_tempfile(tempfile);
@@ -198,12 +198,12 @@ struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
        return tempfile;
 }
 
-struct tempfile *xmks_tempfile_m(const char *template, int mode)
+struct tempfile *xmks_tempfile_m(const char *filename_template, int mode)
 {
        struct tempfile *tempfile;
        struct strbuf full_template = STRBUF_INIT;
 
-       strbuf_add_absolute_path(&full_template, template);
+       strbuf_add_absolute_path(&full_template, filename_template);
        tempfile = mks_tempfile_m(full_template.buf, mode);
        if (!tempfile)
                die_errno("Unable to create temporary file '%s'",
index 450908b2e0bc4a94bc23a862abf04a56366aedd7..8959c5f1b5761dc34f1742a96941f67efd6429a7 100644 (file)
@@ -135,58 +135,58 @@ extern struct tempfile *register_tempfile(const char *path);
  */
 
 /* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_sm(const char *template,
+extern struct tempfile *mks_tempfile_sm(const char *filename_template,
                                        int suffixlen, int mode);
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_s(const char *template,
+static inline struct tempfile *mks_tempfile_s(const char *filename_template,
                                              int suffixlen)
 {
-       return mks_tempfile_sm(template, suffixlen, 0600);
+       return mks_tempfile_sm(filename_template, suffixlen, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_m(const char *template, int mode)
+static inline struct tempfile *mks_tempfile_m(const char *filename_template, int mode)
 {
-       return mks_tempfile_sm(template, 0, mode);
+       return mks_tempfile_sm(filename_template, 0, mode);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile(const char *template)
+static inline struct tempfile *mks_tempfile(const char *filename_template)
 {
-       return mks_tempfile_sm(template, 0, 0600);
+       return mks_tempfile_sm(filename_template, 0, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_tsm(const char *template,
+extern struct tempfile *mks_tempfile_tsm(const char *filename_template,
                                         int suffixlen, int mode);
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_ts(const char *template,
+static inline struct tempfile *mks_tempfile_ts(const char *filename_template,
                                               int suffixlen)
 {
-       return mks_tempfile_tsm(template, suffixlen, 0600);
+       return mks_tempfile_tsm(filename_template, suffixlen, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_tm(const char *template, int mode)
+static inline struct tempfile *mks_tempfile_tm(const char *filename_template, int mode)
 {
-       return mks_tempfile_tsm(template, 0, mode);
+       return mks_tempfile_tsm(filename_template, 0, mode);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_t(const char *template)
+static inline struct tempfile *mks_tempfile_t(const char *filename_template)
 {
-       return mks_tempfile_tsm(template, 0, 0600);
+       return mks_tempfile_tsm(filename_template, 0, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-extern struct tempfile *xmks_tempfile_m(const char *template, int mode);
+extern struct tempfile *xmks_tempfile_m(const char *filename_template, int mode);
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *xmks_tempfile(const char *template)
+static inline struct tempfile *xmks_tempfile(const char *filename_template)
 {
-       return xmks_tempfile_m(template, 0600);
+       return xmks_tempfile_m(filename_template, 0600);
 }
 
 /*
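
The tempfile renames above are mechanical: the parameter `template` becomes `filename_template` (presumably because `template` is a C++ keyword that some toolchains flag), and the calling convention is unchanged. A small usage sketch, illustration only and not taken from the patch (the file name and helper are made up):

    #include "cache.h"
    #include "tempfile.h"

    /*
     * Illustration only: create a temp file under $TMPDIR whose name ends
     * in ".txt"; the 4-character suffix after the XXXXXX is preserved.
     */
    static struct tempfile *make_scratch_file(void)
    {
            struct tempfile *t = mks_tempfile_ts("git-scratch-XXXXXX.txt", 4);

            if (!t)
                    die_errno("could not create temporary file");
            return t;
    }
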
index 3ba157ed0d6157281f0a9e91f7b7f208652adfcc..c508c9b7521b48c18f1b373ada9b9f2e5b02681d 100644 (file)
--- a/trailer.c
+++ b/trailer.c
@@ -174,12 +174,12 @@ static void print_all(FILE *outfile, struct list_head *head,
 
 static struct trailer_item *trailer_from_arg(struct arg_item *arg_tok)
 {
-       struct trailer_item *new = xcalloc(sizeof(*new), 1);
-       new->token = arg_tok->token;
-       new->value = arg_tok->value;
+       struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1);
+       new_item->token = arg_tok->token;
+       new_item->value = arg_tok->value;
        arg_tok->token = arg_tok->value = NULL;
        free_arg_item(arg_tok);
-       return new;
+       return new_item;
 }
 
 static void add_arg_to_input_list(struct trailer_item *on_tok,
@@ -666,30 +666,30 @@ static void parse_trailer(struct strbuf *tok, struct strbuf *val,
 static struct trailer_item *add_trailer_item(struct list_head *head, char *tok,
                                             char *val)
 {
-       struct trailer_item *new = xcalloc(sizeof(*new), 1);
-       new->token = tok;
-       new->value = val;
-       list_add_tail(&new->list, head);
-       return new;
+       struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1);
+       new_item->token = tok;
+       new_item->value = val;
+       list_add_tail(&new_item->list, head);
+       return new_item;
 }
 
 static void add_arg_item(struct list_head *arg_head, char *tok, char *val,
                         const struct conf_info *conf,
                         const struct new_trailer_item *new_trailer_item)
 {
-       struct arg_item *new = xcalloc(sizeof(*new), 1);
-       new->token = tok;
-       new->value = val;
-       duplicate_conf(&new->conf, conf);
+       struct arg_item *new_item = xcalloc(sizeof(*new_item), 1);
+       new_item->token = tok;
+       new_item->value = val;
+       duplicate_conf(&new_item->conf, conf);
        if (new_trailer_item) {
                if (new_trailer_item->where != WHERE_DEFAULT)
-                       new->conf.where = new_trailer_item->where;
+                       new_item->conf.where = new_trailer_item->where;
                if (new_trailer_item->if_exists != EXISTS_DEFAULT)
-                       new->conf.if_exists = new_trailer_item->if_exists;
+                       new_item->conf.if_exists = new_trailer_item->if_exists;
                if (new_trailer_item->if_missing != MISSING_DEFAULT)
-                       new->conf.if_missing = new_trailer_item->if_missing;
+                       new_item->conf.if_missing = new_trailer_item->if_missing;
        }
-       list_add_tail(&new->list, arg_head);
+       list_add_tail(&new_item->list, arg_head);
 }
 
 static void process_command_line_args(struct list_head *arg_head,
@@ -1000,7 +1000,7 @@ static struct tempfile *trailers_tempfile;
 static FILE *create_in_place_tempfile(const char *file)
 {
        struct stat st;
-       struct strbuf template = STRBUF_INIT;
+       struct strbuf filename_template = STRBUF_INIT;
        const char *tail;
        FILE *outfile;
 
@@ -1014,11 +1014,11 @@ static FILE *create_in_place_tempfile(const char *file)
        /* Create temporary file in the same directory as the original */
        tail = strrchr(file, '/');
        if (tail != NULL)
-               strbuf_add(&template, file, tail - file + 1);
-       strbuf_addstr(&template, "git-interpret-trailers-XXXXXX");
+               strbuf_add(&filename_template, file, tail - file + 1);
+       strbuf_addstr(&filename_template, "git-interpret-trailers-XXXXXX");
 
-       trailers_tempfile = xmks_tempfile_m(template.buf, st.st_mode);
-       strbuf_release(&template);
+       trailers_tempfile = xmks_tempfile_m(filename_template.buf, st.st_mode);
+       strbuf_release(&filename_template);
        outfile = fdopen_tempfile(trailers_tempfile, "w");
        if (!outfile)
                die_errno(_("could not open temporary file"));
index 3f380d87d99eab317d5ac567b43e3cea05885145..11f1055b47e5e204a272e3588f74f161bdf73895 100644 (file)
@@ -12,6 +12,7 @@
 #include "argv-array.h"
 #include "refs.h"
 #include "transport-internal.h"
+#include "protocol.h"
 
 static int debug;
 
@@ -26,6 +27,7 @@ struct helper_data {
                option : 1,
                push : 1,
                connect : 1,
+               stateless_connect : 1,
                signed_tags : 1,
                check_connectivity : 1,
                no_disconnect_req : 1,
@@ -49,7 +51,7 @@ static void sendline(struct helper_data *helper, struct strbuf *buffer)
                die_errno("Full write to remote helper failed");
 }
 
-static int recvline_fh(FILE *helper, struct strbuf *buffer, const char *name)
+static int recvline_fh(FILE *helper, struct strbuf *buffer)
 {
        strbuf_reset(buffer);
        if (debug)
@@ -67,7 +69,7 @@ static int recvline_fh(FILE *helper, struct strbuf *buffer, const char *name)
 
 static int recvline(struct helper_data *helper, struct strbuf *buffer)
 {
-       return recvline_fh(helper->out, buffer, helper->name);
+       return recvline_fh(helper->out, buffer);
 }
 
 static void write_constant(int fd, const char *str)
@@ -188,6 +190,8 @@ static struct child_process *get_helper(struct transport *transport)
                        refspecs[refspec_nr++] = xstrdup(arg);
                } else if (!strcmp(capname, "connect")) {
                        data->connect = 1;
+               } else if (!strcmp(capname, "stateless-connect")) {
+                       data->stateless_connect = 1;
                } else if (!strcmp(capname, "signed-tags")) {
                        data->signed_tags = 1;
                } else if (skip_prefix(capname, "export-marks ", &arg)) {
@@ -545,14 +549,13 @@ static int fetch_with_import(struct transport *transport,
        return 0;
 }
 
-static int process_connect_service(struct transport *transport,
-                                  const char *name, const char *exec)
+static int run_connect(struct transport *transport, struct strbuf *cmdbuf)
 {
        struct helper_data *data = transport->data;
-       struct strbuf cmdbuf = STRBUF_INIT;
-       struct child_process *helper;
-       int r, duped, ret = 0;
+       int ret = 0;
+       int duped;
        FILE *input;
+       struct child_process *helper;
 
        helper = get_helper(transport);
 
@@ -568,44 +571,61 @@ static int process_connect_service(struct transport *transport,
        input = xfdopen(duped, "r");
        setvbuf(input, NULL, _IONBF, 0);
 
+       sendline(data, cmdbuf);
+       if (recvline_fh(input, cmdbuf))
+               exit(128);
+
+       if (!strcmp(cmdbuf->buf, "")) {
+               data->no_disconnect_req = 1;
+               if (debug)
+                       fprintf(stderr, "Debug: Smart transport connection "
+                               "ready.\n");
+               ret = 1;
+       } else if (!strcmp(cmdbuf->buf, "fallback")) {
+               if (debug)
+                       fprintf(stderr, "Debug: Falling back to dumb "
+                               "transport.\n");
+       } else {
+               die("Unknown response to connect: %s",
+                       cmdbuf->buf);
+       }
+
+       fclose(input);
+       return ret;
+}
+
+static int process_connect_service(struct transport *transport,
+                                  const char *name, const char *exec)
+{
+       struct helper_data *data = transport->data;
+       struct strbuf cmdbuf = STRBUF_INIT;
+       int ret = 0;
+
        /*
         * Handle --upload-pack and friends. This is fire and forget...
         * just warn if it fails.
         */
        if (strcmp(name, exec)) {
-               r = set_helper_option(transport, "servpath", exec);
+               int r = set_helper_option(transport, "servpath", exec);
                if (r > 0)
                        warning("Setting remote service path not supported by protocol.");
                else if (r < 0)
                        warning("Invalid remote service path.");
        }
 
-       if (data->connect)
+       if (data->connect) {
                strbuf_addf(&cmdbuf, "connect %s\n", name);
-       else
-               goto exit;
-
-       sendline(data, &cmdbuf);
-       if (recvline_fh(input, &cmdbuf, name))
-               exit(128);
-
-       if (!strcmp(cmdbuf.buf, "")) {
-               data->no_disconnect_req = 1;
-               if (debug)
-                       fprintf(stderr, "Debug: Smart transport connection "
-                               "ready.\n");
-               ret = 1;
-       } else if (!strcmp(cmdbuf.buf, "fallback")) {
-               if (debug)
-                       fprintf(stderr, "Debug: Falling back to dumb "
-                               "transport.\n");
-       } else
-               die("Unknown response to connect: %s",
-                       cmdbuf.buf);
+               ret = run_connect(transport, &cmdbuf);
+       } else if (data->stateless_connect &&
+                  (get_protocol_version_config() == protocol_v2) &&
+                  !strcmp("git-upload-pack", name)) {
+               strbuf_addf(&cmdbuf, "stateless-connect %s\n", name);
+               ret = run_connect(transport, &cmdbuf);
+               if (ret)
+                       transport->stateless_rpc = 1;
+       }
 
-exit:
        strbuf_release(&cmdbuf);
-       fclose(input);
        return ret;
 }
 
@@ -1031,7 +1051,8 @@ static int has_attribute(const char *attrs, const char *attr) {
        }
 }
 
-static struct ref *get_refs_list(struct transport *transport, int for_push)
+static struct ref *get_refs_list(struct transport *transport, int for_push,
+                                const struct argv_array *ref_prefixes)
 {
        struct helper_data *data = transport->data;
        struct child_process *helper;
@@ -1044,7 +1065,7 @@ static struct ref *get_refs_list(struct transport *transport, int for_push)
 
        if (process_connect(transport, for_push)) {
                do_take_over(transport);
-               return transport->vtable->get_refs_list(transport, for_push);
+               return transport->vtable->get_refs_list(transport, for_push, ref_prefixes);
        }
 
        if (data->push && for_push)
index 3c1a29d7274465b977d9285781673f235e66ad21..1cde6258a73bcf8582b0746d1c44a23b30115dc9 100644 (file)
@@ -3,6 +3,7 @@
 
 struct ref;
 struct transport;
+struct argv_array;
 
 struct transport_vtable {
        /**
@@ -17,11 +18,19 @@ struct transport_vtable {
         * the transport to try to share connections, for_push is a
         * hint as to whether the ultimate operation is a push or a fetch.
         *
+        * If communicating using protocol v2 a list of prefixes can be
+        * provided to be sent to the server to enable it to limit the ref
+        * advertisement.  Since ref filtering is done on the server's end, and
+        * only when using protocol v2, this list will be ignored when not
+        * using protocol v2 meaning this function can return refs which don't
+        * match the provided ref_prefixes.
+        *
         * If the transport is able to determine the remote hash for
         * the ref without a huge amount of effort, it should store it
         * in the ref's old_sha1 field; otherwise it should be all 0.
         **/
-       struct ref *(*get_refs_list)(struct transport *transport, int for_push);
+       struct ref *(*get_refs_list)(struct transport *transport, int for_push,
+                                    const struct argv_array *ref_prefixes);
 
        /**
         * Fetch the objects for the given refs. Note that this gets
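
As the new comment spells out, ref_prefixes is only a hint: a transport that does not speak protocol v2 may ignore it and return the full advertisement. An illustrative, hypothetical vtable implementation (not from the patch) that does exactly that:

    #include "cache.h"
    #include "remote.h"
    #include "argv-array.h"
    #include "transport-internal.h"

    /*
     * Hypothetical transport that cannot filter on the server side: it
     * ignores ref_prefixes entirely, which the contract above allows for
     * pre-v2 protocols.  Callers must cope with unrequested refs.
     */
    static struct ref *dummy_get_refs_list(struct transport *transport,
                                           int for_push,
                                           const struct argv_array *ref_prefixes)
    {
            struct ref *head = alloc_ref("refs/heads/master");

            /* a real transport would also fill head->old_oid if cheap */
            return head;
    }
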
index 3afc6324726eafb71e7805153aa6a635f4e0c8d9..16b2f54f22e910c27ea10c32dfbc398925cc85c1 100644 (file)
@@ -18,6 +18,7 @@
 #include "sha1-array.h"
 #include "sigchain.h"
 #include "transport-internal.h"
+#include "protocol.h"
 #include "object-store.h"
 
 static void set_upstreams(struct transport *transport, struct ref *refs,
@@ -72,7 +73,9 @@ struct bundle_transport_data {
        struct bundle_header header;
 };
 
-static struct ref *get_refs_from_bundle(struct transport *transport, int for_push)
+static struct ref *get_refs_from_bundle(struct transport *transport,
+                                       int for_push,
+                                       const struct argv_array *ref_prefixes)
 {
        struct bundle_transport_data *data = transport->data;
        struct ref *result = NULL;
@@ -118,6 +121,7 @@ struct git_transport_data {
        struct child_process *conn;
        int fd[2];
        unsigned got_remote_heads : 1;
+       enum protocol_version version;
        struct oid_array extra_have;
        struct oid_array shallow;
 };
@@ -197,16 +201,35 @@ static int connect_setup(struct transport *transport, int for_push)
        return 0;
 }
 
-static struct ref *get_refs_via_connect(struct transport *transport, int for_push)
+static struct ref *get_refs_via_connect(struct transport *transport, int for_push,
+                                       const struct argv_array *ref_prefixes)
 {
        struct git_transport_data *data = transport->data;
-       struct ref *refs;
+       struct ref *refs = NULL;
+       struct packet_reader reader;
 
        connect_setup(transport, for_push);
-       get_remote_heads(data->fd[0], NULL, 0, &refs,
-                        for_push ? REF_NORMAL : 0,
-                        &data->extra_have,
-                        &data->shallow);
+
+       packet_reader_init(&reader, data->fd[0], NULL, 0,
+                          PACKET_READ_CHOMP_NEWLINE |
+                          PACKET_READ_GENTLE_ON_EOF);
+
+       data->version = discover_version(&reader);
+       switch (data->version) {
+       case protocol_v2:
+               get_remote_refs(data->fd[1], &reader, &refs, for_push,
+                               ref_prefixes);
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               get_remote_heads(&reader, &refs,
+                                for_push ? REF_NORMAL : 0,
+                                &data->extra_have,
+                                &data->shallow);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
        data->got_remote_heads = 1;
 
        return refs;
@@ -217,7 +240,7 @@ static int fetch_refs_via_pack(struct transport *transport,
 {
        int ret = 0;
        struct git_transport_data *data = transport->data;
-       struct ref *refs;
+       struct ref *refs = NULL;
        char *dest = xstrdup(transport->url);
        struct fetch_pack_args args;
        struct ref *refs_tmp = NULL;
@@ -242,18 +265,29 @@ static int fetch_refs_via_pack(struct transport *transport,
        args.from_promisor = data->options.from_promisor;
        args.no_dependents = data->options.no_dependents;
        args.filter_options = data->options.filter_options;
+       args.stateless_rpc = transport->stateless_rpc;
 
-       if (!data->got_remote_heads) {
-               connect_setup(transport, 0);
-               get_remote_heads(data->fd[0], NULL, 0, &refs_tmp, 0,
-                                NULL, &data->shallow);
-               data->got_remote_heads = 1;
+       if (!data->got_remote_heads)
+               refs_tmp = get_refs_via_connect(transport, 0, NULL);
+
+       switch (data->version) {
+       case protocol_v2:
+               refs = fetch_pack(&args, data->fd, data->conn,
+                                 refs_tmp ? refs_tmp : transport->remote_refs,
+                                 dest, to_fetch, nr_heads, &data->shallow,
+                                 &transport->pack_lockfile, data->version);
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               refs = fetch_pack(&args, data->fd, data->conn,
+                                 refs_tmp ? refs_tmp : transport->remote_refs,
+                                 dest, to_fetch, nr_heads, &data->shallow,
+                                 &transport->pack_lockfile, data->version);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
        }
 
-       refs = fetch_pack(&args, data->fd, data->conn,
-                         refs_tmp ? refs_tmp : transport->remote_refs,
-                         dest, to_fetch, nr_heads, &data->shallow,
-                         &transport->pack_lockfile);
        close(data->fd[0]);
        close(data->fd[1]);
        if (finish_connect(data->conn))
@@ -368,7 +402,7 @@ static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_widt
                char type;
                const char *msg;
 
-               strbuf_add_unique_abbrev(&quickref, ref->old_oid.hash,
+               strbuf_add_unique_abbrev(&quickref, &ref->old_oid,
                                         DEFAULT_ABBREV);
                if (ref->forced_update) {
                        strbuf_addstr(&quickref, "...");
@@ -379,7 +413,7 @@ static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_widt
                        type = ' ';
                        msg = NULL;
                }
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash,
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid,
                                         DEFAULT_ABBREV);
 
                print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg,
@@ -462,7 +496,7 @@ static int print_one_push_status(struct ref *ref, const char *dest, int count,
 static int measure_abbrev(const struct object_id *oid, int sofar)
 {
        char hex[GIT_MAX_HEXSZ + 1];
-       int w = find_unique_abbrev_r(hex, oid->hash, DEFAULT_ABBREV);
+       int w = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV);
 
        return (w < sofar) ? sofar : w;
 }
@@ -552,16 +586,10 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
 {
        struct git_transport_data *data = transport->data;
        struct send_pack_args args;
-       int ret;
-
-       if (!data->got_remote_heads) {
-               struct ref *tmp_refs;
-               connect_setup(transport, 1);
+       int ret = 0;
 
-               get_remote_heads(data->fd[0], NULL, 0, &tmp_refs, REF_NORMAL,
-                                NULL, &data->shallow);
-               data->got_remote_heads = 1;
-       }
+       if (!data->got_remote_heads)
+               get_refs_via_connect(transport, 1, NULL);
 
        memset(&args, 0, sizeof(args));
        args.send_mirror = !!(flags & TRANSPORT_PUSH_MIRROR);
@@ -583,8 +611,18 @@ static int git_transport_push(struct transport *transport, struct ref *remote_re
        else
                args.push_cert = SEND_PACK_PUSH_CERT_NEVER;
 
-       ret = send_pack(&args, data->fd, data->conn, remote_refs,
-                       &data->extra_have);
+       switch (data->version) {
+       case protocol_v2:
+               die("support for protocol v2 not implemented yet");
+               break;
+       case protocol_v1:
+       case protocol_v0:
+               ret = send_pack(&args, data->fd, data->conn, remote_refs,
+                               &data->extra_have);
+               break;
+       case protocol_unknown_version:
+               BUG("unknown protocol version");
+       }
 
        close(data->fd[1]);
        close(data->fd[0]);
@@ -1007,11 +1045,38 @@ int transport_push(struct transport *transport,
                int porcelain = flags & TRANSPORT_PUSH_PORCELAIN;
                int pretend = flags & TRANSPORT_PUSH_DRY_RUN;
                int push_ret, ret, err;
+               struct refspec *tmp_rs;
+               struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
+               int i;
 
                if (check_push_refs(local_refs, refspec_nr, refspec) < 0)
                        return -1;
 
-               remote_refs = transport->vtable->get_refs_list(transport, 1);
+               tmp_rs = parse_push_refspec(refspec_nr, refspec);
+               for (i = 0; i < refspec_nr; i++) {
+                       const char *prefix = NULL;
+
+                       if (tmp_rs[i].dst)
+                               prefix = tmp_rs[i].dst;
+                       else if (tmp_rs[i].src && !tmp_rs[i].exact_sha1)
+                               prefix = tmp_rs[i].src;
+
+                       if (prefix) {
+                               const char *glob = strchr(prefix, '*');
+                               if (glob)
+                                       argv_array_pushf(&ref_prefixes, "%.*s",
+                                                        (int)(glob - prefix),
+                                                        prefix);
+                               else
+                                       expand_ref_prefix(&ref_prefixes, prefix);
+                       }
+               }
+
+               remote_refs = transport->vtable->get_refs_list(transport, 1,
+                                                              &ref_prefixes);
+
+               argv_array_clear(&ref_prefixes);
+               free_refspec(refspec_nr, tmp_rs);
 
                if (flags & TRANSPORT_PUSH_ALL)
                        match_flags |= MATCH_REFS_ALL;
@@ -1117,10 +1182,13 @@ int transport_push(struct transport *transport,
        return 1;
 }
 
-const struct ref *transport_get_remote_refs(struct transport *transport)
+const struct ref *transport_get_remote_refs(struct transport *transport,
+                                           const struct argv_array *ref_prefixes)
 {
        if (!transport->got_remote_refs) {
-               transport->remote_refs = transport->vtable->get_refs_list(transport, 0);
+               transport->remote_refs =
+                       transport->vtable->get_refs_list(transport, 0,
+                                                        ref_prefixes);
                transport->got_remote_refs = 1;
        }
 
index 3c68d73b215bbabc81a75a810b1083697bfd6329..e783cfa07536b5202eaa697e31a765c1847f1dbf 100644 (file)
@@ -59,6 +59,12 @@ struct transport {
         */
        unsigned cloning : 1;
 
+       /*
+        * Indicates that the transport is connected via a half-duplex
+        * connection and should operate in stateless-rpc mode.
+        */
+       unsigned stateless_rpc : 1;
+
        /*
         * These strings will be passed to the {pre, post}-receive hook,
         * on the remote side, if both sides support the push options capability.
@@ -194,7 +200,17 @@ int transport_push(struct transport *connection,
                   int refspec_nr, const char **refspec, int flags,
                   unsigned int * reject_reasons);
 
-const struct ref *transport_get_remote_refs(struct transport *transport);
+/*
+ * Retrieve refs from a remote.
+ *
+ * Optionally a list of ref prefixes can be provided which can be sent to the
+ * server (when communicating using protocol v2) to enable it to limit the ref
+ * advertisement.  Since ref filtering is done on the server's end (and only
+ * when using protocol v2), this can return refs which don't match the provided
+ * ref_prefixes.
+ */
+const struct ref *transport_get_remote_refs(struct transport *transport,
+                                           const struct argv_array *ref_prefixes);
 
 int transport_fetch_refs(struct transport *transport, struct ref *refs);
 void transport_unlock_pack(struct transport *transport);
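
A caller-side sketch of the extended transport_get_remote_refs(), for illustration only and not part of the patch (the helper name list_remote_branches is made up): with protocol v2 the prefixes are sent to the server to trim the advertisement; with v0/v1 they are ignored, so the caller must still be prepared to see refs outside refs/heads/.

    #include "cache.h"
    #include "remote.h"
    #include "transport.h"
    #include "argv-array.h"

    static void list_remote_branches(const char *remote_name)
    {
            struct remote *remote = remote_get(remote_name);
            struct transport *transport = transport_get(remote, NULL);
            struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
            const struct ref *ref;

            /* ask a v2-capable server to advertise branches only */
            argv_array_push(&ref_prefixes, "refs/heads/");

            for (ref = transport_get_remote_refs(transport, &ref_prefixes);
                 ref; ref = ref->next)
                    printf("%s %s\n", oid_to_hex(&ref->old_oid), ref->name);

            argv_array_clear(&ref_prefixes);
            transport_disconnect(transport);
    }
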
index 63a87ed666bbb10cb3c2bd0e27117ac696e7d1b3..e11b3063afa610239162dc45c24528dd144c4759 100644 (file)
@@ -84,8 +84,7 @@ void *fill_tree_descriptor(struct tree_desc *desc, const struct object_id *oid)
        void *buf = NULL;
 
        if (oid) {
-               buf = read_object_with_reference(oid->hash, tree_type, &size,
-                                                NULL);
+               buf = read_object_with_reference(oid, tree_type, &size, NULL);
                if (!buf)
                        die("unable to read tree %s", oid_to_hex(oid));
        }
@@ -492,7 +491,7 @@ struct dir_state {
        unsigned char sha1[20];
 };
 
-static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char *result, unsigned *mode)
+static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned *mode)
 {
        int namelen = strlen(name);
        while (t->size) {
@@ -511,7 +510,7 @@ static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char
                if (cmp < 0)
                        break;
                if (entrylen == namelen) {
-                       hashcpy(result, oid->hash);
+                       oidcpy(result, oid);
                        return 0;
                }
                if (name[entrylen] != '/')
@@ -519,27 +518,27 @@ static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char
                if (!S_ISDIR(*mode))
                        break;
                if (++entrylen == namelen) {
-                       hashcpy(result, oid->hash);
+                       oidcpy(result, oid);
                        return 0;
                }
-               return get_tree_entry(oid->hash, name + entrylen, result, mode);
+               return get_tree_entry(oid, name + entrylen, result, mode);
        }
        return -1;
 }
 
-int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned char *sha1, unsigned *mode)
+int get_tree_entry(const struct object_id *tree_oid, const char *name, struct object_id *oid, unsigned *mode)
 {
        int retval;
        void *tree;
        unsigned long size;
-       unsigned char root[20];
+       struct object_id root;
 
-       tree = read_object_with_reference(tree_sha1, tree_type, &size, root);
+       tree = read_object_with_reference(tree_oid, tree_type, &size, &root);
        if (!tree)
                return -1;
 
        if (name[0] == '\0') {
-               hashcpy(sha1, root);
+               oidcpy(oid, &root);
                free(tree);
                return 0;
        }
@@ -549,7 +548,7 @@ int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned ch
        } else {
                struct tree_desc t;
                init_tree_desc(&t, tree, size);
-               retval = find_tree_entry(&t, name, sha1, mode);
+               retval = find_tree_entry(&t, name, oid, mode);
        }
        free(tree);
        return retval;
@@ -583,14 +582,14 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
        struct dir_state *parents = NULL;
        size_t parents_alloc = 0;
        size_t i, parents_nr = 0;
-       unsigned char current_tree_sha1[20];
+       struct object_id current_tree_oid;
        struct strbuf namebuf = STRBUF_INIT;
        struct tree_desc t;
        int follows_remaining = GET_TREE_ENTRY_FOLLOW_SYMLINKS_MAX_LINKS;
 
        init_tree_desc(&t, NULL, 0UL);
        strbuf_addstr(&namebuf, name);
-       hashcpy(current_tree_sha1, tree_sha1);
+       hashcpy(current_tree_oid.hash, tree_sha1);
 
        while (1) {
                int find_result;
@@ -599,22 +598,22 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
 
                if (!t.buffer) {
                        void *tree;
-                       unsigned char root[20];
+                       struct object_id root;
                        unsigned long size;
-                       tree = read_object_with_reference(current_tree_sha1,
+                       tree = read_object_with_reference(&current_tree_oid,
                                                          tree_type, &size,
-                                                         root);
+                                                         &root);
                        if (!tree)
                                goto done;
 
                        ALLOC_GROW(parents, parents_nr + 1, parents_alloc);
                        parents[parents_nr].tree = tree;
                        parents[parents_nr].size = size;
-                       hashcpy(parents[parents_nr].sha1, root);
+                       hashcpy(parents[parents_nr].sha1, root.hash);
                        parents_nr++;
 
                        if (namebuf.buf[0] == '\0') {
-                               hashcpy(result, root);
+                               hashcpy(result, root.hash);
                                retval = FOUND;
                                goto done;
                        }
@@ -671,14 +670,14 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
 
                /* Look up the first (or only) path component in the tree. */
                find_result = find_tree_entry(&t, namebuf.buf,
-                                             current_tree_sha1, mode);
+                                             &current_tree_oid, mode);
                if (find_result) {
                        goto done;
                }
 
                if (S_ISDIR(*mode)) {
                        if (!remainder) {
-                               hashcpy(result, current_tree_sha1);
+                               hashcpy(result, current_tree_oid.hash);
                                retval = FOUND;
                                goto done;
                        }
@@ -688,7 +687,7 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
                                      1 + first_slash - namebuf.buf);
                } else if (S_ISREG(*mode)) {
                        if (!remainder) {
-                               hashcpy(result, current_tree_sha1);
+                               hashcpy(result, current_tree_oid.hash);
                                retval = FOUND;
                        } else {
                                retval = NOT_DIR;
@@ -714,8 +713,8 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
                         */
                        retval = DANGLING_SYMLINK;
 
-                       contents = read_sha1_file(current_tree_sha1, &type,
-                                                 &link_len);
+                       contents = read_object_file(&current_tree_oid, &type,
+                                                   &link_len);
 
                        if (!contents)
                                goto done;
index b6bd1b4ccfbb8bb69c464ea687c63a2058a424b8..4617deeb0e09e71c7ba7231192ece055eccb2dde 100644 (file)
@@ -79,7 +79,7 @@ struct traverse_info {
        int show_all_errors;
 };
 
-int get_tree_entry(const unsigned char *, const char *, unsigned char *, unsigned *);
+int get_tree_entry(const struct object_id *, const char *, struct object_id *, unsigned *);
 extern char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
 extern void setup_traverse_info(struct traverse_info *info, const char *base);
 
diff --git a/tree.c b/tree.c
index b224115e0f4d61368560eba406a04f0259b7c4f0..1c68ea586bd30d3e3389efa1c83f25ed607d1d80 100644 (file)
--- a/tree.c
+++ b/tree.c
@@ -10,7 +10,7 @@
 const char *tree_type = "tree";
 
 static int read_one_entry_opt(struct index_state *istate,
-                             const unsigned char *sha1,
+                             const struct object_id *oid,
                              const char *base, int baselen,
                              const char *pathname,
                              unsigned mode, int stage, int opt)
@@ -31,16 +31,16 @@ static int read_one_entry_opt(struct index_state *istate,
        ce->ce_namelen = baselen + len;
        memcpy(ce->name, base, baselen);
        memcpy(ce->name + baselen, pathname, len+1);
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        return add_index_entry(istate, ce, opt);
 }
 
-static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
                          const char *pathname, unsigned mode, int stage,
                          void *context)
 {
        struct index_state *istate = context;
-       return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
+       return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
 }
@@ -49,12 +49,12 @@ static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
  * This is used when the caller knows there is no existing entries at
  * the stage that will conflict with the entry being added.
  */
-static int read_one_entry_quick(const unsigned char *sha1, struct strbuf *base,
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
                                const char *pathname, unsigned mode, int stage,
                                void *context)
 {
        struct index_state *istate = context;
-       return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
+       return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_JUST_APPEND);
 }
@@ -83,7 +83,7 @@ static int read_tree_1(struct tree *tree, struct strbuf *base,
                                continue;
                }
 
-               switch (fn(entry.oid->hash, base,
+               switch (fn(entry.oid, base,
                           entry.path, entry.mode, stage, context)) {
                case 0:
                        continue;
@@ -219,7 +219,7 @@ int parse_tree_gently(struct tree *item, int quiet_on_missing)
 
        if (item->object.parsed)
                return 0;
-       buffer = read_sha1_file(item->object.oid.hash, &type, &size);
+       buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
diff --git a/tree.h b/tree.h
index 744e6dc2ac883adfa0e079f5f84f45a45e22b59d..e2a80be4ef87e35d895e8591a0f8a75df347d347 100644 (file)
--- a/tree.h
+++ b/tree.h
@@ -27,7 +27,7 @@ void free_tree_buffer(struct tree *tree);
 struct tree *parse_tree_indirect(const struct object_id *oid);
 
 #define READ_TREE_RECURSIVE 1
-typedef int (*read_tree_fn_t)(const unsigned char *, struct strbuf *, const char *, unsigned int, int, void *);
+typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, int, void *);
 
 extern int read_tree_recursive(struct tree *tree,
                               const char *base, int baselen,
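
Because read_tree_fn_t now receives a struct object_id, every callback passed to read_tree_recursive() changes shape the same way. A hypothetical callback under the new signature (print_entry and its output format are illustrative only, not part of the series):

    static int print_entry(const struct object_id *oid, struct strbuf *base,
                           const char *pathname, unsigned int mode,
                           int stage, void *context)
    {
            printf("%06o %s\t%s%s\n", mode, oid_to_hex(oid),
                   base->buf, pathname);
            return READ_TREE_RECURSIVE; /* keep descending into subtrees */
    }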
diff --git a/unpack-trees.c b/unpack-trees.c
index e6a15bbe44f24555a8edb9461c2af553de70cba0..79fd97074ec238714634de1c9e6144accdc710f3 100644 (file)
@@ -195,10 +195,10 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
 static struct cache_entry *dup_entry(const struct cache_entry *ce)
 {
        unsigned int size = ce_size(ce);
-       struct cache_entry *new = xmalloc(size);
+       struct cache_entry *new_entry = xmalloc(size);
 
-       memcpy(new, ce, size);
-       return new;
+       memcpy(new_entry, ce, size);
+       return new_entry;
 }
 
 static void add_entry(struct unpack_trees_options *o,
@@ -391,6 +391,7 @@ static int check_updates(struct unpack_trees_options *o)
                        fetch_objects(repository_format_partial_clone,
                                      &to_fetch);
                fetch_if_missing = fetch_if_missing_store;
+               oid_array_clear(&to_fetch);
        }
        for (i = 0; i < index->cache_nr; i++) {
                struct cache_entry *ce = index->cache[i];
@@ -1508,8 +1509,8 @@ static int verify_uptodate_1(const struct cache_entry *ce,
                add_rejected_path(o, error_type, ce->name);
 }
 
-static int verify_uptodate(const struct cache_entry *ce,
-                          struct unpack_trees_options *o)
+int verify_uptodate(const struct cache_entry *ce,
+                   struct unpack_trees_options *o)
 {
        if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
                return 0;
@@ -1528,7 +1529,7 @@ static void invalidate_ce_path(const struct cache_entry *ce,
        if (!ce)
                return;
        cache_tree_invalidate_path(o->src_index, ce->name);
-       untracked_cache_invalidate_path(o->src_index, ce->name);
+       untracked_cache_invalidate_path(o->src_index, ce->name, 1);
 }
 
 /*
diff --git a/unpack-trees.h b/unpack-trees.h
index 6c48117b845fbf7b983852be302e4472c5e6d651..41178ada94a4b7c5222cab7dd17d9eeb7a1956e4 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef UNPACK_TREES_H
 #define UNPACK_TREES_H
 
+#include "tree-walk.h"
 #include "string-list.h"
 
 #define MAX_UNPACK_TREES 8
@@ -78,6 +79,9 @@ struct unpack_trees_options {
 extern int unpack_trees(unsigned n, struct tree_desc *t,
                struct unpack_trees_options *options);
 
+int verify_uptodate(const struct cache_entry *ce,
+                   struct unpack_trees_options *o);
+
 int threeway_merge(const struct cache_entry * const *stages,
                   struct unpack_trees_options *o);
 int twoway_merge(const struct cache_entry * const *src,
diff --git a/upload-pack.c b/upload-pack.c
index f51b6cfca9435d6b3b3f5b44b593a7c5cd2b1d3e..87b4d32a6e23aed2416a9bb752dfa56b916b141c 100644 (file)
@@ -6,7 +6,6 @@
 #include "tag.h"
 #include "object.h"
 #include "commit.h"
-#include "exec_cmd.h"
 #include "diff.h"
 #include "revision.h"
 #include "list-objects.h"
 #include "sigchain.h"
 #include "version.h"
 #include "string-list.h"
-#include "parse-options.h"
 #include "argv-array.h"
 #include "prio-queue.h"
 #include "protocol.h"
 #include "quote.h"
-
-static const char * const upload_pack_usage[] = {
-       N_("git upload-pack [<options>] <dir>"),
-       NULL
-};
+#include "upload-pack.h"
+#include "serve.h"
 
 /* Remember to update object flag allocation in object.h */
 #define THEY_HAVE      (1u << 11)
@@ -64,12 +59,11 @@ static int keepalive = 5;
  * otherwise maximum packet size (up to 65520 bytes).
  */
 static int use_sideband;
-static int advertise_refs;
 static int stateless_rpc;
 static const char *pack_objects_hook;
 
 static int filter_capability_requested;
-static int filter_advertise;
+static int allow_filter;
 static struct list_objects_filter_options filter_options;
 
 static void reset_timeout(void)
@@ -734,7 +728,6 @@ static void deepen(int depth, int deepen_relative,
        }
 
        send_unshallow(shallows);
-       packet_flush(1);
 }
 
 static void deepen_by_rev_list(int ac, const char **av,
@@ -746,7 +739,122 @@ static void deepen_by_rev_list(int ac, const char **av,
        send_shallow(result);
        free_commit_list(result);
        send_unshallow(shallows);
-       packet_flush(1);
+}
+
+/* Returns 1 if a shallow list is sent or 0 otherwise */
+static int send_shallow_list(int depth, int deepen_rev_list,
+                            timestamp_t deepen_since,
+                            struct string_list *deepen_not,
+                            struct object_array *shallows)
+{
+       int ret = 0;
+
+       if (depth > 0 && deepen_rev_list)
+               die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
+       if (depth > 0) {
+               deepen(depth, deepen_relative, shallows);
+               ret = 1;
+       } else if (deepen_rev_list) {
+               struct argv_array av = ARGV_ARRAY_INIT;
+               int i;
+
+               argv_array_push(&av, "rev-list");
+               if (deepen_since)
+                       argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
+               if (deepen_not->nr) {
+                       argv_array_push(&av, "--not");
+                       for (i = 0; i < deepen_not->nr; i++) {
+                               struct string_list_item *s = deepen_not->items + i;
+                               argv_array_push(&av, s->string);
+                       }
+                       argv_array_push(&av, "--not");
+               }
+               for (i = 0; i < want_obj.nr; i++) {
+                       struct object *o = want_obj.objects[i].item;
+                       argv_array_push(&av, oid_to_hex(&o->oid));
+               }
+               deepen_by_rev_list(av.argc, av.argv, shallows);
+               argv_array_clear(&av);
+               ret = 1;
+       } else {
+               if (shallows->nr > 0) {
+                       int i;
+                       for (i = 0; i < shallows->nr; i++)
+                               register_shallow(&shallows->objects[i].item->oid);
+               }
+       }
+
+       shallow_nr += shallows->nr;
+       return ret;
+}
+
+static int process_shallow(const char *line, struct object_array *shallows)
+{
+       const char *arg;
+       if (skip_prefix(line, "shallow ", &arg)) {
+               struct object_id oid;
+               struct object *object;
+               if (get_oid_hex(arg, &oid))
+                       die("invalid shallow line: %s", line);
+               object = parse_object(&oid);
+               if (!object)
+                       return 1;
+               if (object->type != OBJ_COMMIT)
+                       die("invalid shallow object %s", oid_to_hex(&oid));
+               if (!(object->flags & CLIENT_SHALLOW)) {
+                       object->flags |= CLIENT_SHALLOW;
+                       add_object_array(object, NULL, shallows);
+               }
+               return 1;
+       }
+
+       return 0;
+}
+
+static int process_deepen(const char *line, int *depth)
+{
+       const char *arg;
+       if (skip_prefix(line, "deepen ", &arg)) {
+               char *end = NULL;
+               *depth = (int)strtol(arg, &end, 0);
+               if (!end || *end || *depth <= 0)
+                       die("Invalid deepen: %s", line);
+               return 1;
+       }
+
+       return 0;
+}
+
+static int process_deepen_since(const char *line, timestamp_t *deepen_since, int *deepen_rev_list)
+{
+       const char *arg;
+       if (skip_prefix(line, "deepen-since ", &arg)) {
+               char *end = NULL;
+               *deepen_since = parse_timestamp(arg, &end, 0);
+               if (!end || *end || !*deepen_since ||
+                   /* revisions.c's max_age -1 is special */
+                   *deepen_since == -1)
+                       die("Invalid deepen-since: %s", line);
+               *deepen_rev_list = 1;
+               return 1;
+       }
+       return 0;
+}
+
+static int process_deepen_not(const char *line, struct string_list *deepen_not, int *deepen_rev_list)
+{
+       const char *arg;
+       if (skip_prefix(line, "deepen-not ", &arg)) {
+               char *ref = NULL;
+               struct object_id oid;
+               if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+                       die("git upload-pack: ambiguous deepen-not: %s", line);
+               string_list_append(deepen_not, ref);
+               free(ref);
+               *deepen_rev_list = 1;
+               return 1;
+       }
+       return 0;
 }
 
 static void receive_needs(void)
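
Each of the process_*() helpers above recognizes exactly one request line and is tried in turn by receive_needs() below (and reused later by the v2 code path). As a rough stand-alone illustration of the depth validation performed in process_deepen() (hypothetical program, not part of the patch), the argument must be fully numeric and strictly positive:

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the strtol() check in process_deepen(): reject empty
     * input, trailing garbage, zero and negative depths. */
    static int parse_depth(const char *arg, int *depth)
    {
            char *end = NULL;
            long val = strtol(arg, &end, 0);

            if (end == arg || *end || val <= 0)
                    return -1;
            *depth = (int)val;
            return 0;
    }

    int main(void)
    {
            const char *samples[] = { "5", "0", "-3", "7abc" };
            int i, depth;

            for (i = 0; i < 4; i++)
                    printf("%-5s -> %s\n", samples[i],
                           parse_depth(samples[i], &depth) ? "invalid" : "ok");
            return 0;
    }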
@@ -770,55 +878,22 @@ static void receive_needs(void)
                if (!line)
                        break;
 
-               if (skip_prefix(line, "shallow ", &arg)) {
-                       struct object_id oid;
-                       struct object *object;
-                       if (get_oid_hex(arg, &oid))
-                               die("invalid shallow line: %s", line);
-                       object = parse_object(&oid);
-                       if (!object)
-                               continue;
-                       if (object->type != OBJ_COMMIT)
-                               die("invalid shallow object %s", oid_to_hex(&oid));
-                       if (!(object->flags & CLIENT_SHALLOW)) {
-                               object->flags |= CLIENT_SHALLOW;
-                               add_object_array(object, NULL, &shallows);
-                       }
+               if (process_shallow(line, &shallows))
                        continue;
-               }
-               if (skip_prefix(line, "deepen ", &arg)) {
-                       char *end = NULL;
-                       depth = strtol(arg, &end, 0);
-                       if (!end || *end || depth <= 0)
-                               die("Invalid deepen: %s", line);
+               if (process_deepen(line, &depth))
                        continue;
-               }
-               if (skip_prefix(line, "deepen-since ", &arg)) {
-                       char *end = NULL;
-                       deepen_since = parse_timestamp(arg, &end, 0);
-                       if (!end || *end || !deepen_since ||
-                           /* revisions.c's max_age -1 is special */
-                           deepen_since == -1)
-                               die("Invalid deepen-since: %s", line);
-                       deepen_rev_list = 1;
+               if (process_deepen_since(line, &deepen_since, &deepen_rev_list))
                        continue;
-               }
-               if (skip_prefix(line, "deepen-not ", &arg)) {
-                       char *ref = NULL;
-                       struct object_id oid;
-                       if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
-                               die("git upload-pack: ambiguous deepen-not: %s", line);
-                       string_list_append(&deepen_not, ref);
-                       free(ref);
-                       deepen_rev_list = 1;
+               if (process_deepen_not(line, &deepen_not, &deepen_rev_list))
                        continue;
-               }
+
                if (skip_prefix(line, "filter ", &arg)) {
                        if (!filter_capability_requested)
                                die("git upload-pack: filtering capability not negotiated");
                        parse_list_objects_filter(&filter_options, arg);
                        continue;
                }
+
                if (!skip_prefix(line, "want ", &arg) ||
                    get_oid_hex(arg, &oid_buf))
                        die("git upload-pack: protocol error, "
@@ -846,7 +921,7 @@ static void receive_needs(void)
                        no_progress = 1;
                if (parse_feature_request(features, "include-tag"))
                        use_include_tag = 1;
-               if (parse_feature_request(features, "filter"))
+               if (allow_filter && parse_feature_request(features, "filter"))
                        filter_capability_requested = 1;
 
                o = parse_object(&oid_buf);
@@ -881,40 +956,10 @@ static void receive_needs(void)
 
        if (depth == 0 && !deepen_rev_list && shallows.nr == 0)
                return;
-       if (depth > 0 && deepen_rev_list)
-               die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
-       if (depth > 0)
-               deepen(depth, deepen_relative, &shallows);
-       else if (deepen_rev_list) {
-               struct argv_array av = ARGV_ARRAY_INIT;
-               int i;
 
-               argv_array_push(&av, "rev-list");
-               if (deepen_since)
-                       argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
-               if (deepen_not.nr) {
-                       argv_array_push(&av, "--not");
-                       for (i = 0; i < deepen_not.nr; i++) {
-                               struct string_list_item *s = deepen_not.items + i;
-                               argv_array_push(&av, s->string);
-                       }
-                       argv_array_push(&av, "--not");
-               }
-               for (i = 0; i < want_obj.nr; i++) {
-                       struct object *o = want_obj.objects[i].item;
-                       argv_array_push(&av, oid_to_hex(&o->oid));
-               }
-               deepen_by_rev_list(av.argc, av.argv, &shallows);
-               argv_array_clear(&av);
-       }
-       else
-               if (shallows.nr > 0) {
-                       int i;
-                       for (i = 0; i < shallows.nr; i++)
-                               register_shallow(&shallows.objects[i].item->oid);
-               }
-
-       shallow_nr += shallows.nr;
+       if (send_shallow_list(depth, deepen_rev_list, deepen_since,
+                             &deepen_not, &shallows))
+               packet_flush(1);
        object_array_clear(&shallows);
 }
 
@@ -976,7 +1021,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
                                     " allow-reachable-sha1-in-want" : "",
                             stateless_rpc ? " no-done" : "",
                             symref_info.buf,
-                            filter_advertise ? " filter" : "",
+                            allow_filter ? " filter" : "",
                             git_user_agent_sanitized());
                strbuf_release(&symref_info);
        } else {
@@ -1004,33 +1049,6 @@ static int find_symref(const char *refname, const struct object_id *oid,
        return 0;
 }
 
-static void upload_pack(void)
-{
-       struct string_list symref = STRING_LIST_INIT_DUP;
-
-       head_ref_namespaced(find_symref, &symref);
-
-       if (advertise_refs || !stateless_rpc) {
-               reset_timeout();
-               head_ref_namespaced(send_ref, &symref);
-               for_each_namespaced_ref(send_ref, &symref);
-               advertise_shallow_grafts(1);
-               packet_flush(1);
-       } else {
-               head_ref_namespaced(check_ref, NULL);
-               for_each_namespaced_ref(check_ref, NULL);
-       }
-       string_list_clear(&symref, 1);
-       if (advertise_refs)
-               return;
-
-       receive_needs();
-       if (want_obj.nr) {
-               get_common_commits();
-               create_pack_file();
-       }
-}
-
 static int upload_pack_config(const char *var, const char *value, void *unused)
 {
        if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
@@ -1056,63 +1074,361 @@ static int upload_pack_config(const char *var, const char *value, void *unused)
                if (!strcmp("uploadpack.packobjectshook", var))
                        return git_config_string(&pack_objects_hook, var, value);
        } else if (!strcmp("uploadpack.allowfilter", var)) {
-               filter_advertise = git_config_bool(var, value);
+               allow_filter = git_config_bool(var, value);
        }
        return parse_hide_refs_config(var, value, "uploadpack");
 }
 
-int cmd_main(int argc, const char **argv)
+void upload_pack(struct upload_pack_options *options)
 {
-       const char *dir;
-       int strict = 0;
-       struct option options[] = {
-               OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
-                        N_("quit after a single request/response exchange")),
-               OPT_BOOL(0, "advertise-refs", &advertise_refs,
-                        N_("exit immediately after initial ref advertisement")),
-               OPT_BOOL(0, "strict", &strict,
-                        N_("do not try <directory>/.git/ if <directory> is no Git directory")),
-               OPT_INTEGER(0, "timeout", &timeout,
-                           N_("interrupt transfer after <n> seconds of inactivity")),
-               OPT_END()
-       };
+       struct string_list symref = STRING_LIST_INIT_DUP;
 
-       packet_trace_identity("upload-pack");
-       check_replace_refs = 0;
+       stateless_rpc = options->stateless_rpc;
+       timeout = options->timeout;
+       daemon_mode = options->daemon_mode;
 
-       argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+       git_config(upload_pack_config, NULL);
 
-       if (argc != 1)
-               usage_with_options(upload_pack_usage, options);
+       head_ref_namespaced(find_symref, &symref);
 
-       if (timeout)
-               daemon_mode = 1;
+       if (options->advertise_refs || !stateless_rpc) {
+               reset_timeout();
+               head_ref_namespaced(send_ref, &symref);
+               for_each_namespaced_ref(send_ref, &symref);
+               advertise_shallow_grafts(1);
+               packet_flush(1);
+       } else {
+               head_ref_namespaced(check_ref, NULL);
+               for_each_namespaced_ref(check_ref, NULL);
+       }
+       string_list_clear(&symref, 1);
+       if (options->advertise_refs)
+               return;
 
-       setup_path();
+       receive_needs();
+       if (want_obj.nr) {
+               get_common_commits();
+               create_pack_file();
+       }
+}
 
-       dir = argv[0];
+struct upload_pack_data {
+       struct object_array wants;
+       struct oid_array haves;
 
-       if (!enter_repo(dir, strict))
-               die("'%s' does not appear to be a git repository", dir);
+       struct object_array shallows;
+       struct string_list deepen_not;
+       int depth;
+       timestamp_t deepen_since;
+       int deepen_rev_list;
+       int deepen_relative;
 
-       git_config(upload_pack_config, NULL);
+       unsigned stateless_rpc : 1;
 
-       switch (determine_protocol_version_server()) {
-       case protocol_v1:
-               /*
-                * v1 is just the original protocol with a version string,
-                * so just fall through after writing the version string.
-                */
-               if (advertise_refs || !stateless_rpc)
-                       packet_write_fmt(1, "version 1\n");
-
-               /* fallthrough */
-       case protocol_v0:
-               upload_pack();
-               break;
-       case protocol_unknown_version:
-               BUG("unknown protocol version");
+       unsigned use_thin_pack : 1;
+       unsigned use_ofs_delta : 1;
+       unsigned no_progress : 1;
+       unsigned use_include_tag : 1;
+       unsigned done : 1;
+};
+
+static void upload_pack_data_init(struct upload_pack_data *data)
+{
+       struct object_array wants = OBJECT_ARRAY_INIT;
+       struct oid_array haves = OID_ARRAY_INIT;
+       struct object_array shallows = OBJECT_ARRAY_INIT;
+       struct string_list deepen_not = STRING_LIST_INIT_DUP;
+
+       memset(data, 0, sizeof(*data));
+       data->wants = wants;
+       data->haves = haves;
+       data->shallows = shallows;
+       data->deepen_not = deepen_not;
+}
+
+static void upload_pack_data_clear(struct upload_pack_data *data)
+{
+       object_array_clear(&data->wants);
+       oid_array_clear(&data->haves);
+       object_array_clear(&data->shallows);
+       string_list_clear(&data->deepen_not, 0);
+}
+
+static int parse_want(const char *line)
+{
+       const char *arg;
+       if (skip_prefix(line, "want ", &arg)) {
+               struct object_id oid;
+               struct object *o;
+
+               if (get_oid_hex(arg, &oid))
+                       die("git upload-pack: protocol error, "
+                           "expected to get oid, not '%s'", line);
+
+               o = parse_object(&oid);
+               if (!o) {
+                       packet_write_fmt(1,
+                                        "ERR upload-pack: not our ref %s",
+                                        oid_to_hex(&oid));
+                       die("git upload-pack: not our ref %s",
+                           oid_to_hex(&oid));
+               }
+
+               if (!(o->flags & WANTED)) {
+                       o->flags |= WANTED;
+                       add_object_array(o, NULL, &want_obj);
+               }
+
+               return 1;
+       }
+
+       return 0;
+}
+
+static int parse_have(const char *line, struct oid_array *haves)
+{
+       const char *arg;
+       if (skip_prefix(line, "have ", &arg)) {
+               struct object_id oid;
+
+               if (get_oid_hex(arg, &oid))
+                       die("git upload-pack: expected SHA1 object, got '%s'", arg);
+               oid_array_append(haves, &oid);
+               return 1;
        }
 
        return 0;
 }
+
+static void process_args(struct packet_reader *request,
+                        struct upload_pack_data *data)
+{
+       while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+               const char *arg = request->line;
+
+               /* process want */
+               if (parse_want(arg))
+                       continue;
+               /* process have line */
+               if (parse_have(arg, &data->haves))
+                       continue;
+
+               /* process args like thin-pack */
+               if (!strcmp(arg, "thin-pack")) {
+                       use_thin_pack = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "ofs-delta")) {
+                       use_ofs_delta = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "no-progress")) {
+                       no_progress = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "include-tag")) {
+                       use_include_tag = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "done")) {
+                       data->done = 1;
+                       continue;
+               }
+
+               /* Shallow related arguments */
+               if (process_shallow(arg, &data->shallows))
+                       continue;
+               if (process_deepen(arg, &data->depth))
+                       continue;
+               if (process_deepen_since(arg, &data->deepen_since,
+                                        &data->deepen_rev_list))
+                       continue;
+               if (process_deepen_not(arg, &data->deepen_not,
+                                      &data->deepen_rev_list))
+                       continue;
+               if (!strcmp(arg, "deepen-relative")) {
+                       data->deepen_relative = 1;
+                       continue;
+               }
+
+               /* maybe we should ignore unknown lines; for now they are an error */
+               die("unexpected line: '%s'", arg);
+       }
+}
+
+static int process_haves(struct oid_array *haves, struct oid_array *common)
+{
+       int i;
+
+       /* Process haves */
+       for (i = 0; i < haves->nr; i++) {
+               const struct object_id *oid = &haves->oid[i];
+               struct object *o;
+               int we_knew_they_have = 0;
+
+               if (!has_object_file(oid))
+                       continue;
+
+               oid_array_append(common, oid);
+
+               o = parse_object(oid);
+               if (!o)
+                       die("oops (%s)", oid_to_hex(oid));
+               if (o->type == OBJ_COMMIT) {
+                       struct commit_list *parents;
+                       struct commit *commit = (struct commit *)o;
+                       if (o->flags & THEY_HAVE)
+                               we_knew_they_have = 1;
+                       else
+                               o->flags |= THEY_HAVE;
+                       if (!oldest_have || (commit->date < oldest_have))
+                               oldest_have = commit->date;
+                       for (parents = commit->parents;
+                            parents;
+                            parents = parents->next)
+                               parents->item->object.flags |= THEY_HAVE;
+               }
+               if (!we_knew_they_have)
+                       add_object_array(o, NULL, &have_obj);
+       }
+
+       return 0;
+}
+
+static int send_acks(struct oid_array *acks, struct strbuf *response)
+{
+       int i;
+
+       packet_buf_write(response, "acknowledgments\n");
+
+       /* Send Acks */
+       if (!acks->nr)
+               packet_buf_write(response, "NAK\n");
+
+       for (i = 0; i < acks->nr; i++) {
+               packet_buf_write(response, "ACK %s\n",
+                                oid_to_hex(&acks->oid[i]));
+       }
+
+       if (ok_to_give_up()) {
+               /* Send Ready */
+               packet_buf_write(response, "ready\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+static int process_haves_and_send_acks(struct upload_pack_data *data)
+{
+       struct oid_array common = OID_ARRAY_INIT;
+       struct strbuf response = STRBUF_INIT;
+       int ret = 0;
+
+       process_haves(&data->haves, &common);
+       if (data->done) {
+               ret = 1;
+       } else if (send_acks(&common, &response)) {
+               packet_buf_delim(&response);
+               ret = 1;
+       } else {
+               /* Add Flush */
+               packet_buf_flush(&response);
+               ret = 0;
+       }
+
+       /* Send response */
+       write_or_die(1, response.buf, response.len);
+       strbuf_release(&response);
+
+       oid_array_clear(&data->haves);
+       oid_array_clear(&common);
+       return ret;
+}
+
+static void send_shallow_info(struct upload_pack_data *data)
+{
+       /* No shallow info needs to be sent */
+       if (!data->depth && !data->deepen_rev_list && !data->shallows.nr &&
+           !is_repository_shallow())
+               return;
+
+       packet_write_fmt(1, "shallow-info\n");
+
+       if (!send_shallow_list(data->depth, data->deepen_rev_list,
+                              data->deepen_since, &data->deepen_not,
+                              &data->shallows) && is_repository_shallow())
+               deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows);
+
+       packet_delim(1);
+}
+
+enum fetch_state {
+       FETCH_PROCESS_ARGS = 0,
+       FETCH_SEND_ACKS,
+       FETCH_SEND_PACK,
+       FETCH_DONE,
+};
+
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+                  struct packet_reader *request)
+{
+       enum fetch_state state = FETCH_PROCESS_ARGS;
+       struct upload_pack_data data;
+
+       upload_pack_data_init(&data);
+       use_sideband = LARGE_PACKET_MAX;
+
+       while (state != FETCH_DONE) {
+               switch (state) {
+               case FETCH_PROCESS_ARGS:
+                       process_args(request, &data);
+
+                       if (!want_obj.nr) {
+                               /*
+                                * Request didn't contain any 'want' lines,
+                                * guess they didn't want anything.
+                                */
+                               state = FETCH_DONE;
+                       } else if (data.haves.nr) {
+                               /*
+                                * Request had 'have' lines, so let's ACK them.
+                                */
+                               state = FETCH_SEND_ACKS;
+                       } else {
+                               /*
+                                * Request had 'want's but no 'have's, so we can
+                                * immediately construct and send a pack.
+                                */
+                               state = FETCH_SEND_PACK;
+                       }
+                       break;
+               case FETCH_SEND_ACKS:
+                       if (process_haves_and_send_acks(&data))
+                               state = FETCH_SEND_PACK;
+                       else
+                               state = FETCH_DONE;
+                       break;
+               case FETCH_SEND_PACK:
+                       send_shallow_info(&data);
+
+                       packet_write_fmt(1, "packfile\n");
+                       create_pack_file();
+                       state = FETCH_DONE;
+                       break;
+               case FETCH_DONE:
+                       continue;
+               }
+       }
+
+       upload_pack_data_clear(&data);
+       return 0;
+}
+
+int upload_pack_advertise(struct repository *r,
+                         struct strbuf *value)
+{
+       if (value)
+               strbuf_addstr(value, "shallow");
+       return 1;
+}
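
Stitching together the packet_write_fmt() and packet_buf_write() calls above, a protocol v2 fetch exchange served by upload_pack_v2() should look roughly like the following (illustrative pkt-line transcript; 0001 denotes a delim packet, 0000 a flush, and the command=fetch/capability preamble handled by serve.c is omitted):

    C: want <oid>
    C: have <oid>
    C: done
    C: 0000
    S: shallow-info              (section sent only when shallow handling applies)
    S: shallow <oid>
    S: 0001
    S: packfile
    S: <pack data, side-band multiplexed>
    S: 0000

When the client has not sent "done", the server instead replies with an "acknowledgments" section (NAK or ACK <oid> lines, plus "ready" once ok_to_give_up() is satisfied) and only moves on to the packfile section after signalling "ready".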
diff --git a/upload-pack.h b/upload-pack.h
new file mode 100644 (file)
index 0000000..cab2178
--- /dev/null
+++ b/upload-pack.h
@@ -0,0 +1,23 @@
+#ifndef UPLOAD_PACK_H
+#define UPLOAD_PACK_H
+
+struct upload_pack_options {
+       int stateless_rpc;
+       int advertise_refs;
+       unsigned int timeout;
+       int daemon_mode;
+};
+
+void upload_pack(struct upload_pack_options *options);
+
+struct repository;
+struct argv_array;
+struct packet_reader;
+extern int upload_pack_v2(struct repository *r, struct argv_array *keys,
+                         struct packet_reader *request);
+
+struct strbuf;
+extern int upload_pack_advertise(struct repository *r,
+                                struct strbuf *value);
+
+#endif /* UPLOAD_PACK_H */
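
A hypothetical caller of the new library entry point, sketched under the assumption that the process has already entered the repository; the real caller added by this topic (presumably a thin builtin wrapper) also parses command-line options before delegating:

    #include "cache.h"
    #include "upload-pack.h"

    /* Hypothetical: serve a single stateless-rpc request on stdin/stdout. */
    static void serve_one_stateless_request(unsigned int timeout)
    {
            struct upload_pack_options opts = { 0 };

            opts.stateless_rpc = 1;
            opts.timeout = timeout;
            opts.daemon_mode = !!timeout;
            upload_pack(&opts);
    }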
diff --git a/userdiff.c b/userdiff.c
index dbfb4e13cddceaa44feee51016c9b837f7f4fce1..a69241b25ddaff5b61380aa8b451a6fcb833502c 100644 (file)
@@ -38,6 +38,15 @@ IPATTERN("fortran",
         "|//|\\*\\*|::|[/<>=]="),
 IPATTERN("fountain", "^((\\.[^.]|(int|ext|est|int\\.?/ext|i/e)[. ]).*)$",
         "[^ \t-]+"),
+PATTERNS("golang",
+        /* Functions */
+        "^[ \t]*(func[ \t]*.*(\\{[ \t]*)?)\n"
+        /* Structs and interfaces */
+        "^[ \t]*(type[ \t].*(struct|interface)[ \t]*(\\{[ \t]*)?)",
+        /* -- */
+        "[a-zA-Z_][a-zA-Z0-9_]*"
+        "|[-+0-9.eE]+i?|0[xX]?[0-9a-fA-F]+i?"
+        "|[-+*/<>%&^|=!:]=|--|\\+\\+|<<=?|>>=?|&\\^=?|&&|\\|\\||<-|\\.{3}"),
 PATTERNS("html", "^[ \t]*(<[Hh][1-6]([ \t].*)?>.*)$",
         "[^<>= \t]+"),
 PATTERNS("java",
@@ -138,7 +147,7 @@ PATTERNS("csharp",
         /* Keywords */
         "!^[ \t]*(do|while|for|if|else|instanceof|new|return|switch|case|throw|catch|using)\n"
         /* Methods and constructors */
-        "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
+        "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe|async)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
         /* Properties */
         "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[@._[:alnum:]]+)[ \t]*$\n"
         /* Type definitions */
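
As with the other built-in drivers, the new pattern is enabled per path through .gitattributes, for example:

    *.go diff=golang

after which hunk headers for Go files show the enclosing func or type declaration matched by the xfuncname patterns above, and word-oriented diffs use the Go word regex.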
diff --git a/walker.c b/walker.c
index 5d4d3733f75648dfa8d6955a5ec1ed9f3dfeb77b..dffb9c8e37c220e71e108060dc5a81bc21f8370c 100644 (file)
--- a/walker.c
+++ b/walker.c
@@ -22,7 +22,7 @@ void walker_say(struct walker *walker, const char *fmt, ...)
 static void report_missing(const struct object *obj)
 {
        fprintf(stderr, "Cannot obtain needed %s %s\n",
-               obj->type ? typename(obj->type): "object",
+               obj->type ? type_name(obj->type): "object",
                oid_to_hex(&obj->oid));
        if (!is_null_oid(&current_commit_oid))
                fprintf(stderr, "while processing commit %s.\n",
@@ -134,7 +134,7 @@ static int process_object(struct walker *walker, struct object *obj)
        }
        return error("Unable to determine requirements "
                     "of type %s for %s",
-                    typename(obj->type), oid_to_hex(&obj->oid));
+                    type_name(obj->type), oid_to_hex(&obj->oid));
 }
 
 static int process(struct walker *walker, struct object *obj)
diff --git a/worktree.c b/worktree.c
index f5da7d286d537fa99a1bd2dd5180068b9d85da2f..28989cf06ef4bd15b3b97f86ae9fd32c4b78401f 100644 (file)
@@ -254,6 +254,102 @@ const char *is_worktree_locked(struct worktree *wt)
        return wt->lock_reason;
 }
 
+/* convenient wrapper to deal with NULL strbuf */
+static void strbuf_addf_gently(struct strbuf *buf, const char *fmt, ...)
+{
+       va_list params;
+
+       if (!buf)
+               return;
+
+       va_start(params, fmt);
+       strbuf_vaddf(buf, fmt, params);
+       va_end(params);
+}
+
+int validate_worktree(const struct worktree *wt, struct strbuf *errmsg,
+                     unsigned flags)
+{
+       struct strbuf wt_path = STRBUF_INIT;
+       char *path = NULL;
+       int err, ret = -1;
+
+       strbuf_addf(&wt_path, "%s/.git", wt->path);
+
+       if (is_main_worktree(wt)) {
+               if (is_directory(wt_path.buf)) {
+                       ret = 0;
+                       goto done;
+               }
+               /*
+                * Main worktree using .git file to point to the
+                * repository would make it impossible to know where
+                * the actual worktree is if this function is executed
+                * from another worktree. No .git file support for now.
+                */
+               strbuf_addf_gently(errmsg,
+                                  _("'%s' at main working tree is not the repository directory"),
+                                  wt_path.buf);
+               goto done;
+       }
+
+       /*
+        * Make sure "gitdir" file points to a real .git file and that
+        * file points back here.
+        */
+       if (!is_absolute_path(wt->path)) {
+               strbuf_addf_gently(errmsg,
+                                  _("'%s' file does not contain absolute path to the working tree location"),
+                                  git_common_path("worktrees/%s/gitdir", wt->id));
+               goto done;
+       }
+
+       if (flags & WT_VALIDATE_WORKTREE_MISSING_OK &&
+           !file_exists(wt->path)) {
+               ret = 0;
+               goto done;
+       }
+
+       if (!file_exists(wt_path.buf)) {
+               strbuf_addf_gently(errmsg, _("'%s' does not exist"), wt_path.buf);
+               goto done;
+       }
+
+       path = xstrdup_or_null(read_gitfile_gently(wt_path.buf, &err));
+       if (!path) {
+               strbuf_addf_gently(errmsg, _("'%s' is not a .git file, error code %d"),
+                                  wt_path.buf, err);
+               goto done;
+       }
+
+       ret = fspathcmp(path, real_path(git_common_path("worktrees/%s", wt->id)));
+
+       if (ret)
+               strbuf_addf_gently(errmsg, _("'%s' does not point back to '%s'"),
+                                  wt->path, git_common_path("worktrees/%s", wt->id));
+done:
+       free(path);
+       strbuf_release(&wt_path);
+       return ret;
+}
+
+void update_worktree_location(struct worktree *wt, const char *path_)
+{
+       struct strbuf path = STRBUF_INIT;
+
+       if (is_main_worktree(wt))
+               die("BUG: can't relocate main worktree");
+
+       strbuf_realpath(&path, path_, 1);
+       if (fspathcmp(wt->path, path.buf)) {
+               write_file(git_common_path("worktrees/%s/gitdir", wt->id),
+                          "%s/.git", path.buf);
+               free(wt->path);
+               wt->path = strbuf_detach(&path, NULL);
+       }
+       strbuf_release(&path);
+}
+
 int is_worktree_being_rebased(const struct worktree *wt,
                              const char *target)
 {
diff --git a/worktree.h b/worktree.h
index c28a880e1839ef26604d80de78eb9c663a6be381..fe38ce10c300ba456f406950bf960d19d5d79cce 100644 (file)
@@ -3,6 +3,8 @@
 
 #include "refs.h"
 
+struct strbuf;
+
 struct worktree {
        char *path;
        char *id;
@@ -59,6 +61,22 @@ extern int is_main_worktree(const struct worktree *wt);
  */
 extern const char *is_worktree_locked(struct worktree *wt);
 
+#define WT_VALIDATE_WORKTREE_MISSING_OK (1 << 0)
+
+/*
+ * Return zero if the worktree is in good condition. An error message is
+ * appended to "errmsg" if it is not NULL.
+ */
+extern int validate_worktree(const struct worktree *wt,
+                            struct strbuf *errmsg,
+                            unsigned flags);
+
+/*
+ * Update worktrees/xxx/gitdir with the new path.
+ */
+extern void update_worktree_location(struct worktree *wt,
+                                    const char *path_);
+
 /*
  * Free up the memory for worktree(s)
  */
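
A minimal sketch of calling the new validator, assuming the caller already holds a struct worktree pointer (for instance one returned by get_worktrees()); the warning text is illustrative:

    struct strbuf reason = STRBUF_INIT;

    if (validate_worktree(wt, &reason, WT_VALIDATE_WORKTREE_MISSING_OK))
            warning(_("working tree '%s' looks broken: %s"),
                    wt->path, reason.buf);
    strbuf_release(&reason);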
diff --git a/wrapper.c b/wrapper.c
index d20356a776bc63d4a94e4b421582433fb915087a..1fd5e33ea8f5c8cb4e6720b0bec74acf09232e3c 100644 (file)
--- a/wrapper.c
+++ b/wrapper.c
@@ -445,21 +445,21 @@ FILE *fopen_or_warn(const char *path, const char *mode)
        return NULL;
 }
 
-int xmkstemp(char *template)
+int xmkstemp(char *filename_template)
 {
        int fd;
        char origtemplate[PATH_MAX];
-       strlcpy(origtemplate, template, sizeof(origtemplate));
+       strlcpy(origtemplate, filename_template, sizeof(origtemplate));
 
-       fd = mkstemp(template);
+       fd = mkstemp(filename_template);
        if (fd < 0) {
                int saved_errno = errno;
                const char *nonrelative_template;
 
-               if (strlen(template) != strlen(origtemplate))
-                       template = origtemplate;
+               if (strlen(filename_template) != strlen(origtemplate))
+                       filename_template = origtemplate;
 
-               nonrelative_template = absolute_path(template);
+               nonrelative_template = absolute_path(filename_template);
                errno = saved_errno;
                die_errno("Unable to create temporary file '%s'",
                        nonrelative_template);
@@ -481,7 +481,7 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
        static const int num_letters = 62;
        uint64_t value;
        struct timeval tv;
-       char *template;
+       char *filename_template;
        size_t len;
        int fd, count;
 
@@ -503,16 +503,16 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
         */
        gettimeofday(&tv, NULL);
        value = ((size_t)(tv.tv_usec << 16)) ^ tv.tv_sec ^ getpid();
-       template = &pattern[len - 6 - suffix_len];
+       filename_template = &pattern[len - 6 - suffix_len];
        for (count = 0; count < TMP_MAX; ++count) {
                uint64_t v = value;
                /* Fill in the random bits. */
-               template[0] = letters[v % num_letters]; v /= num_letters;
-               template[1] = letters[v % num_letters]; v /= num_letters;
-               template[2] = letters[v % num_letters]; v /= num_letters;
-               template[3] = letters[v % num_letters]; v /= num_letters;
-               template[4] = letters[v % num_letters]; v /= num_letters;
-               template[5] = letters[v % num_letters]; v /= num_letters;
+               filename_template[0] = letters[v % num_letters]; v /= num_letters;
+               filename_template[1] = letters[v % num_letters]; v /= num_letters;
+               filename_template[2] = letters[v % num_letters]; v /= num_letters;
+               filename_template[3] = letters[v % num_letters]; v /= num_letters;
+               filename_template[4] = letters[v % num_letters]; v /= num_letters;
+               filename_template[5] = letters[v % num_letters]; v /= num_letters;
 
                fd = open(pattern, O_CREAT | O_EXCL | O_RDWR, mode);
                if (fd >= 0)
@@ -541,21 +541,21 @@ int git_mkstemp_mode(char *pattern, int mode)
        return git_mkstemps_mode(pattern, 0, mode);
 }
 
-int xmkstemp_mode(char *template, int mode)
+int xmkstemp_mode(char *filename_template, int mode)
 {
        int fd;
        char origtemplate[PATH_MAX];
-       strlcpy(origtemplate, template, sizeof(origtemplate));
+       strlcpy(origtemplate, filename_template, sizeof(origtemplate));
 
-       fd = git_mkstemp_mode(template, mode);
+       fd = git_mkstemp_mode(filename_template, mode);
        if (fd < 0) {
                int saved_errno = errno;
                const char *nonrelative_template;
 
-               if (!template[0])
-                       template = origtemplate;
+               if (!filename_template[0])
+                       filename_template = origtemplate;
 
-               nonrelative_template = absolute_path(template);
+               nonrelative_template = absolute_path(filename_template);
                errno = saved_errno;
                die_errno("Unable to create temporary file '%s'",
                        nonrelative_template);
diff --git a/wt-status.c b/wt-status.c
index f5debcd2b4f05c50d5e70efc95d10d95ca6372cd..50815e5faffba0e9df861dbc4881e9fbffd7a941 100644 (file)
@@ -136,6 +136,7 @@ void wt_status_prepare(struct wt_status *s)
        s->ignored.strdup_strings = 1;
        s->show_branch = -1;  /* unspecified */
        s->show_stash = 0;
+       s->ahead_behind_flags = AHEAD_BEHIND_UNSPECIFIED;
        s->display_comment_prefix = 0;
 }
 
@@ -1032,7 +1033,7 @@ static void wt_longstatus_print_tracking(struct wt_status *s)
        if (!skip_prefix(s->branch, "refs/heads/", &branch_name))
                return;
        branch = branch_get(branch_name);
-       if (!format_tracking_info(branch, &sb))
+       if (!format_tracking_info(branch, &sb, s->ahead_behind_flags))
                return;
 
        i = 0;
@@ -1187,7 +1188,7 @@ static void abbrev_sha1_in_line(struct strbuf *line)
                strbuf_trim(split[1]);
                if (!get_oid(split[1]->buf, &oid)) {
                        strbuf_reset(split[1]);
-                       strbuf_add_unique_abbrev(split[1], oid.hash,
+                       strbuf_add_unique_abbrev(split[1], &oid,
                                                 DEFAULT_ABBREV);
                        strbuf_addch(split[1], ' ');
                        strbuf_reset(line);
@@ -1349,7 +1350,7 @@ static void show_cherry_pick_in_progress(struct wt_status *s,
                                        const char *color)
 {
        status_printf_ln(s, color, _("You are currently cherry-picking commit %s."),
-                       find_unique_abbrev(state->cherry_pick_head_sha1, DEFAULT_ABBREV));
+                       find_unique_abbrev(&state->cherry_pick_head_oid, DEFAULT_ABBREV));
        if (s->hints) {
                if (has_unmerged(s))
                        status_printf_ln(s, color,
@@ -1368,7 +1369,7 @@ static void show_revert_in_progress(struct wt_status *s,
                                        const char *color)
 {
        status_printf_ln(s, color, _("You are currently reverting commit %s."),
-                        find_unique_abbrev(state->revert_head_sha1, DEFAULT_ABBREV));
+                        find_unique_abbrev(&state->revert_head_oid, DEFAULT_ABBREV));
        if (s->hints) {
                if (has_unmerged(s))
                        status_printf_ln(s, color,
@@ -1421,7 +1422,7 @@ static char *get_branch(const struct worktree *wt, const char *path)
                ;
        else if (!get_oid_hex(sb.buf, &oid)) {
                strbuf_reset(&sb);
-               strbuf_add_unique_abbrev(&sb, oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&sb, &oid, DEFAULT_ABBREV);
        } else if (!strcmp(sb.buf, "detached HEAD")) /* rebase */
                goto got_nothing;
        else                    /* bisect */
@@ -1458,7 +1459,7 @@ static int grab_1st_switch(struct object_id *ooid, struct object_id *noid,
        if (!strcmp(cb->buf.buf, "HEAD")) {
                /* HEAD is relative. Resolve it to the right reflog entry. */
                strbuf_reset(&cb->buf);
-               strbuf_add_unique_abbrev(&cb->buf, noid->hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&cb->buf, noid, DEFAULT_ABBREV);
        }
        return 1;
 }
@@ -1488,10 +1489,10 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
                state->detached_from = xstrdup(from);
        } else
                state->detached_from =
-                       xstrdup(find_unique_abbrev(cb.noid.hash, DEFAULT_ABBREV));
-       hashcpy(state->detached_sha1, cb.noid.hash);
+                       xstrdup(find_unique_abbrev(&cb.noid, DEFAULT_ABBREV));
+       oidcpy(&state->detached_oid, &cb.noid);
        state->detached_at = !get_oid("HEAD", &oid) &&
-                            !hashcmp(oid.hash, state->detached_sha1);
+                            !oidcmp(&oid, &state->detached_oid);
 
        free(ref);
        strbuf_release(&cb.buf);
@@ -1550,13 +1551,13 @@ void wt_status_get_state(struct wt_status_state *state,
        } else if (!stat(git_path_cherry_pick_head(), &st) &&
                        !get_oid("CHERRY_PICK_HEAD", &oid)) {
                state->cherry_pick_in_progress = 1;
-               hashcpy(state->cherry_pick_head_sha1, oid.hash);
+               oidcpy(&state->cherry_pick_head_oid, &oid);
        }
        wt_status_check_bisect(NULL, state);
        if (!stat(git_path_revert_head(), &st) &&
            !get_oid("REVERT_HEAD", &oid)) {
                state->revert_in_progress = 1;
-               hashcpy(state->revert_head_sha1, oid.hash);
+               oidcpy(&state->revert_head_oid, &oid);
        }
 
        if (get_detached_from)
@@ -1793,7 +1794,7 @@ static void wt_shortstatus_print_tracking(struct wt_status *s)
        const char *base;
        char *short_base;
        const char *branch_name;
-       int num_ours, num_theirs;
+       int num_ours, num_theirs, sti;
        int upstream_is_gone = 0;
 
        color_fprintf(s->fp, color(WT_STATUS_HEADER, s), "## ");
@@ -1819,7 +1820,9 @@ static void wt_shortstatus_print_tracking(struct wt_status *s)
 
        color_fprintf(s->fp, branch_color_local, "%s", branch_name);
 
-       if (stat_tracking_info(branch, &num_ours, &num_theirs, &base) < 0) {
+       sti = stat_tracking_info(branch, &num_ours, &num_theirs, &base,
+                                s->ahead_behind_flags);
+       if (sti < 0) {
                if (!base)
                        goto conclude;
 
@@ -1831,12 +1834,14 @@ static void wt_shortstatus_print_tracking(struct wt_status *s)
        color_fprintf(s->fp, branch_color_remote, "%s", short_base);
        free(short_base);
 
-       if (!upstream_is_gone && !num_ours && !num_theirs)
+       if (!upstream_is_gone && !sti)
                goto conclude;
 
        color_fprintf(s->fp, header_color, " [");
        if (upstream_is_gone) {
                color_fprintf(s->fp, header_color, LABEL(N_("gone")));
+       } else if (s->ahead_behind_flags == AHEAD_BEHIND_QUICK) {
+               color_fprintf(s->fp, header_color, LABEL(N_("different")));
        } else if (!num_ours) {
                color_fprintf(s->fp, header_color, LABEL(N_("behind ")));
                color_fprintf(s->fp, branch_color_remote, "%d", num_theirs);
@@ -1905,18 +1910,19 @@ static void wt_porcelain_print(struct wt_status *s)
  *
  *    <upstream> ::= the upstream branch name, when set.
  *
- *       <ahead> ::= integer ahead value, when upstream set
- *                   and the commit is present (not gone).
- *
- *      <behind> ::= integer behind value, when upstream set
- *                   and commit is present.
+ *       <ahead> ::= integer ahead value or '?'.
  *
+ *      <behind> ::= integer behind value or '?'.
  *
  * The end-of-line is defined by the -z flag.
  *
  *                 <eol> ::= NUL when -z,
  *                           LF when NOT -z.
  *
+ * When an upstream is set and present, the 'branch.ab' line will
+ * be printed with the ahead/behind counts for the branch and the
+ * upstream.  When AHEAD_BEHIND_QUICK is requested and the branches
+ * are different, '?' will be substituted for the actual count.
  */
 static void wt_porcelain_v2_print_tracking(struct wt_status *s)
 {
@@ -1956,14 +1962,25 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s)
                /* Lookup stats on the upstream tracking branch, if set. */
                branch = branch_get(branch_name);
                base = NULL;
-               ab_info = (stat_tracking_info(branch, &nr_ahead, &nr_behind, &base) == 0);
+               ab_info = stat_tracking_info(branch, &nr_ahead, &nr_behind,
+                                            &base, s->ahead_behind_flags);
                if (base) {
                        base = shorten_unambiguous_ref(base, 0);
                        fprintf(s->fp, "# branch.upstream %s%c", base, eol);
                        free((char *)base);
 
-                       if (ab_info)
-                               fprintf(s->fp, "# branch.ab +%d -%d%c", nr_ahead, nr_behind, eol);
+                       if (ab_info > 0) {
+                               /* different */
+                               if (nr_ahead || nr_behind)
+                                       fprintf(s->fp, "# branch.ab +%d -%d%c",
+                                               nr_ahead, nr_behind, eol);
+                               else
+                                       fprintf(s->fp, "# branch.ab +? -?%c",
+                                               eol);
+                       } else if (!ab_info) {
+                               /* same */
+                               fprintf(s->fp, "# branch.ab +0 -0%c", eol);
+                       }
                }
        }
 
diff --git a/wt-status.h b/wt-status.h
index 3f84d5c29ff270596a894e2d42843b03d7aa37c5..430770b854c41b38b95dae6e8fa8943629a2309b 100644 (file)
@@ -5,6 +5,7 @@
 #include "string-list.h"
 #include "color.h"
 #include "pathspec.h"
+#include "remote.h"
 
 struct worktree;
 
@@ -87,6 +88,7 @@ struct wt_status {
        int show_branch;
        int show_stash;
        int hints;
+       enum ahead_behind_flags ahead_behind_flags;
 
        enum wt_status_format status_format;
        unsigned char sha1_commit[GIT_MAX_RAWSZ]; /* when not Initial */
@@ -116,9 +118,9 @@ struct wt_status_state {
        char *branch;
        char *onto;
        char *detached_from;
-       unsigned char detached_sha1[20];
-       unsigned char revert_head_sha1[20];
-       unsigned char cherry_pick_head_sha1[20];
+       struct object_id detached_oid;
+       struct object_id revert_head_oid;
+       struct object_id cherry_pick_head_oid;
 };
 
 size_t wt_status_locate_end(const char *s, size_t len);
diff --git a/xdiff-interface.c b/xdiff-interface.c
index 770e1f7f8185e05f2618c261b70a5773041432fb..9315bc0ede11ba0377e27d711e37b6a0ae555c43 100644 (file)
@@ -191,7 +191,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
                return;
        }
 
-       ptr->ptr = read_sha1_file(oid->hash, &type, &size);
+       ptr->ptr = read_object_file(oid, &type, &size);
        if (!ptr->ptr || type != OBJ_BLOB)
                die("unable to read blob object %s", oid_to_hex(oid));
        ptr->size = size;