Merge branch 'tg/stash-doc-typofix' into next
author Junio C Hamano <gitster@pobox.com>
Wed, 28 Mar 2018 06:08:51 +0000 (23:08 -0700)
committer Junio C Hamano <gitster@pobox.com>
Wed, 28 Mar 2018 06:08:51 +0000 (23:08 -0700)
* tg/stash-doc-typofix:
git-stash.txt: remove extra square bracket

386 files changed:
.clang-format
Documentation/CodingGuidelines
Documentation/Makefile
Documentation/RelNotes/2.17.0.txt [new file with mode: 0644]
Documentation/config.txt
Documentation/diff-options.txt
Documentation/fetch-options.txt
Documentation/git-am.txt
Documentation/git-config.txt
Documentation/git-daemon.txt
Documentation/git-fetch.txt
Documentation/git-filter-branch.txt
Documentation/git-gc.txt
Documentation/git-index-pack.txt
Documentation/git-pack-objects.txt
Documentation/git-rebase.txt
Documentation/git-remote.txt
Documentation/git-send-email.txt
Documentation/git-shortlog.txt
Documentation/git-status.txt
Documentation/git-tag.txt
Documentation/git-update-index.txt
Documentation/git-worktree.txt
Documentation/git.txt
Documentation/gitattributes.txt
Documentation/gitremote-helpers.txt
Documentation/gitrepository-layout.txt
Documentation/merge-options.txt
Documentation/merge-strategies.txt
Documentation/pretty-formats.txt
Documentation/rev-list-options.txt
Documentation/technical/api-object-access.txt
Documentation/technical/http-protocol.txt
Documentation/technical/long-running-process-protocol.txt [new file with mode: 0644]
Documentation/technical/pack-protocol.txt
Documentation/technical/protocol-capabilities.txt
Documentation/technical/repository-version.txt
GIT-VERSION-GEN
INSTALL
Makefile
RelNotes
apply.c
archive-tar.c
archive-zip.c
archive.c
archive.h
bisect.c
blame.c
builtin/add.c
builtin/am.c
builtin/archive.c
builtin/blame.c
builtin/branch.c
builtin/cat-file.c
builtin/check-ignore.c
builtin/checkout-index.c
builtin/checkout.c
builtin/clean.c
builtin/clone.c
builtin/commit-tree.c
builtin/commit.c
builtin/config.c
builtin/describe.c
builtin/diff-tree.c
builtin/difftool.c
builtin/fast-export.c
builtin/fetch-pack.c
builtin/fetch.c
builtin/fmt-merge-msg.c
builtin/fsck.c
builtin/gc.c
builtin/grep.c
builtin/hash-object.c
builtin/help.c
builtin/index-pack.c
builtin/init-db.c
builtin/log.c
builtin/ls-files.c
builtin/ls-remote.c
builtin/ls-tree.c
builtin/merge-tree.c
builtin/merge.c
builtin/mktag.c
builtin/mktree.c
builtin/mv.c
builtin/name-rev.c
builtin/notes.c
builtin/pack-objects.c
builtin/pack-redundant.c
builtin/prune.c
builtin/pull.c
builtin/push.c
builtin/rebase--helper.c
builtin/receive-pack.c
builtin/reflog.c
builtin/remote.c
builtin/repack.c
builtin/replace.c
builtin/reset.c
builtin/rev-list.c
builtin/rev-parse.c
builtin/revert.c
builtin/rm.c
builtin/shortlog.c
builtin/show-branch.c
builtin/show-ref.c
builtin/submodule--helper.c
builtin/tag.c
builtin/unpack-file.c
builtin/unpack-objects.c
builtin/update-index.c
builtin/update-server-info.c
builtin/verify-commit.c
builtin/worktree.c
builtin/write-tree.c
bulk-checkin.c
bulk-checkin.h
bundle.c
cache-tree.c
cache-tree.h
cache.h
ci/lib-travisci.sh
ci/run-build-and-tests.sh
ci/run-linux32-build.sh
ci/run-linux32-docker.sh
color.c
color.h
combine-diff.c
commit.c
commit.h
common-main.c
compat/mingw.c
compat/mingw.h
config.c
configure.ac
connected.c
contrib/coccinelle/strbuf.cocci
contrib/completion/git-completion.bash
contrib/emacs/.gitignore [deleted file]
contrib/emacs/Makefile [deleted file]
contrib/emacs/README
contrib/emacs/git-blame.el [deleted file]
contrib/emacs/git.el [deleted file]
contrib/examples/builtin-fetch--tool.c
contrib/examples/git-difftool.perl
contrib/examples/git-svnimport.perl
contrib/hooks/pre-auto-gc-battery
contrib/subtree/git-subtree.sh
convert.c
convert.h
csum-file.c
csum-file.h
daemon.c
diff-lib.c
diff.c
diff.h
diffcore-delta.c
diffcore-pickaxe.c
diffcore-rename.c
dir.c
dir.h
entry.c
environment.c
fast-import.c
fetch-object.c [new file with mode: 0644]
fetch-object.h [new file with mode: 0644]
fetch-pack.c
fetch-pack.h
fsck.c
fsmonitor.c
fsmonitor.h
git-add--interactive.perl
git-compat-util.h
git-cvsimport.perl
git-filter-branch.sh
git-rebase--am.sh
git-rebase--interactive.sh
git-rebase--merge.sh
git-rebase.sh
git-send-email.perl
git-stash.sh
git-submodule.sh
git.c
gitweb/INSTALL
gitweb/gitweb.perl
grep.c
hash.h
http-push.c
http-walker.c
http.c
imap-send.c
line-log.c
list-objects-filter-options.c
list-objects-filter-options.h
list-objects-filter.c
list-objects.c
log-tree.c
mailmap.c
match-trees.c
merge-blobs.c
merge-recursive.c
merge-recursive.h
merge.c
mru.c [deleted file]
mru.h [deleted file]
name-hash.c
notes-cache.c
notes-merge.c
notes-utils.c
notes-utils.h
notes.c
notes.h
object.c
object.h
pack-bitmap-write.c
pack-check.c
pack-revindex.c
pack-write.c
pack.h
packfile.c
packfile.h
parse-options.c
parse-options.h
perl/.gitignore
perl/FromCPAN/.gitattributes [new file with mode: 0644]
perl/FromCPAN/Error.pm [new file with mode: 0644]
perl/FromCPAN/Mail/Address.pm [new file with mode: 0644]
perl/Git.pm
perl/Git/I18N.pm
perl/Git/LoadCPAN.pm [new file with mode: 0644]
perl/Git/LoadCPAN/Error.pm [new file with mode: 0644]
perl/Git/LoadCPAN/Mail/Address.pm [new file with mode: 0644]
perl/Git/SVN.pm
perl/Makefile [deleted file]
perl/Makefile.PL [deleted file]
perl/private-Error.pm [deleted file]
preload-index.c
pretty.c
quote.c
quote.h
reachable.c
read-cache.c
ref-filter.c
refs.c
refs/packed-backend.c
refs/ref-cache.c
remote-curl.c
remote-testsvn.c
remote.c
remote.h
replace_object.c
repository.c
repository.h
rerere.c
resolve-undo.c
resolve-undo.h
revision.c
revision.h
run-command.c
send-pack.c
sequencer.c
sequencer.h
setup.c
sha1-lookup.c
sha1-lookup.h
sha1_file.c
sha1_name.c
sha1dc_git.h
split-index.c
split-index.h
strbuf.c
strbuf.h
streaming.c
streaming.h
sub-process.h
submodule-config.c
submodule.c
submodule.h
t/README
t/helper/test-dump-untracked-cache.c
t/helper/test-hashmap.c
t/helper/test-run-command.c
t/helper/test-wildmatch.c
t/lib-terminal.sh
t/perf/aggregate.perl
t/perf/run
t/t0002-gitfile.sh
t/t0008-ignores.sh
t/t0040-parse-options.sh
t/t0041-usage.sh [new file with mode: 0755]
t/t0050-filesystem.sh
t/t0061-run-command.sh
t/t0410-partial-clone.sh [new file with mode: 0755]
t/t1300-repo-config.sh
t/t1506-rev-parse-diagnosis.sh
t/t1507-rev-parse-upstream.sh
t/t1510-repo-setup.sh
t/t2025-worktree-add.sh
t/t2026-worktree-prune.sh
t/t2028-worktree-move.sh
t/t3030-merge-recursive.sh
t/t3070-wildmatch.sh
t/t3200-branch.sh
t/t3400-rebase.sh
t/t3404-rebase-interactive.sh
t/t3405-rebase-malformed.sh
t/t3408-rebase-multi-line.sh
t/t3501-revert-cherry-pick.sh
t/t3512-cherry-pick-submodule.sh
t/t3513-revert-submodule.sh
t/t3701-add-interactive.sh
t/t3905-stash-include-untracked.sh
t/t4013-diff-various.sh
t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial [new file with mode: 0644]
t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial [new file with mode: 0644]
t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode [new file with mode: 0644]
t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode [new file with mode: 0644]
t/t4018-diff-funcname.sh
t/t4018/golang-complex-function [new file with mode: 0644]
t/t4018/golang-func [new file with mode: 0644]
t/t4018/golang-interface [new file with mode: 0644]
t/t4018/golang-long-func [new file with mode: 0644]
t/t4018/golang-struct [new file with mode: 0644]
t/t4052-stat-output.sh
t/t4064-diff-oidfind.sh [new file with mode: 0755]
t/t4135-apply-weird-filenames.sh
t/t4150-am.sh
t/t4151-am-abort.sh
t/t4201-shortlog.sh
t/t5302-pack-index.sh
t/t5500-fetch-pack.sh
t/t5510-fetch.sh
t/t5526-fetch-submodules.sh
t/t5536-fetch-conflicts.sh
t/t5545-push-options.sh
t/t5570-git-daemon.sh
t/t5601-clone.sh
t/t5616-partial-clone.sh [new file with mode: 0755]
t/t6040-tracking-info.sh
t/t6043-merge-rename-directories.sh [new file with mode: 0755]
t/t6120-describe.sh
t/t6200-fmt-merge-msg.sh
t/t6300-for-each-ref.sh
t/t7004-tag.sh
t/t7006-pager.sh
t/t7063-status-untracked-cache.sh
t/t7064-wtstatus-pv2.sh
t/t7505-prepare-commit-msg-hook.sh
t/t7505/expected-rebase-i [new file with mode: 0644]
t/t7505/expected-rebase-p [new file with mode: 0644]
t/t7519-status-fsmonitor.sh
t/t7600-merge.sh
t/t7607-merge-overwrite.sh
t/t9000-addresses.sh [deleted file]
t/t9000/test.pl [deleted file]
t/t9001-send-email.sh
t/t9400-git-cvsserver-server.sh
t/t9402-git-cvsserver-refs.sh
t/t9902-completion.sh
t/t9903-bash-prompt.sh
t/test-lib-functions.sh
t/test-lib.sh
tag.c
tempfile.c
tempfile.h
trace.c
trailer.c
transport-helper.c
transport.c
transport.h
tree-walk.c
tree-walk.h
tree.c
tree.h
unpack-trees.c
unpack-trees.h
upload-pack.c
userdiff.c
walker.c
worktree.c
worktree.h
wrap-for-bin.sh
wrapper.c
wt-status.c
wt-status.h
xdiff-interface.c
index 611ab4750bd21e77d0fec41c8b2e115574c692ff..12a89f95f993546888410613458c9385b16f0108 100644 (file)
@@ -163,7 +163,7 @@ PenaltyBreakComment: 10
 PenaltyBreakFirstLessLess: 0
 PenaltyBreakString: 10
 PenaltyExcessCharacter: 100
-PenaltyReturnTypeOnItsOwnLine: 5
+PenaltyReturnTypeOnItsOwnLine: 60
 
 # Don't sort #include's
 SortIncludes: false
index c4cb5ff0d477938b8fd49749c3589c5afbb04221..48aa4edfbdd180e1c6d874b6bb61ea5fc8e32ef5 100644 (file)
@@ -386,6 +386,11 @@ For C programs:
  - Use Git's gettext wrappers to make the user interface
    translatable. See "Marking strings for translation" in po/README.
 
+ - Variables and functions local to a given source file should be marked
+   with "static". Variables that are visible to other source files
+   must be declared with "extern" in header files. However, function
+   declarations should not use "extern", as that is already the default.
+
 For Perl programs:
 
  - Most of the C guidelines above apply.
index 4ae9ba5c86e038fff94277c0d1a767b93634ec53..6232143cb95d105b0c33b81d16c6b7b628fadb97 100644 (file)
@@ -72,6 +72,7 @@ TECH_DOCS += SubmittingPatches
 TECH_DOCS += technical/hash-function-transition
 TECH_DOCS += technical/http-protocol
 TECH_DOCS += technical/index-format
+TECH_DOCS += technical/long-running-process-protocol
 TECH_DOCS += technical/pack-format
 TECH_DOCS += technical/pack-heuristics
 TECH_DOCS += technical/pack-protocol
diff --git a/Documentation/RelNotes/2.17.0.txt b/Documentation/RelNotes/2.17.0.txt
new file mode 100644 (file)
index 0000000..d6db0e1
--- /dev/null
@@ -0,0 +1,398 @@
+Git 2.17 Release Notes
+======================
+
+Updates since v2.16
+-------------------
+
+UI, Workflows & Features
+
+ * "diff" family of commands learned "--find-object=<object-id>" option
+   to limit the findings to changes that involve the named object.
+
+ * "git format-patch" learned to give 72-cols to diffstat, which is
+   consistent with other line length limits the subcommand uses for
+   its output meant for e-mails.
+
+ * The log from "git daemon" can be redirected with a new option; one
+   relevant use case is to send the log to standard error (instead of
+   syslog) when running it from inetd.
+
+ * "git rebase" learned to take "--allow-empty-message" option.
+
+ * "git am" has learned the "--quit" option, in addition to the
+   existing "--abort" option; having the pair mirrors a few other
+   commands like "rebase" and "cherry-pick".
+
+ * "git worktree add" learned to run the post-checkout hook, just like
+   "git clone" runs it upon the initial checkout.
+
+ * "git tag" learned an explicit "--edit" option that allows the
+   message given via "-m" and "-F" to be further edited.
+
+ * "git fetch --prune-tags" may be used as a handy short-hand for
+   getting rid of stale tags that are locally held.
+
+ * The new "--show-current-patch" option gives an end-user facing way
+   to get the diff being applied when "git rebase" (and "git am")
+   stops with a conflict.
+
+ * "git add -p" used to offer "/" (look for a matching hunk) as a
+   choice, even when there was only one hunk, which has been corrected.
+   Also the single-key help is now given only for keys that are
+   enabled (e.g. help for '/' won't be shown when there is only one
+   hunk).
+
+ * Since Git 1.7.9, "git merge" defaulted to --no-ff (i.e. even when
+   the side branch being merged is a descendant of the current commit,
+   create a merge commit instead of fast-forwarding) when merging a
+   tag object.  This was an appropriate default for integrators who pull
+   signed tags from their downstream contributors, but caused
+   unnecessary merges when used by downstream contributors who
+   habitually "catch up" their topic branches with tagged releases
+   from the upstream.  Update "git merge" to default to --no-ff only
+   when merging a tag object that does *not* sit at its usual place in
+   refs/tags/ hierarchy, and allow fast-forwarding otherwise, to
+   mitigate the problem.
+
+ * "git status" can spend a lot of cycles to compute the relation
+   between the current branch and its upstream, which can now be
+   disabled with "--no-ahead-behind" option.
+
+ * "git diff" and friends learned funcname patterns for Go language
+   source files.
+
+ * "git send-email" learned "--reply-to=<address>" option.
+
+ * Funcname pattern used for C# now recognizes "async" keyword.
+
+ * In a way similar to how "git tag" learned to honor the pager
+   setting only in the list mode, "git config" learned to ignore the
+   pager setting when it is used for setting values (i.e. when the
+   purpose of the operation is not to "show").
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * More perf tests for threaded grep
+
+ * "perf" test output can be sent to codespeed server.
+
+ * The build procedure for perl/ part has been greatly simplified by
+   weaning ourselves off of MakeMaker.
+
+ * Perl 5.8 or greater has been required since Git 1.7.4 released in
+   2010, but we continued to assume some core modules may not exist and
+   used a conditional "eval { require <<module>> }"; we no longer do
+   this.  Some platforms (Fedora/RedHat/CentOS, for example) ship Perl
+   without all core modules by default (e.g. Digest::MD5, File::Temp,
+   File::Spec, Net::Domain, Net::SMTP).  Users on such platforms may
+   need to install these additional modules.
+
+ * As a convenience, we install copies of Perl modules we require which
+   are not part of the core Perl distribution (e.g. Error and
+   Mail::Address).  Users and packagers whose operating system provides
+   these modules can set NO_PERL_CPAN_FALLBACKS to avoid installing the
+   bundled modules.
+
+ * In preparation for implementing narrow/partial clone, the machinery
+   for checking object connectivity used by gc and fsck has been
+   taught that a missing object is OK when it is referenced by a
+   packfile specially marked as coming from a trusted repository that
+   promises to make it available on-demand and lazily.
+
+ * The machinery to clone & fetch, which in turn involves packing and
+   unpacking objects, has been told how to omit certain objects using
+   the filtering mechanism introduced by another topic.  It now knows
+   to mark the resulting pack as a promisor pack to tolerate missing
+   objects, laying foundation for "narrow" clones.
+
+ * The first step to getting rid of mru API and using the
+   doubly-linked list API directly instead.
+
+ * Retire the mru API as it does not give enough abstraction over the
+   underlying list API to be worth it.
+
+ * Rewrite two more "git submodule" subcommands in C.
+
+ * The tracing machinery learned to report tweaking of environment
+   variables as well.
+
+ * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str)
+
+ * Prevent "clang-format" from breaking line after function return type.
+
+ * The sequencer infrastructure is shared across "git cherry-pick",
+   "git rebase -i", etc., and has always spawned "git commit" when it
+   needs to create a commit.  It has been taught to do so internally,
+   when able, by reusing the codepath "git commit" itself uses, which
+   gives a performance boost of a few tens of percent in some sample
+   scenarios.
+
+ * Push the submodule version of collision-detecting SHA-1 hash
+   implementation a bit harder on builders.
+
+ * Avoid mmapping small files while using packed refs (especially ones
+   with zero size, which would cause later munmap() to fail).
+
+ * Conversion from uchar[20] to struct object_id continues.
+
+ * More tests for wildmatch functions.
+
+ * The code to binary search starting from a fan-out table (which is
+   how the packfile is indexed with object names) has been refactored
+   into a reusable helper.
+
+ * We now avoid using identifiers that clash with C++ keywords.  Even
+   though it is not a goal to compile Git with C++ compilers, changes
+   like this help use of code analysis tools that target C++ on our
+   codebase.
+
+ * The executable is now built in 'script' phase in Travis CI integration,
+   to follow the established practice, rather than during 'before_script'
+   phase.  This allows the CI to categorize the failures better ('failed'
+   is project's fault, 'errored' is build environment's).
+   (merge 3c93b82920 sg/travis-build-during-script-phase later to maint).
+
+ * Writing out the index file when the only thing that changed in it
+   is the untracked cache information is often wasteful, and this has
+   been optimized out.
+
+ * Various pieces of the Perl code we ship have been cleaned up.
+
+ * Internal API clean-up to allow write_locked_index() to optionally skip
+   writing the in-core index when it is not modified.
+
+
+Also contains various documentation updates and code clean-ups.
+
+
+Fixes since v2.16
+-----------------
+
+ * An old regression in "git describe --all $annotated_tag^0" has been
+   fixed.
+
+ * "git status" after moving a path in the working tree (hence making
+   it appear "removed") and then adding with the -N option (hence
+   making that appear "added") detected it as a rename, but did not
+   report the old and new pathnames correctly.
+
+ * "git svn dcommit" did not take into account the fact that a
+   svn+ssh:// URL with a username@ (typically used for pushing) refers
+   to the same SVN repository without the username@ and failed when
+   svn.pushmergeinfo option is set.
+
+ * API clean-up around revision traversal.
+
+ * "git merge -Xours/-Xtheirs" learned to use our/their version when
+   resolving conflicting updates to a symbolic link.
+
+ * "git clone $there $here" is allowed even when here directory exists
+   as long as it is an empty directory, but the command incorrectly
+   removed it upon a failure of the operation.
+
+ * "git commit --fixup" did not allow "-m<message>" option to be used
+   at the same time; allow it to annotate the resulting commit with more
+   text.
+
+ * When resetting the working tree files recursively, the working trees
+   of submodules are now also reset to match.
+
+ * "git stash -- <pathspec>" incorrectly blew away untracked files in
+   the directory that matched the pathspec, which has been corrected.
+
+ * Instead of maintaining home-grown email address parsing code, ship
+   a copy of reasonably recent Mail::Address to be used as a fallback
+   in 'git send-email' when the platform lacks it.
+   (merge d60be8acab mm/send-email-fallback-to-local-mail-address later to maint).
+
+ * "git add -p" was taught to ignore local changes to submodules as
+   they do not interfere with the partial addition of regular changes
+   anyway.
+
+ * Avoid showing a warning message in the middle of a line of "git
+   diff" output.
+   (merge 4e056c989f nd/diff-flush-before-warning later to maint).
+
+ * The http tracing code, often used to debug connection issues,
+   learned to redact potentially sensitive information from its output
+   so that it can be shared more safely.
+   (merge 8ba18e6fa4 jt/http-redact-cookies later to maint).
+
+ * Crash fix for a corner case where an error codepath tried to unlock
+   what it had not acquired a lock on.
+   (merge 81fcb698e0 mr/packed-ref-store-fix later to maint).
+
+ * The split-index mode had a few corner case bugs fixed.
+   (merge ae59a4e44f tg/split-index-fixes later to maint).
+
+ * Assorted fixes to "git daemon".
+   (merge ed15e58efe jk/daemon-fixes later to maint).
+
+ * Completion of "git merge -s<strategy>" (in contrib/) did not work
+   well in non-C locale.
+   (merge 7cc763aaa3 nd/list-merge-strategy later to maint).
+
+ * Workaround for segfault with more recent versions of SVN.
+   (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint).
+
+ * Plug recently introduced leaks in fsck.
+   (merge ba3a08ca0e jt/fsck-code-cleanup later to maint).
+
+ * "git pull --rebase" did not pass verbosity setting down when
+   recursing into a submodule.
+   (merge a56771a668 sb/pull-rebase-submodule later to maint).
+
+ * The way "git reset --hard" reports the commit the updated HEAD
+   points at has been made consistent with how the commit title is
+   generated by the other parts of the system.  This matters when the
+   title is spread across physically multiple lines.
+   (merge 1cf823fb68 tg/reset-hard-show-head-with-pretty later to maint).
+
+ * Test fixes.
+   (merge 63b1a175ee sg/test-i18ngrep later to maint).
+
+ * Some bugs around "untracked cache" feature have been fixed.  This
+   will notice corrupt data in the untracked cache left by old and
+   buggy code and issue a warning---the index can be fixed by clearing
+   the untracked cache from it.
+   (merge 0cacebf099 nd/fix-untracked-cache-invalidation later to maint).
+   (merge 7bf0be7501 ab/untracked-cache-invalidation-docs later to maint).
+
+ * "git blame HEAD COPYING" in a bare repository failed to run, while
+   "git blame HEAD -- COPYING" run just fine.  This has been corrected.
+
+ * "git add" files in the same directory, but spelling the directory
+   path in different cases on a case-insensitive filesystem, corrupted
+   the name hash data structure and led to unexpected results.  This
+   has been corrected.
+   (merge c95525e90d bp/name-hash-dirname-fix later to maint).
+
+ * "git rebase -p" mangled log messages of a merge commit, which is
+   now fixed.
+   (merge ed5144d7eb js/fix-merge-arg-quoting-in-rebase-p later to maint).
+
+ * Some low-level protocol codepaths could crash when they get an
+   unexpected flush packet, which is now fixed.
+   (merge bb1356dc64 js/packet-read-line-check-null later to maint).
+
+ * "git check-ignore" with multiple paths got confused when one is a
+   file and the other is a directory, which has been fixed.
+   (merge d60771e930 rs/check-ignore-multi later to maint).
+
+ * "git describe $garbage" stopped giving any errors when the garbage
+   happens to be a string with 40 hexadecimal letters.
+   (merge a8e7a2bf0f sb/describe-blob later to maint).
+
+ * Code to unquote single-quoted string (used in the parser for
+   configuration files, etc.) did not diagnose bogus input correctly
+   and produced bogus results instead.
+   (merge ddbbf8eb25 jk/sq-dequote-on-bogus-input later to maint).
+
+ * Many places in "git apply" knew that "/dev/null" that signals
+   "there is no such file on this side of the diff" can be followed by
+   whitespace and garbage when parsing a patch, except for one, which
+   made an otherwise valid patch (e.g. ones from subversion) rejected.
+   (merge e454ad4bec tk/apply-dev-null-verify-name-fix later to maint).
+
+ * We no longer create any *.spec file, so "make clean" should not
+   remove it.
+   (merge 4321bdcabb tz/do-not-clean-spec-file later to maint).
+
+ * "git push" over http transport did not unquote the push-options
+   correctly.
+   (merge 90dce21eb0 jk/push-options-via-transport-fix later to maint).
+
+ * "git send-email" learned to complain when the batch-size option is
+   not defined when the relogin-delay option is, since these two are
+   mutually required.
+   (merge 9caa70697b xz/send-email-batch-size later to maint).
+
+ * Y2k20 fix ;-) for our perl scripts.
+   (merge a40e06ee33 bw/perl-timegm-timelocal-fix later to maint).
+
+ * Threaded "git grep" has been optimized to avoid allocation in code
+   section that is covered under a mutex.
+   (merge 38ef24dccf rv/grep-cleanup later to maint).
+
+ * "git subtree" script (in contrib/) scripted around "git log", whose
+   output got affected by end-user configuration like log.showsignature.
+   (merge 8841b5222c sg/subtree-signed-commits later to maint).
+
+ * While finding unique object name abbreviation, the code may
+   accidentally have read beyond the end of the array of object names
+   in a pack.
+   (merge 21abed500c ds/find-unique-abbrev-optim later to maint).
+
+ * Micro optimization in revision traversal code.
+   (merge ebbed3ba04 ds/mark-parents-uninteresting-optim later to maint).
+
+ * "git commit" used to run "gc --auto" near the end, which was lost
+   by mistake when the command was reimplemented in C.
+   (merge 095c741edd ab/gc-auto-in-commit later to maint).
+
+ * Allow running a couple of tests with "sh -x".
+   (merge c20bf94abc sg/cvs-tests-with-x later to maint).
+
+ * The codepath to replace an existing entry in the index had a bug in
+   updating the name hash structure, which has been fixed.
+   (merge 0e267b7a24 bp/refresh-cache-ent-rehash-fix later to maint).
+
+ * The transfer.fsckobjects configuration tells "git fetch" to
+   validate the data and connected-ness of objects in the received
+   pack; the code to perform this check has been taught about the
+   narrow clone's convention that missing objects that are reachable
+   from objects in a pack that came from a promisor remote are OK.
+
+ * There was an unused file-scope static variable left in http.c when
+   building for versions of libCURL that are older than 7.19.4, which
+   has been fixed.
+   (merge b8fd6008ec rj/http-code-cleanup later to maint).
+
+ * Shell script portability fix.
+   (merge 206a6ae013 ml/filter-branch-portability-fix later to maint).
+
+ * Other minor doc, test and build updates and code cleanups.
+   (merge e2a5a028c7 bw/oidmap-autoinit later to maint).
+   (merge ec3b4b06f8 cl/t9001-cleanup later to maint).
+   (merge e1b3f3dd38 ks/submodule-doc-updates later to maint).
+   (merge fbac558a9b rs/describe-unique-abbrev later to maint).
+   (merge 8462ff43e4 tb/crlf-conv-flags later to maint).
+   (merge 7d68bb0766 rb/hashmap-h-compilation-fix later to maint).
+   (merge 3449847168 cc/sha1-file-name later to maint).
+   (merge ad622a256f ds/use-get-be64 later to maint).
+   (merge f919ffebed sg/cocci-move-array later to maint).
+   (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint).
+   (merge ef5b3a6c5e nd/shared-index-fix later to maint).
+   (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint).
+   (merge b780e4407d jc/worktree-add-short-help later to maint).
+   (merge ae239fc8e5 rs/cocci-strbuf-addf-to-addstr later to maint).
+   (merge 2e22a85e5c nd/ignore-glob-doc-update later to maint).
+   (merge 3738031581 jk/gettext-poison later to maint).
+   (merge 54360a1956 rj/sparse-updates later to maint).
+   (merge 12e31a6b12 sg/doc-test-must-fail-args later to maint).
+   (merge 760f1ad101 bc/doc-interpret-trailers-grammofix later to maint).
+   (merge 4ccf461f56 bp/fsmonitor later to maint).
+   (merge a6119f82b1 jk/test-hashmap-updates later to maint).
+   (merge 5aea9fe6cc rd/typofix later to maint).
+   (merge e4e5da2796 sb/status-doc-fix later to maint).
+   (merge 7976e901c8 gs/test-unset-xdg-cache-home later to maint).
+   (merge d023df1ee6 tg/worktree-create-tracking later to maint).
+   (merge 4cbe92fd41 sm/mv-dry-run-update later to maint).
+   (merge 75e5e9c3f7 sb/color-h-cleanup later to maint).
+   (merge 2708ef4af6 sg/t6300-modernize later to maint).
+   (merge d88e92d4e0 bw/doc-submodule-recurse-config-with-clone later to maint).
+   (merge f74bbc8dd2 jk/cached-commit-buffer later to maint).
+   (merge 1316416903 ms/non-ascii-ticks later to maint).
+   (merge 878056005e rs/strbuf-read-file-or-whine later to maint).
+   (merge 79f0ba1547 jk/strbuf-read-file-close-error later to maint).
+   (merge edfb8ba068 ot/ref-filter-cleanup later to maint).
+   (merge 11395a3b4b jc/test-must-be-empty later to maint).
+   (merge 768b9d6db7 mk/doc-pretty-fill later to maint).
+   (merge 2caa7b8d27 ab/man-sec-list later to maint).
+   (merge 40c17eb184 ks/t3200-typofix later to maint).
+   (merge bd9958c358 dp/merge-strategy-doc-fix later to maint).
+   (merge 9ee0540a40 js/ming-strftime later to maint).
+   (merge 1775e990f7 tz/complete-tag-delete-tagname later to maint).
+   (merge 00a4b03501 rj/warning-uninitialized-fix later to maint).
+   (merge b635ed97a0 jk/attributes-path-doc later to maint).
index 0e25b2c92b309330f27ce562e0f21f4e10179879..ce9102cea83766be0ae8d2ed2867fd4c587d7a95 100644 (file)
@@ -1398,7 +1398,16 @@ fetch.unpackLimit::
 
 fetch.prune::
        If true, fetch will automatically behave as if the `--prune`
-       option was given on the command line.  See also `remote.<name>.prune`.
+       option was given on the command line.  See also `remote.<name>.prune`
+       and the PRUNING section of linkgit:git-fetch[1].
+
+fetch.pruneTags::
+       If true, fetch will automatically behave as if the
+       `refs/tags/*:refs/tags/*` refspec was provided when pruning,
+       if not set already. This allows for setting both this option
+       and `fetch.prune` to maintain a 1=1 mapping to upstream
+       refs. See also `remote.<name>.pruneTags` and the PRUNING
+       section of linkgit:git-fetch[1].
 
 fetch.output::
        Control how ref update status is printed. Valid values are
@@ -2945,6 +2954,15 @@ remote.<name>.prune::
        remote (as if the `--prune` option was given on the command line).
        Overrides `fetch.prune` settings, if any.
 
+remote.<name>.pruneTags::
+       When set to true, fetching from this remote by default will also
+       remove any local tags that no longer exist on the remote if pruning
+       is activated in general via `remote.<name>.prune`, `fetch.prune` or
+       `--prune`. Overrides `fetch.pruneTags` settings, if any.
++
+See also `remote.<name>.prune` and the PRUNING section of
+linkgit:git-fetch[1].
+
 remotes.<group>::
        The list of remotes which are fetched by "git remote update
        <group>".  See linkgit:git-remote[1].
@@ -3210,7 +3228,8 @@ submodule.active::
 
 submodule.recurse::
        Specifies if commands recurse into submodules by default. This
-       applies to all commands that have a `--recurse-submodules` option.
+       applies to all commands that have a `--recurse-submodules` option,
+       except `clone`.
        Defaults to false.
 
 submodule.fetchJobs::
@@ -3343,6 +3362,10 @@ uploadpack.packObjectsHook::
        was run. I.e., `upload-pack` will feed input intended for
        `pack-objects` to the hook, and expects a completed packfile on
        stdout.
+
+uploadpack.allowFilter::
+       If this option is set, `upload-pack` will advertise partial
+       clone and partial fetch object filtering.
 +
 Note that this configuration variable is ignored if it is seen in the
 repository-level config (this is a safety measure against fetching from
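
For reference, a minimal sketch of how the configuration knobs described in the
hunks above might be set from the command line; the remote name `origin` stands
in for any configured remote:

------------------------------------------------
# Prune stale remote-tracking branches and stale local tags on every fetch
$ git config --global fetch.prune true
$ git config --global fetch.pruneTags true

# Or limit tag pruning to a single remote
$ git config remote.origin.prune true
$ git config remote.origin.pruneTags true

# On the serving side, advertise partial clone/fetch object filtering
$ git config uploadpack.allowFilter true
------------------------------------------------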
index 743af97b06153813820264bb6cf9085f50b6696f..e3a44f03cdcee92098287bfccc9801fde042ef2b 100644 (file)
@@ -128,6 +128,14 @@ have to use `--diff-algorithm=default` option.
 These parameters can also be set individually with `--stat-width=<width>`,
 `--stat-name-width=<name-width>` and `--stat-count=<count>`.
 
+--compact-summary::
+       Output a condensed summary of extended header information such
+       as file creations or deletions ("new" or "gone", optionally "+l"
+       if it's a symlink) and mode changes ("+x" or "-x" for adding
+       or removing executable bit respectively) in diffstat. The
+       information is put between the filename part and the graph
+       part. Implies `--stat`.
+
 --numstat::
        Similar to `--stat`, but shows number of added and
        deleted lines in decimal notation and pathname without
@@ -508,6 +516,15 @@ occurrences of that string did not change).
 See the 'pickaxe' entry in linkgit:gitdiffcore[7] for more
 information.
 
+--find-object=<object-id>::
+       Look for differences that change the number of occurrences of
+       the specified object. Similar to `-S`, but the argument differs
+       in that it doesn't search for a specific string but for a specific
+       object id.
++
+The object can be a blob or a submodule commit. It implies the `-t` option in
+`git-log` to also find trees.
+
 --pickaxe-all::
        When `-S` or `-G` finds a change, show all the changes in that
        changeset, not just the files that contain the change
@@ -516,6 +533,7 @@ information.
 --pickaxe-regex::
        Treat the <string> given to `-S` as an extended POSIX regular
        expression to match.
+
 endif::git-format-patch[]
 
 -O<orderfile>::
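
A quick, hedged illustration of the two new diff options documented above; the
blob id is a placeholder for an object that exists in your repository:

------------------------------------------------
# Condensed creation/deletion/mode-change info in the diffstat
$ git diff --compact-summary HEAD~1 HEAD

# Show only commits whose changes add or remove the given blob
$ git log --oneline --find-object=<blob-id>
------------------------------------------------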
index fb6bebbc618c3f71ab400ff4267264a0167a887a..8631e365f437fd85058bed3dbd0cebde15756ccc 100644 (file)
@@ -73,7 +73,22 @@ ifndef::git-pull[]
        are fetched due to an explicit refspec (either on the command
        line or in the remote configuration, for example if the remote
        was cloned with the --mirror option), then they are also
-       subject to pruning.
+       subject to pruning. Supplying `--prune-tags` is a shorthand for
+       providing the tag refspec.
++
+See the PRUNING section below for more details.
+
+-P::
+--prune-tags::
+       Before fetching, remove any local tags that no longer exist on
+       the remote if `--prune` is enabled. This option should be used
+       more carefully, unlike `--prune` it will remove any local
+       more carefully; unlike `--prune`, it will remove any local
+       a shorthand for providing the explicit tag refspec along with
+       `--prune`, see the discussion about that in its documentation.
++
+See the PRUNING section below for more details.
+
 endif::git-pull[]
 
 ifndef::git-pull[]
index 12879e4029a7710e2d78bae476f7ad7d9b0fe830..6f6c34b0f4bc9ba18ec890dff1a6fe10af2fd68f 100644 (file)
@@ -16,7 +16,7 @@ SYNOPSIS
         [--exclude=<path>] [--include=<path>] [--reject] [-q | --quiet]
         [--[no-]scissors] [-S[<keyid>]] [--patch-format=<format>]
         [(<mbox> | <Maildir>)...]
-'git am' (--continue | --skip | --abort)
+'git am' (--continue | --skip | --abort | --quit | --show-current-patch)
 
 DESCRIPTION
 -----------
@@ -167,6 +167,14 @@ default.   You can use `--no-utf8` to override this.
 --abort::
        Restore the original branch and abort the patching operation.
 
+--quit::
+       Abort the patching operation but keep HEAD and the index
+       untouched.
+
+--show-current-patch::
+       Show the patch being applied when "git am" is stopped because
+       of conflicts.
+
 DISCUSSION
 ----------
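
A brief sketch of how the two new "git am" sub-modes described above are
typically used when a patch fails to apply:

------------------------------------------------
# Inspect the patch that "git am" stopped on
$ git am --show-current-patch

# Give up on the series but keep HEAD and the index as they are
$ git am --quit
------------------------------------------------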
 
index 14da5fc157ee0ea7467e7b9bd9d33d465c1f7a3f..e09ed5d7d5147d93039c479efc8ab450bf5ca8b4 100644 (file)
@@ -233,6 +233,12 @@ See also <<FILES>>.
        using `--file`, `--global`, etc) and `on` when searching all
        config files.
 
+CONFIGURATION
+-------------
+`pager.config` is only respected when listing configuration, i.e., when
+using `--list` or any of the `--get-*` which may return multiple results.
+The default is to use a pager.
+
 [[FILES]]
 FILES
 -----
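
As an illustration of the paging behaviour described above (assuming a default
setup where a pager is available):

------------------------------------------------
# Listing queries go through the pager by default ...
$ git config --list

# ... while setting a value never does
$ git config core.autocrlf false

# Disable the pager for "git config" listings entirely
$ git config --global pager.config false
------------------------------------------------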
index 3c91db7bed038f7ba28a4e7554cc6e63c5d91958..56d54a489875652e754f7cd16ee5a77c2f5e5202 100644 (file)
@@ -20,6 +20,7 @@ SYNOPSIS
             [--inetd |
              [--listen=<host_or_ipaddr>] [--port=<n>]
              [--user=<user> [--group=<group>]]]
+            [--log-destination=(stderr|syslog|none)]
             [<directory>...]
 
 DESCRIPTION
@@ -80,7 +81,8 @@ OPTIONS
        do not have the 'git-daemon-export-ok' file.
 
 --inetd::
-       Have the server run as an inetd service. Implies --syslog.
+       Have the server run as an inetd service. Implies --syslog (may be
+       overridden with `--log-destination=`).
        Incompatible with --detach, --port, --listen, --user and --group
        options.
 
@@ -110,8 +112,28 @@ OPTIONS
        zero for no limit.
 
 --syslog::
-       Log to syslog instead of stderr. Note that this option does not imply
-       --verbose, thus by default only error conditions will be logged.
+       Short for `--log-destination=syslog`.
+
+--log-destination=<destination>::
+       Send log messages to the specified destination.
+       Note that this option does not imply --verbose,
+       thus by default only error conditions will be logged.
+       The <destination> must be one of:
++
+--
+stderr::
+       Write to standard error.
+       Note that if `--detach` is specified,
+       the process disconnects from the real standard error,
+       making this destination effectively equivalent to `none`.
+syslog::
+       Write to syslog, using the `git-daemon` identifier.
+none::
+       Disable all logging.
+--
++
+The default destination is `syslog` if `--inetd` or `--detach` is specified,
+otherwise `stderr`.
 
 --user-path::
 --user-path=<path>::
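
A minimal sketch of the new logging option; the base path is a placeholder for
a local setup:

------------------------------------------------
# Run from inetd but send the log to standard error instead of syslog
$ git daemon --inetd --log-destination=stderr --base-path=/srv/git

# Explicitly silence logging for a standalone daemon
$ git daemon --export-all --log-destination=none --base-path=/srv/git
------------------------------------------------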
index b153aefa68c8dcaa5f3600d67c5ff0010ee899af..e3199355978880b21668bc1aff0e42f610c02bd6 100644 (file)
@@ -99,6 +99,93 @@ The latter use of the `remote.<repository>.fetch` values can be
 overridden by giving the `--refmap=<refspec>` parameter(s) on the
 command line.
 
+PRUNING
+-------
+
+Git has a default disposition of keeping data unless it's explicitly
+thrown away; this extends to holding onto local references to branches
+on remotes that have themselves deleted those branches.
+
+If left to accumulate, these stale references might make performance
+worse on big and busy repos that have a lot of branch churn, and
+e.g. make the output of commands like `git branch -a --contains
+<commit>` needlessly verbose, as well as impacting anything else
+that'll work with the complete set of known references.
+
+These remote-tracking references can be deleted as a one-off with
+either of:
+
+------------------------------------------------
+# While fetching
+$ git fetch --prune <name>
+
+# Only prune, don't fetch
+$ git remote prune <name>
+------------------------------------------------
+
+To prune references as part of your normal workflow without needing to
+remember to run that, set `fetch.prune` globally, or
+`remote.<name>.prune` per-remote in the config. See
+linkgit:git-config[1].
+
+Here's where things get tricky and more specific. The pruning feature
+doesn't actually care about branches, instead it'll prune local <->
+remote-references as a function of the refspec of the remote (see
+`<refspec>` and <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> above).
+
+Therefore if the refspec for the remote includes
+e.g. `refs/tags/*:refs/tags/*`, or you manually run e.g. `git fetch
+--prune <name> "refs/tags/*:refs/tags/*"` it won't be stale remote
+tracking branches that are deleted, but any local tag that doesn't
+exist on the remote.
+
+This might not be what you expect, i.e. you want to prune remote
+`<name>`, but also explicitly fetch tags from it, so when you fetch
+from it you delete all your local tags, most of which may not have
+come from the `<name>` remote in the first place.
+
+So be careful when using this with a refspec like
+`refs/tags/*:refs/tags/*`, or any other refspec which might map
+references from multiple remotes to the same local namespace.
+
+Since keeping up-to-date with both branches and tags on the remote is
+a common use-case the `--prune-tags` option can be supplied along with
+`--prune` to prune local tags that don't exist on the remote, and
+force-update those tags that differ. Tag pruning can also be enabled
+with `fetch.pruneTags` or `remote.<name>.pruneTags` in the config. See
+linkgit:git-config[1].
+
+The `--prune-tags` option is equivalent to having
+`refs/tags/*:refs/tags/*` declared in the refspecs of the remote. This
+can lead to some seemingly strange interactions:
+
+------------------------------------------------
+# These both fetch tags
+$ git fetch --no-tags origin 'refs/tags/*:refs/tags/*'
+$ git fetch --no-tags --prune-tags origin
+------------------------------------------------
+
+The reason it doesn't error out when provided without `--prune` or its
+config versions is for flexibility of the configured versions, and to
+maintain a 1=1 mapping between what the command line flags do, and
+what the configuration versions do.
+
+It's reasonable to e.g. configure `fetch.pruneTags=true` in
+`~/.gitconfig` to have tags pruned whenever `git fetch --prune` is
+run, without making every invocation of `git fetch` without `--prune`
+an error.
+
+Pruning tags with `--prune-tags` also works when fetching a URL
+instead of a named remote. These will all prune tags not found on
+origin:
+
+------------------------------------------------
+$ git fetch origin --prune --prune-tags
+$ git fetch origin --prune 'refs/tags/*:refs/tags/*'
+$ git fetch <url of origin> --prune --prune-tags
+$ git fetch <url of origin> --prune 'refs/tags/*:refs/tags/*'
+------------------------------------------------
+
 OUTPUT
 ------
 
index 3a52e4dce39eeaf6eba896ccbf9e0505cebb3ec9..b634043183b453a89b3f56e0544503b06ccdbdec 100644 (file)
@@ -222,6 +222,14 @@ this purpose, they are instead rewritten to point at the nearest ancestor that
 was not excluded.
 
 
+EXIT STATUS
+-----------
+
+On success, the exit status is `0`.  If the filter can't find any commits to
+rewrite, the exit status is `2`.  On any other error, the exit status may be
+any other non-zero value.
+
+
 Examples
 --------
 
index 571b5a7e3c9dbc11aafc194b6e08dbbed5b2f7d3..3126e0dd002eca7ac420932bb9d1ace63752e8dc 100644 (file)
@@ -15,8 +15,9 @@ DESCRIPTION
 -----------
 Runs a number of housekeeping tasks within the current repository,
 such as compressing file revisions (to reduce disk space and increase
-performance) and removing unreachable objects which may have been
-created from prior invocations of 'git add'.
+performance), removing unreachable objects which may have been
+created from prior invocations of 'git add', packing refs, pruning
+reflog, rerere metadata or stale working trees.
 
 Users are encouraged to run this task on a regular basis within
 each repository to maintain good disk space utilization and good
@@ -45,20 +46,25 @@ OPTIONS
        With this option, 'git gc' checks whether any housekeeping is
        required; if not, it exits without performing any work.
        Some git commands run `git gc --auto` after performing
-       operations that could create many loose objects.
+       operations that could create many loose objects. Housekeeping
+       is required if there are too many loose objects or too many
+       packs in the repository.
 +
-Housekeeping is required if there are too many loose objects or
-too many packs in the repository. If the number of loose objects
-exceeds the value of the `gc.auto` configuration variable, then
-all loose objects are combined into a single pack using
-`git repack -d -l`.  Setting the value of `gc.auto` to 0
-disables automatic packing of loose objects.
+If the number of loose objects exceeds the value of the `gc.auto`
+configuration variable, then all loose objects are combined into a
+single pack using `git repack -d -l`.  Setting the value of `gc.auto`
+to 0 disables automatic packing of loose objects.
 +
 If the number of packs exceeds the value of `gc.autoPackLimit`,
 then existing packs (except those marked with a `.keep` file)
 are consolidated into a single pack by using the `-A` option of
 'git repack'. Setting `gc.autoPackLimit` to 0 disables
 automatic consolidation of packs.
++
+If housekeeping is required due to many loose objects or packs, all
+other housekeeping tasks (e.g. rerere, working trees, reflog...) will
+be performed as well.
+
 
 --prune=<date>::
        Prune loose objects older than date (default is 2 weeks ago,
@@ -133,6 +139,10 @@ The optional configuration variable `gc.pruneExpire` controls how old
 the unreferenced loose objects have to be before they are pruned.  The
 default is "2 weeks ago".
 
+Optional configuration variable `gc.worktreePruneExpire` controls how
+old a stale working tree should be before `git worktree prune` deletes
+it. Default is "3 months ago".
+
 
 Notes
 -----
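
For example, the thresholds and expiry knobs mentioned above can be tuned like
this; the values are only illustrative:

------------------------------------------------
# Disable automatic packing of loose objects
$ git config --global gc.auto 0

# Keep stale working trees around a little longer before pruning
$ git config --global gc.worktreePruneExpire "1.month.ago"

# Run housekeeping only if it is actually needed
$ git gc --auto
------------------------------------------------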
index 1b4b65d6657b003079e0407d1da8d8c239933267..138edb47b6a17ab925ef3206c1aec6336ff380fe 100644 (file)
@@ -77,6 +77,9 @@ OPTIONS
 --check-self-contained-and-connected::
        Die if the pack contains broken links. For internal use only.
 
+--fsck-objects::
+       Die if the pack contains broken objects. For internal use only.
+
 --threads=<n>::
        Specifies the number of threads to spawn when resolving
        deltas. This requires that index-pack be compiled with
index aa403d02f33699cde77c507c0402d9dfa0b7fb2f..81bc490ac52eb9414015979d8c244ce063c838b5 100644 (file)
@@ -255,6 +255,17 @@ a missing object is encountered.  This is the default action.
 The form '--missing=allow-any' will allow object traversal to continue
 if a missing object is encountered.  Missing objects will silently be
 omitted from the results.
++
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
+
+--exclude-promisor-objects::
+       Omit objects that are known to be in the promisor remote.  (This
+       option has the purpose of operating only on locally created objects,
+       so that when we repack, we still maintain a distinction between
+       locally created objects [without .promisor] and objects from the
+       promisor remote [with .promisor].)  This is used with partial clone.
 
 SEE ALSO
 --------
index 8a861c1e0d69eda71fa2eec791e67c136b63b177..3277ca143273e01f5f4973ed351c8a5cb4b8e0fa 100644 (file)
@@ -12,7 +12,7 @@ SYNOPSIS
        [<upstream> [<branch>]]
 'git rebase' [-i | --interactive] [options] [--exec <cmd>] [--onto <newbase>]
        --root [<branch>]
-'git rebase' --continue | --skip | --abort | --quit | --edit-todo
+'git rebase' --continue | --skip | --abort | --quit | --edit-todo | --show-current-patch
 
 DESCRIPTION
 -----------
@@ -244,12 +244,22 @@ leave out at most one of A and B, in which case it defaults to HEAD.
        Keep the commits that do not change anything from its
        parents in the result.
 
+--allow-empty-message::
+       By default, rebasing commits with an empty message will fail.
+       This option overrides that behavior, allowing commits with empty
+       messages to be rebased.
+
 --skip::
        Restart the rebasing process by skipping the current patch.
 
 --edit-todo::
        Edit the todo list during an interactive rebase.
 
+--show-current-patch::
+       Show the current patch in an interactive rebase or when rebase
+       is stopped because of conflicts. This is the equivalent of
+       `git show REBASE_HEAD`.
+
 -m::
 --merge::
        Use merging strategies to rebase.  When the recursive (default) merge
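
A short sketch of the new rebase options in use; `master` stands in for any
upstream branch:

------------------------------------------------
# Allow commits with empty messages to be rebased
$ git rebase --allow-empty-message master

# When the rebase stops on a conflict, inspect the patch being applied
$ git rebase --show-current-patch
# ... which is equivalent to
$ git show REBASE_HEAD
------------------------------------------------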
index 577b969c1bda2bfd95fd28e3ffc4bbbc96c5d16c..4feddc0293bd7eb9827ce45f11519335a484f013 100644 (file)
@@ -172,10 +172,14 @@ With `-n` option, the remote heads are not queried first with
 
 'prune'::
 
-Deletes all stale remote-tracking branches under <name>.
-These stale branches have already been removed from the remote repository
-referenced by <name>, but are still locally available in
-"remotes/<name>".
+Deletes stale references associated with <name>. By default, stale
+remote-tracking branches under <name> are deleted, but depending on
+global configuration and the configuration of the remote we might even
+prune local tags that haven't been pushed there. Equivalent to `git
+fetch --prune <name>`, except that no new references will be fetched.
++
+See the PRUNING section of linkgit:git-fetch[1] for what it'll prune
+depending on various configuration.
 +
 With `--dry-run` option, report what branches will be pruned, but do not
 actually prune them.
@@ -189,7 +193,7 @@ remotes.default is not defined, all remotes which do not have the
 configuration parameter remote.<name>.skipDefaultUpdate set to true will
 be updated.  (See linkgit:git-config[1]).
 +
-With `--prune` option, prune all the remotes that are updated.
+With `--prune` option, run pruning against all the remotes that are updated.
 
 
 DISCUSSION
index 8060ea35c5f932c1e4328f000a501ff399418f27..71ef97ba9b22aad996049bd0c1500a835a433350 100644 (file)
@@ -84,6 +84,11 @@ See the CONFIGURATION section for `sendemail.multiEdit`.
        the value of GIT_AUTHOR_IDENT, or GIT_COMMITTER_IDENT if that is not
        set, as returned by "git var -l".
 
+--reply-to=<address>::
+       Specify the address where replies from recipients should go to.
+       Use this if replies to messages should go to an address other than what
+       is specified with the --from parameter.
+
 --in-reply-to=<identifier>::
        Make the first mail (or all the mails with `--no-thread`) appear as a
        reply to the given Message-Id, which avoids breaking threads to
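
A hedged example of the new `--reply-to` option; the addresses and the patch
file name are placeholders:

------------------------------------------------
$ git send-email --to=list@example.org \
	--reply-to=replies@example.com \
	0001-some-topic.patch
------------------------------------------------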
index ee6c5476c1d2bf3b2a708e6152ebaba5882cc4f7..5e35ea18acd790469735400644358af9843b6c99 100644 (file)
@@ -8,8 +8,8 @@ git-shortlog - Summarize 'git log' output
 SYNOPSIS
 --------
 [verse]
-git log --pretty=short | 'git shortlog' [<options>]
 'git shortlog' [<options>] [<revision range>] [[\--] <path>...]
+git log --pretty=short | 'git shortlog' [<options>]
 
 DESCRIPTION
 -----------
index f9c91c721e909291ba42c27c3fd521135b4869f0..6c230c0c7200412b988d233352e3411a9fb813a8 100644 (file)
@@ -130,6 +130,11 @@ ignored, then the directory is not shown, but all contents are shown.
        without options are equivalent to 'always' and 'never'
        respectively.
 
+--ahead-behind::
+--no-ahead-behind::
+       Display or do not display detailed ahead/behind counts for the
+       branch relative to its upstream branch.  Defaults to true.
+
 <pathspec>...::
        See the 'pathspec' entry in linkgit:gitglossary[7].
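
For instance, when the ahead/behind computation against the upstream is slow,
it can be skipped for a single invocation:

------------------------------------------------
$ git status --no-ahead-behind
------------------------------------------------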
 
index 956fc019f984bca1754a72dc0d7308b39a28445d..1d17101bac39d64cc3d4aff89cd588955dd00735 100644 (file)
@@ -9,7 +9,7 @@ git-tag - Create, list, delete or verify a tag object signed with GPG
 SYNOPSIS
 --------
 [verse]
-'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>]
+'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e]
        <tagname> [<commit> | <object>]
 'git tag' -d <tagname>...
 'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
@@ -167,6 +167,12 @@ This option is only applicable when listing tags without annotation lines.
        Implies `-a` if none of `-a`, `-s`, or `-u <keyid>`
        is given.
 
+-e::
+--edit::
+       The message taken from a file with `-F` or from the command line
+       with `-m` is usually used as the tag message unmodified.
+       This option lets you further edit the message taken from these sources.
+
 --cleanup=<mode>::
        This option sets how the tag message is cleaned up.
        The  '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'.  The
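
A brief example of the new `--edit` option described above; the tag name and
message are illustrative:

------------------------------------------------
# Seed the tag message with -m, then tweak it in the editor before tagging
$ git tag -a --edit -m "Release candidate" v2.17.0-rc0
------------------------------------------------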
index ad2383d7ed2197ed517eab04eed6712440137fa2..3897a59ee94bc424c2c66cb5d05bc0193fb6eaf2 100644 (file)
@@ -464,6 +464,32 @@ command reads the index; while when `--[no-|force-]untracked-cache`
 are used, the untracked cache is immediately added to or removed from
 the index.
 
+Before 2.17, the untracked cache had a bug where replacing a directory
+with a symlink to another directory could cause it to incorrectly show
+files tracked by git as untracked. See the "status: add a failing test
+showing a core.untrackedCache bug" commit to git.git. A workaround for
+that is (and this might work for other undiscovered bugs in the
+future):
+
+----------------
+$ git -c core.untrackedCache=false status
+----------------
+
+This bug has also been shown to affect non-symlink cases of replacing
+a directory with a file when it comes to the internal structures of
+the untracked cache, but no case has been reported where this resulted in
+wrong "git status" output.
+
+There are also cases where existing indexes written by git versions
+before 2.17 will reference directories that don't exist anymore,
+potentially causing many "could not open directory" warnings to be
+printed on "git status". These are new warnings for existing issues
+that were previously silently discarded.
+
+As with the bug described above, the solution is to do a one-off "git
+status" run with `core.untrackedCache=false` to flush out the leftover
+bad data.
+
 File System Monitor
 -------------------
 
index 5ac3f68ab5396ade59c25f2083088c21f94f1f93..e7eb24ab8528e39aa4e0a75f6feaa46d9cfb39fe 100644 (file)
@@ -12,7 +12,9 @@ SYNOPSIS
 'git worktree add' [-f] [--detach] [--checkout] [--lock] [-b <new-branch>] <path> [<commit-ish>]
 'git worktree list' [--porcelain]
 'git worktree lock' [--reason <string>] <worktree>
+'git worktree move' <worktree> <new-path>
 'git worktree prune' [-n] [-v] [--expire <expire>]
+'git worktree remove' [--force] <worktree>
 'git worktree unlock' <worktree>
 
 DESCRIPTION
@@ -34,10 +36,6 @@ The working tree's administrative files in the repository (see
 `git worktree prune` in the main or any linked working tree to
 clean up any stale administrative files.
 
-If you move a linked working tree, you need to manually update the
-administrative files so that they do not get pruned automatically. See
-section "DETAILS" for more information.
-
 If a linked working tree is stored on a portable device or network share
 which is not always mounted, you can prevent its administrative files from
 being pruned by issuing the `git worktree lock` command, optionally
@@ -80,10 +78,22 @@ files from being pruned automatically. This also prevents it from
 being moved or deleted. Optionally, specify a reason for the lock
 with `--reason`.
 
+move::
+
+Move a working tree to a new location. Note that the main working tree
+or linked working trees containing submodules cannot be moved.
+
 prune::
 
 Prune working tree information in $GIT_DIR/worktrees.
 
+remove::
+
+Remove a working tree. Only clean working trees (no untracked files
+and no modification in tracked files) can be removed. Unclean working
+trees or ones with submodules can be removed with `--force`. The main
+working tree cannot be removed.
+
 unlock::
 
 Unlock a working tree, allowing it to be pruned, moved or deleted.
@@ -93,9 +103,10 @@ OPTIONS
 
 -f::
 --force::
-       By default, `add` refuses to create a new working tree when `<commit-ish>` is a branch name and
-       is already checked out by another working tree. This option overrides
-       that safeguard.
+       By default, `add` refuses to create a new working tree when
+       `<commit-ish>` is a branch name and is already checked out by
+       another working tree and `remove` refuses to remove an unclean
+       working tree. This option overrides that safeguard.
 
 -b <new-branch>::
 -B <new-branch>::
@@ -197,7 +208,7 @@ thumb is do not make any assumption about whether a path belongs to
 $GIT_DIR or $GIT_COMMON_DIR when you need to directly access something
 inside $GIT_DIR. Use `git rev-parse --git-path` to get the final path.
 
-If you move a linked working tree, you need to update the 'gitdir' file
+If you manually move a linked working tree, you need to update the 'gitdir' file
 in the entry's directory. For example, if a linked working tree is moved
 to `/newpath/test-next` and its `.git` file points to
 `/path/main/.git/worktrees/test-next`, then update
@@ -277,13 +288,6 @@ Multiple checkout in general is still experimental, and the support
 for submodules is incomplete. It is NOT recommended to make multiple
 checkouts of a superproject.
 
-git-worktree could provide more automation for tasks currently
-performed manually, such as:
-
-- `remove` to remove a linked working tree and its administrative files (and
-  warn if the working tree is dirty)
-- `mv` to move or rename a working tree and update its administrative files
-
 GIT
 ---
 Part of the linkgit:git[1] suite
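
A minimal sketch of the new `move` and `remove` subcommands; the paths are
placeholders:

------------------------------------------------
# Relocate a linked working tree (administrative files are updated for you)
$ git worktree move ../hotfix ../hotfix-2.17

# Remove a clean linked working tree, or force removal of an unclean one
$ git worktree remove ../hotfix-2.17
$ git worktree remove --force ../experiment
------------------------------------------------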
index 8163b5796b192ee5f5c0cb13fc85bdeff8202e64..4767860e72f46d4e4df883f2fdbb4a46eb8e8eda 100644 (file)
@@ -849,6 +849,9 @@ Report bugs to the Git mailing list <git@vger.kernel.org> where the
 development and maintenance is primarily done.  You do not have to be
 subscribed to the list to send a message there.
 
+Issues which are security relevant should be disclosed privately to
+the Git Security mailing list <git-security@googlegroups.com>.
+
 SEE ALSO
 --------
 linkgit:gittutorial[7], linkgit:gittutorial-2[7],
index 30687de81a6e40b3d9cc120f659817b1a8d03603..1094fe2b5b0cc97030dc6694364f473999a9f4d3 100644 (file)
@@ -56,9 +56,16 @@ Unspecified::
 
 When more than one pattern matches the path, a later line
 overrides an earlier line.  This overriding is done per
-attribute.  The rules how the pattern matches paths are the
-same as in `.gitignore` files; see linkgit:gitignore[5].
-Unlike `.gitignore`, negative patterns are forbidden.
+attribute.
+
+The rules by which the pattern matches paths are the same as in
+`.gitignore` files (see linkgit:gitignore[5]), with a few exceptions:
+
+  - negative patterns are forbidden
+
+  - patterns that match a directory do not recursively match paths
+    inside that directory (so using the trailing-slash `path/` syntax is
+    pointless in an attributes file; use `path/**` instead)
 
 When deciding what attributes are assigned to a path, Git
 consults `$GIT_DIR/info/attributes` file (which has the highest
@@ -392,46 +399,14 @@ Long Running Filter Process
 If the filter command (a string value) is defined via
 `filter.<driver>.process` then Git can process all blobs with a
 single filter invocation for the entire life of a single Git
-command. This is achieved by using a packet format (pkt-line,
-see technical/protocol-common.txt) based protocol over standard
-input and standard output as follows. All packets, except for the
-"*CONTENT" packets and the "0000" flush packet, are considered
-text and therefore are terminated by a LF.
-
-Git starts the filter when it encounters the first file
-that needs to be cleaned or smudged. After the filter started
-Git sends a welcome message ("git-filter-client"), a list of supported
-protocol version numbers, and a flush packet. Git expects to read a welcome
-response message ("git-filter-server"), exactly one protocol version number
-from the previously sent list, and a flush packet. All further
-communication will be based on the selected version. The remaining
-protocol description below documents "version=2". Please note that
-"version=42" in the example below does not exist and is only there
-to illustrate how the protocol would look like with more than one
-version.
-
-After the version negotiation Git sends a list of all capabilities that
-it supports and a flush packet. Git expects to read a list of desired
-capabilities, which must be a subset of the supported capabilities list,
-and a flush packet as response:
-------------------------
-packet:          git> git-filter-client
-packet:          git> version=2
-packet:          git> version=42
-packet:          git> 0000
-packet:          git< git-filter-server
-packet:          git< version=2
-packet:          git< 0000
-packet:          git> capability=clean
-packet:          git> capability=smudge
-packet:          git> capability=not-yet-invented
-packet:          git> 0000
-packet:          git< capability=clean
-packet:          git< capability=smudge
-packet:          git< 0000
-------------------------
-Supported filter capabilities in version 2 are "clean", "smudge",
-and "delay".
+command. This is achieved by using the long-running process protocol
+(described in technical/long-running-process-protocol.txt).
+
+When Git encounters the first file that needs to be cleaned or smudged,
+it starts the filter and performs the handshake. In the handshake, the
+welcome message sent by Git is "git-filter-client", only version 2 is
+supported, and the supported capabilities are "clean", "smudge", and
+"delay".
 
 Afterwards Git sends a list of "key=value" pairs terminated with
 a flush packet. The list will contain at least the filter command
@@ -517,12 +492,6 @@ the protocol then Git will stop the filter process and restart it
 with the next file that needs to be processed. Depending on the
 `filter.<driver>.required` flag Git will interpret that as an error.
 
-After the filter has processed a command it is expected to wait for
-a "key=value" list containing the next command. Git will close
-the command pipe on exit. The filter is expected to detect EOF
-and exit gracefully on its own. Git will wait until the filter
-process has stopped.
-
 Delay
 ^^^^^
 
@@ -752,6 +721,8 @@ patterns are available:
 
 - `fountain` suitable for Fountain documents.
 
+- `golang` suitable for source code in the Go language.
+
 - `html` suitable for HTML/XHTML documents.
 
 - `java` suitable for source code in the Java language.
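Tying together the pattern-matching exceptions and the
`filter.<driver>.process` configuration described earlier in this file's
hunks, a minimal sketch; the driver name `mydriver`, the command path, and the
file patterns are hypothetical:
------------------------
# .gitattributes: "generated/" alone would not recurse here, so use **
generated/** -diff
*.dat filter=mydriver

# configure a long-running filter driver; the command must speak the
# long-running process protocol on its stdin/stdout
$ git config filter.mydriver.process /usr/local/bin/mydriver-filter
$ git config filter.mydriver.required false
------------------------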
index 4a584f3c5d7e40fc5a8a1e5c68a63e8156bdcd64..4b8c93ec59de3db02b9914aed4955d486be5f875 100644 (file)
@@ -466,6 +466,13 @@ set by Git if the remote helper has the 'option' capability.
        Transmit <string> as a push option. As the push option
        must not contain LF or NUL characters, the string is not encoded.
 
+'option from-promisor' {'true'|'false'}::
+       Indicate that these objects are being fetched from a promisor.
+
+'option no-dependents' {'true'|'false'}::
+       Indicate that only the objects wanted need to be fetched, not
+       their dependents.
+
 SEE ALSO
 --------
 linkgit:git-remote[1]
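As with other helper options, a sketch of how the dialog for these two new
options might look for a helper that supports them (the replies are the usual
'ok', 'unsupported', or 'error <msg>'):
------------------------
option from-promisor true
ok
option no-dependents true
ok
------------------------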
index c60bcad44aa581b2449a7a638b6487c1c73e8c23..e85148f05eb79a968ad84bac6ce7a88289270c49 100644 (file)
@@ -275,11 +275,6 @@ worktrees/<id>/locked::
        or manually by `git worktree prune`. The file may contain a string
        explaining why the repository is locked.
 
-worktrees/<id>/link::
-       If this file exists, it is a hard link to the linked .git
-       file. It is used to detect if the linked repository is
-       manually removed.
-
 SEE ALSO
 --------
 linkgit:git-init[1],
index 3888c3ff85e2dc5b137e4e3ed50e39327760a02a..63a3fc09548abe8d34faab98f183e1817b21b878 100644 (file)
@@ -35,7 +35,8 @@ set to `no` at the beginning of them.
 --no-ff::
        Create a merge commit even when the merge resolves as a
        fast-forward.  This is the default behaviour when merging an
-       annotated (and possibly signed) tag.
+       annotated (and possibly signed) tag that is not stored in
+	its natural place in the 'refs/tags/' hierarchy.
 
 --ff-only::
        Refuse to merge and exit with a non-zero status unless the
index fd5d748d1b508c9cef1063227b5c478952d3bc7e..4a58aad4b83b9e365b57a8afa71f1ab939c47aee 100644 (file)
@@ -40,7 +40,7 @@ the other tree did, declaring 'our' history contains all that happened in it.
 
 theirs;;
        This is the opposite of 'ours'; note that, unlike 'ours', there is
-       no 'theirs' merge stragegy to confuse this merge option with.
+       no 'theirs' merge strategy to confuse this merge option with.
 
 patience;;
        With this option, 'merge-recursive' spends a little extra time
index e664c088a5e6a4bf1d09425ff37e36fe11d9f2f3..6109ef09aa2eeba564023cced75728ffaa1c4f96 100644 (file)
@@ -202,7 +202,7 @@ endif::git-rev-list[]
 - '%>>(<N>)', '%>>|(<N>)': similar to '%>(<N>)', '%>|(<N>)'
   respectively, except that if the next placeholder takes more spaces
   than given and there are spaces on its left, use those spaces
-- '%><(<N>)', '%><|(<N>)': similar to '% <(<N>)', '%<|(<N>)'
+- '%><(<N>)', '%><|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
   respectively, but padding both sides (i.e. the text is centered)
 - %(trailers[:options]): display the trailers of the body as interpreted
   by linkgit:git-interpret-trailers[1]. The `trailers` string may be
index 22f5c9b43dd01c02710e44e36ee6a9dd2afc9b60..7b273635de2b5bf3e3ba6ade8bcca3068a216395 100644 (file)
@@ -750,10 +750,21 @@ The form '--missing=allow-any' will allow object traversal to continue
 if a missing object is encountered.  Missing objects will silently be
 omitted from the results.
 +
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED missing promisor objects.
+Unexpected missing objects will raise an error.
++
 The form '--missing=print' is like 'allow-any', but will also print a
 list of the missing objects.  Object IDs are prefixed with a ``?'' character.
 endif::git-rev-list[]
 
+--exclude-promisor-objects::
+       (For internal use only.)  Prefilter object traversal at
+	(For internal use only.)  Prefilter object traversal at the
+       stronger than `--missing=allow-promisor` because it limits the
+       traversal, rather than just silencing errors about missing
+       objects.
+
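An illustrative sketch of these traversal options; the output depends entirely
on the repository, so none is shown:
------------------------
# print reachable objects; missing ones are listed with a '?' prefix
$ git rev-list --objects --missing=print HEAD

# in a partial clone, cut the traversal at the promisor boundary instead
$ git rev-list --objects --exclude-promisor-objects HEAD
------------------------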
 --no-walk[=(sorted|unsorted)]::
        Only show the given commits, but do not traverse their ancestors.
        This has no effect if a range is specified. If the argument
index 03bb0e950dd1616b00f950f83263835c57bfa70a..a1162e5bcd19ba509fff39bceca49e1b33e2add2 100644 (file)
@@ -7,7 +7,7 @@ Talk about <sha1_file.c> and <object.h> family, things like
 * read_object_with_reference()
 * has_sha1_file()
 * write_sha1_file()
-* pretend_sha1_file()
+* pretend_object_file()
 * lookup_{object,commit,tag,blob,tree}
 * parse_{object,commit,tag,blob,tree}
 * Use of object flags
index a0e45f2889e6e52db71f88d7368bbb2efc02616a..64f49d0bbb7b0c28832562e59fc8b89e872c4f4b 100644 (file)
@@ -214,10 +214,12 @@ smart server reply:
    S: Cache-Control: no-cache
    S:
    S: 001e# service=git-upload-pack\n
+   S: 0000
    S: 004895dcfa3633004da0049d3d0fa03f80589cbcaf31 refs/heads/maint\0multi_ack\n
    S: 0042d049f6c27a2244e12041955e262a404c7faba355 refs/heads/master\n
    S: 003c2cb58b79488a98d2721cea644875a8dd0026b115 refs/tags/v1.0\n
    S: 003fa3c2e2402b99163d1d59756e5f207ae21cccba4c refs/tags/v1.0^{}\n
+   S: 0000
 
 The client may send Extra Parameters (see
 Documentation/technical/pack-protocol.txt) as a colon-separated string
@@ -277,6 +279,7 @@ The returned response contains "version 1" if "version=1" was sent as an
 Extra Parameter.
 
   smart_reply     =  PKT-LINE("# service=$servicename" LF)
+                    "0000"
                     *1("version 1")
                     ref_list
                     "0000"
diff --git a/Documentation/technical/long-running-process-protocol.txt b/Documentation/technical/long-running-process-protocol.txt
new file mode 100644 (file)
index 0000000..aa0aa9a
--- /dev/null
@@ -0,0 +1,50 @@
+Long-running process protocol
+=============================
+
+This protocol is used when Git needs to communicate with an external
+process throughout the entire life of a single Git command. All
+communication is in pkt-line format (see technical/protocol-common.txt)
+over standard input and standard output.
+
+Handshake
+---------
+
+Git starts by sending a welcome message (for example,
+"git-filter-client"), a list of supported protocol version numbers, and
+a flush packet. Git expects to read the welcome message with "server"
+instead of "client" (for example, "git-filter-server"), exactly one
+protocol version number from the previously sent list, and a flush
+packet. All further communication will be based on the selected version.
+The remaining protocol description below documents "version=2". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate what the protocol would look like with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet:          git> git-filter-client
+packet:          git> version=2
+packet:          git> version=42
+packet:          git> 0000
+packet:          git< git-filter-server
+packet:          git< version=2
+packet:          git< 0000
+packet:          git> capability=clean
+packet:          git> capability=smudge
+packet:          git> capability=not-yet-invented
+packet:          git> 0000
+packet:          git< capability=clean
+packet:          git< capability=smudge
+packet:          git< 0000
+------------------------
+
+Shutdown
+--------
+
+Git will close the command pipe on exit. The external process is
+expected to detect EOF and exit gracefully on its own. Git will wait
+until the external process has stopped.
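As a concrete framing example, derived from the pkt-line rules in
technical/protocol-common.txt rather than from this patch: the welcome message
"git-filter-client" plus its terminating LF is 18 bytes, so on the wire it is
sent as the 22-byte packet "0016git-filter-client" followed by LF. A minimal
shell sketch of that framing:
------------------------
msg='git-filter-client'
printf '%04x%s\n' $(( ${#msg} + 5 )) "$msg"   # 4 length bytes + 17 chars + LF = 22 -> "0016git-filter-client"
printf '0000'                                 # flush packet
------------------------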
index cd31edc91ea1989adc7f2321327c0a97685fe62f..7fee6b780a869fcb9af979fb95a54587002b92f2 100644 (file)
@@ -241,6 +241,7 @@ out of what the server said it could do with the first 'want' line.
   upload-request    =  want-list
                       *shallow-line
                       *1depth-request
+                      [filter-request]
                       flush-pkt
 
   want-list         =  first-want
@@ -256,6 +257,8 @@ out of what the server said it could do with the first 'want' line.
   additional-want   =  PKT-LINE("want" SP obj-id)
 
   depth             =  1*DIGIT
+
+  filter-request    =  PKT-LINE("filter" SP filter-spec)
 ----
 
 Clients MUST send all the obj-ids it wants from the reference
@@ -278,6 +281,11 @@ complete those commits. Commits whose parents are not received as a
 result are defined as shallow and marked as such in the server. This
 information is sent back to the client in the next step.
 
+The client can optionally request that pack-objects omit various
+objects from the packfile using one of several filtering techniques.
+These are intended for use with partial clone and partial fetch
+operations.  See `rev-list` for possible "filter-spec" values.
+
 Once all the 'want's and 'shallow's (and optional 'deepen') are
 transferred, clients MUST send a flush-pkt, to tell the server side
 that it is done sending the list.
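One way to see the filter-request described above on the wire, assuming a Git
built with partial clone support and a server that advertises the 'filter'
capability (the URL is a placeholder), is to trace the packets of a partial
clone; the trace should include a pkt-line carrying "filter blob:none" next to
the want lines:
------------------------
$ GIT_TRACE_PACKET=1 git clone --filter=blob:none https://example.com/repo.git
------------------------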
index 26dcc6f502020da5214f1e161f1678813dd24059..332d209b58ca42dc303c757f2b31f7a4f3d033f4 100644 (file)
@@ -309,3 +309,11 @@ to accept a signed push certificate, and asks the <nonce> to be
 included in the push certificate.  A send-pack client MUST NOT
 send a push-cert packet unless the receive-pack server advertises
 this capability.
+
+filter
+------
+
+If the upload-pack server advertises the 'filter' capability,
+fetch-pack may send "filter" commands to request a partial clone
+or partial fetch, asking the server to omit various objects
+from the packfile.
index 00ad37986efdcebb440c95629acd16213028c239..e03eaccebc9c28acf29408060b2f76d7161db0d6 100644 (file)
@@ -86,3 +86,15 @@ for testing format-1 compatibility.
 When the config key `extensions.preciousObjects` is set to `true`,
 objects in the repository MUST NOT be deleted (e.g., by `git-prune` or
 `git repack -d`).
+
+`partialclone`
+~~~~~~~~~~~~~~
+
+When the config key `extensions.partialclone` is set, it indicates
+that the repo was created with a partial clone (or later performed
+a partial fetch) and that the remote may have omitted sending
+certain unwanted objects.  Such a remote is called a "promisor remote"
+and it promises that all such omitted objects can be fetched from it
+in the future.
+
+The value of this key is the name of the promisor remote.
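A minimal sketch of the resulting configuration, assuming the promisor remote
is called `origin`; in practice the partial clone machinery writes these
entries rather than the user:
------------------------
$ git config core.repositoryformatversion 1
$ git config extensions.partialclone origin
------------------------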
index 8945e05f526ce9b0047b29b4b93ade436315a4a2..b4fb7d9a39f39513d3c29004f470920865b3f9ec 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 GVF=GIT-VERSION-FILE
-DEF_VER=v2.16.3
+DEF_VER=v2.17.0-rc1
 
 LF='
 '
diff --git a/INSTALL b/INSTALL
index ffb071e9f03a79a052beaa4372fa790ecbabbb7b..c39006e8e7e5c5be2114b79d50135dc08e3d1aaa 100644 (file)
--- a/INSTALL
+++ b/INSTALL
@@ -84,9 +84,29 @@ Issues of note:
 
        GIT_EXEC_PATH=`pwd`
        PATH=`pwd`:$PATH
-       GITPERLLIB=`pwd`/perl/blib/lib
+       GITPERLLIB=`pwd`/perl/build/lib
        export GIT_EXEC_PATH PATH GITPERLLIB
 
+ - By default (unless NO_PERL is provided) Git will ship various perl
+   scripts. However, for simplicity it doesn't use the
+   ExtUtils::MakeMaker toolchain to decide where to place the perl
+   libraries. Depending on the system this can result in the perl
+   libraries not being where you'd like them if they're expected to be
+   used by things other than Git itself.
+
+   If this is a problem you care about, manually supplying a
+   perllibdir prefix should fix it, e.g.:
+
+       prefix=/usr perllibdir=/usr/$(/usr/bin/perl -MConfig -wle 'print substr $Config{installsitelib}, 1 + length $Config{siteprefixexp}')
+
+   This will result in e.g. perllibdir=/usr/share/perl/5.26.1 on Debian,
+   or perllibdir=/usr/share/perl5 (which we'd use by default) on CentOS.
+
+ - Unless NO_PERL is provided, Git will ship various perl libraries it
+   needs. Distributors of Git will usually want to set
+   NO_PERL_CPAN_FALLBACKS (when NO_PERL is not provided) so that their
+   own copies of the CPAN modules Git needs are used instead.
+
  - Git is reasonably self-sufficient, but does depend on a few external
    programs and libraries.  Git can be used without most of them by adding
    the appropriate "NO_<LIBRARY>=YesPlease" to the make command line or
@@ -106,7 +126,8 @@ Issues of note:
          Redhat/Fedora are reported to ship Perl binary package with some
          core modules stripped away (see http://lwn.net/Articles/477234/),
          so you might need to install additional packages other than Perl
-         itself, e.g. Time::HiRes.
+         itself, e.g. Digest::MD5, File::Spec, File::Temp, Net::Domain,
+         Net::SMTP, and Time::HiRes.
 
        - git-imap-send needs the OpenSSL library to talk IMAP over SSL if
          you are using libcurl older than 7.34.0.  Otherwise you can use
index 7f40f76739e9c9eca58c2d6743a55c7d02bcab9c..96f6138f634b6aaf009edd5026320494b27a77f5 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -29,10 +29,10 @@ all::
 # Perl-compatible regular expressions instead of standard or extended
 # POSIX regular expressions.
 #
-# Currently USE_LIBPCRE is a synonym for USE_LIBPCRE1, define
-# USE_LIBPCRE2 instead if you'd like to use version 2 of the PCRE
-# library. The USE_LIBPCRE flag will likely be changed to mean v2 by
-# default in future releases.
+# USE_LIBPCRE is a synonym for USE_LIBPCRE2; define USE_LIBPCRE1
+# instead if you'd like to use the legacy version 1 of the PCRE
+# library. Support for version 1 will likely be removed in some future
+# release of Git, as upstream has all but abandoned it.
 #
 # When using USE_LIBPCRE1, define NO_LIBPCRE1_JIT if the PCRE v1
 # library is compiled without --enable-jit. We will auto-detect
@@ -294,11 +294,14 @@ all::
 #
 # Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl).
 #
-# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
-# MakeMaker (e.g. using ActiveState under Cygwin).
-#
 # Define NO_PERL if you do not want Perl scripts or libraries at all.
 #
+# Define NO_PERL_CPAN_FALLBACKS if you do not want to install bundled
+# copies of CPAN modules that serve as a fallback in case the modules
+# are not available on the system. This option is intended for
+# distributions that want to use their packaged versions of Perl
+# modules, instead of the fallbacks shipped with Git.
+#
 # Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python
 # but /usr/bin/python2.7 on some platforms).
 #
@@ -332,6 +335,13 @@ all::
 # when hardlinking a file to another name and unlinking the original file right
 # away (some NTFS drivers seem to zero the contents in that scenario).
 #
+# Define INSTALL_SYMLINKS if you prefer everything that can be
+# symlinked between bin/ and libexec/ to use relative symlinks between
+# the two. This option overrides NO_CROSS_DIRECTORY_HARDLINKS and
+# NO_INSTALL_HARDLINKS, which in some cases will also use symlinking
+# by indirection within the same directory; INSTALL_SYMLINKS will
+# always symlink to the final target directly.
+#
 # Define NO_CROSS_DIRECTORY_HARDLINKS if you plan to distribute the installed
 # programs as a tar, where bin/ and libexec/ might be on different file systems.
 #
@@ -471,14 +481,14 @@ ARFLAGS = rcs
 # This can help installing the suite in a relocatable way.
 
 prefix = $(HOME)
-bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(prefix)/bin
 mandir = $(prefix)/share/man
 infodir = $(prefix)/share/info
 gitexecdir = libexec/git-core
 mergetoolsdir = $(gitexecdir)/mergetools
 sharedir = $(prefix)/share
 gitwebdir = $(sharedir)/gitweb
+perllibdir = $(sharedir)/perl5
 localedir = $(sharedir)/locale
 template_dir = share/git-core/templates
 htmldir = $(prefix)/share/doc/git-doc
@@ -488,11 +498,13 @@ lib = lib
 # DESTDIR =
 pathsep = :
 
+bindir_relative = $(patsubst $(prefix)/%,%,$(bindir))
 mandir_relative = $(patsubst $(prefix)/%,%,$(mandir))
 infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
+gitexecdir_relative = $(patsubst $(prefix)/%,%,$(gitexecdir))
 htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
 
-export prefix bindir sharedir sysconfdir gitwebdir localedir
+export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
 
 CC = cc
 AR = ar
@@ -804,6 +816,7 @@ LIB_OBJS += ewah/ewah_bitmap.o
 LIB_OBJS += ewah/ewah_io.o
 LIB_OBJS += ewah/ewah_rlw.o
 LIB_OBJS += exec_cmd.o
+LIB_OBJS += fetch-object.o
 LIB_OBJS += fetch-pack.o
 LIB_OBJS += fsck.o
 LIB_OBJS += fsmonitor.o
@@ -832,7 +845,6 @@ LIB_OBJS += merge.o
 LIB_OBJS += merge-blobs.o
 LIB_OBJS += merge-recursive.o
 LIB_OBJS += mergesort.o
-LIB_OBJS += mru.o
 LIB_OBJS += name-hash.o
 LIB_OBJS += notes.o
 LIB_OBJS += notes-cache.o
@@ -1166,13 +1178,18 @@ ifdef NO_LIBGEN_H
        COMPAT_OBJS += compat/basename.o
 endif
 
-USE_LIBPCRE1 ?= $(USE_LIBPCRE)
+USE_LIBPCRE2 ?= $(USE_LIBPCRE)
 
-ifneq (,$(USE_LIBPCRE1))
-       ifdef USE_LIBPCRE2
-$(error Only set USE_LIBPCRE1 (or its alias USE_LIBPCRE) or USE_LIBPCRE2, not both!)
+ifneq (,$(USE_LIBPCRE2))
+       ifdef USE_LIBPCRE1
+$(error Only set USE_LIBPCRE2 (or its alias USE_LIBPCRE) or USE_LIBPCRE1, not both!)
        endif
 
+       BASIC_CFLAGS += -DUSE_LIBPCRE2
+       EXTLIBS += -lpcre2-8
+endif
+
+ifdef USE_LIBPCRE1
        BASIC_CFLAGS += -DUSE_LIBPCRE1
        EXTLIBS += -lpcre
 
@@ -1181,11 +1198,6 @@ ifdef NO_LIBPCRE1_JIT
 endif
 endif
 
-ifdef USE_LIBPCRE2
-       BASIC_CFLAGS += -DUSE_LIBPCRE2
-       EXTLIBS += -lpcre2-8
-endif
-
 ifdef LIBPCREDIR
        BASIC_CFLAGS += -I$(LIBPCREDIR)/include
        EXTLIBS += -L$(LIBPCREDIR)/$(lib) $(CC_LD_DYNPATH)$(LIBPCREDIR)/$(lib)
@@ -1515,7 +1527,9 @@ else
        LIB_OBJS += sha1dc_git.o
 ifdef DC_SHA1_EXTERNAL
        ifdef DC_SHA1_SUBMODULE
+               ifneq ($(DC_SHA1_SUBMODULE),auto)
 $(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both)
+               endif
        endif
        BASIC_CFLAGS += -DDC_SHA1_EXTERNAL
        EXTLIBS += -lsha1detectcoll
@@ -1543,9 +1557,6 @@ ifdef SHA1_MAX_BLOCK_SIZE
        LIB_OBJS += compat/sha1-chunked.o
        BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)"
 endif
-ifdef NO_PERL_MAKEMAKER
-       export NO_PERL_MAKEMAKER
-endif
 ifdef NO_HSTRERROR
        COMPAT_CFLAGS += -DNO_HSTRERROR
        COMPAT_OBJS += compat/hstrerror.o
@@ -1732,10 +1743,13 @@ ETC_GITATTRIBUTES_SQ = $(subst ','\'',$(ETC_GITATTRIBUTES))
 DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
 bindir_SQ = $(subst ','\'',$(bindir))
 bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+mandir_SQ = $(subst ','\'',$(mandir))
 mandir_relative_SQ = $(subst ','\'',$(mandir_relative))
 infodir_relative_SQ = $(subst ','\'',$(infodir_relative))
+perllibdir_SQ = $(subst ','\'',$(perllibdir))
 localedir_SQ = $(subst ','\'',$(localedir))
 gitexecdir_SQ = $(subst ','\'',$(gitexecdir))
+gitexecdir_relative_SQ = $(subst ','\'',$(gitexecdir_relative))
 template_dir_SQ = $(subst ','\'',$(template_dir))
 htmldir_relative_SQ = $(subst ','\'',$(htmldir_relative))
 prefix_SQ = $(subst ','\'',$(prefix))
@@ -1843,9 +1857,6 @@ all::
 ifndef NO_TCLTK
        $(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all
        $(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all
-endif
-ifndef NO_PERL
-       $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' localedir='$(localedir_SQ)' all
 endif
        $(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)'
 
@@ -1928,7 +1939,8 @@ common-cmds.h: $(wildcard Documentation/git-*.txt)
 
 SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
        $(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
-       $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV)
+       $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\
+       $(perllibdir_SQ)
 define cmd_munge_script
 $(RM) $@ $@+ && \
 sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
@@ -1972,23 +1984,12 @@ git.res: git.rc GIT-VERSION-FILE
 $(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS
 
 ifndef NO_PERL
-$(SCRIPT_PERL_GEN): perl/perl.mak
+$(SCRIPT_PERL_GEN):
 
-perl/perl.mak: perl/PM.stamp
-
-perl/PM.stamp: FORCE
-       @$(FIND) perl -type f -name '*.pm' | sort >$@+ && \
-       $(PERL_PATH) -V >>$@+ && \
-       { cmp $@+ $@ >/dev/null 2>/dev/null || mv $@+ $@; } && \
-       $(RM) $@+
-
-perl/perl.mak: GIT-CFLAGS GIT-PREFIX perl/Makefile perl/Makefile.PL
-       $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F)
-
-PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ)
-$(SCRIPT_PERL_GEN): % : %.perl perl/perl.mak GIT-PERL-DEFINES GIT-VERSION-FILE
+PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ)
+$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE
        $(QUIET_GEN)$(RM) $@ $@+ && \
-       INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \
+       INSTLIBDIR='$(perllibdir_SQ)' && \
        INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
        INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
        sed -e '1{' \
@@ -2232,13 +2233,15 @@ $(VCSSVN_LIB): $(VCSSVN_OBJS)
 
 export DEFAULT_EDITOR DEFAULT_PAGER
 
-.PHONY: doc man html info pdf
-doc:
+.PHONY: doc man man-perl html info pdf
+doc: man-perl
        $(MAKE) -C Documentation all
 
-man:
+man: man-perl
        $(MAKE) -C Documentation man
 
+man-perl: perl/build/man/man3/Git.3pm
+
 html:
        $(MAKE) -C Documentation html
 
@@ -2314,6 +2317,29 @@ endif
 po/build/locale/%/LC_MESSAGES/git.mo: po/%.po
        $(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $<
 
+LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm)
+LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL))
+LIB_CPAN := $(wildcard perl/FromCPAN/*.pm perl/FromCPAN/*/*.pm)
+LIB_CPAN_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_CPAN))
+
+ifndef NO_PERL
+all:: $(LIB_PERL_GEN)
+ifndef NO_PERL_CPAN_FALLBACKS
+all:: $(LIB_CPAN_GEN)
+endif
+NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS))
+endif
+
+perl/build/lib/%.pm: perl/%.pm
+       $(QUIET_GEN)mkdir -p $(dir $@) && \
+       sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' \
+           -e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \
+       < $< > $@
+
+perl/build/man/man3/Git.3pm: perl/Git.pm
+       $(QUIET_GEN)mkdir -p $(dir $@) && \
+       pod2man $< $@
+
 FIND_SOURCE_FILES = ( \
        git ls-files \
                '*.[hcS]' \
@@ -2574,7 +2600,9 @@ ifndef NO_GETTEXT
        (cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -)
 endif
 ifndef NO_PERL
-       $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install
+       $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)'
+       (cd perl/build/lib && $(TAR) cf - .) | \
+       (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -)
        $(MAKE) -C gitweb install
 endif
 ifndef NO_TCLTK
@@ -2587,49 +2615,63 @@ endif
 
        bindir=$$(cd '$(DESTDIR_SQ)$(bindir_SQ)' && pwd) && \
        execdir=$$(cd '$(DESTDIR_SQ)$(gitexec_instdir_SQ)' && pwd) && \
+       destdir_from_execdir_SQ=$$(echo '$(gitexecdir_relative_SQ)' | sed -e 's|[^/][^/]*|..|g') && \
        { test "$$bindir/" = "$$execdir/" || \
          for p in git$X $(filter $(install_bindir_programs),$(ALL_PROGRAMS)); do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \
-               ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$bindir/$$p" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/$$p" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)$(NO_CROSS_DIRECTORY_HARDLINKS)" && \
+                 ln "$$bindir/$$p" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$bindir/$$p" "$$execdir/$$p" || exit; } \
          done; \
        } && \
        for p in $(filter $(install_bindir_programs),$(BUILT_INS)); do \
                $(RM) "$$bindir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \
-               ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \
-               cp "$$bindir/git$X" "$$bindir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "git$X" "$$bindir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$bindir/git$X" "$$bindir/$$p" 2>/dev/null || \
+                 ln -s "git$X" "$$bindir/$$p" 2>/dev/null || \
+                 cp "$$bindir/git$X" "$$bindir/$$p" || exit; } \
        done && \
        for p in $(BUILT_INS); do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \
-               ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$execdir/git$X" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "$$destdir_from_execdir_SQ/$(bindir_relative_SQ)/git$X" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$execdir/git$X" "$$execdir/$$p" 2>/dev/null || \
+                 ln -s "git$X" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$execdir/git$X" "$$execdir/$$p" || exit; } \
        done && \
        remote_curl_aliases="$(REMOTE_CURL_ALIASES)" && \
        for p in $$remote_curl_aliases; do \
                $(RM) "$$execdir/$$p" && \
-               test -z "$(NO_INSTALL_HARDLINKS)" && \
-               ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
-               ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
-               cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; \
+               test -n "$(INSTALL_SYMLINKS)" && \
+               ln -s "git-remote-http$X" "$$execdir/$$p" || \
+               { test -z "$(NO_INSTALL_HARDLINKS)" && \
+                 ln "$$execdir/git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
+                 ln -s "git-remote-http$X" "$$execdir/$$p" 2>/dev/null || \
+                 cp "$$execdir/git-remote-http$X" "$$execdir/$$p" || exit; } \
        done && \
        ./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X"
 
-.PHONY: install-gitweb install-doc install-man install-html install-info install-pdf
+.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf
 .PHONY: quick-install-doc quick-install-man quick-install-html
 install-gitweb:
        $(MAKE) -C gitweb install
 
-install-doc:
+install-doc: install-man-perl
        $(MAKE) -C Documentation install
 
-install-man:
+install-man: install-man-perl
        $(MAKE) -C Documentation install-man
 
+install-man-perl: man-perl
+       $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3'
+       (cd perl/build/man/man3 && $(TAR) cf - .) | \
+       (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -)
+
 install-html:
        $(MAKE) -C Documentation install-html
 
@@ -2664,6 +2706,21 @@ dist: git-archive$(X) configure
                $(GIT_TARNAME)/configure \
                $(GIT_TARNAME)/version \
                $(GIT_TARNAME)/git-gui/version
+ifdef DC_SHA1_SUBMODULE
+       @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib
+       @cp sha1collisiondetection/LICENSE.txt \
+               $(GIT_TARNAME)/sha1collisiondetection/
+       @cp sha1collisiondetection/lib/sha1.[ch] \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/
+       @cp sha1collisiondetection/lib/ubc_check.[ch] \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/
+       $(TAR) rf $(GIT_TARNAME).tar \
+               $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \
+               $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch]
+endif
        @$(RM) -r $(GIT_TARNAME)
        gzip -f -9 $(GIT_TARNAME).tar
 
@@ -2713,7 +2770,7 @@ clean: profile-clean coverage-clean
        $(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
        $(RM) -r bin-wrappers $(dep_dirs)
        $(RM) -r po/build/
-       $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
+       $(RM) *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
        $(RM) -r $(GIT_TARNAME) .doc-tmp-dir
        $(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz
        $(RM) $(htmldocs).tar.gz $(manpages).tar.gz
@@ -2721,7 +2778,7 @@ clean: profile-clean coverage-clean
        $(MAKE) -C Documentation/ clean
 ifndef NO_PERL
        $(MAKE) -C gitweb clean
-       $(MAKE) -C perl clean
+       $(RM) -r perl/build/
 endif
        $(MAKE) -C templates/ clean
        $(MAKE) -C t/ clean
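An illustrative invocation exercising a few of the knobs added or changed
above (the prefix and the flag values are examples only, not recommendations):
------------------------
$ make prefix=/opt/git \
       USE_LIBPCRE=YesPlease \
       INSTALL_SYMLINKS=YesPlease \
       NO_PERL_CPAN_FALLBACKS=YesPlease \
       all install
------------------------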
index fdd7e420b1999e89f35a245b225a263bf5c1fd9d..7a6dc0603be1af20219ec41b4df926a9861d3644 120000 (symlink)
--- a/RelNotes
+++ b/RelNotes
@@ -1 +1 @@
-Documentation/RelNotes/2.16.3.txt
\ No newline at end of file
+Documentation/RelNotes/2.17.0.txt
\ No newline at end of file
diff --git a/apply.c b/apply.c
index 321a9fa68d491f7e5e89dc9e398b067f67a10c77..7e5792c996f430952b1b768f8267de851156ce83 100644 (file)
--- a/apply.c
+++ b/apply.c
@@ -950,7 +950,7 @@ static int gitdiff_verify_name(struct apply_state *state,
                }
                free(another);
        } else {
-               if (!starts_with(line, "/dev/null\n"))
+               if (!is_dev_null(line))
                        return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr);
        }
 
@@ -2263,8 +2263,8 @@ static void show_stats(struct apply_state *state, struct patch *patch)
 static int read_old_data(struct stat *st, struct patch *patch,
                         const char *path, struct strbuf *buf)
 {
-       enum safe_crlf safe_crlf = patch->crlf_in_old ?
-               SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE;
+       int conv_flags = patch->crlf_in_old ?
+               CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE;
        switch (st->st_mode & S_IFMT) {
        case S_IFLNK:
                if (strbuf_readlink(buf, path, st->st_size) < 0)
@@ -2281,7 +2281,7 @@ static int read_old_data(struct stat *st, struct patch *patch,
                 * should never look at the index when explicit crlf option
                 * is given.
                 */
-               convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf);
+               convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags);
                return 0;
        default:
                return -1;
@@ -2301,7 +2301,7 @@ static void update_pre_post_images(struct image *preimage,
                                   size_t len, size_t postlen)
 {
        int i, ctx, reduced;
-       char *new, *old, *fixed;
+       char *new_buf, *old_buf, *fixed;
        struct image fixed_preimage;
 
        /*
@@ -2327,25 +2327,25 @@ static void update_pre_post_images(struct image *preimage,
         * We trust the caller to tell us if the update can be done
         * in place (postlen==0) or not.
         */
-       old = postimage->buf;
+       old_buf = postimage->buf;
        if (postlen)
-               new = postimage->buf = xmalloc(postlen);
+               new_buf = postimage->buf = xmalloc(postlen);
        else
-               new = old;
+               new_buf = old_buf;
        fixed = preimage->buf;
 
        for (i = reduced = ctx = 0; i < postimage->nr; i++) {
                size_t l_len = postimage->line[i].len;
                if (!(postimage->line[i].flag & LINE_COMMON)) {
                        /* an added line -- no counterparts in preimage */
-                       memmove(new, old, l_len);
-                       old += l_len;
-                       new += l_len;
+                       memmove(new_buf, old_buf, l_len);
+                       old_buf += l_len;
+                       new_buf += l_len;
                        continue;
                }
 
                /* a common context -- skip it in the original postimage */
-               old += l_len;
+               old_buf += l_len;
 
                /* and find the corresponding one in the fixed preimage */
                while (ctx < preimage->nr &&
@@ -2365,29 +2365,29 @@ static void update_pre_post_images(struct image *preimage,
 
                /* and copy it in, while fixing the line length */
                l_len = preimage->line[ctx].len;
-               memcpy(new, fixed, l_len);
-               new += l_len;
+               memcpy(new_buf, fixed, l_len);
+               new_buf += l_len;
                fixed += l_len;
                postimage->line[i].len = l_len;
                ctx++;
        }
 
        if (postlen
-           ? postlen < new - postimage->buf
-           : postimage->len < new - postimage->buf)
+           ? postlen < new_buf - postimage->buf
+           : postimage->len < new_buf - postimage->buf)
                die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d",
-                   (int)postlen, (int) postimage->len, (int)(new - postimage->buf));
+                   (int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf));
 
        /* Fix the length of the whole thing */
-       postimage->len = new - postimage->buf;
+       postimage->len = new_buf - postimage->buf;
        postimage->nr -= reduced;
 }
 
 static int line_by_line_fuzzy_match(struct image *img,
                                    struct image *preimage,
                                    struct image *postimage,
-                                   unsigned long try,
-                                   int try_lno,
+                                   unsigned long current,
+                                   int current_lno,
                                    int preimage_limit)
 {
        int i;
@@ -2404,9 +2404,9 @@ static int line_by_line_fuzzy_match(struct image *img,
 
        for (i = 0; i < preimage_limit; i++) {
                size_t prelen = preimage->line[i].len;
-               size_t imglen = img->line[try_lno+i].len;
+               size_t imglen = img->line[current_lno+i].len;
 
-               if (!fuzzy_matchlines(img->buf + try + imgoff, imglen,
+               if (!fuzzy_matchlines(img->buf + current + imgoff, imglen,
                                      preimage->buf + preoff, prelen))
                        return 0;
                if (preimage->line[i].flag & LINE_COMMON)
@@ -2443,7 +2443,7 @@ static int line_by_line_fuzzy_match(struct image *img,
         */
        extra_chars = preimage_end - preimage_eof;
        strbuf_init(&fixed, imgoff + extra_chars);
-       strbuf_add(&fixed, img->buf + try, imgoff);
+       strbuf_add(&fixed, img->buf + current, imgoff);
        strbuf_add(&fixed, preimage_eof, extra_chars);
        fixed_buf = strbuf_detach(&fixed, &fixed_len);
        update_pre_post_images(preimage, postimage,
@@ -2455,8 +2455,8 @@ static int match_fragment(struct apply_state *state,
                          struct image *img,
                          struct image *preimage,
                          struct image *postimage,
-                         unsigned long try,
-                         int try_lno,
+                         unsigned long current,
+                         int current_lno,
                          unsigned ws_rule,
                          int match_beginning, int match_end)
 {
@@ -2466,12 +2466,12 @@ static int match_fragment(struct apply_state *state,
        size_t fixed_len, postlen;
        int preimage_limit;
 
-       if (preimage->nr + try_lno <= img->nr) {
+       if (preimage->nr + current_lno <= img->nr) {
                /*
                 * The hunk falls within the boundaries of img.
                 */
                preimage_limit = preimage->nr;
-               if (match_end && (preimage->nr + try_lno != img->nr))
+               if (match_end && (preimage->nr + current_lno != img->nr))
                        return 0;
        } else if (state->ws_error_action == correct_ws_error &&
                   (ws_rule & WS_BLANK_AT_EOF)) {
@@ -2482,7 +2482,7 @@ static int match_fragment(struct apply_state *state,
                 * match with img, and the remainder of the preimage
                 * must be blank.
                 */
-               preimage_limit = img->nr - try_lno;
+               preimage_limit = img->nr - current_lno;
        } else {
                /*
                 * The hunk extends beyond the end of the img and
@@ -2492,27 +2492,27 @@ static int match_fragment(struct apply_state *state,
                return 0;
        }
 
-       if (match_beginning && try_lno)
+       if (match_beginning && current_lno)
                return 0;
 
        /* Quick hash check */
        for (i = 0; i < preimage_limit; i++)
-               if ((img->line[try_lno + i].flag & LINE_PATCHED) ||
-                   (preimage->line[i].hash != img->line[try_lno + i].hash))
+               if ((img->line[current_lno + i].flag & LINE_PATCHED) ||
+                   (preimage->line[i].hash != img->line[current_lno + i].hash))
                        return 0;
 
        if (preimage_limit == preimage->nr) {
                /*
                 * Do we have an exact match?  If we were told to match
-                * at the end, size must be exactly at try+fragsize,
-                * otherwise try+fragsize must be still within the preimage,
+                * at the end, size must be exactly at current+fragsize,
+                * otherwise current+fragsize must be still within the preimage,
                 * and either case, the old piece should match the preimage
                 * exactly.
                 */
                if ((match_end
-                    ? (try + preimage->len == img->len)
-                    : (try + preimage->len <= img->len)) &&
-                   !memcmp(img->buf + try, preimage->buf, preimage->len))
+                    ? (current + preimage->len == img->len)
+                    : (current + preimage->len <= img->len)) &&
+                   !memcmp(img->buf + current, preimage->buf, preimage->len))
                        return 1;
        } else {
                /*
@@ -2543,7 +2543,7 @@ static int match_fragment(struct apply_state *state,
         */
        if (state->ws_ignore_action == ignore_ws_change)
                return line_by_line_fuzzy_match(img, preimage, postimage,
-                                               try, try_lno, preimage_limit);
+                                               current, current_lno, preimage_limit);
 
        if (state->ws_error_action != correct_ws_error)
                return 0;
@@ -2577,10 +2577,10 @@ static int match_fragment(struct apply_state *state,
         */
        strbuf_init(&fixed, preimage->len + 1);
        orig = preimage->buf;
-       target = img->buf + try;
+       target = img->buf + current;
        for (i = 0; i < preimage_limit; i++) {
                size_t oldlen = preimage->line[i].len;
-               size_t tgtlen = img->line[try_lno + i].len;
+               size_t tgtlen = img->line[current_lno + i].len;
                size_t fixstart = fixed.len;
                struct strbuf tgtfix;
                int match;
@@ -2666,8 +2666,8 @@ static int find_pos(struct apply_state *state,
                    int match_beginning, int match_end)
 {
        int i;
-       unsigned long backwards, forwards, try;
-       int backwards_lno, forwards_lno, try_lno;
+       unsigned long backwards, forwards, current;
+       int backwards_lno, forwards_lno, current_lno;
 
        /*
         * If match_beginning or match_end is specified, there is no
@@ -2687,25 +2687,25 @@ static int find_pos(struct apply_state *state,
        if ((size_t) line > img->nr)
                line = img->nr;
 
-       try = 0;
+       current = 0;
        for (i = 0; i < line; i++)
-               try += img->line[i].len;
+               current += img->line[i].len;
 
        /*
         * There's probably some smart way to do this, but I'll leave
         * that to the smart and beautiful people. I'm simple and stupid.
         */
-       backwards = try;
+       backwards = current;
        backwards_lno = line;
-       forwards = try;
+       forwards = current;
        forwards_lno = line;
-       try_lno = line;
+       current_lno = line;
 
        for (i = 0; ; i++) {
                if (match_fragment(state, img, preimage, postimage,
-                                  try, try_lno, ws_rule,
+                                  current, current_lno, ws_rule,
                                   match_beginning, match_end))
-                       return try_lno;
+                       return current_lno;
 
        again:
                if (backwards_lno == 0 && forwards_lno == img->nr)
@@ -2718,8 +2718,8 @@ static int find_pos(struct apply_state *state,
                        }
                        backwards_lno--;
                        backwards -= img->line[backwards_lno].len;
-                       try = backwards;
-                       try_lno = backwards_lno;
+                       current = backwards;
+                       current_lno = backwards_lno;
                } else {
                        if (forwards_lno == img->nr) {
                                i++;
@@ -2727,8 +2727,8 @@ static int find_pos(struct apply_state *state,
                        }
                        forwards += img->line[forwards_lno].len;
                        forwards_lno++;
-                       try = forwards;
-                       try_lno = forwards_lno;
+                       current = forwards;
+                       current_lno = forwards_lno;
                }
 
        }
@@ -3154,7 +3154,7 @@ static int apply_binary(struct apply_state *state,
                 * See if the old one matches what the patch
                 * applies to.
                 */
-               hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+               hash_object_file(img->buf, img->len, blob_type, &oid);
                if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix))
                        return error(_("the patch applies to '%s' (%s), "
                                       "which does not match the "
@@ -3180,7 +3180,7 @@ static int apply_binary(struct apply_state *state,
                unsigned long size;
                char *result;
 
-               result = read_sha1_file(oid.hash, &type, &size);
+               result = read_object_file(&oid, &type, &size);
                if (!result)
                        return error(_("the necessary postimage %s for "
                                       "'%s' cannot be read"),
@@ -3199,7 +3199,7 @@ static int apply_binary(struct apply_state *state,
                                     name);
 
                /* verify that the result matches */
-               hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+               hash_object_file(img->buf, img->len, blob_type, &oid);
                if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix))
                        return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
                                name, patch->new_sha1_prefix, oid_to_hex(&oid));
@@ -3242,7 +3242,7 @@ static int read_blob_object(struct strbuf *buf, const struct object_id *oid, uns
                unsigned long sz;
                char *result;
 
-               result = read_sha1_file(oid->hash, &type, &sz);
+               result = read_object_file(oid, &type, &sz);
                if (!result)
                        return -1;
                /* XXX read_sha1_file NUL-terminates */
@@ -3554,7 +3554,7 @@ static int try_threeway(struct apply_state *state,
 
        /* Preimage the patch was prepared for */
        if (patch->is_new)
-               write_sha1_file("", 0, blob_type, pre_oid.hash);
+               write_object_file("", 0, blob_type, &pre_oid);
        else if (get_oid(patch->old_sha1_prefix, &pre_oid) ||
                 read_blob_object(&buf, &pre_oid, patch->old_mode))
                return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
@@ -3570,7 +3570,7 @@ static int try_threeway(struct apply_state *state,
                return -1;
        }
        /* post_oid is theirs */
-       write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash);
+       write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
        clear_image(&tmp_image);
 
        /* our_oid is ours */
@@ -3583,7 +3583,7 @@ static int try_threeway(struct apply_state *state,
                        return error(_("cannot read the current contents of '%s'"),
                                     patch->old_name);
        }
-       write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash);
+       write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
        clear_image(&tmp_image);
 
        /* in-core three-way merge between post and our using pre as base */
@@ -4163,30 +4163,30 @@ static void show_mode_change(struct patch *p, int show_name)
 static void show_rename_copy(struct patch *p)
 {
        const char *renamecopy = p->is_rename ? "rename" : "copy";
-       const char *old, *new;
+       const char *old_name, *new_name;
 
        /* Find common prefix */
-       old = p->old_name;
-       new = p->new_name;
+       old_name = p->old_name;
+       new_name = p->new_name;
        while (1) {
                const char *slash_old, *slash_new;
-               slash_old = strchr(old, '/');
-               slash_new = strchr(new, '/');
+               slash_old = strchr(old_name, '/');
+               slash_new = strchr(new_name, '/');
                if (!slash_old ||
                    !slash_new ||
-                   slash_old - old != slash_new - new ||
-                   memcmp(old, new, slash_new - new))
+                   slash_old - old_name != slash_new - new_name ||
+                   memcmp(old_name, new_name, slash_new - new_name))
                        break;
-               old = slash_old + 1;
-               new = slash_new + 1;
+               old_name = slash_old + 1;
+               new_name = slash_new + 1;
        }
-       /* p->old_name thru old is the common prefix, and old and new
+       /* p->old_name thru old_name is the common prefix, and old_name and new_name
         * through the end of names are renames
         */
-       if (old != p->old_name)
+       if (old_name != p->old_name)
                printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy,
-                      (int)(old - p->old_name), p->old_name,
-                      old, new, p->score);
+                      (int)(old_name - p->old_name), p->old_name,
+                      old_name, new_name, p->score);
        else
                printf(" %s %s => %s (%d%%)\n", renamecopy,
                       p->old_name, p->new_name, p->score);
@@ -4291,7 +4291,7 @@ static int add_index_file(struct apply_state *state,
                        }
                        fill_stat_cache_info(ce, &st);
                }
-               if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) {
+               if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
                        free(ce);
                        return error(_("unable to create backing store "
                                       "for newly created file %s"), path);
@@ -4943,8 +4943,9 @@ int apply_parse_options(int argc, const char **argv,
                        N_("make sure the patch is applicable to the current index")),
                OPT_BOOL(0, "cached", &state->cached,
                        N_("apply a patch without touching the working tree")),
-               OPT_BOOL(0, "unsafe-paths", &state->unsafe_paths,
-                       N_("accept a patch that touches outside the working area")),
+               OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths,
+                          N_("accept a patch that touches outside the working area"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "apply", force_apply,
                        N_("also apply the patch (use with --stat/--summary/--check)")),
                OPT_BOOL('3', "3way", &state->threeway,
index c6ed96ee74ec10f5c9ffb6f520193326d4704b6b..3563bcb9f263f7782a77c679bf9be32af6bd13df 100644 (file)
@@ -111,7 +111,7 @@ static void write_trailer(void)
  * queues up writes, so that all our write(2) calls write exactly one
  * full block; pads writes to RECORDSIZE
  */
-static int stream_blocked(const unsigned char *sha1)
+static int stream_blocked(const struct object_id *oid)
 {
        struct git_istream *st;
        enum object_type type;
@@ -119,9 +119,9 @@ static int stream_blocked(const unsigned char *sha1)
        char buf[BLOCKSIZE];
        ssize_t readlen;
 
-       st = open_istream(sha1, &type, &sz, NULL);
+       st = open_istream(oid, &type, &sz, NULL);
        if (!st)
-               return error("cannot stream blob %s", sha1_to_hex(sha1));
+               return error("cannot stream blob %s", oid_to_hex(oid));
        for (;;) {
                readlen = read_istream(st, buf, sizeof(buf));
                if (readlen <= 0)
@@ -218,7 +218,7 @@ static void prepare_header(struct archiver_args *args,
 }
 
 static void write_extended_header(struct archiver_args *args,
-                                 const unsigned char *sha1,
+                                 const struct object_id *oid,
                                  const void *buffer, unsigned long size)
 {
        struct ustar_header header;
@@ -226,14 +226,14 @@ static void write_extended_header(struct archiver_args *args,
        memset(&header, 0, sizeof(header));
        *header.typeflag = TYPEFLAG_EXT_HEADER;
        mode = 0100666;
-       xsnprintf(header.name, sizeof(header.name), "%s.paxheader", sha1_to_hex(sha1));
+       xsnprintf(header.name, sizeof(header.name), "%s.paxheader", oid_to_hex(oid));
        prepare_header(args, &header, mode, size);
        write_blocked(&header, sizeof(header));
        write_blocked(buffer, size);
 }
 
 static int write_tar_entry(struct archiver_args *args,
-                          const unsigned char *sha1,
+                          const struct object_id *oid,
                           const char *path, size_t pathlen,
                           unsigned int mode)
 {
@@ -257,7 +257,7 @@ static int write_tar_entry(struct archiver_args *args,
                mode = (mode | ((mode & 0100) ? 0777 : 0666)) & ~tar_umask;
        } else {
                return error("unsupported file mode: 0%o (SHA1: %s)",
-                            mode, sha1_to_hex(sha1));
+                            mode, oid_to_hex(oid));
        }
        if (pathlen > sizeof(header.name)) {
                size_t plen = get_path_prefix(path, pathlen,
@@ -268,7 +268,7 @@ static int write_tar_entry(struct archiver_args *args,
                        memcpy(header.name, path + plen + 1, rest);
                } else {
                        xsnprintf(header.name, sizeof(header.name), "%s.data",
-                                 sha1_to_hex(sha1));
+                                 oid_to_hex(oid));
                        strbuf_append_ext_header(&ext_header, "path",
                                                 path, pathlen);
                }
@@ -276,14 +276,14 @@ static int write_tar_entry(struct archiver_args *args,
                memcpy(header.name, path, pathlen);
 
        if (S_ISREG(mode) && !args->convert &&
-           sha1_object_info(sha1, &size) == OBJ_BLOB &&
+           oid_object_info(oid, &size) == OBJ_BLOB &&
            size > big_file_threshold)
                buffer = NULL;
        else if (S_ISLNK(mode) || S_ISREG(mode)) {
                enum object_type type;
-               buffer = sha1_file_to_archive(args, path, sha1, old_mode, &type, &size);
+               buffer = object_file_to_archive(args, path, oid, old_mode, &type, &size);
                if (!buffer)
-                       return error("cannot read %s", sha1_to_hex(sha1));
+                       return error("cannot read %s", oid_to_hex(oid));
        } else {
                buffer = NULL;
                size = 0;
@@ -292,7 +292,7 @@ static int write_tar_entry(struct archiver_args *args,
        if (S_ISLNK(mode)) {
                if (size > sizeof(header.linkname)) {
                        xsnprintf(header.linkname, sizeof(header.linkname),
-                                 "see %s.paxheader", sha1_to_hex(sha1));
+                                 "see %s.paxheader", oid_to_hex(oid));
                        strbuf_append_ext_header(&ext_header, "linkpath",
                                                 buffer, size);
                } else
@@ -308,7 +308,7 @@ static int write_tar_entry(struct archiver_args *args,
        prepare_header(args, &header, mode, size_in_header);
 
        if (ext_header.len > 0) {
-               write_extended_header(args, sha1, ext_header.buf,
+               write_extended_header(args, oid, ext_header.buf,
                                      ext_header.len);
        }
        strbuf_release(&ext_header);
@@ -317,7 +317,7 @@ static int write_tar_entry(struct archiver_args *args,
                if (buffer)
                        write_blocked(buffer, size);
                else
-                       err = stream_blocked(sha1);
+                       err = stream_blocked(oid);
        }
        free(buffer);
        return err;
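
    As an illustrative aside, not part of this patch: the hunks above also convert the big-file streaming check to the object_id-based object-store query. A minimal sketch of that query, assuming Git's internal headers and using an invented helper name (should_stream_blob), would look like:

        #include "cache.h"

        /*
         * Sketch only: ask the object store for a blob's type and size via
         * the object_id-based helper and compare the size against the
         * streaming threshold, mirroring the check in write_tar_entry().
         */
        static int should_stream_blob(const struct object_id *oid)
        {
                unsigned long size;

                return oid_object_info(oid, &size) == OBJ_BLOB &&
                       size > big_file_threshold;
        }
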
index e8913e5a26c6e97216c4b79ad96b5e3ddf906c45..6b20bce4d1cd78563037c8658dd8bd8f7690c47b 100644 (file)
@@ -276,7 +276,7 @@ static int entry_is_binary(const char *path, const void *buffer, size_t size)
 #define STREAM_BUFFER_SIZE (1024 * 16)
 
 static int write_zip_entry(struct archiver_args *args,
-                          const unsigned char *sha1,
+                          const struct object_id *oid,
                           const char *path, size_t pathlen,
                           unsigned int mode)
 {
@@ -314,7 +314,7 @@ static int write_zip_entry(struct archiver_args *args,
 
        if (pathlen > 0xffff) {
                return error("path too long (%d chars, SHA1: %s): %s",
-                               (int)pathlen, sha1_to_hex(sha1), path);
+                               (int)pathlen, oid_to_hex(oid), path);
        }
 
        if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
@@ -325,7 +325,7 @@ static int write_zip_entry(struct archiver_args *args,
                compressed_size = 0;
                buffer = NULL;
        } else if (S_ISREG(mode) || S_ISLNK(mode)) {
-               enum object_type type = sha1_object_info(sha1, &size);
+               enum object_type type = oid_object_info(oid, &size);
 
                method = 0;
                attr2 = S_ISLNK(mode) ? ((mode | 0777) << 16) :
@@ -337,18 +337,18 @@ static int write_zip_entry(struct archiver_args *args,
 
                if (S_ISREG(mode) && type == OBJ_BLOB && !args->convert &&
                    size > big_file_threshold) {
-                       stream = open_istream(sha1, &type, &size, NULL);
+                       stream = open_istream(oid, &type, &size, NULL);
                        if (!stream)
                                return error("cannot stream blob %s",
-                                            sha1_to_hex(sha1));
+                                            oid_to_hex(oid));
                        flags |= ZIP_STREAM;
                        out = buffer = NULL;
                } else {
-                       buffer = sha1_file_to_archive(args, path, sha1, mode,
-                                                     &type, &size);
+                       buffer = object_file_to_archive(args, path, oid, mode,
+                                                       &type, &size);
                        if (!buffer)
                                return error("cannot read %s",
-                                            sha1_to_hex(sha1));
+                                            oid_to_hex(oid));
                        crc = crc32(crc, buffer, size);
                        is_binary = entry_is_binary(path_without_prefix,
                                                    buffer, size);
@@ -357,7 +357,7 @@ static int write_zip_entry(struct archiver_args *args,
                compressed_size = (method == 0) ? size : 0;
        } else {
                return error("unsupported file mode: 0%o (SHA1: %s)", mode,
-                               sha1_to_hex(sha1));
+                               oid_to_hex(oid));
        }
 
        if (creator_version > max_creator_version)
index 0b7b62af0c3ecee10a26e9bd2d274690604ffcad..93ab175b0b4055bcfbd9334c7ccb36475c33e549 100644 (file)
--- a/archive.c
+++ b/archive.c
@@ -63,16 +63,16 @@ static void format_subst(const struct commit *commit,
        free(to_free);
 }
 
-void *sha1_file_to_archive(const struct archiver_args *args,
-                          const char *path, const unsigned char *sha1,
-                          unsigned int mode, enum object_type *type,
-                          unsigned long *sizep)
+void *object_file_to_archive(const struct archiver_args *args,
+                            const char *path, const struct object_id *oid,
+                            unsigned int mode, enum object_type *type,
+                            unsigned long *sizep)
 {
        void *buffer;
        const struct commit *commit = args->convert ? args->commit : NULL;
 
        path += args->baselen;
-       buffer = read_sha1_file(sha1, type, sizep);
+       buffer = read_object_file(oid, type, sizep);
        if (buffer && S_ISREG(mode)) {
                struct strbuf buf = STRBUF_INIT;
                size_t size = 0;
@@ -121,7 +121,7 @@ static int check_attr_export_subst(const struct attr_check *check)
        return check && ATTR_TRUE(check->items[1].value);
 }
 
-static int write_archive_entry(const unsigned char *sha1, const char *base,
+static int write_archive_entry(const struct object_id *oid, const char *base,
                int baselen, const char *filename, unsigned mode, int stage,
                void *context)
 {
@@ -153,7 +153,7 @@ static int write_archive_entry(const unsigned char *sha1, const char *base,
        if (S_ISDIR(mode) || S_ISGITLINK(mode)) {
                if (args->verbose)
                        fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-               err = write_entry(args, sha1, path.buf, path.len, mode);
+               err = write_entry(args, oid, path.buf, path.len, mode);
                if (err)
                        return err;
                return (S_ISDIR(mode) ? READ_TREE_RECURSIVE : 0);
@@ -161,7 +161,7 @@ static int write_archive_entry(const unsigned char *sha1, const char *base,
 
        if (args->verbose)
                fprintf(stderr, "%.*s\n", (int)path.len, path.buf);
-       return write_entry(args, sha1, path.buf, path.len, mode);
+       return write_entry(args, oid, path.buf, path.len, mode);
 }
 
 static void queue_directory(const unsigned char *sha1,
@@ -191,14 +191,14 @@ static int write_directory(struct archiver_context *c)
        d->path[d->len - 1] = '\0'; /* no trailing slash */
        ret =
                write_directory(c) ||
-               write_archive_entry(d->oid.hash, d->path, d->baselen,
+               write_archive_entry(&d->oid, d->path, d->baselen,
                                    d->path + d->baselen, d->mode,
                                    d->stage, c) != READ_TREE_RECURSIVE;
        free(d);
        return ret ? -1 : 0;
 }
 
-static int queue_or_write_archive_entry(const unsigned char *sha1,
+static int queue_or_write_archive_entry(const struct object_id *oid,
                struct strbuf *base, const char *filename,
                unsigned mode, int stage, void *context)
 {
@@ -224,14 +224,14 @@ static int queue_or_write_archive_entry(const unsigned char *sha1,
 
                if (check_attr_export_ignore(check))
                        return 0;
-               queue_directory(sha1, base, filename,
+               queue_directory(oid->hash, base, filename,
                                mode, stage, c);
                return READ_TREE_RECURSIVE;
        }
 
        if (write_directory(c))
                return -1;
-       return write_archive_entry(sha1, base->buf, base->len, filename, mode,
+       return write_archive_entry(oid, base->buf, base->len, filename, mode,
                                   stage, context);
 }
 
@@ -250,7 +250,7 @@ int write_archive_entries(struct archiver_args *args,
                        len--;
                if (args->verbose)
                        fprintf(stderr, "%.*s\n", (int)len, args->base);
-               err = write_entry(args, args->tree->object.oid.hash, args->base,
+               err = write_entry(args, &args->tree->object.oid, args->base,
                                  len, 040777);
                if (err)
                        return err;
@@ -303,7 +303,7 @@ static const struct archiver *lookup_archiver(const char *name)
        return NULL;
 }
 
-static int reject_entry(const unsigned char *sha1, struct strbuf *base,
+static int reject_entry(const struct object_id *oid, struct strbuf *base,
                        const char *filename, unsigned mode,
                        int stage, void *context)
 {
@@ -397,8 +397,8 @@ static void parse_treeish_arg(const char **argv,
                unsigned int mode;
                int err;
 
-               err = get_tree_entry(tree->object.oid.hash, prefix,
-                                    tree_oid.hash, &mode);
+               err = get_tree_entry(&tree->object.oid, prefix, &tree_oid,
+                                    &mode);
                if (err || !S_ISDIR(mode))
                        die("current working directory is untracked");
 
index 62d1d82c1af0fa3bf77b32d63e9b4866f3428898..1f9954f7cdc5a1ee8036321e439a65bdfb90e59f 100644 (file)
--- a/archive.h
+++ b/archive.h
@@ -31,7 +31,7 @@ extern void init_tar_archiver(void);
 extern void init_zip_archiver(void);
 
 typedef int (*write_archive_entry_fn_t)(struct archiver_args *args,
-                                       const unsigned char *sha1,
+                                       const struct object_id *oid,
                                        const char *path, size_t pathlen,
                                        unsigned int mode);
 
@@ -39,9 +39,9 @@ extern int write_archive_entries(struct archiver_args *args, write_archive_entry
 extern int write_archive(int argc, const char **argv, const char *prefix, const char *name_hint, int remote);
 
 const char *archive_format_from_filename(const char *filename);
-extern void *sha1_file_to_archive(const struct archiver_args *args,
-                                 const char *path, const unsigned char *sha1,
-                                 unsigned int mode, enum object_type *type,
-                                 unsigned long *sizep);
+extern void *object_file_to_archive(const struct archiver_args *args,
+                                   const char *path, const struct object_id *oid,
+                                   unsigned int mode, enum object_type *type,
+                                   unsigned long *sizep);
 
 #endif /* ARCHIVE_H */
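
    For context on the typedef converted above: a backend passed to write_archive_entries() now receives a struct object_id rather than a raw SHA-1 buffer. The following is a hedged sketch only, not part of this series; the callback name write_list_entry is hypothetical and the fragment assumes Git's internal headers:

        #include "cache.h"
        #include "archive.h"

        /*
         * Hypothetical backend callback matching the object_id-based
         * write_archive_entry_fn_t: it only reports each entry instead
         * of writing an archive.
         */
        static int write_list_entry(struct archiver_args *args,
                                    const struct object_id *oid,
                                    const char *path, size_t pathlen,
                                    unsigned int mode)
        {
                if (args->verbose)
                        fprintf(stderr, "%06o %s %.*s\n",
                                mode, oid_to_hex(oid), (int)pathlen, path);
                return 0;
        }
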
index f6d05bd66f42bd9874a08f5585ae99337d22dad5..ad395bb2b8663013102737be78cfc7601cab9abc 100644 (file)
--- a/bisect.c
+++ b/bisect.c
@@ -132,7 +132,8 @@ static void show_list(const char *debug, int counted, int nr,
                unsigned flags = commit->object.flags;
                enum object_type type;
                unsigned long size;
-               char *buf = read_sha1_file(commit->object.oid.hash, &type, &size);
+               char *buf = read_object_file(&commit->object.oid, &type,
+                                            &size);
                const char *subject_start;
                int subject_len;
 
diff --git a/blame.c b/blame.c
index 2893f3c1030aab91a42ff9e0daf8a54ba8c3ef3c..78c9808bd1a04a4c641b0f5f853540ea7618a522 100644 (file)
--- a/blame.c
+++ b/blame.c
@@ -80,8 +80,8 @@ static void verify_working_tree_path(struct commit *work_tree, const char *path)
                struct object_id blob_oid;
                unsigned mode;
 
-               if (!get_tree_entry(commit_oid->hash, path, blob_oid.hash, &mode) &&
-                   sha1_object_info(blob_oid.hash, NULL) == OBJ_BLOB)
+               if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
+                   oid_object_info(&blob_oid, NULL) == OBJ_BLOB)
                        return;
        }
 
@@ -232,7 +232,7 @@ static struct commit *fake_working_tree_commit(struct diff_options *opt,
        convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
        origin->file.ptr = buf.buf;
        origin->file.size = buf.len;
-       pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash);
+       pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
 
        /*
         * Read the current index, replace the path entry with
@@ -297,8 +297,8 @@ static void fill_origin_blob(struct diff_options *opt,
                    textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size))
                        ;
                else
-                       file->ptr = read_sha1_file(o->blob_oid.hash, &type,
-                                                  &file_size);
+                       file->ptr = read_object_file(&o->blob_oid, &type,
+                                                    &file_size);
                file->size = file_size;
 
                if (!file->ptr)
@@ -502,11 +502,9 @@ static int fill_blob_sha1_and_mode(struct blame_origin *origin)
 {
        if (!is_null_oid(&origin->blob_oid))
                return 0;
-       if (get_tree_entry(origin->commit->object.oid.hash,
-                          origin->path,
-                          origin->blob_oid.hash, &origin->mode))
+       if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
                goto error_out;
-       if (sha1_object_info(origin->blob_oid.hash, NULL) != OBJ_BLOB)
+       if (oid_object_info(&origin->blob_oid, NULL) != OBJ_BLOB)
                goto error_out;
        return 0;
  error_out:
@@ -998,28 +996,29 @@ unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
 }
 
 /*
- * best_so_far[] and this[] are both a split of an existing blame_entry
- * that passes blame to the parent.  Maintain best_so_far the best split
- * so far, by comparing this and best_so_far and copying this into
+ * best_so_far[] and potential[] are both a split of an existing blame_entry
+ * that passes blame to the parent.  Maintain best_so_far as the best split
+ * so far, by comparing potential and best_so_far and copying potential into
  * best_so_far as needed.
  */
 static void copy_split_if_better(struct blame_scoreboard *sb,
                                 struct blame_entry *best_so_far,
-                                struct blame_entry *this)
+                                struct blame_entry *potential)
 {
        int i;
 
-       if (!this[1].suspect)
+       if (!potential[1].suspect)
                return;
        if (best_so_far[1].suspect) {
-               if (blame_entry_score(sb, &this[1]) < blame_entry_score(sb, &best_so_far[1]))
+               if (blame_entry_score(sb, &potential[1]) <
+                   blame_entry_score(sb, &best_so_far[1]))
                        return;
        }
 
        for (i = 0; i < 3; i++)
-               blame_origin_incref(this[i].suspect);
+               blame_origin_incref(potential[i].suspect);
        decref_split(best_so_far);
-       memcpy(best_so_far, this, sizeof(struct blame_entry [3]));
+       memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
 }
 
 /*
@@ -1046,12 +1045,12 @@ static void handle_split(struct blame_scoreboard *sb,
        if (ent->num_lines <= tlno)
                return;
        if (tlno < same) {
-               struct blame_entry this[3];
+               struct blame_entry potential[3];
                tlno += ent->s_lno;
                same += ent->s_lno;
-               split_overlap(this, ent, tlno, plno, same, parent);
-               copy_split_if_better(sb, split, this);
-               decref_split(this);
+               split_overlap(potential, ent, tlno, plno, same, parent);
+               copy_split_if_better(sb, split, potential);
+               decref_split(potential);
        }
 }
 
@@ -1273,7 +1272,7 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
                        struct diff_filepair *p = diff_queued_diff.queue[i];
                        struct blame_origin *norigin;
                        mmfile_t file_p;
-                       struct blame_entry this[3];
+                       struct blame_entry potential[3];
 
                        if (!DIFF_FILE_VALID(p->one))
                                continue; /* does not exist in parent */
@@ -1292,10 +1291,10 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
 
                        for (j = 0; j < num_ents; j++) {
                                find_copy_in_blob(sb, blame_list[j].ent,
-                                                 norigin, this, &file_p);
+                                                 norigin, potential, &file_p);
                                copy_split_if_better(sb, blame_list[j].split,
-                                                    this);
-                               decref_split(this);
+                                                    potential);
+                               decref_split(potential);
                        }
                        blame_origin_decref(norigin);
                }
@@ -1830,8 +1829,8 @@ void setup_scoreboard(struct blame_scoreboard *sb, const char *path, struct blam
                                    &sb->final_buf_size))
                        ;
                else
-                       sb->final_buf = read_sha1_file(o->blob_oid.hash, &type,
-                                                      &sb->final_buf_size);
+                       sb->final_buf = read_object_file(&o->blob_oid, &type,
+                                                        &sb->final_buf_size);
 
                if (!sb->final_buf)
                        die(_("cannot read blob %s for path %s"),
index bf01d89e28eaef0f8113d42c6b9d4142b8e912e4..9ef7fb02d56aac94d104b50aed7d7dfda09cfc98 100644 (file)
@@ -294,7 +294,7 @@ static struct option builtin_add_options[] = {
        OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")),
        OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")),
        OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")),
-       OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files")),
+       OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0),
        OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")),
        OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")),
        OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")),
@@ -534,10 +534,9 @@ int cmd_add(int argc, const char **argv, const char *prefix)
        unplug_bulk_checkin();
 
 finish:
-       if (active_cache_changed) {
-               if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
-                       die(_("Unable to write new index file"));
-       }
+       if (write_locked_index(&the_index, &lock_file,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
+               die(_("Unable to write new index file"));
 
        UNLEAK(pathspec);
        UNLEAK(dir);
index acfe9d3c8cd6dbd8362c17d55dc036ce6682ce8a..1bcc3606c54f62c02ea6b7e70c895bf21714ae65 100644 (file)
@@ -1011,6 +1011,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format,
 
        if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
                die_errno(_("failed to create directory '%s'"), state->dir);
+       delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
 
        if (split_mail(state, patch_format, paths, keep_cr) < 0) {
                am_destroy(state);
@@ -1061,7 +1062,7 @@ static void am_setup(struct am_state *state, enum patch_format patch_format,
        }
        write_state_text(state, "scissors", str);
 
-       sq_quote_argv(&sb, state->git_apply_opts.argv, 0);
+       sq_quote_argv(&sb, state->git_apply_opts.argv);
        write_state_text(state, "apply-opt", sb.buf);
 
        if (state->rebasing)
@@ -1110,6 +1111,7 @@ static void am_next(struct am_state *state)
 
        oidclr(&state->orig_commit);
        unlink(am_path(state, "original-commit"));
+       delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
 
        if (!get_oid("HEAD", &head))
                write_state_text(state, "abort-safety", oid_to_hex(&head));
@@ -1441,6 +1443,8 @@ static int parse_mail_rebase(struct am_state *state, const char *mail)
 
        oidcpy(&state->orig_commit, &commit_oid);
        write_state_text(state, "original-commit", oid_to_hex(&commit_oid));
+       update_ref("am", "REBASE_HEAD", &commit_oid,
+                  NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
 
        return 0;
 }
@@ -1546,7 +1550,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
        discard_cache();
        read_cache_from(index_path);
 
-       if (write_index_as_tree(orig_tree.hash, &the_index, index_path, 0, NULL))
+       if (write_index_as_tree(&orig_tree, &the_index, index_path, 0, NULL))
                return error(_("Repository lacks necessary blobs to fall back on 3-way merge."));
 
        say(state, stdout, _("Using index info to reconstruct a base tree..."));
@@ -1571,7 +1575,7 @@ static int fall_back_threeway(const struct am_state *state, const char *index_pa
                return error(_("Did you hand edit your patch?\n"
                                "It does not apply to blobs recorded in its index."));
 
-       if (write_index_as_tree(their_tree.hash, &the_index, index_path, 0, NULL))
+       if (write_index_as_tree(&their_tree, &the_index, index_path, 0, NULL))
                return error("could not write tree");
 
        say(state, stdout, _("Falling back to patching base and 3-way merge..."));
@@ -1622,7 +1626,7 @@ static void do_commit(const struct am_state *state)
        if (run_hook_le(NULL, "pre-applypatch", NULL))
                exit(1);
 
-       if (write_cache_as_tree(tree.hash, 0, NULL))
+       if (write_cache_as_tree(&tree, 0, NULL))
                die(_("git write-tree failed to write a tree"));
 
        if (!get_oid_commit("HEAD", &parent)) {
@@ -1641,8 +1645,8 @@ static void do_commit(const struct am_state *state)
                setenv("GIT_COMMITTER_DATE",
                        state->ignore_date ? "" : state->author_date, 1);
 
-       if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash,
-                               author, state->sign_commit))
+       if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit,
+                       author, state->sign_commit))
                die(_("failed to write commit object"));
 
        reflog_msg = getenv("GIT_REFLOG_ACTION");
@@ -1831,8 +1835,7 @@ static void am_run(struct am_state *state, int resume)
                        git_config_get_bool("advice.amworkdir", &advice_amworkdir);
 
                        if (advice_amworkdir)
-                               printf_ln(_("The copy of the patch that failed is found in: %s"),
-                                               am_path(state, "patch"));
+                               printf_ln(_("Use 'git am --show-current-patch' to see the failed patch"));
 
                        die_user_resolve(state);
                }
@@ -2001,7 +2004,7 @@ static int clean_index(const struct object_id *head, const struct object_id *rem
        if (fast_forward_to(head_tree, head_tree, 1))
                return -1;
 
-       if (write_cache_as_tree(index.hash, 0, NULL))
+       if (write_cache_as_tree(&index, 0, NULL))
                return -1;
 
        index_tree = parse_tree_indirect(&index);
@@ -2121,6 +2124,34 @@ static void am_abort(struct am_state *state)
        am_destroy(state);
 }
 
+static int show_patch(struct am_state *state)
+{
+       struct strbuf sb = STRBUF_INIT;
+       const char *patch_path;
+       int len;
+
+       if (!is_null_oid(&state->orig_commit)) {
+               const char *av[4] = { "show", NULL, "--", NULL };
+               char *new_oid_str;
+               int ret;
+
+               av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit));
+               ret = run_command_v_opt(av, RUN_GIT_CMD);
+               free(new_oid_str);
+               return ret;
+       }
+
+       patch_path = am_path(state, msgnum(state));
+       len = strbuf_read_file(&sb, patch_path, 0);
+       if (len < 0)
+               die_errno(_("failed to read '%s'"), patch_path);
+
+       setup_pager();
+       write_in_full(1, sb.buf, sb.len);
+       strbuf_release(&sb);
+       return 0;
+}
+
 /**
  * parse_options() callback that validates and sets opt->value to the
  * PATCH_FORMAT_* enum value corresponding to `arg`.
@@ -2149,7 +2180,9 @@ enum resume_mode {
        RESUME_APPLY,
        RESUME_RESOLVED,
        RESUME_SKIP,
-       RESUME_ABORT
+       RESUME_ABORT,
+       RESUME_QUIT,
+       RESUME_SHOW_PATCH
 };
 
 static int git_am_config(const char *k, const char *v, void *cb)
@@ -2171,6 +2204,7 @@ int cmd_am(int argc, const char **argv, const char *prefix)
        int patch_format = PATCH_FORMAT_UNKNOWN;
        enum resume_mode resume = RESUME_FALSE;
        int in_progress;
+       int ret = 0;
 
        const char * const usage[] = {
                N_("git am [<options>] [(<mbox> | <Maildir>)...]"),
@@ -2249,6 +2283,12 @@ int cmd_am(int argc, const char **argv, const char *prefix)
                OPT_CMDMODE(0, "abort", &resume,
                        N_("restore the original branch and abort the patching operation."),
                        RESUME_ABORT),
+               OPT_CMDMODE(0, "quit", &resume,
+                       N_("abort the patching operation but keep HEAD where it is."),
+                       RESUME_QUIT),
+               OPT_CMDMODE(0, "show-current-patch", &resume,
+                       N_("show the patch being applied."),
+                       RESUME_SHOW_PATCH),
                OPT_BOOL(0, "committer-date-is-author-date",
                        &state.committer_date_is_author_date,
                        N_("lie about committer date")),
@@ -2317,7 +2357,7 @@ int cmd_am(int argc, const char **argv, const char *prefix)
                 * stray directories.
                 */
                if (file_exists(state.dir) && !state.rebasing) {
-                       if (resume == RESUME_ABORT) {
+                       if (resume == RESUME_ABORT || resume == RESUME_QUIT) {
                                am_destroy(&state);
                                am_state_release(&state);
                                return 0;
@@ -2359,11 +2399,18 @@ int cmd_am(int argc, const char **argv, const char *prefix)
        case RESUME_ABORT:
                am_abort(&state);
                break;
+       case RESUME_QUIT:
+               am_rerere_clear();
+               am_destroy(&state);
+               break;
+       case RESUME_SHOW_PATCH:
+               ret = show_patch(&state);
+               break;
        default:
                die("BUG: invalid resume value");
        }
 
        am_state_release(&state);
 
-       return 0;
+       return ret;
 }
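
    The am changes above include the switch to the object_id-taking write_cache_as_tree() and commit_tree(). As a rough sketch of that pair, assuming Git's internal headers (the function sketch_commit_index is invented for illustration; a NULL parent list would produce a root commit):

        #include "cache.h"
        #include "cache-tree.h"
        #include "commit.h"

        /*
         * Illustration only: write the current index as a tree and commit
         * it, using the object_id-based helpers the hunks above adopt.
         */
        static void sketch_commit_index(const char *msg)
        {
                struct object_id tree, commit;

                if (write_cache_as_tree(&tree, 0, NULL))
                        die(_("git write-tree failed to write a tree"));
                if (commit_tree(msg, strlen(msg), &tree, NULL, &commit,
                                NULL, NULL))
                        die(_("failed to write commit object"));
                printf("%s\n", oid_to_hex(&commit));
        }
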
index f863465a0fa137f3446071a0ec1653adad536fdc..73971d0dd20e7233d6cb1e6c16986ede25526939 100644 (file)
@@ -55,7 +55,7 @@ static int run_remote_archiver(int argc, const char **argv,
 
        buf = packet_read_line(fd[0], NULL);
        if (!buf)
-               die(_("git archive: expected ACK/NAK, got EOF"));
+               die(_("git archive: expected ACK/NAK, got a flush packet"));
        if (strcmp(buf, "ACK")) {
                if (starts_with(buf, "NACK "))
                        die(_("git archive: NACK %s"), buf + 5);
index 005f55aaa257fc34f81517650223a3c195c03178..db38c0b307c5719ab3bd5e6b8597ec8810c0de3d 100644 (file)
@@ -499,7 +499,7 @@ static int read_ancestry(const char *graft_file)
 
 static int update_auto_abbrev(int auto_abbrev, struct blame_origin *suspect)
 {
-       const char *uniq = find_unique_abbrev(suspect->commit->object.oid.hash,
+       const char *uniq = find_unique_abbrev(&suspect->commit->object.oid,
                                              auto_abbrev);
        int len = strlen(uniq);
        if (auto_abbrev < len)
@@ -649,6 +649,15 @@ static int blame_move_callback(const struct option *option, const char *arg, int
        return 0;
 }
 
+static int is_a_rev(const char *name)
+{
+       struct object_id oid;
+
+       if (get_oid(name, &oid))
+               return 0;
+       return OBJ_NONE < oid_object_info(&oid, NULL);
+}
+
 int cmd_blame(int argc, const char **argv, const char *prefix)
 {
        struct rev_info revs;
@@ -720,6 +729,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        for (;;) {
                switch (parse_options_step(&ctx, options, blame_opt_usage)) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_DONE:
                        if (ctx.argv[0])
@@ -845,16 +855,15 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        } else {
                if (argc < 2)
                        usage_with_options(blame_opt_usage, options);
-               path = add_prefix(prefix, argv[argc - 1]);
-               if (argc == 3 && !file_exists(path)) { /* (2b) */
+               if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */
                        path = add_prefix(prefix, argv[1]);
                        argv[1] = argv[2];
+               } else {        /* (2a) */
+                       if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree())
+                               die("missing <path> to blame");
+                       path = add_prefix(prefix, argv[argc - 1]);
                }
                argv[argc - 1] = "--";
-
-               setup_work_tree();
-               if (!file_exists(path))
-                       die_errno("cannot stat path '%s'", path);
        }
 
        revs.disable_stdin = 1;
index 8dcc2ed058be6e653f885e48c9aa5f465a5f9749..5bd2a0dd4891ce0d42ad897c7b5fe0f66ca73be4 100644 (file)
@@ -273,7 +273,7 @@ static int delete_branches(int argc, const char **argv, int force, int kinds,
                               bname.buf,
                               (flags & REF_ISBROKEN) ? "broken"
                               : (flags & REF_ISSYMREF) ? target
-                              : find_unique_abbrev(oid.hash, DEFAULT_ABBREV));
+                              : find_unique_abbrev(&oid, DEFAULT_ABBREV));
                }
                delete_branch_config(bname.buf);
 
@@ -615,7 +615,7 @@ int cmd_branch(int argc, const char **argv, const char *prefix)
                OPT_BOOL('l', "create-reflog", &reflog, N_("create the branch's reflog")),
                OPT_BOOL(0, "edit-description", &edit_description,
                         N_("edit the description for the branch")),
-               OPT__FORCE(&force, N_("force creation, move/rename, deletion")),
+               OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE),
                OPT_MERGED(&filter, N_("print only branches that are merged")),
                OPT_NO_MERGED(&filter, N_("print only branches that are not merged")),
                OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")),
index f5fa4fd75af26a66cd1b2c0d493116704bc16dd5..2c46d257cd9a09a8f8ff05820b53224394385c7d 100644 (file)
@@ -32,7 +32,7 @@ static int filter_object(const char *path, unsigned mode,
 {
        enum object_type type;
 
-       *buf = read_sha1_file(oid->hash, &type, size);
+       *buf = read_object_file(oid, &type, size);
        if (!*buf)
                return error(_("cannot read object %s '%s'"),
                             oid_to_hex(oid), path);
@@ -76,8 +76,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        buf = NULL;
        switch (opt) {
        case 't':
-               oi.typename = &sb;
-               if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
+               oi.type_name = &sb;
+               if (oid_object_info_extended(&oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                if (sb.len) {
                        printf("%s\n", sb.buf);
@@ -88,7 +88,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
 
        case 's':
                oi.sizep = &size;
-               if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
+               if (oid_object_info_extended(&oid, &oi, flags) < 0)
                        die("git cat-file: could not get object info");
                printf("%lu\n", size);
                return 0;
@@ -116,7 +116,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                /* else fallthrough */
 
        case 'p':
-               type = sha1_object_info(oid.hash, NULL);
+               type = oid_object_info(&oid, NULL);
                if (type < 0)
                        die("Not a valid object name %s", obj_name);
 
@@ -130,7 +130,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
 
                if (type == OBJ_BLOB)
                        return stream_blob_to_fd(1, &oid, NULL, 0);
-               buf = read_sha1_file(oid.hash, &type, &size);
+               buf = read_object_file(&oid, &type, &size);
                if (!buf)
                        die("Cannot read object %s", obj_name);
 
@@ -140,8 +140,9 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
        case 0:
                if (type_from_string(exp_type) == OBJ_BLOB) {
                        struct object_id blob_oid;
-                       if (sha1_object_info(oid.hash, NULL) == OBJ_TAG) {
-                               char *buffer = read_sha1_file(oid.hash, &type, &size);
+                       if (oid_object_info(&oid, NULL) == OBJ_TAG) {
+                               char *buffer = read_object_file(&oid, &type,
+                                                               &size);
                                const char *target;
                                if (!skip_prefix(buffer, "object ", &target) ||
                                    get_oid_hex(target, &blob_oid))
@@ -150,7 +151,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                        } else
                                oidcpy(&blob_oid, &oid);
 
-                       if (sha1_object_info(blob_oid.hash, NULL) == OBJ_BLOB)
+                       if (oid_object_info(&blob_oid, NULL) == OBJ_BLOB)
                                return stream_blob_to_fd(1, &blob_oid, NULL, 0);
                        /*
                         * we attempted to dereference a tag to a blob
@@ -159,7 +160,7 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                         * fall-back to the usual case.
                         */
                }
-               buf = read_object_with_reference(oid.hash, exp_type, &size, NULL);
+               buf = read_object_with_reference(&oid, exp_type, &size, NULL);
                break;
 
        default:
@@ -229,7 +230,7 @@ static void expand_atom(struct strbuf *sb, const char *atom, int len,
                if (data->mark_query)
                        data->info.typep = &data->type;
                else
-                       strbuf_addstr(sb, typename(data->type));
+                       strbuf_addstr(sb, type_name(data->type));
        } else if (is_atom("objectsize", atom, len)) {
                if (data->mark_query)
                        data->info.sizep = &data->size;
@@ -304,8 +305,9 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                                enum object_type type;
                                if (!textconv_object(data->rest, 0100644, oid,
                                                     1, &contents, &size))
-                                       contents = read_sha1_file(oid->hash, &type,
-                                                                 &size);
+                                       contents = read_object_file(oid,
+                                                                   &type,
+                                                                   &size);
                                if (!contents)
                                        die("could not convert '%s' %s",
                                            oid_to_hex(oid), data->rest);
@@ -321,7 +323,7 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
                unsigned long size;
                void *contents;
 
-               contents = read_sha1_file(oid->hash, &type, &size);
+               contents = read_object_file(oid, &type, &size);
                if (!contents)
                        die("object %s disappeared", oid_to_hex(oid));
                if (type != data->type)
@@ -340,8 +342,8 @@ static void batch_object_write(const char *obj_name, struct batch_options *opt,
        struct strbuf buf = STRBUF_INIT;
 
        if (!data->skip_object_info &&
-           sha1_object_info_extended(data->oid.hash, &data->info,
-                                     OBJECT_INFO_LOOKUP_REPLACE) < 0) {
+           oid_object_info_extended(&data->oid, &data->info,
+                                    OBJECT_INFO_LOOKUP_REPLACE) < 0) {
                printf("%s missing\n",
                       obj_name ? obj_name : oid_to_hex(&data->oid));
                fflush(stdout);
@@ -475,6 +477,8 @@ static int batch_objects(struct batch_options *opt)
 
                for_each_loose_object(batch_loose_object, &sa, 0);
                for_each_packed_object(batch_packed_object, &sa, 0);
+               if (repository_format_partial_clone)
+                       warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
 
                cb.opt = opt;
                cb.expand = &data;
index 3e280b9c7aa9c93c8e7572a4fe3f7ef3ac92b3af..ec9a959e08d0e3dfa0d2b08529cacbe8d2e04c6e 100644 (file)
@@ -72,7 +72,7 @@ static int check_ignore(struct dir_struct *dir,
 {
        const char *full_path;
        char *seen;
-       int num_ignored = 0, dtype = DT_UNKNOWN, i;
+       int num_ignored = 0, i;
        struct exclude *exclude;
        struct pathspec pathspec;
 
@@ -104,6 +104,7 @@ static int check_ignore(struct dir_struct *dir,
                full_path = pathspec.items[i].match;
                exclude = NULL;
                if (!seen[i]) {
+                       int dtype = DT_UNKNOWN;
                        exclude = last_exclude_matching(dir, &the_index,
                                                        full_path, &dtype);
                }
index b0e78b819db3138f1c5ac4fe4b5d850ab306b470..a730f6a1aa47a60d4c131ee47076411a129d8080 100644 (file)
@@ -157,7 +157,7 @@ int cmd_checkout_index(int argc, const char **argv, const char *prefix)
        struct option builtin_checkout_index_options[] = {
                OPT_BOOL('a', "all", &all,
                        N_("check out all files in the index")),
-               OPT__FORCE(&force, N_("force overwrite of existing files")),
+               OPT__FORCE(&force, N_("force overwrite of existing files"), 0),
                OPT__QUIET(&quiet,
                        N_("no warning for existing files and files not in index")),
                OPT_BOOL('n', "no-create", &not_new,
index c54c78df547c8c66377f023730958f7a95d3aea1..b49b5820718335ba6a70b70ef339ece7157281cc 100644 (file)
@@ -54,19 +54,19 @@ struct checkout_opts {
        struct tree *source_tree;
 };
 
-static int post_checkout_hook(struct commit *old, struct commit *new,
+static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit,
                              int changed)
 {
        return run_hook_le(NULL, "post-checkout",
-                          oid_to_hex(old ? &old->object.oid : &null_oid),
-                          oid_to_hex(new ? &new->object.oid : &null_oid),
+                          oid_to_hex(old_commit ? &old_commit->object.oid : &null_oid),
+                          oid_to_hex(new_commit ? &new_commit->object.oid : &null_oid),
                           changed ? "1" : "0", NULL);
-       /* "new" can be NULL when checking out from the index before
+       /* "new_commit" can be NULL when checking out from the index before
           a commit exists. */
 
 }
 
-static int update_some(const unsigned char *sha1, struct strbuf *base,
+static int update_some(const struct object_id *oid, struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
        int len;
@@ -78,7 +78,7 @@ static int update_some(const unsigned char *sha1, struct strbuf *base,
 
        len = base->len + strlen(pathname);
        ce = xcalloc(1, cache_entry_size(len));
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        memcpy(ce->name, base->buf, base->len);
        memcpy(ce->name + base->len, pathname, len - base->len);
        ce->ce_flags = create_ce_flags(0) | CE_UPDATE;
@@ -227,8 +227,7 @@ static int checkout_merged(int pos, const struct checkout *state)
         * (it also writes the merge result to the object database even
         * when it may contain conflicts).
         */
-       if (write_sha1_file(result_buf.ptr, result_buf.size,
-                           blob_type, oid.hash))
+       if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
                die(_("Unable to add merge result for '%s'"), path);
        free(result_buf.ptr);
        ce = make_cache_entry(mode, oid.hash, path, 2, 0);
@@ -406,10 +405,10 @@ static void describe_detached_head(const char *msg, struct commit *commit)
                pp_commit_easy(CMIT_FMT_ONELINE, commit, &sb);
        if (print_sha1_ellipsis()) {
                fprintf(stderr, "%s %s... %s\n", msg,
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV), sb.buf);
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
        } else {
                fprintf(stderr, "%s %s %s\n", msg,
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV), sb.buf);
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV), sb.buf);
        }
        strbuf_release(&sb);
 }
@@ -472,8 +471,8 @@ static void setup_branch_path(struct branch_info *branch)
 }
 
 static int merge_working_tree(const struct checkout_opts *opts,
-                             struct branch_info *old,
-                             struct branch_info *new,
+                             struct branch_info *old_branch_info,
+                             struct branch_info *new_branch_info,
                              int *writeout_error)
 {
        int ret;
@@ -485,7 +484,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
 
        resolve_undo_clear();
        if (opts->force) {
-               ret = reset_tree(new->commit->tree, opts, 1, writeout_error);
+               ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error);
                if (ret)
                        return ret;
        } else {
@@ -511,7 +510,7 @@ static int merge_working_tree(const struct checkout_opts *opts,
                topts.initial_checkout = is_cache_unborn();
                topts.update = 1;
                topts.merge = 1;
-               topts.gently = opts->merge && old->commit;
+               topts.gently = opts->merge && old_branch_info->commit;
                topts.verbose_update = opts->show_progress;
                topts.fn = twoway_merge;
                if (opts->overwrite_ignore) {
@@ -519,11 +518,11 @@ static int merge_working_tree(const struct checkout_opts *opts,
                        topts.dir->flags |= DIR_SHOW_IGNORED;
                        setup_standard_excludes(topts.dir);
                }
-               tree = parse_tree_indirect(old->commit ?
-                                          &old->commit->object.oid :
+               tree = parse_tree_indirect(old_branch_info->commit ?
+                                          &old_branch_info->commit->object.oid :
                                           the_hash_algo->empty_tree);
                init_tree_desc(&trees[0], tree->buffer, tree->size);
-               tree = parse_tree_indirect(&new->commit->object.oid);
+               tree = parse_tree_indirect(&new_branch_info->commit->object.oid);
                init_tree_desc(&trees[1], tree->buffer, tree->size);
 
                ret = unpack_trees(2, trees, &topts);
@@ -540,10 +539,10 @@ static int merge_working_tree(const struct checkout_opts *opts,
                                return 1;
 
                        /*
-                        * Without old->commit, the below is the same as
+                        * Without old_branch_info->commit, the below is the same as
                         * the two-tree unpack we already tried and failed.
                         */
-                       if (!old->commit)
+                       if (!old_branch_info->commit)
                                return 1;
 
                        /* Do more real merge */
@@ -571,18 +570,18 @@ static int merge_working_tree(const struct checkout_opts *opts,
                        o.verbosity = 0;
                        work = write_tree_from_memory(&o);
 
-                       ret = reset_tree(new->commit->tree, opts, 1,
+                       ret = reset_tree(new_branch_info->commit->tree, opts, 1,
                                         writeout_error);
                        if (ret)
                                return ret;
-                       o.ancestor = old->name;
-                       o.branch1 = new->name;
+                       o.ancestor = old_branch_info->name;
+                       o.branch1 = new_branch_info->name;
                        o.branch2 = "local";
-                       ret = merge_trees(&o, new->commit->tree, work,
-                               old->commit->tree, &result);
+                       ret = merge_trees(&o, new_branch_info->commit->tree, work,
+                               old_branch_info->commit->tree, &result);
                        if (ret < 0)
                                exit(128);
-                       ret = reset_tree(new->commit->tree, opts, 0,
+                       ret = reset_tree(new_branch_info->commit->tree, opts, 0,
                                         writeout_error);
                        strbuf_release(&o.obuf);
                        if (ret)
@@ -600,25 +599,25 @@ static int merge_working_tree(const struct checkout_opts *opts,
                die(_("unable to write new index file"));
 
        if (!opts->force && !opts->quiet)
-               show_local_changes(&new->commit->object, &opts->diff_options);
+               show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
 
        return 0;
 }
 
-static void report_tracking(struct branch_info *new)
+static void report_tracking(struct branch_info *new_branch_info)
 {
        struct strbuf sb = STRBUF_INIT;
-       struct branch *branch = branch_get(new->name);
+       struct branch *branch = branch_get(new_branch_info->name);
 
-       if (!format_tracking_info(branch, &sb))
+       if (!format_tracking_info(branch, &sb, AHEAD_BEHIND_FULL))
                return;
        fputs(sb.buf, stdout);
        strbuf_release(&sb);
 }
 
 static void update_refs_for_switch(const struct checkout_opts *opts,
-                                  struct branch_info *old,
-                                  struct branch_info *new)
+                                  struct branch_info *old_branch_info,
+                                  struct branch_info *new_branch_info)
 {
        struct strbuf msg = STRBUF_INIT;
        const char *old_desc, *reflog_msg;
@@ -645,69 +644,69 @@ static void update_refs_for_switch(const struct checkout_opts *opts,
                        free(refname);
                }
                else
-                       create_branch(opts->new_branch, new->name,
+                       create_branch(opts->new_branch, new_branch_info->name,
                                      opts->new_branch_force ? 1 : 0,
                                      opts->new_branch_force ? 1 : 0,
                                      opts->new_branch_log,
                                      opts->quiet,
                                      opts->track);
-               new->name = opts->new_branch;
-               setup_branch_path(new);
+               new_branch_info->name = opts->new_branch;
+               setup_branch_path(new_branch_info);
        }
 
-       old_desc = old->name;
-       if (!old_desc && old->commit)
-               old_desc = oid_to_hex(&old->commit->object.oid);
+       old_desc = old_branch_info->name;
+       if (!old_desc && old_branch_info->commit)
+               old_desc = oid_to_hex(&old_branch_info->commit->object.oid);
 
        reflog_msg = getenv("GIT_REFLOG_ACTION");
        if (!reflog_msg)
                strbuf_addf(&msg, "checkout: moving from %s to %s",
-                       old_desc ? old_desc : "(invalid)", new->name);
+                       old_desc ? old_desc : "(invalid)", new_branch_info->name);
        else
                strbuf_insert(&msg, 0, reflog_msg, strlen(reflog_msg));
 
-       if (!strcmp(new->name, "HEAD") && !new->path && !opts->force_detach) {
+       if (!strcmp(new_branch_info->name, "HEAD") && !new_branch_info->path && !opts->force_detach) {
                /* Nothing to do. */
-       } else if (opts->force_detach || !new->path) {  /* No longer on any branch. */
-               update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL,
+       } else if (opts->force_detach || !new_branch_info->path) {      /* No longer on any branch. */
+               update_ref(msg.buf, "HEAD", &new_branch_info->commit->object.oid, NULL,
                           REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
                if (!opts->quiet) {
-                       if (old->path &&
+                       if (old_branch_info->path &&
                            advice_detached_head && !opts->force_detach)
-                               detach_advice(new->name);
-                       describe_detached_head(_("HEAD is now at"), new->commit);
+                               detach_advice(new_branch_info->name);
+                       describe_detached_head(_("HEAD is now at"), new_branch_info->commit);
                }
-       } else if (new->path) { /* Switch branches. */
-               if (create_symref("HEAD", new->path, msg.buf) < 0)
+       } else if (new_branch_info->path) {     /* Switch branches. */
+               if (create_symref("HEAD", new_branch_info->path, msg.buf) < 0)
                        die(_("unable to update HEAD"));
                if (!opts->quiet) {
-                       if (old->path && !strcmp(new->path, old->path)) {
+                       if (old_branch_info->path && !strcmp(new_branch_info->path, old_branch_info->path)) {
                                if (opts->new_branch_force)
                                        fprintf(stderr, _("Reset branch '%s'\n"),
-                                               new->name);
+                                               new_branch_info->name);
                                else
                                        fprintf(stderr, _("Already on '%s'\n"),
-                                               new->name);
+                                               new_branch_info->name);
                        } else if (opts->new_branch) {
                                if (opts->branch_exists)
-                                       fprintf(stderr, _("Switched to and reset branch '%s'\n"), new->name);
+                                       fprintf(stderr, _("Switched to and reset branch '%s'\n"), new_branch_info->name);
                                else
-                                       fprintf(stderr, _("Switched to a new branch '%s'\n"), new->name);
+                                       fprintf(stderr, _("Switched to a new branch '%s'\n"), new_branch_info->name);
                        } else {
                                fprintf(stderr, _("Switched to branch '%s'\n"),
-                                       new->name);
+                                       new_branch_info->name);
                        }
                }
-               if (old->path && old->name) {
-                       if (!ref_exists(old->path) && reflog_exists(old->path))
-                               delete_reflog(old->path);
+               if (old_branch_info->path && old_branch_info->name) {
+                       if (!ref_exists(old_branch_info->path) && reflog_exists(old_branch_info->path))
+                               delete_reflog(old_branch_info->path);
                }
        }
        remove_branch_state();
        strbuf_release(&msg);
        if (!opts->quiet &&
-           (new->path || (!opts->force_detach && !strcmp(new->name, "HEAD"))))
-               report_tracking(new);
+           (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+               report_tracking(new_branch_info);
 }
 
 static int add_pending_uninteresting_ref(const char *refname,
@@ -721,7 +720,7 @@ static int add_pending_uninteresting_ref(const char *refname,
 static void describe_one_orphan(struct strbuf *sb, struct commit *commit)
 {
        strbuf_addstr(sb, "  ");
-       strbuf_add_unique_abbrev(sb, commit->object.oid.hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(sb, &commit->object.oid, DEFAULT_ABBREV);
        strbuf_addch(sb, ' ');
        if (!parse_commit(commit))
                pp_commit_easy(CMIT_FMT_ONELINE, commit, sb);
@@ -779,7 +778,7 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs)
                        " git branch <new-branch-name> %s\n\n",
                        /* Give ngettext() the count */
                        lost),
-                       find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+                       find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
 }
 
 /*
@@ -787,10 +786,10 @@ static void suggest_reattach(struct commit *commit, struct rev_info *revs)
  * HEAD.  If it is not reachable from any ref, this is the last chance
  * for the user to do so without resorting to reflog.
  */
-static void orphaned_commit_warning(struct commit *old, struct commit *new)
+static void orphaned_commit_warning(struct commit *old_commit, struct commit *new_commit)
 {
        struct rev_info revs;
-       struct object *object = &old->object;
+       struct object *object = &old_commit->object;
 
        init_revisions(&revs, NULL);
        setup_revisions(0, NULL, &revs, NULL);
@@ -799,57 +798,57 @@ static void orphaned_commit_warning(struct commit *old, struct commit *new)
        add_pending_object(&revs, object, oid_to_hex(&object->oid));
 
        for_each_ref(add_pending_uninteresting_ref, &revs);
-       add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+       add_pending_oid(&revs, "HEAD", &new_commit->object.oid, UNINTERESTING);
 
        if (prepare_revision_walk(&revs))
                die(_("internal error in revision walk"));
-       if (!(old->object.flags & UNINTERESTING))
-               suggest_reattach(old, &revs);
+       if (!(old_commit->object.flags & UNINTERESTING))
+               suggest_reattach(old_commit, &revs);
        else
-               describe_detached_head(_("Previous HEAD position was"), old);
+               describe_detached_head(_("Previous HEAD position was"), old_commit);
 
        /* Clean up objects used, as they will be reused. */
        clear_commit_marks_all(ALL_REV_FLAGS);
 }
 
 static int switch_branches(const struct checkout_opts *opts,
-                          struct branch_info *new)
+                          struct branch_info *new_branch_info)
 {
        int ret = 0;
-       struct branch_info old;
+       struct branch_info old_branch_info;
        void *path_to_free;
        struct object_id rev;
        int flag, writeout_error = 0;
-       memset(&old, 0, sizeof(old));
-       old.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
-       if (old.path)
-               old.commit = lookup_commit_reference_gently(&rev, 1);
+       memset(&old_branch_info, 0, sizeof(old_branch_info));
+       old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
+       if (old_branch_info.path)
+               old_branch_info.commit = lookup_commit_reference_gently(&rev, 1);
        if (!(flag & REF_ISSYMREF))
-               old.path = NULL;
+               old_branch_info.path = NULL;
 
-       if (old.path)
-               skip_prefix(old.path, "refs/heads/", &old.name);
+       if (old_branch_info.path)
+               skip_prefix(old_branch_info.path, "refs/heads/", &old_branch_info.name);
 
-       if (!new->name) {
-               new->name = "HEAD";
-               new->commit = old.commit;
-               if (!new->commit)
+       if (!new_branch_info->name) {
+               new_branch_info->name = "HEAD";
+               new_branch_info->commit = old_branch_info.commit;
+               if (!new_branch_info->commit)
                        die(_("You are on a branch yet to be born"));
-               parse_commit_or_die(new->commit);
+               parse_commit_or_die(new_branch_info->commit);
        }
 
-       ret = merge_working_tree(opts, &old, new, &writeout_error);
+       ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
        if (ret) {
                free(path_to_free);
                return ret;
        }
 
-       if (!opts->quiet && !old.path && old.commit && new->commit != old.commit)
-               orphaned_commit_warning(old.commit, new->commit);
+       if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
+               orphaned_commit_warning(old_branch_info.commit, new_branch_info->commit);
 
-       update_refs_for_switch(opts, &old, new);
+       update_refs_for_switch(opts, &old_branch_info, new_branch_info);
 
-       ret = post_checkout_hook(old.commit, new->commit, 1);
+       ret = post_checkout_hook(old_branch_info.commit, new_branch_info->commit, 1);
        free(path_to_free);
        return ret || writeout_error;
 }
@@ -870,7 +869,7 @@ static int git_checkout_config(const char *var, const char *value, void *cb)
 
 static int parse_branchname_arg(int argc, const char **argv,
                                int dwim_new_local_branch_ok,
-                               struct branch_info *new,
+                               struct branch_info *new_branch_info,
                                struct checkout_opts *opts,
                                struct object_id *rev)
 {
@@ -988,22 +987,22 @@ static int parse_branchname_arg(int argc, const char **argv,
        argv++;
        argc--;
 
-       new->name = arg;
-       setup_branch_path(new);
+       new_branch_info->name = arg;
+       setup_branch_path(new_branch_info);
 
-       if (!check_refname_format(new->path, 0) &&
-           !read_ref(new->path, &branch_rev))
+       if (!check_refname_format(new_branch_info->path, 0) &&
+           !read_ref(new_branch_info->path, &branch_rev))
                oidcpy(rev, &branch_rev);
        else
-               new->path = NULL; /* not an existing branch */
+               new_branch_info->path = NULL; /* not an existing branch */
 
-       new->commit = lookup_commit_reference_gently(rev, 1);
-       if (!new->commit) {
+       new_branch_info->commit = lookup_commit_reference_gently(rev, 1);
+       if (!new_branch_info->commit) {
                /* not a commit */
                *source_tree = parse_tree_indirect(rev);
        } else {
-               parse_commit_or_die(new->commit);
-               *source_tree = new->commit->tree;
+               parse_commit_or_die(new_branch_info->commit);
+               *source_tree = new_branch_info->commit->tree;
        }
 
        if (!*source_tree)                   /* case (1): want a tree */
@@ -1043,7 +1042,7 @@ static int switch_unborn_to_new_branch(const struct checkout_opts *opts)
 }
 
 static int checkout_branch(struct checkout_opts *opts,
-                          struct branch_info *new)
+                          struct branch_info *new_branch_info)
 {
        if (opts->pathspec.nr)
                die(_("paths cannot be used with switching branches"));
@@ -1072,21 +1071,21 @@ static int checkout_branch(struct checkout_opts *opts,
        } else if (opts->track == BRANCH_TRACK_UNSPECIFIED)
                opts->track = git_branch_track;
 
-       if (new->name && !new->commit)
+       if (new_branch_info->name && !new_branch_info->commit)
                die(_("Cannot switch branch to a non-commit '%s'"),
-                   new->name);
+                   new_branch_info->name);
 
-       if (new->path && !opts->force_detach && !opts->new_branch &&
+       if (new_branch_info->path && !opts->force_detach && !opts->new_branch &&
            !opts->ignore_other_worktrees) {
                int flag;
                char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
                if (head_ref &&
-                   (!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
-                       die_if_checked_out(new->path, 1);
+                   (!(flag & REF_ISSYMREF) || strcmp(head_ref, new_branch_info->path)))
+                       die_if_checked_out(new_branch_info->path, 1);
                free(head_ref);
        }
 
-       if (!new->commit && opts->new_branch) {
+       if (!new_branch_info->commit && opts->new_branch) {
                struct object_id rev;
                int flag;
 
@@ -1094,13 +1093,13 @@ static int checkout_branch(struct checkout_opts *opts,
                    (flag & REF_ISSYMREF) && is_null_oid(&rev))
                        return switch_unborn_to_new_branch(opts);
        }
-       return switch_branches(opts, new);
+       return switch_branches(opts, new_branch_info);
 }
 
 int cmd_checkout(int argc, const char **argv, const char *prefix)
 {
        struct checkout_opts opts;
-       struct branch_info new;
+       struct branch_info new_branch_info;
        char *conflict_style = NULL;
        int dwim_new_local_branch = 1;
        struct option options[] = {
@@ -1118,9 +1117,12 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
                            2),
                OPT_SET_INT('3', "theirs", &opts.writeout_stage, N_("checkout their version for unmerged files"),
                            3),
-               OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)")),
+               OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('m', "merge", &opts.merge, N_("perform a 3-way merge with the new branch")),
-               OPT_BOOL(0, "overwrite-ignore", &opts.overwrite_ignore, N_("update ignored files (default)")),
+               OPT_BOOL_F(0, "overwrite-ignore", &opts.overwrite_ignore,
+                          N_("update ignored files (default)"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_STRING(0, "conflict", &conflict_style, N_("style"),
                           N_("conflict style (merge or diff3)")),
                OPT_BOOL('p', "patch", &opts.patch_mode, N_("select hunks interactively")),
@@ -1138,7 +1140,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
        };
 
        memset(&opts, 0, sizeof(opts));
-       memset(&new, 0, sizeof(new));
+       memset(&new_branch_info, 0, sizeof(new_branch_info));
        opts.overwrite_ignore = 1;
        opts.prefix = prefix;
        opts.show_progress = -1;
@@ -1210,7 +1212,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
                        opts.track == BRANCH_TRACK_UNSPECIFIED &&
                        !opts.new_branch;
                int n = parse_branchname_arg(argc, argv, dwim_ok,
-                                            &new, &opts, &rev);
+                                            &new_branch_info, &opts, &rev);
                argv += n;
                argc -= n;
        }
@@ -1253,7 +1255,7 @@ int cmd_checkout(int argc, const char **argv, const char *prefix)
 
        UNLEAK(opts);
        if (opts.patch_mode || opts.pathspec.nr)
-               return checkout_paths(&opts, new.name);
+               return checkout_paths(&opts, new_branch_info.name);
        else
-               return checkout_branch(&opts, &new);
+               return checkout_branch(&opts, &new_branch_info);
 }
diff --git a/builtin/clean.c b/builtin/clean.c
index 189e20628c07774089c5c925380b6b69af16fc7f..fad533a0a7382f10ecf48a738c955734ad5c0d96 100644
@@ -909,7 +909,7 @@ int cmd_clean(int argc, const char **argv, const char *prefix)
        struct option options[] = {
                OPT__QUIET(&quiet, N_("do not print names of files removed")),
                OPT__DRY_RUN(&dry_run, N_("dry run")),
-               OPT__FORCE(&force, N_("force")),
+               OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('i', "interactive", &interactive, N_("interactive cleaning")),
                OPT_BOOL('d', NULL, &remove_directories,
                                N_("remove whole directories")),
diff --git a/builtin/clone.c b/builtin/clone.c
index 284651797e5402c9f7ef85003d7d1c4ea944e2d7..101c27a593f4c64a735410f18bfcb46489728696 100644
@@ -26,6 +26,7 @@
 #include "run-command.h"
 #include "connected.h"
 #include "packfile.h"
+#include "list-objects-filter-options.h"
 
 /*
  * Overall FIXMEs:
@@ -60,6 +61,7 @@ static struct string_list option_optional_reference = STRING_LIST_INIT_NODUP;
 static int option_dissociate;
 static int max_jobs = -1;
 static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
+static struct list_objects_filter_options filter_options;
 
 static int recurse_submodules_cb(const struct option *opt,
                                 const char *arg, int unset)
@@ -135,6 +137,7 @@ static struct option builtin_clone_options[] = {
                        TRANSPORT_FAMILY_IPV4),
        OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
                        TRANSPORT_FAMILY_IPV6),
+       OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
        OPT_END()
 };
 
@@ -893,6 +896,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        struct refspec *refspec;
        const char *fetch_pattern;
 
+       fetch_if_missing = 0;
+
        packet_trace_identity("clone");
        argc = parse_options(argc, argv, prefix, builtin_clone_options,
                             builtin_clone_usage, 0);
@@ -1090,6 +1095,8 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
                        warning(_("--shallow-since is ignored in local clones; use file:// instead."));
                if (option_not.nr)
                        warning(_("--shallow-exclude is ignored in local clones; use file:// instead."));
+               if (filter_options.choice)
+                       warning(_("--filter is ignored in local clones; use file:// instead."));
                if (!access(mkpath("%s/shallow", path), F_OK)) {
                        if (option_local > 0)
                                warning(_("source repository is shallow, ignoring --local"));
@@ -1118,7 +1125,13 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
                transport_set_option(transport, TRANS_OPT_UPLOADPACK,
                                     option_upload_pack);
 
-       if (transport->smart_options && !deepen)
+       if (filter_options.choice) {
+               transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+                                    filter_options.filter_spec);
+               transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+       }
+
+       if (transport->smart_options && !deepen && !filter_options.choice)
                transport->smart_options->check_self_contained_and_connected = 1;
 
        refs = transport_get_remote_refs(transport);
@@ -1178,13 +1191,17 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        write_refspec_config(src_ref_prefix, our_head_points_at,
                        remote_head_points_at, &branch_top);
 
+       if (filter_options.choice)
+               partial_clone_register("origin", &filter_options);
+
        if (is_local)
                clone_local(path, git_dir);
        else if (refs && complete_refs_before_fetch)
                transport_fetch_refs(transport, mapped_refs);
 
        update_remote_refs(refs, mapped_refs, remote_head_points_at,
-                          branch_top.buf, reflog_msg.buf, transport, !is_local);
+                          branch_top.buf, reflog_msg.buf, transport,
+                          !is_local && !filter_options.choice);
 
        update_head(our_head_points_at, remote_head, reflog_msg.buf);
 
@@ -1205,6 +1222,7 @@ int cmd_clone(int argc, const char **argv, const char *prefix)
        }
 
        junk_mode = JUNK_LEAVE_REPO;
+       fetch_if_missing = 1;
        err = checkout(submodule_progress);
 
        strbuf_release(&reflog_msg);
diff --git a/builtin/commit-tree.c b/builtin/commit-tree.c
index 2177251e247cc144d6ac8619fa5940262430d50a..ecf42191da10cd2e87360f001d5493e792b9682e 100644
@@ -58,7 +58,7 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
                                usage(commit_tree_usage);
                        if (get_oid_commit(argv[i], &oid))
                                die("Not a valid object name %s", argv[i]);
-                       assert_sha1_type(oid.hash, OBJ_COMMIT);
+                       assert_oid_type(&oid, OBJ_COMMIT);
                        new_parent(lookup_commit(&oid), &parents);
                        continue;
                }
@@ -117,8 +117,8 @@ int cmd_commit_tree(int argc, const char **argv, const char *prefix)
                        die_errno("git commit-tree: failed to read");
        }
 
-       if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents,
-                       commit_oid.hash, NULL, sign_commit)) {
+       if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
+                       NULL, sign_commit)) {
                strbuf_release(&buffer);
                return 1;
        }
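
The commit-tree hunks above are part of the conversion from raw hash buffers to struct object_id: assert_oid_type() and commit_tree() now take object_id pointers. A small sketch of the converted calling convention (illustrative helper, in-tree headers assumed):

    static void demo_write_commit(struct strbuf *msg, struct object_id *tree_oid,
                                  struct commit_list *parents, const char *sign)
    {
            struct object_id commit_oid;

            assert_oid_type(tree_oid, OBJ_TREE);    /* dies if the object is not a tree */
            if (commit_tree(msg->buf, msg->len, tree_oid, parents,
                            &commit_oid, NULL, sign))
                    die("failed to write commit object");
            printf("%s\n", oid_to_hex(&commit_oid));
    }
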
diff --git a/builtin/commit.c b/builtin/commit.c
index 4610e3d8e3e04a70678a102cc3e0e7472089a3cf..37fcb55ab0a03a5fdabaca1913bc700201fd8e10 100644
@@ -31,9 +31,7 @@
 #include "gpg-interface.h"
 #include "column.h"
 #include "sequencer.h"
-#include "notes-utils.h"
 #include "mailmap.h"
-#include "sigchain.h"
 
 static const char * const builtin_commit_usage[] = {
        N_("git commit [<options>] [--] <pathspec>..."),
@@ -45,31 +43,6 @@ static const char * const builtin_status_usage[] = {
        NULL
 };
 
-static const char implicit_ident_advice_noconfig[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly. Run the\n"
-"following command and follow the instructions in your editor to edit\n"
-"your configuration file:\n"
-"\n"
-"    git config --global --edit\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-"    git commit --amend --reset-author\n");
-
-static const char implicit_ident_advice_config[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly:\n"
-"\n"
-"    git config --global user.name \"Your Name\"\n"
-"    git config --global user.email you@example.com\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-"    git commit --amend --reset-author\n");
-
 static const char empty_amend_advice[] =
 N_("You asked to amend the most recent commit, but doing so would make\n"
 "it empty. You can repeat your command with --allow-empty, or you can\n"
@@ -93,8 +66,6 @@ N_("If you wish to skip this commit, use:\n"
 "Then \"git cherry-pick --continue\" will resume cherry-picking\n"
 "the remaining commits.\n");
 
-static GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
-
 static const char *use_message_buffer;
 static struct lock_file index_lock; /* real index */
 static struct lock_file false_lock; /* used only for partial commits */
@@ -128,12 +99,7 @@ static char *sign_commit;
  * if editor is used, and only the whitespaces if the message
  * is specified explicitly.
  */
-static enum {
-       CLEANUP_SPACE,
-       CLEANUP_NONE,
-       CLEANUP_SCISSORS,
-       CLEANUP_ALL
-} cleanup_mode;
+static enum commit_msg_cleanup_mode cleanup_mode;
 static const char *cleanup_arg;
 
 static enum commit_whence whence;
@@ -423,13 +389,9 @@ static const char *prepare_index(int argc, const char **argv, const char *prefix
                if (active_cache_changed
                    || !cache_tree_fully_valid(active_cache_tree))
                        update_main_cache_tree(WRITE_TREE_SILENT);
-               if (active_cache_changed) {
-                       if (write_locked_index(&the_index, &index_lock,
-                                              COMMIT_LOCK))
-                               die(_("unable to write new_index file"));
-               } else {
-                       rollback_lock_file(&index_lock);
-               }
+               if (write_locked_index(&the_index, &index_lock,
+                                      COMMIT_LOCK | SKIP_IF_UNCHANGED))
+                       die(_("unable to write new_index file"));
                commit_style = COMMIT_AS_IS;
                ret = get_index_file();
                goto out;
@@ -673,7 +635,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
        struct strbuf sb = STRBUF_INIT;
        const char *hook_arg1 = NULL;
        const char *hook_arg2 = NULL;
-       int clean_message_contents = (cleanup_mode != CLEANUP_NONE);
+       int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
        int old_display_comment_prefix;
 
        /* This checks and barfs if author is badly specified */
@@ -814,7 +776,7 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
                struct ident_split ci, ai;
 
                if (whence != FROM_COMMIT) {
-                       if (cleanup_mode == CLEANUP_SCISSORS)
+                       if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
                                wt_status_add_cut_line(s->fp);
                        status_printf_ln(s, GIT_COLOR_NORMAL,
                            whence == FROM_MERGE
@@ -834,14 +796,15 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
                }
 
                fprintf(s->fp, "\n");
-               if (cleanup_mode == CLEANUP_ALL)
+               if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL)
                        status_printf(s, GIT_COLOR_NORMAL,
                                _("Please enter the commit message for your changes."
                                  " Lines starting\nwith '%c' will be ignored, and an empty"
                                  " message aborts the commit.\n"), comment_line_char);
-               else if (cleanup_mode == CLEANUP_SCISSORS && whence == FROM_COMMIT)
+               else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+                        whence == FROM_COMMIT)
                        wt_status_add_cut_line(s->fp);
-               else /* CLEANUP_SPACE, that is. */
+               else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
                        status_printf(s, GIT_COLOR_NORMAL,
                                _("Please enter the commit message for your changes."
                                  " Lines starting\n"
@@ -986,65 +949,6 @@ static int prepare_to_commit(const char *index_file, const char *prefix,
        return 1;
 }
 
-static int rest_is_empty(struct strbuf *sb, int start)
-{
-       int i, eol;
-       const char *nl;
-
-       /* Check if the rest is just whitespace and Signed-off-by's. */
-       for (i = start; i < sb->len; i++) {
-               nl = memchr(sb->buf + i, '\n', sb->len - i);
-               if (nl)
-                       eol = nl - sb->buf;
-               else
-                       eol = sb->len;
-
-               if (strlen(sign_off_header) <= eol - i &&
-                   starts_with(sb->buf + i, sign_off_header)) {
-                       i = eol;
-                       continue;
-               }
-               while (i < eol)
-                       if (!isspace(sb->buf[i++]))
-                               return 0;
-       }
-
-       return 1;
-}
-
-/*
- * Find out if the message in the strbuf contains only whitespace and
- * Signed-off-by lines.
- */
-static int message_is_empty(struct strbuf *sb)
-{
-       if (cleanup_mode == CLEANUP_NONE && sb->len)
-               return 0;
-       return rest_is_empty(sb, 0);
-}
-
-/*
- * See if the user edited the message in the editor or left what
- * was in the template intact
- */
-static int template_untouched(struct strbuf *sb)
-{
-       struct strbuf tmpl = STRBUF_INIT;
-       const char *start;
-
-       if (cleanup_mode == CLEANUP_NONE && sb->len)
-               return 0;
-
-       if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
-               return 0;
-
-       strbuf_stripspace(&tmpl, cleanup_mode == CLEANUP_ALL);
-       if (!skip_prefix(sb->buf, tmpl.buf, &start))
-               start = sb->buf;
-       strbuf_release(&tmpl);
-       return rest_is_empty(sb, start - sb->buf);
-}
-
 static const char *find_author_by_nickname(const char *name)
 {
        struct rev_info revs;
@@ -1153,6 +1057,9 @@ static void finalize_deferred_config(struct wt_status *s)
                s->show_branch = status_deferred_config.show_branch;
        if (s->show_branch < 0)
                s->show_branch = 0;
+
+       if (s->ahead_behind_flags == AHEAD_BEHIND_UNSPECIFIED)
+               s->ahead_behind_flags = AHEAD_BEHIND_FULL;
 }
 
 static int parse_and_validate_options(int argc, const char *argv[],
@@ -1229,15 +1136,17 @@ static int parse_and_validate_options(int argc, const char *argv[],
        if (argc == 0 && (also || (only && !amend && !allow_empty)))
                die(_("No paths with --include/--only does not make sense."));
        if (!cleanup_arg || !strcmp(cleanup_arg, "default"))
-               cleanup_mode = use_editor ? CLEANUP_ALL : CLEANUP_SPACE;
+               cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL :
+                                           COMMIT_MSG_CLEANUP_SPACE;
        else if (!strcmp(cleanup_arg, "verbatim"))
-               cleanup_mode = CLEANUP_NONE;
+               cleanup_mode = COMMIT_MSG_CLEANUP_NONE;
        else if (!strcmp(cleanup_arg, "whitespace"))
-               cleanup_mode = CLEANUP_SPACE;
+               cleanup_mode = COMMIT_MSG_CLEANUP_SPACE;
        else if (!strcmp(cleanup_arg, "strip"))
-               cleanup_mode = CLEANUP_ALL;
+               cleanup_mode = COMMIT_MSG_CLEANUP_ALL;
        else if (!strcmp(cleanup_arg, "scissors"))
-               cleanup_mode = use_editor ? CLEANUP_SCISSORS : CLEANUP_SPACE;
+               cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
+                                           COMMIT_MSG_CLEANUP_SPACE;
        else
                die(_("Invalid cleanup mode %s"), cleanup_arg);
 
@@ -1367,6 +1276,8 @@ int cmd_status(int argc, const char **argv, const char *prefix)
                         N_("show branch information")),
                OPT_BOOL(0, "show-stash", &s.show_stash,
                         N_("show stash information")),
+               OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+                        N_("compute full ahead/behind values")),
                { OPTION_CALLBACK, 0, "porcelain", &status_format,
                  N_("version"), N_("machine-readable output"),
                  PARSE_OPT_OPTARG, opt_parse_porcelain },
@@ -1439,98 +1350,6 @@ int cmd_status(int argc, const char **argv, const char *prefix)
        return 0;
 }
 
-static const char *implicit_ident_advice(void)
-{
-       char *user_config = expand_user_path("~/.gitconfig", 0);
-       char *xdg_config = xdg_config_home("config");
-       int config_exists = file_exists(user_config) || file_exists(xdg_config);
-
-       free(user_config);
-       free(xdg_config);
-
-       if (config_exists)
-               return _(implicit_ident_advice_config);
-       else
-               return _(implicit_ident_advice_noconfig);
-
-}
-
-static void print_summary(const char *prefix, const struct object_id *oid,
-                         int initial_commit)
-{
-       struct rev_info rev;
-       struct commit *commit;
-       struct strbuf format = STRBUF_INIT;
-       const char *head;
-       struct pretty_print_context pctx = {0};
-       struct strbuf author_ident = STRBUF_INIT;
-       struct strbuf committer_ident = STRBUF_INIT;
-
-       commit = lookup_commit(oid);
-       if (!commit)
-               die(_("couldn't look up newly created commit"));
-       if (parse_commit(commit))
-               die(_("could not parse newly created commit"));
-
-       strbuf_addstr(&format, "format:%h] %s");
-
-       format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
-       format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
-       if (strbuf_cmp(&author_ident, &committer_ident)) {
-               strbuf_addstr(&format, "\n Author: ");
-               strbuf_addbuf_percentquote(&format, &author_ident);
-       }
-       if (author_date_is_interesting()) {
-               struct strbuf date = STRBUF_INIT;
-               format_commit_message(commit, "%ad", &date, &pctx);
-               strbuf_addstr(&format, "\n Date: ");
-               strbuf_addbuf_percentquote(&format, &date);
-               strbuf_release(&date);
-       }
-       if (!committer_ident_sufficiently_given()) {
-               strbuf_addstr(&format, "\n Committer: ");
-               strbuf_addbuf_percentquote(&format, &committer_ident);
-               if (advice_implicit_identity) {
-                       strbuf_addch(&format, '\n');
-                       strbuf_addstr(&format, implicit_ident_advice());
-               }
-       }
-       strbuf_release(&author_ident);
-       strbuf_release(&committer_ident);
-
-       init_revisions(&rev, prefix);
-       setup_revisions(0, NULL, &rev, NULL);
-
-       rev.diff = 1;
-       rev.diffopt.output_format =
-               DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
-
-       rev.verbose_header = 1;
-       rev.show_root_diff = 1;
-       get_commit_format(format.buf, &rev);
-       rev.always_show_header = 0;
-       rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
-       rev.diffopt.break_opt = 0;
-       diff_setup_done(&rev.diffopt);
-
-       head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
-       if (!head)
-               die_errno(_("unable to resolve HEAD after creating commit"));
-       if (!strcmp(head, "HEAD"))
-               head = _("detached HEAD");
-       else
-               skip_prefix(head, "refs/heads/", &head);
-       printf("[%s%s ", head, initial_commit ? _(" (root-commit)") : "");
-
-       if (!log_tree_commit(&rev, commit)) {
-               rev.always_show_header = 1;
-               rev.use_terminator = 1;
-               log_tree_commit(&rev, commit);
-       }
-
-       strbuf_release(&format);
-}
-
 static int git_commit_config(const char *k, const char *v, void *cb)
 {
        struct wt_status *s = cb;
@@ -1560,37 +1379,6 @@ static int git_commit_config(const char *k, const char *v, void *cb)
        return git_status_config(k, v, s);
 }
 
-static int run_rewrite_hook(const struct object_id *oldoid,
-                           const struct object_id *newoid)
-{
-       struct child_process proc = CHILD_PROCESS_INIT;
-       const char *argv[3];
-       int code;
-       struct strbuf sb = STRBUF_INIT;
-
-       argv[0] = find_hook("post-rewrite");
-       if (!argv[0])
-               return 0;
-
-       argv[1] = "amend";
-       argv[2] = NULL;
-
-       proc.argv = argv;
-       proc.in = -1;
-       proc.stdout_to_stderr = 1;
-
-       code = start_command(&proc);
-       if (code)
-               return code;
-       strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
-       sigchain_push(SIGPIPE, SIG_IGN);
-       write_in_full(proc.in, sb.buf, sb.len);
-       close(proc.in);
-       strbuf_release(&sb);
-       sigchain_pop(SIGPIPE);
-       return finish_command(&proc);
-}
-
 int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...)
 {
        struct argv_array hook_env = ARGV_ARRAY_INIT;
@@ -1615,6 +1403,7 @@ int run_commit_hook(int editor_is_used, const char *index_file, const char *name
 
 int cmd_commit(int argc, const char **argv, const char *prefix)
 {
+       const char *argv_gc_auto[] = {"gc", "--auto", NULL};
        static struct wt_status s;
        static struct option builtin_commit_options[] = {
                OPT__QUIET(&quiet, N_("suppress summary after successful commit")),
@@ -1650,6 +1439,8 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                OPT_SET_INT(0, "short", &status_format, N_("show status concisely"),
                            STATUS_FORMAT_SHORT),
                OPT_BOOL(0, "branch", &s.show_branch, N_("show branch information")),
+               OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+                        N_("compute full ahead/behind values")),
                OPT_SET_INT(0, "porcelain", &status_format,
                            N_("machine-readable output"), STATUS_FORMAT_PORCELAIN),
                OPT_SET_INT(0, "long", &status_format,
@@ -1673,13 +1464,11 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
        struct strbuf sb = STRBUF_INIT;
        struct strbuf author_ident = STRBUF_INIT;
        const char *index_file, *reflog_msg;
-       char *nl;
        struct object_id oid;
        struct commit_list *parents = NULL;
        struct stat statbuf;
        struct commit *current_head = NULL;
        struct commit_extra_header *extra = NULL;
-       struct ref_transaction *transaction;
        struct strbuf err = STRBUF_INIT;
 
        if (argc == 2 && !strcmp(argv[1], "-h"))
@@ -1770,17 +1559,17 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
        }
 
        if (verbose || /* Truncate the message just before the diff, if any. */
-           cleanup_mode == CLEANUP_SCISSORS)
+           cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
                strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len));
-       if (cleanup_mode != CLEANUP_NONE)
-               strbuf_stripspace(&sb, cleanup_mode == CLEANUP_ALL);
+       if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
+               strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
 
-       if (message_is_empty(&sb) && !allow_empty_message) {
+       if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
                rollback_index_files();
                fprintf(stderr, _("Aborting commit due to empty commit message.\n"));
                exit(1);
        }
-       if (template_untouched(&sb) && !allow_empty_message) {
+       if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) {
                rollback_index_files();
                fprintf(stderr, _("Aborting commit; you did not edit the message.\n"));
                exit(1);
@@ -1794,33 +1583,20 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                append_merge_tag_headers(parents, &tail);
        }
 
-       if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash,
-                        parents, oid.hash, author_ident.buf, sign_commit, extra)) {
+       if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
+                                parents, &oid, author_ident.buf, sign_commit,
+                                extra)) {
                rollback_index_files();
                die(_("failed to write commit object"));
        }
        strbuf_release(&author_ident);
        free_commit_extra_headers(extra);
 
-       nl = strchr(sb.buf, '\n');
-       if (nl)
-               strbuf_setlen(&sb, nl + 1 - sb.buf);
-       else
-               strbuf_addch(&sb, '\n');
-       strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg));
-       strbuf_insert(&sb, strlen(reflog_msg), ": ", 2);
-
-       transaction = ref_transaction_begin(&err);
-       if (!transaction ||
-           ref_transaction_update(transaction, "HEAD", &oid,
-                                  current_head
-                                  ? &current_head->object.oid : &null_oid,
-                                  0, sb.buf, &err) ||
-           ref_transaction_commit(transaction, &err)) {
+       if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
+                                   &err)) {
                rollback_index_files();
                die("%s", err.buf);
        }
-       ref_transaction_free(transaction);
 
        unlink(git_path_cherry_pick_head());
        unlink(git_path_revert_head());
@@ -1835,19 +1611,20 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                     "not exceeded, and then \"git reset HEAD\" to recover."));
 
        rerere(0);
+       run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
        run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
        if (amend && !no_post_rewrite) {
-               struct notes_rewrite_cfg *cfg;
-               cfg = init_copy_notes_for_rewrite("amend");
-               if (cfg) {
-                       /* we are amending, so current_head is not NULL */
-                       copy_note_for_rewrite(cfg, &current_head->object.oid, &oid);
-                       finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
-               }
-               run_rewrite_hook(&current_head->object.oid, &oid);
+               commit_post_rewrite(current_head, &oid);
+       }
+       if (!quiet) {
+               unsigned int flags = 0;
+
+               if (!current_head)
+                       flags |= SUMMARY_INITIAL_COMMIT;
+               if (author_date_is_interesting())
+                       flags |= SUMMARY_SHOW_AUTHOR_DATE;
+               print_commit_summary(prefix, &oid, flags);
        }
-       if (!quiet)
-               print_summary(prefix, &oid, !current_head);
 
        UNLEAK(err);
        UNLEAK(sb);
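
Two of the patterns introduced in the commit hunks above, sketched with an illustrative helper (the_index and index_lock are the globals the file already uses): write_locked_index() with SKIP_IF_UNCHANGED takes over the old "write if changed, otherwise roll back" branch, and the removed print_summary() is replaced by a print_commit_summary() call driven by SUMMARY_* flags:

    static void demo_finish_commit(const char *prefix, const struct object_id *oid,
                                   struct commit *current_head)
    {
            unsigned int flags = 0;

            /* rolls the lockfile back itself when the index did not change */
            if (write_locked_index(&the_index, &index_lock,
                                   COMMIT_LOCK | SKIP_IF_UNCHANGED))
                    die(_("unable to write new_index file"));

            if (!current_head)
                    flags |= SUMMARY_INITIAL_COMMIT;
            if (author_date_is_interesting())
                    flags |= SUMMARY_SHOW_AUTHOR_DATE;
            print_commit_summary(prefix, oid, flags);
    }
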
diff --git a/builtin/config.c b/builtin/config.c
index ab5f95476e6c726798fd84b4a6300577bf0fafba..01169dd628b24a7b5502550a6342ab73cb8154c5 100644
@@ -48,6 +48,13 @@ static int show_origin;
 #define ACTION_GET_COLORBOOL (1<<14)
 #define ACTION_GET_URLMATCH (1<<15)
 
+/*
+ * The actions "ACTION_LIST | ACTION_GET_*" which may produce more than
+ * one line of output and which should therefore be paged.
+ */
+#define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \
+                       ACTION_GET_REGEXP | ACTION_GET_URLMATCH)
+
 #define TYPE_BOOL (1<<0)
 #define TYPE_INT (1<<1)
 #define TYPE_BOOL_OR_INT (1<<2)
@@ -594,6 +601,9 @@ int cmd_config(int argc, const char **argv, const char *prefix)
                usage_with_options(builtin_config_usage, builtin_config_options);
        }
 
+       if (actions & PAGING_ACTIONS)
+               setup_auto_pager("config", 1);
+
        if (actions == ACTION_LIST) {
                check_argc(argc, 0, 0);
                if (config_with_options(show_all_config, NULL,
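
The config hunk above collects the potentially multi-line actions into one PAGING_ACTIONS mask and turns the pager on for them; the second argument of setup_auto_pager() requests paging by default unless pager.config says otherwise. Condensed to its essence (illustrative wrapper):

    static void demo_maybe_page(int requested_actions)
    {
            if (requested_actions & PAGING_ACTIONS)
                    setup_auto_pager("config", 1);  /* respect pager.config, page by default */
    }
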
diff --git a/builtin/describe.c b/builtin/describe.c
index c4289847063ac02ed061f890ffd71cd329d67109..de840f96a4ba0135667261cfd74c674672f4c6b4 100644
@@ -285,7 +285,7 @@ static void append_name(struct commit_name *n, struct strbuf *dst)
 
 static void append_suffix(int depth, const struct object_id *oid, struct strbuf *dst)
 {
-       strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid->hash, abbrev));
+       strbuf_addf(dst, "-%d-g%s", depth, find_unique_abbrev(oid, abbrev));
 }
 
 static void describe_commit(struct object_id *oid, struct strbuf *dst)
@@ -383,7 +383,7 @@ static void describe_commit(struct object_id *oid, struct strbuf *dst)
        if (!match_cnt) {
                struct object_id *cmit_oid = &cmit->object.oid;
                if (always) {
-                       strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev);
+                       strbuf_add_unique_abbrev(dst, cmit_oid, abbrev);
                        if (suffix)
                                strbuf_addstr(dst, suffix);
                        return;
@@ -502,7 +502,7 @@ static void describe(const char *arg, int last_one)
 
        if (cmit)
                describe_commit(&oid, &sb);
-       else if (lookup_blob(&oid))
+       else if (oid_object_info(&oid, NULL) == OBJ_BLOB)
                describe_blob(oid, &sb);
        else
                die(_("%s is neither a commit nor blob"), arg);
diff --git a/builtin/diff-tree.c b/builtin/diff-tree.c
index b775a756470ddc365907fae53dcc949bcaae7ca5..473615117e0adbb301e6ddc0ab000accc513a9ee 100644
@@ -76,7 +76,7 @@ static int diff_tree_stdin(char *line)
        if (obj->type == OBJ_TREE)
                return stdin_diff_trees((struct tree *)obj, p);
        error("Object %s is a %s, not a commit or tree",
-             oid_to_hex(&oid), typename(obj->type));
+             oid_to_hex(&oid), type_name(obj->type));
        return -1;
 }
 
diff --git a/builtin/difftool.c b/builtin/difftool.c
index bcc79d1888f2217bcb380ffb1e7178c100a41e8e..ee8dce019e1ca04111de185b5f47c12076b32a0c 100644
@@ -306,7 +306,7 @@ static char *get_symlink(const struct object_id *oid, const char *path)
        } else {
                enum object_type type;
                unsigned long size;
-               data = read_sha1_file(oid->hash, &type, &size);
+               data = read_object_file(oid, &type, &size);
                if (!data)
                        die(_("could not read object %s for symlink %s"),
                                oid_to_hex(oid), path);
diff --git a/builtin/fast-export.c b/builtin/fast-export.c
index 796d0cd66c7750a20613dea77cd75b1b8e7665a4..a15898d64177b380ea021e3bc63fb91446bc02b3 100644
@@ -237,10 +237,10 @@ static void export_blob(const struct object_id *oid)
                object = (struct object *)lookup_blob(oid);
                eaten = 0;
        } else {
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf)
                        die ("Could not read blob %s", oid_to_hex(oid));
-               if (check_sha1_signature(oid->hash, buf, size, typename(type)) < 0)
+               if (check_object_signature(oid, buf, size, type_name(type)) < 0)
                        die("sha1 mismatch in blob %s", oid_to_hex(oid));
                object = parse_object_buffer(oid, type, size, buf, &eaten);
        }
@@ -682,7 +682,7 @@ static void handle_tag(const char *name, struct tag *tag)
                return;
        }
 
-       buf = read_sha1_file(tag->object.oid.hash, &type, &size);
+       buf = read_object_file(&tag->object.oid, &type, &size);
        if (!buf)
                die ("Could not read tag %s", oid_to_hex(&tag->object.oid));
        message = memmem(buf, size, "\n\n", 2);
@@ -757,7 +757,7 @@ static void handle_tag(const char *name, struct tag *tag)
                        if (tagged->type != OBJ_COMMIT) {
                                die ("Tag %s tags unexported %s!",
                                     oid_to_hex(&tag->object.oid),
-                                    typename(tagged->type));
+                                    type_name(tagged->type));
                        }
                        p = (struct commit *)tagged;
                        for (;;) {
@@ -839,7 +839,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
                if (!commit) {
                        warning("%s: Unexpected object of type %s, skipping.",
                                e->name,
-                               typename(e->item->type));
+                               type_name(e->item->type));
                        continue;
                }
 
@@ -851,7 +851,7 @@ static void get_tags_and_duplicates(struct rev_cmdline_info *info)
                        continue;
                default: /* OBJ_TAG (nested tags) is already handled */
                        warning("Tag points to object of unexpected type %s, skipping.",
-                               typename(commit->object.type));
+                               type_name(commit->object.type));
                        continue;
                }
 
@@ -947,7 +947,7 @@ static void import_marks(char *input_file)
                if (last_idnum < mark)
                        last_idnum = mark;
 
-               type = sha1_object_info(oid.hash, NULL);
+               type = oid_object_info(&oid, NULL);
                if (type < 0)
                        die("object not found: %s", oid_to_hex(&oid));
 
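The fast-export (and difftool) hunks above follow the same object-access conversion: reads go through read_object_file() and verification through check_object_signature(), both on a struct object_id, with type names obtained from type_name(). A sketch of the combined pattern (illustrative helper, in-tree headers assumed):

    static void *demo_read_verified(const struct object_id *oid,
                                    enum object_type *type, unsigned long *size)
    {
            void *buf = read_object_file(oid, type, size);

            if (!buf)
                    die("Could not read %s", oid_to_hex(oid));
            if (check_object_signature(oid, buf, *size, type_name(*type)) < 0)
                    die("hash mismatch in %s %s", type_name(*type), oid_to_hex(oid));
            return buf;
    }
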
diff --git a/builtin/fetch-pack.c b/builtin/fetch-pack.c
index 366b9d13f929b7d299a31d9c53fb643220fe64e3..a7bc1366ab375765c41014640743ef9d77c84c42 100644
@@ -53,6 +53,8 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
        struct oid_array shallow = OID_ARRAY_INIT;
        struct string_list deepen_not = STRING_LIST_INIT_DUP;
 
+       fetch_if_missing = 0;
+
        packet_trace_identity("fetch-pack");
 
        memset(&args, 0, sizeof(args));
@@ -143,6 +145,22 @@ int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
                        args.update_shallow = 1;
                        continue;
                }
+               if (!strcmp("--from-promisor", arg)) {
+                       args.from_promisor = 1;
+                       continue;
+               }
+               if (!strcmp("--no-dependents", arg)) {
+                       args.no_dependents = 1;
+                       continue;
+               }
+               if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+                       parse_list_objects_filter(&args.filter_options, arg);
+                       continue;
+               }
+               if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+                       list_objects_filter_set_no_filter(&args.filter_options);
+                       continue;
+               }
                usage(fetch_pack_usage);
        }
        if (deepen_not.nr)
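
The fetch-pack hunk above teaches the command --from-promisor, --no-dependents, and the list-objects filter arguments; CL_ARG__FILTER is the option-name macro provided by list-objects-filter-options.h. The filter handling alone, sketched with an illustrative helper:

    static void demo_parse_filter_arg(struct fetch_pack_args *args, const char *arg)
    {
            if (skip_prefix(arg, "--" CL_ARG__FILTER "=", &arg))
                    parse_list_objects_filter(&args->filter_options, arg);
            else if (!strcmp(arg, "--no-" CL_ARG__FILTER))
                    list_objects_filter_set_no_filter(&args->filter_options);
    }
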
diff --git a/builtin/fetch.c b/builtin/fetch.c
index 7bbcd26faf1fe650ccc7dbbd526db5114aac0c8f..8295f92b3e63d2675b8238d6bc17e9b1fc81319a 100644
@@ -19,6 +19,7 @@
 #include "argv-array.h"
 #include "utf8.h"
 #include "packfile.h"
+#include "list-objects-filter-options.h"
 
 static const char * const builtin_fetch_usage[] = {
        N_("git fetch [<options>] [<repository> [<refspec>...]]"),
@@ -38,6 +39,10 @@ static int fetch_prune_config = -1; /* unspecified */
 static int prune = -1; /* unspecified */
 #define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
 
+static int fetch_prune_tags_config = -1; /* unspecified */
+static int prune_tags = -1; /* unspecified */
+#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
+
 static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
 static int progress = -1;
 static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
@@ -56,6 +61,7 @@ static int recurse_submodules_default = RECURSE_SUBMODULES_ON_DEMAND;
 static int shown_url = 0;
 static int refmap_alloc, refmap_nr;
 static const char **refmap_array;
+static struct list_objects_filter_options filter_options;
 
 static int git_fetch_config(const char *k, const char *v, void *cb)
 {
@@ -64,6 +70,11 @@ static int git_fetch_config(const char *k, const char *v, void *cb)
                return 0;
        }
 
+       if (!strcmp(k, "fetch.prunetags")) {
+               fetch_prune_tags_config = git_config_bool(k, v);
+               return 0;
+       }
+
        if (!strcmp(k, "submodule.recurse")) {
                int r = git_config_bool(k, v) ?
                        RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
@@ -115,7 +126,7 @@ static struct option builtin_fetch_options[] = {
                 N_("append to .git/FETCH_HEAD instead of overwriting")),
        OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
                   N_("path to upload pack on remote end")),
-       OPT__FORCE(&force, N_("force overwrite of local branch")),
+       OPT__FORCE(&force, N_("force overwrite of local branch"), 0),
        OPT_BOOL('m', "multiple", &multiple,
                 N_("fetch from multiple remotes")),
        OPT_SET_INT('t', "tags", &tags,
@@ -126,6 +137,8 @@ static struct option builtin_fetch_options[] = {
                    N_("number of submodules fetched in parallel")),
        OPT_BOOL('p', "prune", &prune,
                 N_("prune remote-tracking branches no longer on remote")),
+       OPT_BOOL('P', "prune-tags", &prune_tags,
+                N_("prune local tags no longer on remote and clobber changed tags")),
        { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"),
                    N_("control recursive fetching of submodules"),
                    PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules },
@@ -161,6 +174,7 @@ static struct option builtin_fetch_options[] = {
                        TRANSPORT_FAMILY_IPV4),
        OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
                        TRANSPORT_FAMILY_IPV6),
+       OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
        OPT_END()
 };
 
@@ -623,7 +637,7 @@ static int update_local_ref(struct ref *ref,
        struct branch *current_branch = branch_get(NULL);
        const char *pretty_ref = prettify_refname(ref->name);
 
-       type = sha1_object_info(ref->new_oid.hash, NULL);
+       type = oid_object_info(&ref->new_oid, NULL);
        if (type < 0)
                die(_("object %s not found"), oid_to_hex(&ref->new_oid));
 
@@ -694,9 +708,9 @@ static int update_local_ref(struct ref *ref,
        if (in_merge_bases(current, updated)) {
                struct strbuf quickref = STRBUF_INIT;
                int r;
-               strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
                strbuf_addstr(&quickref, "..");
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
                if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
                    (recurse_submodules != RECURSE_SUBMODULES_ON))
                        check_for_new_submodule_commits(&ref->new_oid);
@@ -709,9 +723,9 @@ static int update_local_ref(struct ref *ref,
        } else if (force || ref->force) {
                struct strbuf quickref = STRBUF_INIT;
                int r;
-               strbuf_add_unique_abbrev(&quickref, current->object.oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &current->object.oid, DEFAULT_ABBREV);
                strbuf_addstr(&quickref, "...");
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid, DEFAULT_ABBREV);
                if ((recurse_submodules != RECURSE_SUBMODULES_OFF) &&
                    (recurse_submodules != RECURSE_SUBMODULES_ON))
                        check_for_new_submodule_commits(&ref->new_oid);
@@ -1045,6 +1059,11 @@ static struct transport *prepare_transport(struct remote *remote, int deepen)
                set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
        if (update_shallow)
                set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+       if (filter_options.choice) {
+               set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+                          filter_options.filter_spec);
+               set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+       }
        return transport;
 }
 
@@ -1212,6 +1231,8 @@ static void add_options_to_argv(struct argv_array *argv)
                argv_array_push(argv, "--dry-run");
        if (prune != -1)
                argv_array_push(argv, prune ? "--prune" : "--no-prune");
+       if (prune_tags != -1)
+               argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags");
        if (update_head_ok)
                argv_array_push(argv, "--update-head-ok");
        if (force)
@@ -1265,12 +1286,65 @@ static int fetch_multiple(struct string_list *list)
        return result;
 }
 
-static int fetch_one(struct remote *remote, int argc, const char **argv)
+/*
+ * Fetching from the promisor remote should use the given filter-spec
+ * or inherit the default filter-spec from the config.
+ */
+static inline void fetch_one_setup_partial(struct remote *remote)
+{
+       /*
+        * Explicit --no-filter argument overrides everything, regardless
+        * of any prior partial clones and fetches.
+        */
+       if (filter_options.no_filter)
+               return;
+
+       /*
+        * If no prior partial clone/fetch and the current fetch DID NOT
+        * request a partial-fetch, do a normal fetch.
+        */
+       if (!repository_format_partial_clone && !filter_options.choice)
+               return;
+
+       /*
+        * If this is the FIRST partial-fetch request, we enable partial
+        * on this repo and remember the given filter-spec as the default
+        * for subsequent fetches to this remote.
+        */
+       if (!repository_format_partial_clone && filter_options.choice) {
+               partial_clone_register(remote->name, &filter_options);
+               return;
+       }
+
+       /*
+        * We are currently limited to only ONE promisor remote and only
+        * allow partial-fetches from the promisor remote.
+        */
+       if (strcmp(remote->name, repository_format_partial_clone)) {
+               if (filter_options.choice)
+                       die(_("--filter can only be used with the remote configured in core.partialClone"));
+               return;
+       }
+
+       /*
+        * Do a partial-fetch from the promisor remote using either the
+        * explicitly given filter-spec or inherit the filter-spec from
+        * the config.
+        */
+       if (!filter_options.choice)
+               partial_clone_get_default_filter_spec(&filter_options);
+       return;
+}
+
+static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok)
 {
        static const char **refs = NULL;
        struct refspec *refspec;
        int ref_nr = 0;
+       int j = 0;
        int exit_code;
+       int maybe_prune_tags;
+       int remote_via_config = remote_is_configured(remote, 0);
 
        if (!remote)
                die(_("No remote repository specified.  Please, specify either a URL or a\n"
@@ -1280,18 +1354,39 @@ static int fetch_one(struct remote *remote, int argc, const char **argv)
 
        if (prune < 0) {
                /* no command line request */
-               if (0 <= gtransport->remote->prune)
-                       prune = gtransport->remote->prune;
+               if (0 <= remote->prune)
+                       prune = remote->prune;
                else if (0 <= fetch_prune_config)
                        prune = fetch_prune_config;
                else
                        prune = PRUNE_BY_DEFAULT;
        }
 
+       if (prune_tags < 0) {
+               /* no command line request */
+               if (0 <= remote->prune_tags)
+                       prune_tags = remote->prune_tags;
+               else if (0 <= fetch_prune_tags_config)
+                       prune_tags = fetch_prune_tags_config;
+               else
+                       prune_tags = PRUNE_TAGS_BY_DEFAULT;
+       }
+
+       maybe_prune_tags = prune_tags_ok && prune_tags;
+       if (maybe_prune_tags && remote_via_config)
+               add_prune_tags_to_fetch_refspec(remote);
+
+       if (argc > 0 || (maybe_prune_tags && !remote_via_config)) {
+               size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1);
+               refs = xcalloc(nr_alloc, sizeof(const char *));
+               if (maybe_prune_tags) {
+                       refs[j++] = xstrdup("refs/tags/*:refs/tags/*");
+                       ref_nr++;
+               }
+       }
+
        if (argc > 0) {
-               int j = 0;
                int i;
-               refs = xcalloc(st_add(argc, 1), sizeof(const char *));
                for (i = 0; i < argc; i++) {
                        if (!strcmp(argv[i], "tag")) {
                                i++;
@@ -1301,9 +1396,8 @@ static int fetch_one(struct remote *remote, int argc, const char **argv)
                                                    argv[i], argv[i]);
                        } else
                                refs[j++] = argv[i];
+                       ref_nr++;
                }
-               refs[j] = NULL;
-               ref_nr = j;
        }
 
        sigchain_push_common(unlock_pack_on_signal);
@@ -1320,12 +1414,15 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
 {
        int i;
        struct string_list list = STRING_LIST_INIT_DUP;
-       struct remote *remote;
+       struct remote *remote = NULL;
        int result = 0;
+       int prune_tags_ok = 1;
        struct argv_array argv_gc_auto = ARGV_ARRAY_INIT;
 
        packet_trace_identity("fetch");
 
+       fetch_if_missing = 0;
+
        /* Record the command line for the reflog */
        strbuf_addstr(&default_rla, "fetch");
        for (i = 1; i < argc; i++)
@@ -1359,23 +1456,23 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
        if (depth || deepen_since || deepen_not.nr)
                deepen = 1;
 
+       if (filter_options.choice && !repository_format_partial_clone)
+               die("--filter can only be used when extensions.partialClone is set");
+
        if (all) {
                if (argc == 1)
                        die(_("fetch --all does not take a repository argument"));
                else if (argc > 1)
                        die(_("fetch --all does not make sense with refspecs"));
                (void) for_each_remote(get_one_remote_for_fetch, &list);
-               result = fetch_multiple(&list);
        } else if (argc == 0) {
                /* No arguments -- use default remote */
                remote = remote_get(NULL);
-               result = fetch_one(remote, argc, argv);
        } else if (multiple) {
                /* All arguments are assumed to be remotes or groups */
                for (i = 0; i < argc; i++)
                        if (!add_remote_or_group(argv[i], &list))
                                die(_("No such remote or remote group: %s"), argv[i]);
-               result = fetch_multiple(&list);
        } else {
                /* Single remote or group */
                (void) add_remote_or_group(argv[0], &list);
@@ -1383,14 +1480,26 @@ int cmd_fetch(int argc, const char **argv, const char *prefix)
                        /* More than one remote */
                        if (argc > 1)
                                die(_("Fetching a group and specifying refspecs does not make sense"));
-                       result = fetch_multiple(&list);
                } else {
                        /* Zero or one remotes */
                        remote = remote_get(argv[0]);
-                       result = fetch_one(remote, argc-1, argv+1);
+                       prune_tags_ok = (argc == 1);
+                       argc--;
+                       argv++;
                }
        }
 
+       if (remote) {
+               if (filter_options.choice || repository_format_partial_clone)
+                       fetch_one_setup_partial(remote);
+               result = fetch_one(remote, argc, argv, prune_tags_ok);
+       } else {
+               if (filter_options.choice)
+                       die(_("--filter can only be used with the remote configured in core.partialClone"));
+               /* TODO should this also die if we have a previous partial-clone? */
+               result = fetch_multiple(&list);
+       }
+
        if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
                struct argv_array options = ARGV_ARRAY_INIT;
 
diff --git a/builtin/fmt-merge-msg.c b/builtin/fmt-merge-msg.c
index 8e8a15ea4ad6de2bb73f63d39a0895a263918774..bd680be6874da29cf776c84468a60888beea53fb 100644
@@ -485,10 +485,10 @@ static void fmt_merge_msg_sigs(struct strbuf *out)
        struct strbuf tagbuf = STRBUF_INIT;
 
        for (i = 0; i < origins.nr; i++) {
-               unsigned char *sha1 = origins.items[i].util;
+               struct object_id *oid = origins.items[i].util;
                enum object_type type;
                unsigned long size, len;
-               char *buf = read_sha1_file(sha1, &type, &size);
+               char *buf = read_object_file(oid, &type, &size);
                struct strbuf sig = STRBUF_INIT;
 
                if (!buf || type != OBJ_TAG)
diff --git a/builtin/fsck.c b/builtin/fsck.c
index 92ce775a74d88036b024b24d74e3987da521ef41..09225586835eb872ea0152018f1f85dc23f63710 100644
@@ -65,12 +65,12 @@ static const char *printable_type(struct object *obj)
        const char *ret;
 
        if (obj->type == OBJ_NONE) {
-               enum object_type type = sha1_object_info(obj->oid.hash, NULL);
+               enum object_type type = oid_object_info(&obj->oid, NULL);
                if (type > 0)
                        object_as_type(obj, type, 0);
        }
 
-       ret = typename(obj->type);
+       ret = type_name(obj->type);
        if (!ret)
                ret = "unknown";
 
@@ -137,7 +137,7 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt
                printf("broken link from %7s %s\n",
                           printable_type(parent), describe_object(parent));
                printf("broken link from %7s %s\n",
-                          (type == OBJ_ANY ? "unknown" : typename(type)), "unknown");
+                          (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
                errors_found |= ERROR_REACHABLE;
                return 1;
        }
@@ -149,6 +149,15 @@ static int mark_object(struct object *obj, int type, void *data, struct fsck_opt
        if (obj->flags & REACHABLE)
                return 0;
        obj->flags |= REACHABLE;
+
+       if (is_promisor_object(&obj->oid))
+               /*
+                * Further recursion does not need to be performed on this
+                * object since it is a promisor object (so it does not need to
+                * be added to "pending").
+                */
+               return 0;
+
        if (!(obj->flags & HAS_OBJ)) {
                if (parent && !has_object_file(&obj->oid)) {
                        printf("broken link from %7s %s\n",
@@ -214,6 +223,8 @@ static void check_reachable_object(struct object *obj)
         * do a full fsck
         */
        if (!(obj->flags & HAS_OBJ)) {
+               if (is_promisor_object(&obj->oid))
+                       return;
                if (has_sha1_pack(obj->oid.hash))
                        return; /* it is in pack - forget about it */
                printf("missing %s %s\n", printable_type(obj),
@@ -404,7 +415,7 @@ static void fsck_handle_reflog_oid(const char *refname, struct object_id *oid,
                                        xstrfmt("%s@{%"PRItime"}", refname, timestamp));
                        obj->flags |= USED;
                        mark_object_reachable(obj);
-               } else {
+               } else if (!is_promisor_object(oid)) {
                        error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
                        errors_found |= ERROR_REACHABLE;
                }
@@ -440,6 +451,14 @@ static int fsck_handle_ref(const char *refname, const struct object_id *oid,
 
        obj = parse_object(oid);
        if (!obj) {
+               if (is_promisor_object(oid)) {
+                       /*
+                        * Increment default_refs anyway, because this is a
+                        * valid ref.
+                        */
+                       default_refs++;
+                       return 0;
+               }
                error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
                errors_found |= ERROR_REACHABLE;
                /* We'll continue with the rest despite the error.. */
@@ -494,7 +513,7 @@ static struct object *parse_loose_object(const struct object_id *oid,
        unsigned long size;
        int eaten;
 
-       if (read_loose_object(path, oid->hash, &type, &size, &contents) < 0)
+       if (read_loose_object(path, oid, &type, &size, &contents) < 0)
                return NULL;
 
        if (!contents && type != OBJ_BLOB)
@@ -665,6 +684,9 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
        int i;
        struct alternate_object_database *alt;
 
+       /* fsck knows how to handle missing promisor objects */
+       fetch_if_missing = 0;
+
        errors_found = 0;
        check_replace_refs = 0;
 
@@ -737,6 +759,8 @@ int cmd_fsck(int argc, const char **argv, const char *prefix)
                        struct object *obj = lookup_object(oid.hash);
 
                        if (!obj || !(obj->flags & HAS_OBJ)) {
+                               if (is_promisor_object(&oid))
+                                       continue;
                                error("%s: object missing", oid_to_hex(&oid));
                                errors_found |= ERROR_OBJECT;
                                continue;
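
Each fsck.c hunk above adds the same guard: before an absent or unparsable object is reported, is_promisor_object() is consulted, because in a partial clone such objects are expected to be missing (and fetch_if_missing is cleared so fsck never tries to fetch them on demand). A compilable sketch of that guard pattern, using stand-in names instead of git's object machinery:

#include <stdio.h>
#include <string.h>

struct oid { char hex[41]; };

/* stand-in for is_promisor_object(); git really consults the promisor packfiles */
static int is_promised(const struct oid *oid)
{
	return strncmp(oid->hex, "ff", 2) == 0;	/* purely illustrative rule */
}

/* stand-in for the "missing ..." reporting done in check_reachable_object() */
static int report_if_missing(const struct oid *oid, int present, int *errors)
{
	if (present)
		return 0;
	if (is_promised(oid))
		return 0;	/* legitimately absent in a partial clone: not an error */
	fprintf(stderr, "missing object %s\n", oid->hex);
	(*errors)++;
	return 1;
}

int main(void)
{
	struct oid promised = { "ff0123" }, local = { "ab4567" };
	int errors = 0;

	report_if_missing(&promised, 0, &errors);	/* silently skipped */
	report_if_missing(&local, 0, &errors);		/* reported */
	printf("errors: %d\n", errors);
	return 0;
}
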
index 3c5eae0edf12e461e84c71c0e2988fcfa58d3d44..f51e5a6500fc294cb719716671259de42f31bfe7 100644 (file)
@@ -360,8 +360,11 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                        N_("prune unreferenced objects"),
                        PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
                OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
-               OPT_BOOL(0, "auto", &auto_gc, N_("enable auto-gc mode")),
-               OPT_BOOL(0, "force", &force, N_("force running gc even if there may be another gc running")),
+               OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
+                          PARSE_OPT_NOCOMPLETE),
+               OPT_BOOL_F(0, "force", &force,
+                          N_("force running gc even if there may be another gc running"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
 
@@ -458,6 +461,9 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
                        argv_array_push(&prune, prune_expire);
                        if (quiet)
                                argv_array_push(&prune, "--no-progress");
+                       if (repository_format_partial_clone)
+                               argv_array_push(&prune,
+                                               "--exclude-promisor-objects");
                        if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
                                return error(FAILED_RUN, prune.argv[0]);
                }
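
The gc.c hunk above appends --exclude-promisor-objects to the internally spawned "git prune" whenever extensions.partialClone is configured, so pruning in a partial clone does not stumble over (or try to fetch) objects that are intentionally absent. A rough sketch of assembling such a child command line in plain C (git itself uses its argv_array API for this):

#include <stdio.h>

#define MAX_ARGS 16

static void push_arg(const char **argv, int *argc, const char *arg)
{
	if (*argc < MAX_ARGS - 1)
		argv[(*argc)++] = arg;
	argv[*argc] = NULL;	/* keep the vector NULL-terminated for execvp() */
}

int main(void)
{
	const char *argv[MAX_ARGS] = { NULL };
	int argc = 0;
	int quiet = 1, partial_clone = 1;	/* pretend these came from options/config */

	push_arg(argv, &argc, "prune");
	push_arg(argv, &argc, "--expire");
	push_arg(argv, &argc, "2.weeks.ago");
	if (quiet)
		push_arg(argv, &argc, "--no-progress");
	if (partial_clone)
		push_arg(argv, &argc, "--exclude-promisor-objects");

	for (int i = 0; i < argc; i++)
		printf("%s%s", argv[i], i + 1 < argc ? " " : "\n");
	return 0;
}
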
index 3ca4ac80d8c7014e7e9aba228f542113a5103b48..668cb8050ae68a729cf522011f36d727fff04cfb 100644 (file)
@@ -92,8 +92,7 @@ static pthread_cond_t cond_result;
 
 static int skip_first_line;
 
-static void add_work(struct grep_opt *opt, enum grep_source_type type,
-                    const char *name, const char *path, const void *id)
+static void add_work(struct grep_opt *opt, const struct grep_source *gs)
 {
        grep_lock();
 
@@ -101,7 +100,7 @@ static void add_work(struct grep_opt *opt, enum grep_source_type type,
                pthread_cond_wait(&cond_write, &grep_mutex);
        }
 
-       grep_source_init(&todo[todo_end].source, type, name, path, id);
+       todo[todo_end].source = *gs;
        if (opt->binary != GREP_BINARY_TEXT)
                grep_source_load_driver(&todo[todo_end].source);
        todo[todo_end].done = 0;
@@ -307,7 +306,7 @@ static void *lock_and_read_oid_file(const struct object_id *oid, enum object_typ
        void *data;
 
        grep_read_lock();
-       data = read_sha1_file(oid->hash, type, size);
+       data = read_object_file(oid, type, size);
        grep_read_unlock();
        return data;
 }
@@ -317,6 +316,7 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
                     const char *path)
 {
        struct strbuf pathbuf = STRBUF_INIT;
+       struct grep_source gs;
 
        if (opt->relative && opt->prefix_length) {
                quote_path_relative(filename + tree_name_len, opt->prefix, &pathbuf);
@@ -325,19 +325,22 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
                strbuf_addstr(&pathbuf, filename);
        }
 
+       grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
+       strbuf_release(&pathbuf);
+
 #ifndef NO_PTHREADS
        if (num_threads) {
-               add_work(opt, GREP_SOURCE_OID, pathbuf.buf, path, oid);
-               strbuf_release(&pathbuf);
+               /*
+                * add_work() copies gs and thus assumes ownership of
+                * its fields, so do not call grep_source_clear()
+                */
+               add_work(opt, &gs);
                return 0;
        } else
 #endif
        {
-               struct grep_source gs;
                int hit;
 
-               grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
-               strbuf_release(&pathbuf);
                hit = grep_source(opt, &gs);
 
                grep_source_clear(&gs);
@@ -348,25 +351,29 @@ static int grep_oid(struct grep_opt *opt, const struct object_id *oid,
 static int grep_file(struct grep_opt *opt, const char *filename)
 {
        struct strbuf buf = STRBUF_INIT;
+       struct grep_source gs;
 
        if (opt->relative && opt->prefix_length)
                quote_path_relative(filename, opt->prefix, &buf);
        else
                strbuf_addstr(&buf, filename);
 
+       grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
+       strbuf_release(&buf);
+
 #ifndef NO_PTHREADS
        if (num_threads) {
-               add_work(opt, GREP_SOURCE_FILE, buf.buf, filename, filename);
-               strbuf_release(&buf);
+               /*
+                * add_work() copies gs and thus assumes ownership of
+                * its fields, so do not call grep_source_clear()
+                */
+               add_work(opt, &gs);
                return 0;
        } else
 #endif
        {
-               struct grep_source gs;
                int hit;
 
-               grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
-               strbuf_release(&buf);
                hit = grep_source(opt, &gs);
 
                grep_source_clear(&gs);
@@ -445,7 +452,7 @@ static int grep_submodule(struct grep_opt *opt, struct repository *superproject,
                object = parse_object_or_die(oid, oid_to_hex(oid));
 
                grep_read_lock();
-               data = read_object_with_reference(object->oid.hash, tree_type,
+               data = read_object_with_reference(&object->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -607,7 +614,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                int hit, len;
 
                grep_read_lock();
-               data = read_object_with_reference(obj->oid.hash, tree_type,
+               data = read_object_with_reference(&obj->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -627,7 +634,7 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                free(data);
                return hit;
        }
-       die(_("unable to grep from object of type %s"), typename(obj->type));
+       die(_("unable to grep from object of type %s"), type_name(obj->type));
 }
 
 static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
@@ -832,8 +839,9 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                OPT_BOOL('L', "files-without-match",
                        &opt.unmatch_name_only,
                        N_("show only the names of files without match")),
-               OPT_BOOL('z', "null", &opt.null_following_name,
-                       N_("print NUL after filenames")),
+               OPT_BOOL_F('z', "null", &opt.null_following_name,
+                          N_("print NUL after filenames"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('c', "count", &opt.count,
                        N_("show the number of matches instead of matching lines")),
                OPT__COLOR(&opt.color, N_("highlight matches")),
@@ -884,9 +892,11 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                OPT_GROUP(""),
                { OPTION_STRING, 'O', "open-files-in-pager", &show_in_pager,
                        N_("pager"), N_("show matching files in the pager"),
-                       PARSE_OPT_OPTARG, NULL, (intptr_t)default_pager },
-               OPT_BOOL(0, "ext-grep", &external_grep_allowed__ignored,
-                        N_("allow calling of grep(1) (ignored by this build)")),
+                       PARSE_OPT_OPTARG | PARSE_OPT_NOCOMPLETE,
+                       NULL, (intptr_t)default_pager },
+               OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored,
+                          N_("allow calling of grep(1) (ignored by this build)"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
 
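
The grep.c refactoring above builds one struct grep_source up front and, on the threaded path, hands it to add_work() by plain struct assignment; from that point the worker queue owns the heap-allocated fields inside it, which is why the callers no longer call grep_source_clear() on that path. A small sketch of the ownership-by-copy hand-off, with simplified types that are not git's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct work { char *name; };	/* imagine this owning heap-allocated fields */

#define QUEUE_MAX 8
static struct work queue[QUEUE_MAX];
static int queue_len;

/* copies *w into the queue; ownership of w->name moves along with the copy */
static void add_work(const struct work *w)
{
	if (queue_len < QUEUE_MAX)
		queue[queue_len++] = *w;
}

static void drain_queue(void)
{
	for (int i = 0; i < queue_len; i++) {
		printf("processing %s\n", queue[i].name);
		free(queue[i].name);	/* the queue, not the producer, releases it */
	}
	queue_len = 0;
}

int main(void)
{
	struct work w = { strdup("README") };

	add_work(&w);
	/* do NOT free(w.name) here: the queue owns it now */
	drain_queue();
	return 0;
}
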
index c532ff9320c751d1db5475add51f2c3c6a8c7146..526da5c1856ed1c387975a767f4d01382d1ea1a9 100644 (file)
@@ -24,7 +24,8 @@ static int hash_literally(struct object_id *oid, int fd, const char *type, unsig
        if (strbuf_read(&buf, fd, 4096) < 0)
                ret = -1;
        else
-               ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags);
+               ret = hash_object_file_literally(buf.buf, buf.len, type, oid,
+                                                flags);
        strbuf_release(&buf);
        return ret;
 }
index d3c8fc40820faa10f7c9004560159986b2900e9e..598867cfea40c6e0df25111c5484dde5d2cd8b47 100644 (file)
@@ -194,11 +194,11 @@ static void do_add_man_viewer_info(const char *name,
                                   size_t len,
                                   const char *value)
 {
-       struct man_viewer_info_list *new;
-       FLEX_ALLOC_MEM(new, name, name, len);
-       new->info = xstrdup(value);
-       new->next = man_viewer_info_list;
-       man_viewer_info_list = new;
+       struct man_viewer_info_list *new_man_viewer;
+       FLEX_ALLOC_MEM(new_man_viewer, name, name, len);
+       new_man_viewer->info = xstrdup(value);
+       new_man_viewer->next = man_viewer_info_list;
+       man_viewer_info_list = new_man_viewer;
 }
 
 static int add_man_viewer_path(const char *name,
index 4c51aec81f374d52e7c6a1afa73ab9dea3cf40b8..657a5dda06556baf909134f98c96029c615ae719 100644 (file)
@@ -49,6 +49,7 @@ struct thread_local {
        int pack_fd;
 };
 
+/* Remember to update object flag allocation in object.h */
 #define FLAG_LINK (1u<<20)
 #define FLAG_CHECKED (1u<<21)
 
@@ -58,7 +59,7 @@ struct ofs_delta_entry {
 };
 
 struct ref_delta_entry {
-       unsigned char sha1[20];
+       struct object_id oid;
        int obj_no;
 };
 
@@ -91,7 +92,7 @@ static unsigned int input_offset, input_len;
 static off_t consumed_bytes;
 static off_t max_input_size;
 static unsigned deepest_delta;
-static git_SHA_CTX input_ctx;
+static git_hash_ctx input_ctx;
 static uint32_t input_crc32;
 static int input_fd, output_fd;
 static const char *curr_pack;
@@ -221,14 +222,14 @@ static unsigned check_object(struct object *obj)
 
        if (!(obj->flags & FLAG_CHECKED)) {
                unsigned long size;
-               int type = sha1_object_info(obj->oid.hash, &size);
+               int type = oid_object_info(&obj->oid, &size);
                if (type <= 0)
                        die(_("did not receive expected object %s"),
                              oid_to_hex(&obj->oid));
                if (type != obj->type)
                        die(_("object %s: expected type %s, found %s"),
                            oid_to_hex(&obj->oid),
-                           typename(obj->type), typename(type));
+                           type_name(obj->type), type_name(type));
                obj->flags |= FLAG_CHECKED;
                return 1;
        }
@@ -253,7 +254,7 @@ static void flush(void)
        if (input_offset) {
                if (output_fd >= 0)
                        write_or_die(output_fd, input_buffer, input_offset);
-               git_SHA1_Update(&input_ctx, input_buffer, input_offset);
+               the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
                memmove(input_buffer, input_buffer + input_offset, input_len);
                input_offset = 0;
        }
@@ -326,7 +327,7 @@ static const char *open_pack_file(const char *pack_name)
                output_fd = -1;
                nothread_data.pack_fd = input_fd;
        }
-       git_SHA1_Init(&input_ctx);
+       the_hash_algo->init_fn(&input_ctx);
        return pack_name;
 }
 
@@ -437,22 +438,22 @@ static int is_delta_type(enum object_type type)
 }
 
 static void *unpack_entry_data(off_t offset, unsigned long size,
-                              enum object_type type, unsigned char *sha1)
+                              enum object_type type, struct object_id *oid)
 {
        static char fixed_buf[8192];
        int status;
        git_zstream stream;
        void *buf;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        char hdr[32];
        int hdrlen;
 
        if (!is_delta_type(type)) {
-               hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1;
-               git_SHA1_Init(&c);
-               git_SHA1_Update(&c, hdr, hdrlen);
+               hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1;
+               the_hash_algo->init_fn(&c);
+               the_hash_algo->update_fn(&c, hdr, hdrlen);
        } else
-               sha1 = NULL;
+               oid = NULL;
        if (type == OBJ_BLOB && size > big_file_threshold)
                buf = fixed_buf;
        else
@@ -469,8 +470,8 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
                stream.avail_in = input_len;
                status = git_inflate(&stream, 0);
                use(input_len - stream.avail_in);
-               if (sha1)
-                       git_SHA1_Update(&c, last_out, stream.next_out - last_out);
+               if (oid)
+                       the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
                if (buf == fixed_buf) {
                        stream.next_out = buf;
                        stream.avail_out = sizeof(fixed_buf);
@@ -479,15 +480,15 @@ static void *unpack_entry_data(off_t offset, unsigned long size,
        if (stream.total_out != size || status != Z_STREAM_END)
                bad_object(offset, _("inflate returned %d"), status);
        git_inflate_end(&stream);
-       if (sha1)
-               git_SHA1_Final(sha1, &c);
+       if (oid)
+               the_hash_algo->final_fn(oid->hash, &c);
        return buf == fixed_buf ? NULL : buf;
 }
 
 static void *unpack_raw_entry(struct object_entry *obj,
                              off_t *ofs_offset,
-                             unsigned char *ref_sha1,
-                             unsigned char *sha1)
+                             struct object_id *ref_oid,
+                             struct object_id *oid)
 {
        unsigned char *p;
        unsigned long size, c;
@@ -515,8 +516,8 @@ static void *unpack_raw_entry(struct object_entry *obj,
 
        switch (obj->type) {
        case OBJ_REF_DELTA:
-               hashcpy(ref_sha1, fill(20));
-               use(20);
+               hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
+               use(the_hash_algo->rawsz);
                break;
        case OBJ_OFS_DELTA:
                p = fill(1);
@@ -546,7 +547,7 @@ static void *unpack_raw_entry(struct object_entry *obj,
        }
        obj->hdr_size = consumed_bytes - obj->idx.offset;
 
-       data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1);
+       data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
        obj->idx.crc32 = input_crc32;
        return data;
 }
@@ -671,18 +672,18 @@ static void find_ofs_delta_children(off_t offset,
        *last_index = last;
 }
 
-static int compare_ref_delta_bases(const unsigned char *sha1,
-                                  const unsigned char *sha2,
+static int compare_ref_delta_bases(const struct object_id *oid1,
+                                  const struct object_id *oid2,
                                   enum object_type type1,
                                   enum object_type type2)
 {
        int cmp = type1 - type2;
        if (cmp)
                return cmp;
-       return hashcmp(sha1, sha2);
+       return oidcmp(oid1, oid2);
 }
 
-static int find_ref_delta(const unsigned char *sha1, enum object_type type)
+static int find_ref_delta(const struct object_id *oid, enum object_type type)
 {
        int first = 0, last = nr_ref_deltas;
 
@@ -691,7 +692,7 @@ static int find_ref_delta(const unsigned char *sha1, enum object_type type)
                struct ref_delta_entry *delta = &ref_deltas[next];
                int cmp;
 
-               cmp = compare_ref_delta_bases(sha1, delta->sha1,
+               cmp = compare_ref_delta_bases(oid, &delta->oid,
                                              type, objects[delta->obj_no].type);
                if (!cmp)
                        return next;
@@ -704,11 +705,11 @@ static int find_ref_delta(const unsigned char *sha1, enum object_type type)
        return -first-1;
 }
 
-static void find_ref_delta_children(const unsigned char *sha1,
+static void find_ref_delta_children(const struct object_id *oid,
                                    int *first_index, int *last_index,
                                    enum object_type type)
 {
-       int first = find_ref_delta(sha1, type);
+       int first = find_ref_delta(oid, type);
        int last = first;
        int end = nr_ref_deltas - 1;
 
@@ -717,9 +718,9 @@ static void find_ref_delta_children(const unsigned char *sha1,
                *last_index = -1;
                return;
        }
-       while (first > 0 && !hashcmp(ref_deltas[first - 1].sha1, sha1))
+       while (first > 0 && !oidcmp(&ref_deltas[first - 1].oid, oid))
                --first;
-       while (last < end && !hashcmp(ref_deltas[last + 1].sha1, sha1))
+       while (last < end && !oidcmp(&ref_deltas[last + 1].oid, oid))
                ++last;
        *first_index = first;
        *last_index = last;
@@ -771,7 +772,7 @@ static int check_collison(struct object_entry *entry)
 
        memset(&data, 0, sizeof(data));
        data.entry = entry;
-       data.st = open_istream(entry->idx.oid.hash, &type, &size, NULL);
+       data.st = open_istream(&entry->idx.oid, &type, &size, NULL);
        if (!data.st)
                return -1;
        if (size != entry->size || type != entry->type)
@@ -810,12 +811,12 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                enum object_type has_type;
                unsigned long has_size;
                read_lock();
-               has_type = sha1_object_info(oid->hash, &has_size);
+               has_type = oid_object_info(oid, &has_size);
                if (has_type < 0)
                        die(_("cannot read existing object info %s"), oid_to_hex(oid));
                if (has_type != type || has_size != size)
                        die(_("SHA1 COLLISION FOUND WITH %s !"), oid_to_hex(oid));
-               has_data = read_sha1_file(oid->hash, &has_type, &has_size);
+               has_data = read_object_file(oid, &has_type, &has_size);
                read_unlock();
                if (!data)
                        data = new_data = get_data_from_pack(obj_entry);
@@ -827,7 +828,7 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                free(has_data);
        }
 
-       if (strict) {
+       if (strict || do_fsck_object) {
                read_lock();
                if (type == OBJ_BLOB) {
                        struct blob *blob = lookup_blob(oid);
@@ -849,11 +850,11 @@ static void sha1_object(const void *data, struct object_entry *obj_entry,
                        obj = parse_object_buffer(oid, type, size, buf,
                                                  &eaten);
                        if (!obj)
-                               die(_("invalid %s"), typename(type));
+                               die(_("invalid %s"), type_name(type));
                        if (do_fsck_object &&
                            fsck_object(obj, buf, size, &fsck_options))
                                die(_("Error in object"));
-                       if (fsck_walk(obj, NULL, &fsck_options))
+                       if (strict && fsck_walk(obj, NULL, &fsck_options))
                                die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
 
                        if (obj->type == OBJ_TREE) {
@@ -958,9 +959,8 @@ static void resolve_delta(struct object_entry *delta_obj,
        free(delta_data);
        if (!result->data)
                bad_object(delta_obj->idx.offset, _("failed to apply delta"));
-       hash_sha1_file(result->data, result->size,
-                      typename(delta_obj->real_type),
-                      delta_obj->idx.oid.hash);
+       hash_object_file(result->data, result->size,
+                        type_name(delta_obj->real_type), &delta_obj->idx.oid);
        sha1_object(result->data, NULL, result->size, delta_obj->real_type,
                    &delta_obj->idx.oid);
        counter_lock();
@@ -992,7 +992,7 @@ static struct base_data *find_unresolved_deltas_1(struct base_data *base,
                                                  struct base_data *prev_base)
 {
        if (base->ref_last == -1 && base->ofs_last == -1) {
-               find_ref_delta_children(base->obj->idx.oid.hash,
+               find_ref_delta_children(&base->obj->idx.oid,
                                        &base->ref_first, &base->ref_last,
                                        OBJ_REF_DELTA);
 
@@ -1076,7 +1076,7 @@ static int compare_ref_delta_entry(const void *a, const void *b)
        const struct ref_delta_entry *delta_a = a;
        const struct ref_delta_entry *delta_b = b;
 
-       return hashcmp(delta_a->sha1, delta_b->sha1);
+       return oidcmp(&delta_a->oid, &delta_b->oid);
 }
 
 static void resolve_base(struct object_entry *obj)
@@ -1119,11 +1119,11 @@ static void *threaded_second_pass(void *data)
  * - calculate SHA1 of all non-delta objects;
  * - remember base (SHA1 or offset) for all deltas.
  */
-static void parse_pack_objects(unsigned char *sha1)
+static void parse_pack_objects(unsigned char *hash)
 {
        int i, nr_delays = 0;
        struct ofs_delta_entry *ofs_delta = ofs_deltas;
-       unsigned char ref_delta_sha1[20];
+       struct object_id ref_delta_oid;
        struct stat st;
 
        if (verbose)
@@ -1133,8 +1133,8 @@ static void parse_pack_objects(unsigned char *sha1)
        for (i = 0; i < nr_objects; i++) {
                struct object_entry *obj = &objects[i];
                void *data = unpack_raw_entry(obj, &ofs_delta->offset,
-                                             ref_delta_sha1,
-                                             obj->idx.oid.hash);
+                                             &ref_delta_oid,
+                                             &obj->idx.oid);
                obj->real_type = obj->type;
                if (obj->type == OBJ_OFS_DELTA) {
                        nr_ofs_deltas++;
@@ -1142,7 +1142,7 @@ static void parse_pack_objects(unsigned char *sha1)
                        ofs_delta++;
                } else if (obj->type == OBJ_REF_DELTA) {
                        ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
-                       hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1);
+                       oidcpy(&ref_deltas[nr_ref_deltas].oid, &ref_delta_oid);
                        ref_deltas[nr_ref_deltas].obj_no = i;
                        nr_ref_deltas++;
                } else if (!data) {
@@ -1160,10 +1160,10 @@ static void parse_pack_objects(unsigned char *sha1)
 
        /* Check pack integrity */
        flush();
-       git_SHA1_Final(sha1, &input_ctx);
-       if (hashcmp(fill(20), sha1))
+       the_hash_algo->final_fn(hash, &input_ctx);
+       if (hashcmp(fill(the_hash_algo->rawsz), hash))
                die(_("pack is corrupted (SHA1 mismatch)"));
-       use(20);
+       use(the_hash_algo->rawsz);
 
        /* If input_fd is a file, we should have reached its end now. */
        if (fstat(input_fd, &st))
@@ -1239,21 +1239,21 @@ static void resolve_deltas(void)
 /*
  * Third pass:
  * - append objects to convert thin pack to full pack if required
- * - write the final 20-byte SHA-1
+ * - write the final pack hash
  */
-static void fix_unresolved_deltas(struct sha1file *f);
-static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1)
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
 {
        if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
                stop_progress(&progress);
-               /* Flush remaining pack final 20-byte SHA1. */
+               /* Flush remaining pack final hash. */
                flush();
                return;
        }
 
        if (fix_thin_pack) {
-               struct sha1file *f;
-               unsigned char read_sha1[20], tail_sha1[20];
+               struct hashfile *f;
+               unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
                struct strbuf msg = STRBUF_INIT;
                int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
                int nr_objects_initial = nr_objects;
@@ -1262,7 +1262,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
                memset(objects + nr_objects + 1, 0,
                       nr_unresolved * sizeof(*objects));
-               f = sha1fd(output_fd, curr_pack);
+               f = hashfd(output_fd, curr_pack);
                fix_unresolved_deltas(f);
                strbuf_addf(&msg, Q_("completed with %d local object",
                                     "completed with %d local objects",
@@ -1270,12 +1270,12 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                            nr_objects - nr_objects_initial);
                stop_progress_msg(&progress, msg.buf);
                strbuf_release(&msg);
-               sha1close(f, tail_sha1, 0);
-               hashcpy(read_sha1, pack_sha1);
-               fixup_pack_header_footer(output_fd, pack_sha1,
+               hashclose(f, tail_hash, 0);
+               hashcpy(read_hash, pack_hash);
+               fixup_pack_header_footer(output_fd, pack_hash,
                                         curr_pack, nr_objects,
-                                        read_sha1, consumed_bytes-20);
-               if (hashcmp(read_sha1, tail_sha1) != 0)
+                                        read_hash, consumed_bytes-the_hash_algo->rawsz);
+               if (hashcmp(read_hash, tail_hash) != 0)
                        die(_("Unexpected tail checksum for %s "
                              "(disk corruption?)"), curr_pack);
        }
@@ -1286,7 +1286,7 @@ static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned cha
                    nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
 }
 
-static int write_compressed(struct sha1file *f, void *in, unsigned int size)
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
 {
        git_zstream stream;
        int status;
@@ -1300,7 +1300,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size)
                stream.next_out = outbuf;
                stream.avail_out = sizeof(outbuf);
                status = git_deflate(&stream, Z_FINISH);
-               sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out);
+               hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
        } while (status == Z_OK);
 
        if (status != Z_STREAM_END)
@@ -1310,7 +1310,7 @@ static int write_compressed(struct sha1file *f, void *in, unsigned int size)
        return size;
 }
 
-static struct object_entry *append_obj_to_pack(struct sha1file *f,
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
                               const unsigned char *sha1, void *buf,
                               unsigned long size, enum object_type type)
 {
@@ -1327,7 +1327,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f,
        }
        header[n++] = c;
        crc32_begin(f);
-       sha1write(f, header, n);
+       hashwrite(f, header, n);
        obj[0].size = size;
        obj[0].hdr_size = n;
        obj[0].type = type;
@@ -1335,7 +1335,7 @@ static struct object_entry *append_obj_to_pack(struct sha1file *f,
        obj[1].idx.offset = obj[0].idx.offset + n;
        obj[1].idx.offset += write_compressed(f, buf, size);
        obj[0].idx.crc32 = crc32_end(f);
-       sha1flush(f);
+       hashflush(f);
        hashcpy(obj->idx.oid.hash, sha1);
        return obj;
 }
@@ -1347,7 +1347,7 @@ static int delta_pos_compare(const void *_a, const void *_b)
        return a->obj_no - b->obj_no;
 }
 
-static void fix_unresolved_deltas(struct sha1file *f)
+static void fix_unresolved_deltas(struct hashfile *f)
 {
        struct ref_delta_entry **sorted_by_pos;
        int i;
@@ -1374,14 +1374,15 @@ static void fix_unresolved_deltas(struct sha1file *f)
 
                if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
                        continue;
-               base_obj->data = read_sha1_file(d->sha1, &type, &base_obj->size);
+               base_obj->data = read_object_file(&d->oid, &type,
+                                                 &base_obj->size);
                if (!base_obj->data)
                        continue;
 
-               if (check_sha1_signature(d->sha1, base_obj->data,
-                               base_obj->size, typename(type)))
-                       die(_("local object %s is corrupt"), sha1_to_hex(d->sha1));
-               base_obj->obj = append_obj_to_pack(f, d->sha1,
+               if (check_object_signature(&d->oid, base_obj->data,
+                               base_obj->size, type_name(type)))
+                       die(_("local object %s is corrupt"), oid_to_hex(&d->oid));
+               base_obj->obj = append_obj_to_pack(f, d->oid.hash,
                                        base_obj->data, base_obj->size, type);
                find_unresolved_deltas(base_obj);
                display_progress(progress, nr_resolved_deltas);
@@ -1389,15 +1390,60 @@ static void fix_unresolved_deltas(struct sha1file *f)
        free(sorted_by_pos);
 }
 
+static const char *derive_filename(const char *pack_name, const char *suffix,
+                                  struct strbuf *buf)
+{
+       size_t len;
+       if (!strip_suffix(pack_name, ".pack", &len))
+               die(_("packfile name '%s' does not end with '.pack'"),
+                   pack_name);
+       strbuf_add(buf, pack_name, len);
+       strbuf_addch(buf, '.');
+       strbuf_addstr(buf, suffix);
+       return buf->buf;
+}
+
+static void write_special_file(const char *suffix, const char *msg,
+                              const char *pack_name, const unsigned char *hash,
+                              const char **report)
+{
+       struct strbuf name_buf = STRBUF_INIT;
+       const char *filename;
+       int fd;
+       int msg_len = strlen(msg);
+
+       if (pack_name)
+               filename = derive_filename(pack_name, suffix, &name_buf);
+       else
+               filename = odb_pack_name(&name_buf, hash, suffix);
+
+       fd = odb_pack_keep(filename);
+       if (fd < 0) {
+               if (errno != EEXIST)
+                       die_errno(_("cannot write %s file '%s'"),
+                                 suffix, filename);
+       } else {
+               if (msg_len > 0) {
+                       write_or_die(fd, msg, msg_len);
+                       write_or_die(fd, "\n", 1);
+               }
+               if (close(fd) != 0)
+                       die_errno(_("cannot close written %s file '%s'"),
+                                 suffix, filename);
+               if (report)
+                       *report = suffix;
+       }
+       strbuf_release(&name_buf);
+}
+
 static void final(const char *final_pack_name, const char *curr_pack_name,
                  const char *final_index_name, const char *curr_index_name,
-                 const char *keep_name, const char *keep_msg,
-                 unsigned char *sha1)
+                 const char *keep_msg, const char *promisor_msg,
+                 unsigned char *hash)
 {
        const char *report = "pack";
        struct strbuf pack_name = STRBUF_INIT;
        struct strbuf index_name = STRBUF_INIT;
-       struct strbuf keep_name_buf = STRBUF_INIT;
        int err;
 
        if (!from_stdin) {
@@ -1409,32 +1455,16 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
                        die_errno(_("error while closing pack file"));
        }
 
-       if (keep_msg) {
-               int keep_fd, keep_msg_len = strlen(keep_msg);
-
-               if (!keep_name)
-                       keep_name = odb_pack_name(&keep_name_buf, sha1, "keep");
-
-               keep_fd = odb_pack_keep(keep_name);
-               if (keep_fd < 0) {
-                       if (errno != EEXIST)
-                               die_errno(_("cannot write keep file '%s'"),
-                                         keep_name);
-               } else {
-                       if (keep_msg_len > 0) {
-                               write_or_die(keep_fd, keep_msg, keep_msg_len);
-                               write_or_die(keep_fd, "\n", 1);
-                       }
-                       if (close(keep_fd) != 0)
-                               die_errno(_("cannot close written keep file '%s'"),
-                                         keep_name);
-                       report = "keep";
-               }
-       }
+       if (keep_msg)
+               write_special_file("keep", keep_msg, final_pack_name, hash,
+                                  &report);
+       if (promisor_msg)
+               write_special_file("promisor", promisor_msg, final_pack_name,
+                                  hash, NULL);
 
        if (final_pack_name != curr_pack_name) {
                if (!final_pack_name)
-                       final_pack_name = odb_pack_name(&pack_name, sha1, "pack");
+                       final_pack_name = odb_pack_name(&pack_name, hash, "pack");
                if (finalize_object_file(curr_pack_name, final_pack_name))
                        die(_("cannot store pack file"));
        } else if (from_stdin)
@@ -1442,18 +1472,18 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
 
        if (final_index_name != curr_index_name) {
                if (!final_index_name)
-                       final_index_name = odb_pack_name(&index_name, sha1, "idx");
+                       final_index_name = odb_pack_name(&index_name, hash, "idx");
                if (finalize_object_file(curr_index_name, final_index_name))
                        die(_("cannot store index file"));
        } else
                chmod(final_index_name, 0444);
 
        if (!from_stdin) {
-               printf("%s\n", sha1_to_hex(sha1));
+               printf("%s\n", sha1_to_hex(hash));
        } else {
                struct strbuf buf = STRBUF_INIT;
 
-               strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1));
+               strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
                write_or_die(1, buf.buf, buf.len);
                strbuf_release(&buf);
 
@@ -1472,7 +1502,6 @@ static void final(const char *final_pack_name, const char *curr_pack_name,
 
        strbuf_release(&index_name);
        strbuf_release(&pack_name);
-       strbuf_release(&keep_name_buf);
 }
 
 static int git_index_pack_config(const char *k, const char *v, void *cb)
@@ -1588,7 +1617,7 @@ static void show_pack_info(int stat_only)
                        continue;
                printf("%s %-6s %lu %lu %"PRIuMAX,
                       oid_to_hex(&obj->idx.oid),
-                      typename(obj->real_type), obj->size,
+                      type_name(obj->real_type), obj->size,
                       (unsigned long)(obj[1].idx.offset - obj->idx.offset),
                       (uintmax_t)obj->idx.offset);
                if (is_delta_type(obj->type)) {
@@ -1615,32 +1644,26 @@ static void show_pack_info(int stat_only)
        }
 }
 
-static const char *derive_filename(const char *pack_name, const char *suffix,
-                                  struct strbuf *buf)
-{
-       size_t len;
-       if (!strip_suffix(pack_name, ".pack", &len))
-               die(_("packfile name '%s' does not end with '.pack'"),
-                   pack_name);
-       strbuf_add(buf, pack_name, len);
-       strbuf_addstr(buf, suffix);
-       return buf->buf;
-}
-
 int cmd_index_pack(int argc, const char **argv, const char *prefix)
 {
        int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
        const char *curr_index;
        const char *index_name = NULL, *pack_name = NULL;
-       const char *keep_name = NULL, *keep_msg = NULL;
-       struct strbuf index_name_buf = STRBUF_INIT,
-                     keep_name_buf = STRBUF_INIT;
+       const char *keep_msg = NULL;
+       const char *promisor_msg = NULL;
+       struct strbuf index_name_buf = STRBUF_INIT;
        struct pack_idx_entry **idx_objects;
        struct pack_idx_option opts;
-       unsigned char pack_sha1[20];
+       unsigned char pack_hash[GIT_MAX_RAWSZ];
        unsigned foreign_nr = 1;        /* zero is a "good" value, assume bad */
        int report_end_of_input = 0;
 
+       /*
+        * index-pack never needs to fetch missing objects, since it only
+        * accesses the repo to do hash collision checks
+        */
+       fetch_if_missing = 0;
+
        if (argc == 2 && !strcmp(argv[1], "-h"))
                usage(index_pack_usage);
 
@@ -1667,6 +1690,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
                        } else if (!strcmp(arg, "--check-self-contained-and-connected")) {
                                strict = 1;
                                check_self_contained_and_connected = 1;
+                       } else if (!strcmp(arg, "--fsck-objects")) {
+                               do_fsck_object = 1;
                        } else if (!strcmp(arg, "--verify")) {
                                verify = 1;
                        } else if (!strcmp(arg, "--verify-stat")) {
@@ -1678,6 +1703,8 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
                                stat_only = 1;
                        } else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
                                ; /* nothing to do */
+                       } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
+                               ; /* already parsed */
                        } else if (starts_with(arg, "--threads=")) {
                                char *end;
                                nr_threads = strtoul(arg+10, &end, 0);
@@ -1740,9 +1767,7 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
        if (from_stdin && !startup_info->have_repository)
                die(_("--stdin requires a git repository"));
        if (!index_name && pack_name)
-               index_name = derive_filename(pack_name, ".idx", &index_name_buf);
-       if (keep_msg && !keep_name && pack_name)
-               keep_name = derive_filename(pack_name, ".keep", &keep_name_buf);
+               index_name = derive_filename(pack_name, "idx", &index_name_buf);
 
        if (verify) {
                if (!index_name)
@@ -1768,11 +1793,11 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
        if (show_stat)
                obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
        ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
-       parse_pack_objects(pack_sha1);
+       parse_pack_objects(pack_hash);
        if (report_end_of_input)
                write_in_full(2, "\0", 1);
        resolve_deltas();
-       conclude_pack(fix_thin_pack, curr_pack, pack_sha1);
+       conclude_pack(fix_thin_pack, curr_pack, pack_hash);
        free(ofs_deltas);
        free(ref_deltas);
        if (strict)
@@ -1784,19 +1809,18 @@ int cmd_index_pack(int argc, const char **argv, const char *prefix)
        ALLOC_ARRAY(idx_objects, nr_objects);
        for (i = 0; i < nr_objects; i++)
                idx_objects[i] = &objects[i].idx;
-       curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
+       curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
        free(idx_objects);
 
        if (!verify)
                final(pack_name, curr_pack,
                      index_name, curr_index,
-                     keep_name, keep_msg,
-                     pack_sha1);
+                     keep_msg, promisor_msg,
+                     pack_hash);
        else
                close(input_fd);
        free(objects);
        strbuf_release(&index_name_buf);
-       strbuf_release(&keep_name_buf);
        if (pack_name == NULL)
                free((void *) curr_pack);
        if (index_name == NULL)
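
Much of the index-pack.c churn above replaces direct git_SHA1_Init/Update/Final calls with the the_hash_algo function table (init_fn, update_fn, final_fn) and the literal 20 with the_hash_algo->rawsz, so the pack-reading code stops hard-coding SHA-1. A toy version of such a function-pointer hash abstraction; the "hash" below is a trivial byte sum, purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_ctx { uint32_t sum; };

struct hash_algo {
	size_t rawsz;	/* size of a binary digest, like the_hash_algo->rawsz */
	void (*init_fn)(struct toy_ctx *);
	void (*update_fn)(struct toy_ctx *, const void *, size_t);
	void (*final_fn)(unsigned char *, struct toy_ctx *);
};

static void toy_init(struct toy_ctx *c) { c->sum = 0; }

static void toy_update(struct toy_ctx *c, const void *buf, size_t len)
{
	const unsigned char *p = buf;
	while (len--)
		c->sum += *p++;
}

static void toy_final(unsigned char *out, struct toy_ctx *c)
{
	memcpy(out, &c->sum, sizeof(c->sum));
}

static const struct hash_algo toy_algo = { 4, toy_init, toy_update, toy_final };
static const struct hash_algo *the_algo = &toy_algo;	/* cf. git's the_hash_algo */

int main(void)
{
	struct toy_ctx ctx;
	unsigned char digest[32];

	the_algo->init_fn(&ctx);
	the_algo->update_fn(&ctx, "pack data", 9);
	the_algo->final_fn(digest, &ctx);

	for (size_t i = 0; i < the_algo->rawsz; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}
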
index c9b7946bade9e10f799942137480e71ee3233701..68ff4ad75ace6566a233c1343fed93365c5abbe4 100644 (file)
@@ -24,11 +24,11 @@ static int init_is_bare_repository = 0;
 static int init_shared_repository = -1;
 static const char *init_db_template_dir;
 
-static void copy_templates_1(struct strbuf *path, struct strbuf *template,
+static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
                             DIR *dir)
 {
        size_t path_baselen = path->len;
-       size_t template_baselen = template->len;
+       size_t template_baselen = template_path->len;
        struct dirent *de;
 
        /* Note: if ".git/hooks" file exists in the repository being
@@ -44,12 +44,12 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template,
                int exists = 0;
 
                strbuf_setlen(path, path_baselen);
-               strbuf_setlen(template, template_baselen);
+               strbuf_setlen(template_path, template_baselen);
 
                if (de->d_name[0] == '.')
                        continue;
                strbuf_addstr(path, de->d_name);
-               strbuf_addstr(template, de->d_name);
+               strbuf_addstr(template_path, de->d_name);
                if (lstat(path->buf, &st_git)) {
                        if (errno != ENOENT)
                                die_errno(_("cannot stat '%s'"), path->buf);
@@ -57,36 +57,36 @@ static void copy_templates_1(struct strbuf *path, struct strbuf *template,
                else
                        exists = 1;
 
-               if (lstat(template->buf, &st_template))
-                       die_errno(_("cannot stat template '%s'"), template->buf);
+               if (lstat(template_path->buf, &st_template))
+                       die_errno(_("cannot stat template '%s'"), template_path->buf);
 
                if (S_ISDIR(st_template.st_mode)) {
-                       DIR *subdir = opendir(template->buf);
+                       DIR *subdir = opendir(template_path->buf);
                        if (!subdir)
-                               die_errno(_("cannot opendir '%s'"), template->buf);
+                               die_errno(_("cannot opendir '%s'"), template_path->buf);
                        strbuf_addch(path, '/');
-                       strbuf_addch(template, '/');
-                       copy_templates_1(path, template, subdir);
+                       strbuf_addch(template_path, '/');
+                       copy_templates_1(path, template_path, subdir);
                        closedir(subdir);
                }
                else if (exists)
                        continue;
                else if (S_ISLNK(st_template.st_mode)) {
                        struct strbuf lnk = STRBUF_INIT;
-                       if (strbuf_readlink(&lnk, template->buf, 0) < 0)
-                               die_errno(_("cannot readlink '%s'"), template->buf);
+                       if (strbuf_readlink(&lnk, template_path->buf, 0) < 0)
+                               die_errno(_("cannot readlink '%s'"), template_path->buf);
                        if (symlink(lnk.buf, path->buf))
                                die_errno(_("cannot symlink '%s' '%s'"),
                                          lnk.buf, path->buf);
                        strbuf_release(&lnk);
                }
                else if (S_ISREG(st_template.st_mode)) {
-                       if (copy_file(path->buf, template->buf, st_template.st_mode))
+                       if (copy_file(path->buf, template_path->buf, st_template.st_mode))
                                die_errno(_("cannot copy '%s' to '%s'"),
-                                         template->buf, path->buf);
+                                         template_path->buf, path->buf);
                }
                else
-                       error(_("ignoring template %s"), template->buf);
+                       error(_("ignoring template %s"), template_path->buf);
        }
 }
 
index 14fdf39165d20602ad8b682b55660ec2466c1d20..71f68a3e4f59d987c653fb8f32a4d8770e9c6654 100644 (file)
@@ -29,6 +29,8 @@
 #include "gpg-interface.h"
 #include "progress.h"
 
+#define MAIL_DEFAULT_WRAP 72
+
 /* Set a default date-time format for git log ("log.date" config variable) */
 static const char *default_date_mode = NULL;
 
@@ -188,8 +190,8 @@ static void cmd_log_init_finish(int argc, const char **argv, const char *prefix,
        if (rev->show_notes)
                init_display_notes(&rev->notes_opt);
 
-       if (rev->diffopt.pickaxe || rev->diffopt.filter ||
-           rev->diffopt.flags.follow_renames)
+       if ((rev->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) ||
+           rev->diffopt.filter || rev->diffopt.flags.follow_renames)
                rev->always_show_header = 0;
 
        if (source)
@@ -516,7 +518,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
 {
        unsigned long size;
        enum object_type type;
-       char *buf = read_sha1_file(oid->hash, &type, &size);
+       char *buf = read_object_file(oid, &type, &size);
        int offset = 0;
 
        if (!buf)
@@ -539,7 +541,7 @@ static int show_tag_object(const struct object_id *oid, struct rev_info *rev)
        return 0;
 }
 
-static int show_tree_object(const unsigned char *sha1,
+static int show_tree_object(const struct object_id *oid,
                struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
@@ -1044,7 +1046,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
 
        shortlog_init(&log);
        log.wrap_lines = 1;
-       log.wrap = 72;
+       log.wrap = MAIL_DEFAULT_WRAP;
        log.in1 = 2;
        log.in2 = 4;
        log.file = rev->diffopt.file;
@@ -1061,6 +1063,7 @@ static void make_cover_letter(struct rev_info *rev, int use_stdout,
 
        memcpy(&opts, &rev->diffopt, sizeof(opts));
        opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+       opts.stat_width = MAIL_DEFAULT_WRAP;
 
        diff_setup_done(&opts);
 
@@ -1614,6 +1617,8 @@ int cmd_format_patch(int argc, const char **argv, const char *prefix)
                (!rev.diffopt.output_format ||
                 rev.diffopt.output_format == DIFF_FORMAT_PATCH))
                rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY;
+       if (!rev.diffopt.stat_width)
+               rev.diffopt.stat_width = MAIL_DEFAULT_WRAP;
 
        /* Always generate a patch */
        rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
@@ -1868,12 +1873,12 @@ static void print_commit(char sign, struct commit *commit, int verbose,
 {
        if (!verbose) {
                fprintf(file, "%c %s\n", sign,
-                      find_unique_abbrev(commit->object.oid.hash, abbrev));
+                      find_unique_abbrev(&commit->object.oid, abbrev));
        } else {
                struct strbuf buf = STRBUF_INIT;
                pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
                fprintf(file, "%c %s %s\n", sign,
-                      find_unique_abbrev(commit->object.oid.hash, abbrev),
+                      find_unique_abbrev(&commit->object.oid, abbrev),
                       buf.buf);
                strbuf_release(&buf);
        }
index 2fc836e33086d5b70b86a3fc746f9e75706ef7ea..a71f6bd088a2666f0637463e1c168171dd319a96 100644 (file)
@@ -240,7 +240,7 @@ static void show_ce(struct repository *repo, struct dir_struct *dir,
                        printf("%s%06o %s %d\t",
                               tag,
                               ce->ce_mode,
-                              find_unique_abbrev(ce->oid.hash, abbrev),
+                              find_unique_abbrev(&ce->oid, abbrev),
                               ce_stage(ce));
                }
                write_eolinfo(repo->index, ce, fullname);
@@ -271,7 +271,7 @@ static void show_ru_info(const struct index_state *istate)
                        if (!ui->mode[i])
                                continue;
                        printf("%s%06o %s %d\t", tag_resolve_undo, ui->mode[i],
-                              find_unique_abbrev(ui->sha1[i], abbrev),
+                              find_unique_abbrev(&ui->oid[i], abbrev),
                               i + 1);
                        write_name(path);
                }
index c4be98ab9e84fdcde2842b88d4bb60600f7bf627..540d56429f5cec4ace8655dd9a870089fc872d2c 100644 (file)
@@ -60,8 +60,9 @@ int cmd_ls_remote(int argc, const char **argv, const char *prefix)
                OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
                OPT_BOOL(0, "get-url", &get_url,
                         N_("take url.<base>.insteadOf into account")),
-               OPT_SET_INT(0, "exit-code", &status,
-                           N_("exit with exit code 2 if no matching refs are found"), 2),
+               OPT_SET_INT_F(0, "exit-code", &status,
+                             N_("exit with exit code 2 if no matching refs are found"),
+                             2, PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "symref", &show_symref_target,
                         N_("show underlying ref in addition to the object pointed by it")),
                OPT_END()
index ef965408e8fc5d80fa9e9daf0264a91abccd978c..d44b4f9c27d31cfa331ccef7ae538d8a6e6c38c6 100644 (file)
@@ -60,7 +60,7 @@ static int show_recursive(const char *base, int baselen, const char *pathname)
        return 0;
 }
 
-static int show_tree(const unsigned char *sha1, struct strbuf *base,
+static int show_tree(const struct object_id *oid, struct strbuf *base,
                const char *pathname, unsigned mode, int stage, void *context)
 {
        int retval = 0;
@@ -94,7 +94,7 @@ static int show_tree(const unsigned char *sha1, struct strbuf *base,
                        char size_text[24];
                        if (!strcmp(type, blob_type)) {
                                unsigned long size;
-                               if (sha1_object_info(sha1, &size) == OBJ_BAD)
+                               if (oid_object_info(oid, &size) == OBJ_BAD)
                                        xsnprintf(size_text, sizeof(size_text),
                                                  "BAD");
                                else
@@ -103,11 +103,11 @@ static int show_tree(const unsigned char *sha1, struct strbuf *base,
                        } else
                                xsnprintf(size_text, sizeof(size_text), "-");
                        printf("%06o %s %s %7s\t", mode, type,
-                              find_unique_abbrev(sha1, abbrev),
+                              find_unique_abbrev(oid, abbrev),
                               size_text);
                } else
                        printf("%06o %s %s\t", mode, type,
-                              find_unique_abbrev(sha1, abbrev));
+                              find_unique_abbrev(oid, abbrev));
        }
        baselen = base->len;
        strbuf_addstr(base, pathname);
index d01ddecf6602eabdca97a175e5c2a57bf1257865..32736e0b1011f575d2585fd2e012501d807f9e9b 100644 (file)
@@ -60,7 +60,7 @@ static void *result(struct merge_list *entry, unsigned long *size)
        const char *path = entry->path;
 
        if (!entry->stage)
-               return read_sha1_file(entry->blob->object.oid.hash, &type, size);
+               return read_object_file(&entry->blob->object.oid, &type, size);
        base = NULL;
        if (entry->stage == 1) {
                base = entry->blob;
@@ -82,7 +82,8 @@ static void *origin(struct merge_list *entry, unsigned long *size)
        enum object_type type;
        while (entry) {
                if (entry->stage == 2)
-                       return read_sha1_file(entry->blob->object.oid.hash, &type, size);
+                       return read_object_file(&entry->blob->object.oid,
+                                               &type, size);
                entry = entry->link;
        }
        return NULL;
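
The merge-tree hunks above are part of a tree-wide switch from read_sha1_file(oid.hash, ...) to read_object_file(&oid, ...): callers now hand over a struct object_id pointer instead of a bare hash buffer. Below is a minimal sketch of the new calling convention; it assumes git's in-tree headers and is illustrative only, not part of the patch.

        /* Sketch: read an object by object_id and report its type and size. */
        static void show_object_size(const struct object_id *oid)
        {
                enum object_type type;
                unsigned long size;
                void *buf = read_object_file(oid, &type, &size); /* was read_sha1_file(oid->hash, ...) */

                if (!buf)
                        die("unable to read %s", oid_to_hex(oid));
                printf("%s %s %lu\n", oid_to_hex(oid), type_name(type), size);
                free(buf);
        }
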
index 30264cfd7c1720aee814699eb3ca057e292b082d..8746c5e3e867d79c85728974825ebca4b07a2e9a 100644 (file)
@@ -33,6 +33,7 @@
 #include "sequencer.h"
 #include "string-list.h"
 #include "packfile.h"
+#include "tag.h"
 
 #define DEFAULT_TWOHEAD (1<<0)
 #define DEFAULT_OCTOPUS (1<<1)
@@ -520,7 +521,7 @@ static void merge_name(const char *remote, struct strbuf *msg)
                if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
                        strbuf_addf(msg, "%s\t\t%s '%s'\n",
                                    oid_to_hex(&desc->obj->oid),
-                                   typename(desc->obj->type),
+                                   type_name(desc->obj->type),
                                    remote);
                        goto cleanup;
                }
@@ -638,7 +639,7 @@ static int read_tree_trivial(struct object_id *common, struct object_id *head,
 
 static void write_tree_trivial(struct object_id *oid)
 {
-       if (write_cache_as_tree(oid->hash, 0, NULL))
+       if (write_cache_as_tree(oid, 0, NULL))
                die(_("git write-tree failed to write a tree"));
 }
 
@@ -651,10 +652,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
 
        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        refresh_cache(REFRESH_QUIET);
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return error(_("Unable to write index."));
-       rollback_lock_file(&lock);
 
        if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree")) {
                int clean, x;
@@ -691,10 +691,9 @@ static int try_merge_strategy(const char *strategy, struct commit_list *common,
                                remoteheads->item, reversed, &result);
                if (clean < 0)
                        exit(128);
-               if (active_cache_changed &&
-                   write_locked_index(&the_index, &lock, COMMIT_LOCK))
+               if (write_locked_index(&the_index, &lock,
+                                      COMMIT_LOCK | SKIP_IF_UNCHANGED))
                        die (_("unable to write %s"), get_index_file());
-               rollback_lock_file(&lock);
                return clean ? 0 : 1;
        } else {
                return try_merge_command(strategy, xopts_nr, xopts,
@@ -810,18 +809,17 @@ static int merge_trivial(struct commit *head, struct commit_list *remoteheads)
 
        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        refresh_cache(REFRESH_QUIET);
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return error(_("Unable to write index."));
-       rollback_lock_file(&lock);
 
        write_tree_trivial(&result_tree);
        printf(_("Wonderful.\n"));
        pptr = commit_list_append(head, pptr);
        pptr = commit_list_append(remoteheads->item, pptr);
        prepare_to_commit(remoteheads);
-       if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents,
-                       result_commit.hash, NULL, sign_commit))
+       if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+                       &result_commit, NULL, sign_commit))
                die(_("failed to write commit object"));
        finish(head, remoteheads, &result_commit, "In-index merge");
        drop_save();
@@ -845,8 +843,8 @@ static int finish_automerge(struct commit *head,
                commit_list_insert(head, &parents);
        strbuf_addch(&merge_msg, '\n');
        prepare_to_commit(remoteheads);
-       if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents,
-                       result_commit.hash, NULL, sign_commit))
+       if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+                       &result_commit, NULL, sign_commit))
                die(_("failed to write commit object"));
        strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
        finish(head, remoteheads, &result_commit, buf.buf);
@@ -1125,6 +1123,43 @@ static struct commit_list *collect_parents(struct commit *head_commit,
        return remoteheads;
 }
 
+static int merging_a_throwaway_tag(struct commit *commit)
+{
+       char *tag_ref;
+       struct object_id oid;
+       int is_throwaway_tag = 0;
+
+       /* Are we merging a tag? */
+       if (!merge_remote_util(commit) ||
+           !merge_remote_util(commit)->obj ||
+           merge_remote_util(commit)->obj->type != OBJ_TAG)
+               return is_throwaway_tag;
+
+       /*
+        * Now we know we are merging a tag object.  Are we downstream
+        * and following the tags from upstream?  If so, we must have
+        * the tag object pointed at by "refs/tags/$T" where $T is the
+        * tagname recorded in the tag object.  We want to allow such
+        * a "just to catch up" merge to fast-forward.
+        *
+        * Otherwise, we are playing an integrator's role, making a
+        * merge with a throw-away tag from a contributor with
+        * something like "git pull $contributor $signed_tag".
+        * We want to forbid such a merge from fast-forwarding
+        * by default; otherwise we would not keep the signature
+        * anywhere.
+        */
+       tag_ref = xstrfmt("refs/tags/%s",
+                         ((struct tag *)merge_remote_util(commit)->obj)->tag);
+       if (!read_ref(tag_ref, &oid) &&
+           !oidcmp(&oid, &merge_remote_util(commit)->obj->oid))
+               is_throwaway_tag = 0;
+       else
+               is_throwaway_tag = 1;
+       free(tag_ref);
+       return is_throwaway_tag;
+}
+
 int cmd_merge(int argc, const char **argv, const char *prefix)
 {
        struct object_id result_tree, stash, head_oid;
@@ -1289,7 +1324,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
 
                        check_commit_signature(commit, &signature_check);
 
-                       find_unique_abbrev_r(hex, commit->object.oid.hash, DEFAULT_ABBREV);
+                       find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV);
                        switch (signature_check.result) {
                        case 'G':
                                break;
@@ -1322,10 +1357,7 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                            oid_to_hex(&commit->object.oid));
                setenv(buf.buf, merge_remote_util(commit)->name, 1);
                strbuf_reset(&buf);
-               if (fast_forward != FF_ONLY &&
-                   merge_remote_util(commit) &&
-                   merge_remote_util(commit)->obj &&
-                   merge_remote_util(commit)->obj->type == OBJ_TAG)
+               if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit))
                        fast_forward = FF_NO;
        }
 
@@ -1385,9 +1417,9 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
 
                if (verbosity >= 0) {
                        printf(_("Updating %s..%s\n"),
-                              find_unique_abbrev(head_commit->object.oid.hash,
+                              find_unique_abbrev(&head_commit->object.oid,
                                                  DEFAULT_ABBREV),
-                              find_unique_abbrev(remoteheads->item->object.oid.hash,
+                              find_unique_abbrev(&remoteheads->item->object.oid,
                                                  DEFAULT_ABBREV));
                }
                strbuf_addstr(&msg, "Fast-forward");
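
The builtin/merge.c hunks above drop the hand-rolled "write the index only if it changed, then roll the lock back" sequence in favour of a single write_locked_index() call carrying SKIP_IF_UNCHANGED. A hedged before/after sketch of the idiom, using the same identifiers as the diff:

        /* before: the caller checks for changes and releases the lock itself */
        if (active_cache_changed &&
            write_locked_index(&the_index, &lock, COMMIT_LOCK))
                return error(_("Unable to write index."));
        rollback_lock_file(&lock);

        /* after: with SKIP_IF_UNCHANGED, write_locked_index() skips the write
         * and releases the lock on its own when the in-core index is unchanged */
        if (write_locked_index(&the_index, &lock,
                               COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return error(_("Unable to write index."));
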
index 031b750f068dea5e1652129a5df6d684f6bca850..9f5a50a8fd5b0b3acf664df8d20ae5bdee288fe8 100644 (file)
 /*
  * We refuse to tag something we can't verify. Just because.
  */
-static int verify_object(const unsigned char *sha1, const char *expected_type)
+static int verify_object(const struct object_id *oid, const char *expected_type)
 {
        int ret = -1;
        enum object_type type;
        unsigned long size;
-       void *buffer = read_sha1_file(sha1, &type, &size);
-       const unsigned char *repl = lookup_replace_object(sha1);
+       void *buffer = read_object_file(oid, &type, &size);
+       const struct object_id *repl = lookup_replace_object(oid);
 
        if (buffer) {
                if (type == type_from_string(expected_type))
-                       ret = check_sha1_signature(repl, buffer, size, expected_type);
+                       ret = check_object_signature(repl, buffer, size, expected_type);
                free(buffer);
        }
        return ret;
@@ -38,8 +38,8 @@ static int verify_tag(char *buffer, unsigned long size)
 {
        int typelen;
        char type[20];
-       unsigned char sha1[20];
-       const char *object, *type_line, *tag_line, *tagger_line, *lb, *rb;
+       struct object_id oid;
+       const char *object, *type_line, *tag_line, *tagger_line, *lb, *rb, *p;
        size_t len;
 
        if (size < 84)
@@ -52,11 +52,11 @@ static int verify_tag(char *buffer, unsigned long size)
        if (memcmp(object, "object ", 7))
                return error("char%d: does not start with \"object \"", 0);
 
-       if (get_sha1_hex(object + 7, sha1))
+       if (parse_oid_hex(object + 7, &oid, &p))
                return error("char%d: could not get SHA1 hash", 7);
 
        /* Verify type line */
-       type_line = object + 48;
+       type_line = p + 1;
        if (memcmp(type_line - 1, "\ntype ", 6))
                return error("char%d: could not find \"\\ntype \"", 47);
 
@@ -80,8 +80,8 @@ static int verify_tag(char *buffer, unsigned long size)
        type[typelen] = 0;
 
        /* Verify that the object matches */
-       if (verify_object(sha1, type))
-               return error("char%d: could not verify object %s", 7, sha1_to_hex(sha1));
+       if (verify_object(&oid, type))
+               return error("char%d: could not verify object %s", 7, oid_to_hex(&oid));
 
        /* Verify the tag-name: we don't allow control characters or spaces in it */
        tag_line += 4;
@@ -151,7 +151,7 @@ static int verify_tag(char *buffer, unsigned long size)
 int cmd_mktag(int argc, const char **argv, const char *prefix)
 {
        struct strbuf buf = STRBUF_INIT;
-       unsigned char result_sha1[20];
+       struct object_id result;
 
        if (argc != 1)
                usage("git mktag");
@@ -165,10 +165,10 @@ int cmd_mktag(int argc, const char **argv, const char *prefix)
        if (verify_tag(buf.buf, buf.len) < 0)
                die("invalid tag signature file");
 
-       if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0)
+       if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0)
                die("unable to write tag file");
 
        strbuf_release(&buf);
-       printf("%s\n", sha1_to_hex(result_sha1));
+       printf("%s\n", oid_to_hex(&result));
        return 0;
 }
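
In the mktag.c hunk above, parse_oid_hex() replaces get_sha1_hex(); besides filling in the object_id, it reports through its last argument where the hex digits end, which is why the "type" header can be located at p + 1 without hard-coding a 40-character hash. A small usage sketch follows, with a hypothetical buffer and git's in-tree API assumed:

        const char *buf = "object 963b4ef6d0e1c5e7dff9eaa8861d1b1e3b61e1d3\ntype commit\n";
        struct object_id oid;
        const char *p;

        if (parse_oid_hex(buf + 7, &oid, &p))
                die("could not parse object id");
        /* p points at the '\n' terminating the hex, so the next header
         * starts at p + 1 regardless of the hash length */
        if (memcmp(p + 1, "type ", 5))
                die("missing \"type\" header");
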
index da0fd8cd706659a8784da8112cd1b3acd306375f..263c530315a4fe435a88f2042c768350f72e837b 100644 (file)
 
 static struct treeent {
        unsigned mode;
-       unsigned char sha1[20];
+       struct object_id oid;
        int len;
        char name[FLEX_ARRAY];
 } **entries;
 static int alloc, used;
 
-static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
+static void append_to_tree(unsigned mode, struct object_id *oid, char *path)
 {
        struct treeent *ent;
        size_t len = strlen(path);
@@ -26,7 +26,7 @@ static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
        FLEX_ALLOC_MEM(ent, name, path, len);
        ent->mode = mode;
        ent->len = len;
-       hashcpy(ent->sha1, sha1);
+       oidcpy(&ent->oid, oid);
 
        ALLOC_GROW(entries, used + 1, alloc);
        entries[used++] = ent;
@@ -40,7 +40,7 @@ static int ent_compare(const void *a_, const void *b_)
                                 b->name, b->len, b->mode);
 }
 
-static void write_tree(unsigned char *sha1)
+static void write_tree(struct object_id *oid)
 {
        struct strbuf buf;
        size_t size;
@@ -54,10 +54,10 @@ static void write_tree(unsigned char *sha1)
        for (i = 0; i < used; i++) {
                struct treeent *ent = entries[i];
                strbuf_addf(&buf, "%o %s%c", ent->mode, ent->name, '\0');
-               strbuf_add(&buf, ent->sha1, 20);
+               strbuf_add(&buf, ent->oid.hash, the_hash_algo->rawsz);
        }
 
-       write_sha1_file(buf.buf, buf.len, tree_type, sha1);
+       write_object_file(buf.buf, buf.len, tree_type, oid);
        strbuf_release(&buf);
 }
 
@@ -69,11 +69,12 @@ static const char *mktree_usage[] = {
 static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_missing)
 {
        char *ptr, *ntr;
+       const char *p;
        unsigned mode;
        enum object_type mode_type; /* object type derived from mode */
        enum object_type obj_type; /* object type derived from sha */
        char *path, *to_free = NULL;
-       unsigned char sha1[20];
+       struct object_id oid;
 
        ptr = buf;
        /*
@@ -85,9 +86,8 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
                die("input format error: %s", buf);
        ptr = ntr + 1; /* type */
        ntr = strchr(ptr, ' ');
-       if (!ntr || buf + len <= ntr + 40 ||
-           ntr[41] != '\t' ||
-           get_sha1_hex(ntr + 1, sha1))
+       if (!ntr || parse_oid_hex(ntr + 1, &oid, &p) ||
+           *p != '\t')
                die("input format error: %s", buf);
 
        /* It is perfectly normal if we do not have a commit from a submodule */
@@ -112,16 +112,16 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
        mode_type = object_type(mode);
        if (mode_type != type_from_string(ptr)) {
                die("entry '%s' object type (%s) doesn't match mode type (%s)",
-                       path, ptr, typename(mode_type));
+                       path, ptr, type_name(mode_type));
        }
 
        /* Check the type of object identified by sha1 */
-       obj_type = sha1_object_info(sha1, NULL);
+       obj_type = oid_object_info(&oid, NULL);
        if (obj_type < 0) {
                if (allow_missing) {
                        ; /* no problem - missing objects are presumed to be of the right type */
                } else {
-                       die("entry '%s' object %s is unavailable", path, sha1_to_hex(sha1));
+                       die("entry '%s' object %s is unavailable", path, oid_to_hex(&oid));
                }
        } else {
                if (obj_type != mode_type) {
@@ -131,18 +131,18 @@ static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_miss
                         * because the new tree entry will never be correct.
                         */
                        die("entry '%s' object %s is a %s but specified type was (%s)",
-                               path, sha1_to_hex(sha1), typename(obj_type), typename(mode_type));
+                               path, oid_to_hex(&oid), type_name(obj_type), type_name(mode_type));
                }
        }
 
-       append_to_tree(mode, sha1, path);
+       append_to_tree(mode, &oid, path);
        free(to_free);
 }
 
 int cmd_mktree(int ac, const char **av, const char *prefix)
 {
        struct strbuf sb = STRBUF_INIT;
-       unsigned char sha1[20];
+       struct object_id oid;
        int nul_term_line = 0;
        int allow_missing = 0;
        int is_batch_mode = 0;
@@ -181,8 +181,8 @@ int cmd_mktree(int ac, const char **av, const char *prefix)
                         */
                        ; /* skip creating an empty tree */
                } else {
-                       write_tree(sha1);
-                       puts(sha1_to_hex(sha1));
+                       write_tree(&oid);
+                       puts(oid_to_hex(&oid));
                        fflush(stdout);
                }
                used=0; /* reset tree entry buffer for re-use in batch mode */
index 8ce6a2ddd4c5008c9d32d45dc21e11d97ff8b79d..6d141f7a532c08e52f1f5f82330d046c60073f93 100644 (file)
@@ -122,7 +122,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
        struct option builtin_mv_options[] = {
                OPT__VERBOSE(&verbose, N_("be verbose")),
                OPT__DRY_RUN(&show_only, N_("dry run")),
-               OPT__FORCE(&force, N_("force move/rename even if target exists")),
+               OPT__FORCE(&force, N_("force move/rename even if target exists"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")),
                OPT_END(),
        };
@@ -292,8 +293,8 @@ int cmd_mv(int argc, const char **argv, const char *prefix)
        if (gitmodules_modified)
                stage_updated_gitmodules(&the_index);
 
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock_file,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                die(_("Unable to write new index file"));
 
        return 0;
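
OPT__FORCE here, like OPT_SET_INT_F and OPT_BOOL_F in the ls-remote and push hunks, now takes an extra parse-options flag. Passing PARSE_OPT_NOCOMPLETE lets the completion machinery leave a destructive option out of its suggestions, while passing 0, as the pull hunk further down does for --force, keeps the option completable. A hedged sketch of an options array using the new signatures; the variables are made up for illustration:

        static int force, thin;
        static struct option opts[] = {
                OPT__FORCE(&force, N_("force move/rename even if target exists"),
                           PARSE_OPT_NOCOMPLETE),          /* hidden from completion */
                OPT_BOOL_F(0, "thin", &thin, N_("use thin pack"),
                           PARSE_OPT_NOCOMPLETE),          /* likewise hidden */
                OPT_END(),
        };
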
index 9e088ebd11dced248640df9e17adbbd8b9a73ffb..387ddf85d21a443f060dbb212fe95a983d9e4f58 100644 (file)
@@ -328,7 +328,7 @@ static void show_name(const struct object *obj,
        else if (allow_undefined)
                printf("undefined\n");
        else if (always)
-               printf("%s\n", find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+               printf("%s\n", find_unique_abbrev(oid, DEFAULT_ABBREV));
        else
                die("cannot describe '%s'", oid_to_hex(oid));
        strbuf_release(&buf);
index 7c8176164561be1d57771ad2fdeb9260cc1d8c87..921e08d5bf545ac4124933459d257d95e2144409 100644 (file)
@@ -118,11 +118,11 @@ static int list_each_note(const struct object_id *object_oid,
        return 0;
 }
 
-static void copy_obj_to_fd(int fd, const unsigned char *sha1)
+static void copy_obj_to_fd(int fd, const struct object_id *oid)
 {
        unsigned long size;
        enum object_type type;
-       char *buf = read_sha1_file(sha1, &type, &size);
+       char *buf = read_object_file(oid, &type, &size);
        if (buf) {
                if (size)
                        write_or_die(fd, buf, size);
@@ -162,7 +162,7 @@ static void write_commented_object(int fd, const struct object_id *object)
 }
 
 static void prepare_note_data(const struct object_id *object, struct note_data *d,
-               const unsigned char *old_note)
+               const struct object_id *old_note)
 {
        if (d->use_editor || !d->given) {
                int fd;
@@ -198,9 +198,9 @@ static void prepare_note_data(const struct object_id *object, struct note_data *
        }
 }
 
-static void write_note_data(struct note_data *d, unsigned char *sha1)
+static void write_note_data(struct note_data *d, struct object_id *oid)
 {
-       if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) {
+       if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) {
                error(_("unable to write note object"));
                if (d->edit_path)
                        error(_("the note contents have been left in %s"),
@@ -253,7 +253,7 @@ static int parse_reuse_arg(const struct option *opt, const char *arg, int unset)
 
        if (get_oid(arg, &object))
                die(_("failed to resolve '%s' as a valid ref."), arg);
-       if (!(buf = read_sha1_file(object.hash, &type, &len))) {
+       if (!(buf = read_object_file(&object, &type, &len))) {
                free(buf);
                die(_("failed to read object '%s'."), arg);
        }
@@ -413,7 +413,7 @@ static int add(int argc, const char **argv, const char *prefix)
                        parse_reuse_arg},
                OPT_BOOL(0, "allow-empty", &allow_empty,
                        N_("allow storing empty note")),
-               OPT__FORCE(&force, N_("replace existing notes")),
+               OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
                OPT_END()
        };
 
@@ -457,9 +457,9 @@ static int add(int argc, const char **argv, const char *prefix)
                        oid_to_hex(&object));
        }
 
-       prepare_note_data(&object, &d, note ? note->hash : NULL);
+       prepare_note_data(&object, &d, note);
        if (d.buf.len || allow_empty) {
-               write_note_data(&d, new_note.hash);
+               write_note_data(&d, &new_note);
                if (add_note(t, &object, &new_note, combine_notes_overwrite))
                        die("BUG: combine_notes_overwrite failed");
                commit_notes(t, "Notes added by 'git notes add'");
@@ -484,7 +484,7 @@ static int copy(int argc, const char **argv, const char *prefix)
        struct notes_tree *t;
        const char *rewrite_cmd = NULL;
        struct option options[] = {
-               OPT__FORCE(&force, N_("replace existing notes")),
+               OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "stdin", &from_stdin, N_("read objects from stdin")),
                OPT_STRING(0, "for-rewrite", &rewrite_cmd, N_("command"),
                           N_("load rewriting config for <command> (implies "
@@ -602,13 +602,13 @@ static int append_edit(int argc, const char **argv, const char *prefix)
        t = init_notes_check(argv[0], NOTES_INIT_WRITABLE);
        note = get_note(t, &object);
 
-       prepare_note_data(&object, &d, edit && note ? note->hash : NULL);
+       prepare_note_data(&object, &d, edit && note ? note : NULL);
 
        if (note && !edit) {
                /* Append buf to previous note contents */
                unsigned long size;
                enum object_type type;
-               char *prev_buf = read_sha1_file(note->hash, &type, &size);
+               char *prev_buf = read_object_file(note, &type, &size);
 
                strbuf_grow(&d.buf, size + 1);
                if (d.buf.len && prev_buf && size)
@@ -619,7 +619,7 @@ static int append_edit(int argc, const char **argv, const char *prefix)
        }
 
        if (d.buf.len || allow_empty) {
-               write_note_data(&d, new_note.hash);
+               write_note_data(&d, &new_note);
                if (add_note(t, &object, &new_note, combine_notes_overwrite))
                        die("BUG: combine_notes_overwrite failed");
                logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
index 6b9cfc289d87b543b747c4024504703a2e0d6641..e7e673266e8e2e6fcbe7723c851c6d216eecb84c 100644 (file)
@@ -26,7 +26,7 @@
 #include "reachable.h"
 #include "sha1-array.h"
 #include "argv-array.h"
-#include "mru.h"
+#include "list.h"
 #include "packfile.h"
 
 static const char *pack_usage[] = {
@@ -75,6 +75,8 @@ static int use_bitmap_index = -1;
 static int write_bitmap_index;
 static uint16_t write_bitmap_options;
 
+static int exclude_promisor_objects;
+
 static unsigned long delta_cache_size = 0;
 static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
 static unsigned long cache_max_small_delta_size = 1000;
@@ -84,8 +86,9 @@ static unsigned long window_memory_limit = 0;
 static struct list_objects_filter_options filter_options;
 
 enum missing_action {
-       MA_ERROR = 0,    /* fail if any missing objects are encountered */
-       MA_ALLOW_ANY,    /* silently allow ALL missing objects */
+       MA_ERROR = 0,      /* fail if any missing objects are encountered */
+       MA_ALLOW_ANY,      /* silently allow ALL missing objects */
+       MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
 };
 static enum missing_action arg_missing_action;
 static show_object_fn fn_show_object;
@@ -119,11 +122,10 @@ static void *get_delta(struct object_entry *entry)
        void *buf, *base_buf, *delta_buf;
        enum object_type type;
 
-       buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
+       buf = read_object_file(&entry->idx.oid, &type, &size);
        if (!buf)
                die("unable to read %s", oid_to_hex(&entry->idx.oid));
-       base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
-                                 &base_size);
+       base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
        if (!base_buf)
                die("unable to read %s",
                    oid_to_hex(&entry->delta->idx.oid));
@@ -161,7 +163,7 @@ static unsigned long do_compress(void **pptr, unsigned long size)
        return stream.total_out;
 }
 
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
                                           const struct object_id *oid)
 {
        git_zstream stream;
@@ -185,7 +187,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
                        stream.next_out = obuf;
                        stream.avail_out = sizeof(obuf);
                        zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
-                       sha1write(f, obuf, stream.next_out - obuf);
+                       hashwrite(f, obuf, stream.next_out - obuf);
                        olen += stream.next_out - obuf;
                }
                if (stream.avail_in)
@@ -230,7 +232,7 @@ static int check_pack_inflate(struct packed_git *p,
                stream.total_in == len) ? 0 : -1;
 }
 
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
                struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
@@ -243,14 +245,14 @@ static void copy_pack_data(struct sha1file *f,
                in = use_pack(p, w_curs, offset, &avail);
                if (avail > len)
                        avail = (unsigned long)len;
-               sha1write(f, in, avail);
+               hashwrite(f, in, avail);
                offset += avail;
                len -= avail;
        }
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
                                           unsigned long limit, int usable_delta)
 {
        unsigned long size, datalen;
@@ -264,11 +266,10 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
        if (!usable_delta) {
                if (entry->type == OBJ_BLOB &&
                    entry->size > big_file_threshold &&
-                   (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
+                   (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
-                       buf = read_sha1_file(entry->idx.oid.hash, &type,
-                                            &size);
+                       buf = read_object_file(&entry->idx.oid, &type, &size);
                        if (!buf)
                                die(_("unable to read %s"),
                                    oid_to_hex(&entry->idx.oid));
@@ -323,8 +324,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, dheader + pos, sizeof(dheader) - pos);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
        } else if (type == OBJ_REF_DELTA) {
                /*
@@ -337,8 +338,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
@@ -347,13 +348,13 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
+               hashwrite(f, header, hdrlen);
        }
        if (st) {
                datalen = write_large_blob_data(st, f, &entry->idx.oid);
                close_istream(st);
        } else {
-               sha1write(f, buf, datalen);
+               hashwrite(f, buf, datalen);
                free(buf);
        }
 
@@ -361,7 +362,7 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
 {
        struct packed_git *p = entry->in_pack;
@@ -412,8 +413,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, dheader + pos, sizeof(dheader) - pos);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
                reused_delta++;
        } else if (type == OBJ_REF_DELTA) {
@@ -421,8 +422,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
@@ -430,7 +431,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
+               hashwrite(f, header, hdrlen);
        }
        copy_pack_data(f, p, &w_curs, offset, datalen);
        unuse_pack(&w_curs);
@@ -439,7 +440,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
                          struct object_entry *entry,
                          off_t write_offset)
 {
@@ -512,7 +513,7 @@ enum write_one_status {
        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
 };
 
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
                                       struct object_entry *e,
                                       off_t *offset)
 {
@@ -731,7 +732,7 @@ static struct object_entry **compute_write_order(void)
        return wo;
 }
 
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
 {
        unsigned char buffer[8192];
        off_t to_write, total;
@@ -762,7 +763,7 @@ static off_t write_reused_pack(struct sha1file *f)
                if (read_pack > to_write)
                        read_pack = to_write;
 
-               sha1write(f, buffer, read_pack);
+               hashwrite(f, buffer, read_pack);
                to_write -= read_pack;
 
                /*
@@ -791,7 +792,7 @@ static const char no_split_warning[] = N_(
 static void write_pack_file(void)
 {
        uint32_t i = 0, j;
-       struct sha1file *f;
+       struct hashfile *f;
        off_t offset;
        uint32_t nr_remaining = nr_result;
        time_t last_mtime = 0;
@@ -807,7 +808,7 @@ static void write_pack_file(void)
                char *pack_tmp_name = NULL;
 
                if (pack_to_stdout)
-                       f = sha1fd_throughput(1, "<stdout>", progress_state);
+                       f = hashfd_throughput(1, "<stdout>", progress_state);
                else
                        f = create_tmp_packfile(&pack_tmp_name);
 
@@ -834,11 +835,11 @@ static void write_pack_file(void)
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
-                       sha1close(f, oid.hash, CSUM_CLOSE);
+                       hashclose(f, oid.hash, CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
-                       sha1close(f, oid.hash, CSUM_FSYNC);
+                       hashclose(f, oid.hash, CSUM_FSYNC);
                } else {
-                       int fd = sha1close(f, oid.hash, 0);
+                       int fd = hashclose(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
@@ -1006,8 +1007,8 @@ static int want_object_in_pack(const struct object_id *oid,
                               struct packed_git **found_pack,
                               off_t *found_offset)
 {
-       struct mru_entry *entry;
        int want;
+       struct list_head *pos;
 
        if (!exclude && local && has_loose_object_nonlocal(oid->hash))
                return 0;
@@ -1023,8 +1024,8 @@ static int want_object_in_pack(const struct object_id *oid,
                        return want;
        }
 
-       for (entry = packed_git_mru.head; entry; entry = entry->next) {
-               struct packed_git *p = entry->item;
+       list_for_each(pos, &packed_git_mru) {
+               struct packed_git *p = list_entry(pos, struct packed_git, mru);
                off_t offset;
 
                if (p == *found_pack)
@@ -1041,7 +1042,7 @@ static int want_object_in_pack(const struct object_id *oid,
                        }
                        want = want_found_object(exclude, p);
                        if (!exclude && want > 0)
-                               mru_mark(&packed_git_mru, entry);
+                               list_move(&p->mru, &packed_git_mru);
                        if (want != -1)
                                return want;
                }
@@ -1187,7 +1188,7 @@ static struct pbase_tree_cache *pbase_tree_get(const struct object_id *oid)
        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
-       data = read_sha1_file(oid->hash, &type, &size);
+       data = read_object_file(oid, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
@@ -1348,7 +1349,7 @@ static void add_preferred_base(struct object_id *oid)
        if (window <= num_preferred_base++)
                return;
 
-       data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
+       data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
        if (!data)
                return;
 
@@ -1376,10 +1377,10 @@ static void cleanup_preferred_base(void)
        it = pbase_tree;
        pbase_tree = NULL;
        while (it) {
-               struct pbase_tree *this = it;
-               it = this->next;
-               free(this->pcache.tree_data);
-               free(this);
+               struct pbase_tree *tmp = it;
+               it = tmp->next;
+               free(tmp->pcache.tree_data);
+               free(tmp);
        }
 
        for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
@@ -1513,7 +1514,7 @@ static void check_object(struct object_entry *entry)
                unuse_pack(&w_curs);
        }
 
-       entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
+       entry->type = oid_object_info(&entry->idx.oid, &entry->size);
        /*
         * The error condition is checked in prepare_pack().  This is
         * to permit a missing preferred base object to be ignored
@@ -1575,8 +1576,7 @@ static void drop_reused_delta(struct object_entry *entry)
                 * And if that fails, the error will be recorded in entry->type
                 * and dealt with in prepare_pack().
                 */
-               entry->type = sha1_object_info(entry->idx.oid.hash,
-                                              &entry->size);
+               entry->type = oid_object_info(&entry->idx.oid, &entry->size);
        }
 }
 
@@ -1868,8 +1868,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        /* Load data if not already done */
        if (!trg->data) {
                read_lock();
-               trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
-                                          &sz);
+               trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!trg->data)
                        die("object %s cannot be read",
@@ -1882,8 +1881,7 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
        }
        if (!src->data) {
                read_lock();
-               src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
-                                          &sz);
+               src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!src->data) {
                        if (src_entry->preferred_base) {
@@ -2546,6 +2544,7 @@ static void read_object_list_from_stdin(void)
        }
 }
 
+/* Remember to update object flag allocation in object.h */
 #define OBJECT_ADDED (1u<<20)
 
 static void show_commit(struct commit *commit, void *data)
@@ -2578,6 +2577,20 @@ static void show_object__ma_allow_any(struct object *obj, const char *name, void
        show_object(obj, name, data);
 }
 
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+       assert(arg_missing_action == MA_ALLOW_PROMISOR);
+
+       /*
+        * Quietly ignore EXPECTED missing objects.  This avoids problems with
+        * staging them now and getting an odd error later.
+        */
+       if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
+               return;
+
+       show_object(obj, name, data);
+}
+
 static int option_parse_missing_action(const struct option *opt,
                                       const char *arg, int unset)
 {
@@ -2592,10 +2605,18 @@ static int option_parse_missing_action(const struct option *opt,
 
        if (!strcmp(arg, "allow-any")) {
                arg_missing_action = MA_ALLOW_ANY;
+               fetch_if_missing = 0;
                fn_show_object = show_object__ma_allow_any;
                return 0;
        }
 
+       if (!strcmp(arg, "allow-promisor")) {
+               arg_missing_action = MA_ALLOW_PROMISOR;
+               fetch_if_missing = 0;
+               fn_show_object = show_object__ma_allow_promisor;
+               return 0;
+       }
+
        die(_("invalid value for --missing"));
        return 0;
 }
@@ -2683,7 +2704,7 @@ static void add_objects_in_unpacked_packs(struct rev_info *revs)
 static int add_loose_object(const struct object_id *oid, const char *path,
                            void *data)
 {
-       enum object_type type = sha1_object_info(oid->hash, NULL);
+       enum object_type type = oid_object_info(oid, NULL);
 
        if (type < 0) {
                warning("loose object at %s could not be examined", path);
@@ -2768,7 +2789,7 @@ static void loosen_unused_packed_objects(struct rev_info *revs)
                        if (!packlist_find(&to_pack, oid.hash, NULL) &&
                            !has_sha1_pack_kept_or_nonlocal(&oid) &&
                            !loosened_object_can_be_discarded(&oid, p->mtime))
-                               if (force_object_loose(oid.hash, p->mtime))
+                               if (force_object_loose(&oid, p->mtime))
                                        die("unable to force loose object");
                }
        }
@@ -3009,6 +3030,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                { OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
                  N_("handling for missing objects"), PARSE_OPT_NONEG,
                  option_parse_missing_action },
+               OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+                        N_("do not pack objects in promisor packfiles")),
                OPT_END(),
        };
 
@@ -3054,6 +3077,12 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                argv_array_push(&rp, "--unpacked");
        }
 
+       if (exclude_promisor_objects) {
+               use_internal_rev_list = 1;
+               fetch_if_missing = 0;
+               argv_array_push(&rp, "--exclude-promisor-objects");
+       }
+
        if (!reuse_object)
                reuse_delta = 0;
        if (pack_compression_level == -1)
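
want_object_in_pack() above now walks packed_git_mru with the generic list.h doubly-linked list instead of the dedicated mru API: list_for_each() iterates, list_entry() recovers the struct packed_git that embeds the list node, and list_move() promotes a hit to the front. A hedged sketch of that pattern; pack_has_object() is a hypothetical stand-in for the real pack lookup:

        static struct packed_git *find_mru_pack(const struct object_id *oid)
        {
                struct list_head *pos;

                list_for_each(pos, &packed_git_mru) {
                        struct packed_git *p = list_entry(pos, struct packed_git, mru);

                        if (!pack_has_object(p, oid))    /* hypothetical lookup helper */
                                continue;
                        /* the most recently hit pack moves to the head of the list */
                        list_move(&p->mru, &packed_git_mru);
                        return p;
                }
                return NULL;
        }
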
index aaa8136322244ccde5b7d6804a3bc09b0c053568..991e1bb76fd66bb189a3523ea5cddf0cd823e343 100644 (file)
@@ -48,17 +48,17 @@ static inline void llist_item_put(struct llist_item *item)
 
 static inline struct llist_item *llist_item_get(void)
 {
-       struct llist_item *new;
+       struct llist_item *new_item;
        if ( free_nodes ) {
-               new = free_nodes;
+               new_item = free_nodes;
                free_nodes = free_nodes->next;
        } else {
                int i = 1;
-               ALLOC_ARRAY(new, BLKSIZE);
+               ALLOC_ARRAY(new_item, BLKSIZE);
                for (; i < BLKSIZE; i++)
-                       llist_item_put(&new[i]);
+                       llist_item_put(&new_item[i]);
        }
-       return new;
+       return new_item;
 }
 
 static void llist_free(struct llist *list)
@@ -80,26 +80,26 @@ static inline void llist_init(struct llist **list)
 static struct llist * llist_copy(struct llist *list)
 {
        struct llist *ret;
-       struct llist_item *new, *old, *prev;
+       struct llist_item *new_item, *old_item, *prev;
 
        llist_init(&ret);
 
        if ((ret->size = list->size) == 0)
                return ret;
 
-       new = ret->front = llist_item_get();
-       new->sha1 = list->front->sha1;
+       new_item = ret->front = llist_item_get();
+       new_item->sha1 = list->front->sha1;
 
-       old = list->front->next;
-       while (old) {
-               prev = new;
-               new = llist_item_get();
-               prev->next = new;
-               new->sha1 = old->sha1;
-               old = old->next;
+       old_item = list->front->next;
+       while (old_item) {
+               prev = new_item;
+               new_item = llist_item_get();
+               prev->next = new_item;
+               new_item->sha1 = old_item->sha1;
+               old_item = old_item->next;
        }
-       new->next = NULL;
-       ret->back = new;
+       new_item->next = NULL;
+       ret->back = new_item;
 
        return ret;
 }
@@ -108,24 +108,24 @@ static inline struct llist_item *llist_insert(struct llist *list,
                                              struct llist_item *after,
                                               const unsigned char *sha1)
 {
-       struct llist_item *new = llist_item_get();
-       new->sha1 = sha1;
-       new->next = NULL;
+       struct llist_item *new_item = llist_item_get();
+       new_item->sha1 = sha1;
+       new_item->next = NULL;
 
        if (after != NULL) {
-               new->next = after->next;
-               after->next = new;
+               new_item->next = after->next;
+               after->next = new_item;
                if (after == list->back)
-                       list->back = new;
+                       list->back = new_item;
        } else {/* insert in front */
                if (list->size == 0)
-                       list->back = new;
+                       list->back = new_item;
                else
-                       new->next = list->front;
-               list->front = new;
+                       new_item->next = list->front;
+               list->front = new_item;
        }
        list->size++;
-       return new;
+       return new_item;
 }
 
 static inline struct llist_item *llist_insert_back(struct llist *list,
index d2fdae680a1ebe75d55d4fb66a3bdd0b54829666..38ced18dadff03836c6bed3fcbdefe8bd528a7c0 100644 (file)
@@ -50,9 +50,9 @@ static int prune_object(const struct object_id *oid, const char *fullpath,
        if (st.st_mtime > expire)
                return 0;
        if (show_only || verbose) {
-               enum object_type type = sha1_object_info(oid->hash, NULL);
+               enum object_type type = oid_object_info(oid, NULL);
                printf("%s %s\n", oid_to_hex(oid),
-                      (type > 0) ? typename(type) : "unknown");
+                      (type > 0) ? type_name(type) : "unknown");
        }
        if (!show_only)
                unlink_or_warn(fullpath);
@@ -101,12 +101,15 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
 {
        struct rev_info revs;
        struct progress *progress = NULL;
+       int exclude_promisor_objects = 0;
        const struct option options[] = {
                OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
                OPT__VERBOSE(&verbose, N_("report pruned objects")),
                OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
                OPT_EXPIRY_DATE(0, "expire", &expire,
                                N_("expire objects older than <time>")),
+               OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+                        N_("limit traversal to objects outside promisor packfiles")),
                OPT_END()
        };
        char *s;
@@ -139,6 +142,10 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
                show_progress = isatty(2);
        if (show_progress)
                progress = start_delayed_progress(_("Checking connectivity"), 0);
+       if (exclude_promisor_objects) {
+               fetch_if_missing = 0;
+               revs.exclude_promisor_objects = 1;
+       }
 
        mark_reachable_objects(&revs, 1, expire, progress);
        stop_progress(&progress);
index 511dbbe0f6e25d8f0e8c6ce511cc0ff734adc9bc..e32d6cd5b4c999bc45b961c1387af066c72a823a 100644 (file)
@@ -193,7 +193,7 @@ static struct option pull_options[] = {
        OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"),
                N_("path to upload pack on remote end"),
                0),
-       OPT__FORCE(&opt_force, N_("force overwrite of local branch")),
+       OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0),
        OPT_PASSTHRU('t', "tags", &opt_tags, NULL,
                N_("fetch all tags and associated objects"),
                PARSE_OPT_NOARG),
@@ -574,6 +574,7 @@ static int rebase_submodules(void)
        cp.no_stdin = 1;
        argv_array_pushl(&cp.args, "submodule", "update",
                                   "--recursive", "--rebase", NULL);
+       argv_push_verbosity(&cp.args);
 
        return run_command(&cp);
 }
@@ -586,6 +587,7 @@ static int update_submodules(void)
        cp.no_stdin = 1;
        argv_array_pushl(&cp.args, "submodule", "update",
                                   "--recursive", "--checkout", NULL);
+       argv_push_verbosity(&cp.args);
 
        return run_command(&cp);
 }
index 1c28427d82ee73631fb02b5f126c40ebd2cb1774..013c20d6164f61dc404b89271c0281d28b5069a7 100644 (file)
@@ -548,7 +548,7 @@ int cmd_push(int argc, const char **argv, const char *prefix)
                { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, "check|on-demand|no",
                        N_("control recursive pushing of submodules"),
                        PARSE_OPT_OPTARG, option_parse_recurse_submodules },
-               OPT_BOOL( 0 , "thin", &thin, N_("use thin pack")),
+               OPT_BOOL_F( 0 , "thin", &thin, N_("use thin pack"), PARSE_OPT_NOCOMPLETE),
                OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")),
                OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")),
                OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"),
index 7daee544b7b4cf3495355e1c6e597272d7b66440..ad074705bb51d1de4221b3c5dfaa7229903c0ef0 100644 (file)
@@ -22,6 +22,8 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
        struct option options[] = {
                OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")),
                OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
+               OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
+                       N_("allow commits with empty messages")),
                OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
                                CONTINUE),
                OPT_CMDMODE(0, "abort", &command, N_("abort rebase"),
@@ -43,7 +45,7 @@ int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
                OPT_END()
        };
 
-       git_config(git_default_config, NULL);
+       sequencer_init_config(&opts);
        git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
 
        opts.action = REPLAY_INTERACTIVE_REBASE;
index b7ce7c7f5275febbf6db6b71e3b610b09a4fe191..2bf7f2d1a3e589f982400fef1aa5bec6e97ccfbd 100644 (file)
@@ -69,7 +69,7 @@ static int sent_capabilities;
 static int shallow_update;
 static const char *alt_shallow_file;
 static struct strbuf push_cert = STRBUF_INIT;
-static unsigned char push_cert_sha1[20];
+static struct object_id push_cert_oid;
 static struct signature_check sigcheck;
 static const char *push_cert_nonce;
 static const char *cert_nonce_seed;
@@ -633,8 +633,9 @@ static void prepare_push_cert_sha1(struct child_process *proc)
                int bogs /* beginning_of_gpg_sig */;
 
                already_done = 1;
-               if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1))
-                       hashclr(push_cert_sha1);
+               if (write_object_file(push_cert.buf, push_cert.len, "blob",
+                                     &push_cert_oid))
+                       oidclr(&push_cert_oid);
 
                memset(&sigcheck, '\0', sizeof(sigcheck));
                sigcheck.result = 'N';
@@ -655,9 +656,9 @@ static void prepare_push_cert_sha1(struct child_process *proc)
                strbuf_release(&gpg_status);
                nonce_status = check_nonce(push_cert.buf, bogs);
        }
-       if (!is_null_sha1(push_cert_sha1)) {
+       if (!is_null_oid(&push_cert_oid)) {
                argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
-                                sha1_to_hex(push_cert_sha1));
+                                oid_to_hex(&push_cert_oid));
                argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
                                 sigcheck.signer ? sigcheck.signer : "");
                argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
@@ -1241,11 +1242,11 @@ static void check_aliased_update(struct command *cmd, struct string_list *list)
        rp_error("refusing inconsistent update between symref '%s' (%s..%s) and"
                 " its target '%s' (%s..%s)",
                 cmd->ref_name,
-                find_unique_abbrev(cmd->old_oid.hash, DEFAULT_ABBREV),
-                find_unique_abbrev(cmd->new_oid.hash, DEFAULT_ABBREV),
+                find_unique_abbrev(&cmd->old_oid, DEFAULT_ABBREV),
+                find_unique_abbrev(&cmd->new_oid, DEFAULT_ABBREV),
                 dst_cmd->ref_name,
-                find_unique_abbrev(dst_cmd->old_oid.hash, DEFAULT_ABBREV),
-                find_unique_abbrev(dst_cmd->new_oid.hash, DEFAULT_ABBREV));
+                find_unique_abbrev(&dst_cmd->old_oid, DEFAULT_ABBREV),
+                find_unique_abbrev(&dst_cmd->new_oid, DEFAULT_ABBREV));
 
        cmd->error_string = dst_cmd->error_string =
                "inconsistent aliased update";
index 2233725315ba32832ec5c866b7dbdf72103ac74d..a89bd1dd25252ddfe5ad6b32ee89950d3a4b258f 100644 (file)
@@ -52,6 +52,7 @@ struct collect_reflog_cb {
        int nr;
 };
 
+/* Remember to update object flag allocation in object.h */
 #define INCOMPLETE     (1u<<10)
 #define STUDYING       (1u<<11)
 #define REACHABLE      (1u<<12)
@@ -74,7 +75,7 @@ static int tree_is_complete(const struct object_id *oid)
        if (!tree->buffer) {
                enum object_type type;
                unsigned long size;
-               void *data = read_sha1_file(oid->hash, &type, &size);
+               void *data = read_object_file(oid, &type, &size);
                if (!data) {
                        tree->object.flags |= INCOMPLETE;
                        return 0;
@@ -289,20 +290,20 @@ static int should_expire_reflog_ent(struct object_id *ooid, struct object_id *no
                                    const char *message, void *cb_data)
 {
        struct expire_reflog_policy_cb *cb = cb_data;
-       struct commit *old, *new;
+       struct commit *old_commit, *new_commit;
 
        if (timestamp < cb->cmd.expire_total)
                return 1;
 
-       old = new = NULL;
+       old_commit = new_commit = NULL;
        if (cb->cmd.stalefix &&
-           (!keep_entry(&old, ooid) || !keep_entry(&new, noid)))
+           (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid)))
                return 1;
 
        if (timestamp < cb->cmd.expire_unreachable) {
                if (cb->unreachable_expire_kind == UE_ALWAYS)
                        return 1;
-               if (unreachable(cb, old, ooid) || unreachable(cb, new, noid))
+               if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid))
                        return 1;
        }
 
index d95bf904c3b3fb40438335198a2729e45a9897cb..805ffc05cdb80e4a69de4134e757f9c71e8033dc 100644 (file)
@@ -168,7 +168,7 @@ static int add(int argc, const char **argv)
                OPT_STRING('m', "master", &master, N_("branch"), N_("master branch")),
                { OPTION_CALLBACK, 0, "mirror", &mirror, N_("push|fetch"),
                        N_("set up remote as a mirror to push to or fetch from"),
-                       PARSE_OPT_OPTARG, parse_mirror_opt },
+                       PARSE_OPT_OPTARG | PARSE_OPT_COMP_ARG, parse_mirror_opt },
                OPT_END()
        };
 
@@ -322,7 +322,7 @@ static void read_branches(void)
 
 struct ref_states {
        struct remote *remote;
-       struct string_list new, stale, tracked, heads, push;
+       struct string_list new_refs, stale, tracked, heads, push;
        int queried;
 };
 
@@ -337,12 +337,12 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
                        die(_("Could not get fetch map for refspec %s"),
                                states->remote->fetch_refspec[i]);
 
-       states->new.strdup_strings = 1;
+       states->new_refs.strdup_strings = 1;
        states->tracked.strdup_strings = 1;
        states->stale.strdup_strings = 1;
        for (ref = fetch_map; ref; ref = ref->next) {
                if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
-                       string_list_append(&states->new, abbrev_branch(ref->name));
+                       string_list_append(&states->new_refs, abbrev_branch(ref->name));
                else
                        string_list_append(&states->tracked, abbrev_branch(ref->name));
        }
@@ -356,7 +356,7 @@ static int get_ref_states(const struct ref *remote_refs, struct ref_states *stat
        free_refs(stale_refs);
        free_refs(fetch_map);
 
-       string_list_sort(&states->new);
+       string_list_sort(&states->new_refs);
        string_list_sort(&states->tracked);
        string_list_sort(&states->stale);
 
@@ -546,8 +546,8 @@ static int add_branch_for_removal(const char *refname,
 }
 
 struct rename_info {
-       const char *old;
-       const char *new;
+       const char *old_name;
+       const char *new_name;
        struct string_list *remote_branches;
 };
 
@@ -560,7 +560,7 @@ static int read_remote_branches(const char *refname,
        int flag;
        const char *symref;
 
-       strbuf_addf(&buf, "refs/remotes/%s/", rename->old);
+       strbuf_addf(&buf, "refs/remotes/%s/", rename->old_name);
        if (starts_with(refname, buf.buf)) {
                item = string_list_append(rename->remote_branches, xstrdup(refname));
                symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING,
@@ -615,36 +615,36 @@ static int mv(int argc, const char **argv)
        if (argc != 3)
                usage_with_options(builtin_remote_rename_usage, options);
 
-       rename.old = argv[1];
-       rename.new = argv[2];
+       rename.old_name = argv[1];
+       rename.new_name = argv[2];
        rename.remote_branches = &remote_branches;
 
-       oldremote = remote_get(rename.old);
+       oldremote = remote_get(rename.old_name);
        if (!remote_is_configured(oldremote, 1))
-               die(_("No such remote: %s"), rename.old);
+               die(_("No such remote: %s"), rename.old_name);
 
-       if (!strcmp(rename.old, rename.new) && oldremote->origin != REMOTE_CONFIG)
+       if (!strcmp(rename.old_name, rename.new_name) && oldremote->origin != REMOTE_CONFIG)
                return migrate_file(oldremote);
 
-       newremote = remote_get(rename.new);
+       newremote = remote_get(rename.new_name);
        if (remote_is_configured(newremote, 1))
-               die(_("remote %s already exists."), rename.new);
+               die(_("remote %s already exists."), rename.new_name);
 
-       strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new);
+       strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new_name);
        if (!valid_fetch_refspec(buf.buf))
-               die(_("'%s' is not a valid remote name"), rename.new);
+               die(_("'%s' is not a valid remote name"), rename.new_name);
 
        strbuf_reset(&buf);
-       strbuf_addf(&buf, "remote.%s", rename.old);
-       strbuf_addf(&buf2, "remote.%s", rename.new);
+       strbuf_addf(&buf, "remote.%s", rename.old_name);
+       strbuf_addf(&buf2, "remote.%s", rename.new_name);
        if (git_config_rename_section(buf.buf, buf2.buf) < 1)
                return error(_("Could not rename config section '%s' to '%s'"),
                                buf.buf, buf2.buf);
 
        strbuf_reset(&buf);
-       strbuf_addf(&buf, "remote.%s.fetch", rename.new);
+       strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
        git_config_set_multivar(buf.buf, NULL, NULL, 1);
-       strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old);
+       strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
        for (i = 0; i < oldremote->fetch_refspec_nr; i++) {
                char *ptr;
 
@@ -655,8 +655,8 @@ static int mv(int argc, const char **argv)
                        refspec_updated = 1;
                        strbuf_splice(&buf2,
                                      ptr-buf2.buf + strlen(":refs/remotes/"),
-                                     strlen(rename.old), rename.new,
-                                     strlen(rename.new));
+                                     strlen(rename.old_name), rename.new_name,
+                                     strlen(rename.new_name));
                } else
                        warning(_("Not updating non-default fetch refspec\n"
                                  "\t%s\n"
@@ -670,10 +670,10 @@ static int mv(int argc, const char **argv)
        for (i = 0; i < branch_list.nr; i++) {
                struct string_list_item *item = branch_list.items + i;
                struct branch_info *info = item->util;
-               if (info->remote_name && !strcmp(info->remote_name, rename.old)) {
+               if (info->remote_name && !strcmp(info->remote_name, rename.old_name)) {
                        strbuf_reset(&buf);
                        strbuf_addf(&buf, "branch.%s.remote", item->string);
-                       git_config_set(buf.buf, rename.new);
+                       git_config_set(buf.buf, rename.new_name);
                }
        }
 
@@ -703,8 +703,8 @@ static int mv(int argc, const char **argv)
                        continue;
                strbuf_reset(&buf);
                strbuf_addstr(&buf, item->string);
-               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old),
-                               rename.new, strlen(rename.new));
+               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+                               rename.new_name, strlen(rename.new_name));
                strbuf_reset(&buf2);
                strbuf_addf(&buf2, "remote: renamed %s to %s",
                                item->string, buf.buf);
@@ -718,12 +718,12 @@ static int mv(int argc, const char **argv)
                        continue;
                strbuf_reset(&buf);
                strbuf_addstr(&buf, item->string);
-               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old),
-                               rename.new, strlen(rename.new));
+               strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+                               rename.new_name, strlen(rename.new_name));
                strbuf_reset(&buf2);
                strbuf_addstr(&buf2, item->util);
-               strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old),
-                               rename.new, strlen(rename.new));
+               strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old_name),
+                               rename.new_name, strlen(rename.new_name));
                strbuf_reset(&buf3);
                strbuf_addf(&buf3, "remote: renamed %s to %s",
                                item->string, buf.buf);
@@ -822,7 +822,7 @@ static void clear_push_info(void *util, const char *string)
 
 static void free_remote_ref_states(struct ref_states *states)
 {
-       string_list_clear(&states->new, 0);
+       string_list_clear(&states->new_refs, 0);
        string_list_clear(&states->stale, 1);
        string_list_clear(&states->tracked, 0);
        string_list_clear(&states->heads, 0);
@@ -907,7 +907,7 @@ static int show_remote_info_item(struct string_list_item *item, void *cb_data)
        if (states->queried) {
                const char *fmt = "%s";
                const char *arg = "";
-               if (string_list_has_string(&states->new, name)) {
+               if (string_list_has_string(&states->new_refs, name)) {
                        fmt = _(" new (next fetch will store in remotes/%s)");
                        arg = states->remote->name;
                } else if (string_list_has_string(&states->tracked, name))
@@ -1176,7 +1176,7 @@ static int show(int argc, const char **argv)
 
                /* remote branch info */
                info.width = 0;
-               for_each_string_list(&states.new, add_remote_to_show_info, &info);
+               for_each_string_list(&states.new_refs, add_remote_to_show_info, &info);
                for_each_string_list(&states.tracked, add_remote_to_show_info, &info);
                for_each_string_list(&states.stale, add_remote_to_show_info, &info);
                if (info.list->nr)
index f17a68a17da960813e6e925837638a5d9a26d5ee..7bdb40142f9261dac6514d98ee01dc44e595ec71 100644 (file)
@@ -83,7 +83,8 @@ static void remove_pack_on_signal(int signo)
 
 /*
  * Adds all pack hex strings to the fname list, which do not
- * have a corresponding .keep file.
+ * have a corresponding .keep or .promisor file. These packs are not to
+ * be kept if we are going to pack everything into one file.
  */
 static void get_non_kept_pack_filenames(struct string_list *fname_list)
 {
@@ -101,7 +102,8 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list)
 
                fname = xmemdupz(e->d_name, len);
 
-               if (!file_exists(mkpath("%s/%s.keep", packdir, fname)))
+               if (!file_exists(mkpath("%s/%s.keep", packdir, fname)) &&
+                   !file_exists(mkpath("%s/%s.promisor", packdir, fname)))
                        string_list_append_nodup(fname_list, fname);
                else
                        free(fname);
@@ -232,6 +234,8 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
        argv_array_push(&cmd.args, "--all");
        argv_array_push(&cmd.args, "--reflog");
        argv_array_push(&cmd.args, "--indexed-objects");
+       if (repository_format_partial_clone)
+               argv_array_push(&cmd.args, "--exclude-promisor-objects");
        if (window)
                argv_array_pushf(&cmd.args, "--window=%s", window);
        if (window_memory)
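
For a partial clone these two repack changes work together: a pack that carries a .promisor marker file is now treated like a .keep pack and left out of the all-into-one repack, and pack-objects is additionally told to leave promisor objects out when repository_format_partial_clone is set. A hedged sketch of the marker test the first hunk introduces:

        /* a pack is considered "kept" if either marker file is present */
        int kept = file_exists(mkpath("%s/%s.keep", packdir, fname)) ||
                   file_exists(mkpath("%s/%s.promisor", packdir, fname));
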
index 10078ae37136f154486d8163b6b905ad163c7047..935647be6bdf2ffc2b460038a10e63c2eb387a5c 100644 (file)
@@ -53,11 +53,11 @@ static int show_reference(const char *refname, const struct object_id *oid,
                        if (get_oid(refname, &object))
                                return error("Failed to resolve '%s' as a valid ref.", refname);
 
-                       obj_type = sha1_object_info(object.hash, NULL);
-                       repl_type = sha1_object_info(oid->hash, NULL);
+                       obj_type = oid_object_info(&object, NULL);
+                       repl_type = oid_object_info(oid, NULL);
 
-                       printf("%s (%s) -> %s (%s)\n", refname, typename(obj_type),
-                              oid_to_hex(oid), typename(repl_type));
+                       printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
+                              oid_to_hex(oid), type_name(repl_type));
                }
        }
 
@@ -162,14 +162,14 @@ static int replace_object_oid(const char *object_ref,
        struct ref_transaction *transaction;
        struct strbuf err = STRBUF_INIT;
 
-       obj_type = sha1_object_info(object->hash, NULL);
-       repl_type = sha1_object_info(repl->hash, NULL);
+       obj_type = oid_object_info(object, NULL);
+       repl_type = oid_object_info(repl, NULL);
        if (!force && obj_type != repl_type)
                die("Objects must be of the same type.\n"
                    "'%s' points to a replaced object of type '%s'\n"
                    "while '%s' points to a replacement object of type '%s'.",
-                   object_ref, typename(obj_type),
-                   replace_ref, typename(repl_type));
+                   object_ref, type_name(obj_type),
+                   replace_ref, type_name(repl_type));
 
        check_ref_valid(object, &prev, &ref, force);
 
@@ -215,7 +215,7 @@ static void export_object(const struct object_id *oid, enum object_type type,
        argv_array_push(&cmd.args, "--no-replace-objects");
        argv_array_push(&cmd.args, "cat-file");
        if (raw)
-               argv_array_push(&cmd.args, typename(type));
+               argv_array_push(&cmd.args, type_name(type));
        else
                argv_array_push(&cmd.args, "-p");
        argv_array_push(&cmd.args, oid_to_hex(oid));
@@ -284,30 +284,30 @@ static int edit_and_replace(const char *object_ref, int force, int raw)
 {
        char *tmpfile = git_pathdup("REPLACE_EDITOBJ");
        enum object_type type;
-       struct object_id old, new, prev;
+       struct object_id old_oid, new_oid, prev;
        struct strbuf ref = STRBUF_INIT;
 
-       if (get_oid(object_ref, &old) < 0)
+       if (get_oid(object_ref, &old_oid) < 0)
                die("Not a valid object name: '%s'", object_ref);
 
-       type = sha1_object_info(old.hash, NULL);
+       type = oid_object_info(&old_oid, NULL);
        if (type < 0)
-               die("unable to get object type for %s", oid_to_hex(&old));
+               die("unable to get object type for %s", oid_to_hex(&old_oid));
 
-       check_ref_valid(&old, &prev, &ref, force);
+       check_ref_valid(&old_oid, &prev, &ref, force);
        strbuf_release(&ref);
 
-       export_object(&old, type, raw, tmpfile);
+       export_object(&old_oid, type, raw, tmpfile);
        if (launch_editor(tmpfile, NULL, NULL) < 0)
                die("editing object file failed");
-       import_object(&new, type, raw, tmpfile);
+       import_object(&new_oid, type, raw, tmpfile);
 
        free(tmpfile);
 
-       if (!oidcmp(&old, &new))
-               return error("new object is the same as the old one: '%s'", oid_to_hex(&old));
+       if (!oidcmp(&old_oid, &new_oid))
+               return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid));
 
-       return replace_object_oid(object_ref, &old, "replacement", &new, force);
+       return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
 }
 
 static void replace_parents(struct strbuf *buf, int argc, const char **argv)
@@ -355,7 +355,7 @@ static void check_one_mergetag(struct commit *commit,
        struct tag *tag;
        int i;
 
-       hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), tag_oid.hash);
+       hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
        tag = lookup_tag(&tag_oid);
        if (!tag)
                die(_("bad mergetag in commit '%s'"), ref);
@@ -386,16 +386,16 @@ static void check_mergetags(struct commit *commit, int argc, const char **argv)
 
 static int create_graft(int argc, const char **argv, int force)
 {
-       struct object_id old, new;
+       struct object_id old_oid, new_oid;
        const char *old_ref = argv[0];
        struct commit *commit;
        struct strbuf buf = STRBUF_INIT;
        const char *buffer;
        unsigned long size;
 
-       if (get_oid(old_ref, &old) < 0)
+       if (get_oid(old_ref, &old_oid) < 0)
                die(_("Not a valid object name: '%s'"), old_ref);
-       commit = lookup_commit_or_die(&old, old_ref);
+       commit = lookup_commit_or_die(&old_oid, old_ref);
 
        buffer = get_commit_buffer(commit, &size);
        strbuf_add(&buf, buffer, size);
@@ -410,15 +410,15 @@ static int create_graft(int argc, const char **argv, int force)
 
        check_mergetags(commit, argc, argv);
 
-       if (write_sha1_file(buf.buf, buf.len, commit_type, new.hash))
+       if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
                die(_("could not write replacement commit for: '%s'"), old_ref);
 
        strbuf_release(&buf);
 
-       if (!oidcmp(&old, &new))
-               return error("new commit is the same as the old one: '%s'", oid_to_hex(&old));
+       if (!oidcmp(&old_oid, &new_oid))
+               return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
 
-       return replace_object_oid(old_ref, &old, "replacement", &new, force);
+       return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
 }
 
 int cmd_replace(int argc, const char **argv, const char *prefix)
@@ -439,7 +439,8 @@ int cmd_replace(int argc, const char **argv, const char *prefix)
                OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE),
                OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT),
                OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT),
-               OPT_BOOL('f', "force", &force, N_("replace the ref if it exists")),
+               OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")),
                OPT_STRING(0, "format", &format, N_("format"), N_("use this format")),
                OPT_END()
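
The OPT_BOOL_F()/OPT__FORCE() conversions that recur in this diff are about command-line completion: PARSE_OPT_NOCOMPLETE appears to keep a destructive option such as --force out of the candidates offered to shell completion, much as PARSE_OPT_COMP_ARG in the earlier remote.c hunk marks an optional argument as completable. A small sketch of the two forms inside a struct option array (the help strings here are placeholders):

        OPT_BOOL_F('f', "force", &force,
                   N_("force the operation"), PARSE_OPT_NOCOMPLETE),
        /* the convenience macro now takes the same flags argument */
        OPT__FORCE(&force, N_("force the operation"), PARSE_OPT_NOCOMPLETE),
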
index e15f595799c40933e31f909715e35b4d0a665fc0..7f1c3f02a302128d6c00c35b8783c1a62353b37a 100644 (file)
@@ -106,24 +106,16 @@ static int reset_index(const struct object_id *oid, int reset_type, int quiet)
 
 static void print_new_head_line(struct commit *commit)
 {
-       const char *hex, *body;
-       const char *msg;
-
-       hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
-       printf(_("HEAD is now at %s"), hex);
-       msg = logmsg_reencode(commit, NULL, get_log_output_encoding());
-       body = strstr(msg, "\n\n");
-       if (body) {
-               const char *eol;
-               size_t len;
-               body = skip_blank_lines(body + 2);
-               eol = strchr(body, '\n');
-               len = eol ? eol - body : strlen(body);
-               printf(" %.*s\n", (int) len, body);
-       }
-       else
-               printf("\n");
-       unuse_commit_buffer(commit, msg);
+       struct strbuf buf = STRBUF_INIT;
+
+       printf(_("HEAD is now at %s"),
+               find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV));
+
+       pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+       if (buf.len > 0)
+               printf(" %s", buf.buf);
+       putchar('\n');
+       strbuf_release(&buf);
 }
 
 static void update_index_from_diff(struct diff_queue_struct *q,
index d95acaa40e5ca0fe0755e87437bd98e52457dcc4..fadd3ec14cbf0469c332a85278e5d1b4932ef788 100644 (file)
@@ -15,6 +15,7 @@
 #include "progress.h"
 #include "reflog-walk.h"
 #include "oidset.h"
+#include "packfile.h"
 
 static const char rev_list_usage[] =
 "git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
@@ -67,6 +68,7 @@ enum missing_action {
        MA_ERROR = 0,    /* fail if any missing objects are encountered */
        MA_ALLOW_ANY,    /* silently allow ALL missing objects */
        MA_PRINT,        /* print ALL missing objects in special section */
+       MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
 };
 static enum missing_action arg_missing_action;
 
@@ -106,7 +108,7 @@ static void show_commit(struct commit *commit, void *data)
        if (!revs->graph)
                fputs(get_revision_mark(revs, commit), stdout);
        if (revs->abbrev_commit && revs->abbrev)
-               fputs(find_unique_abbrev(commit->object.oid.hash, revs->abbrev),
+               fputs(find_unique_abbrev(&commit->object.oid, revs->abbrev),
                      stdout);
        else
                fputs(oid_to_hex(&commit->object.oid), stdout);
@@ -197,6 +199,12 @@ static void finish_commit(struct commit *commit, void *data)
 
 static inline void finish_object__ma(struct object *obj)
 {
+       /*
+        * Whether or not we try to dynamically fetch missing objects
+        * from the server, we currently DO NOT have the object.  We
+        * can either print, allow (ignore), or conditionally allow
+        * (ignore) them.
+        */
        switch (arg_missing_action) {
        case MA_ERROR:
                die("missing blob object '%s'", oid_to_hex(&obj->oid));
@@ -209,25 +217,36 @@ static inline void finish_object__ma(struct object *obj)
                oidset_insert(&missing_objects, &obj->oid);
                return;
 
+       case MA_ALLOW_PROMISOR:
+               if (is_promisor_object(&obj->oid))
+                       return;
+               die("unexpected missing blob object '%s'",
+                   oid_to_hex(&obj->oid));
+               return;
+
        default:
                BUG("unhandled missing_action");
                return;
        }
 }
 
-static void finish_object(struct object *obj, const char *name, void *cb_data)
+static int finish_object(struct object *obj, const char *name, void *cb_data)
 {
        struct rev_list_info *info = cb_data;
-       if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid))
+       if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) {
                finish_object__ma(obj);
+               return 1;
+       }
        if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
                parse_object(&obj->oid);
+       return 0;
 }
 
 static void show_object(struct object *obj, const char *name, void *cb_data)
 {
        struct rev_list_info *info = cb_data;
-       finish_object(obj, name, cb_data);
+       if (finish_object(obj, name, cb_data))
+               return;
        display_progress(progress, ++progress_counter);
        if (info->flags & REV_LIST_QUIET)
                return;
@@ -315,11 +334,19 @@ static inline int parse_missing_action_value(const char *value)
 
        if (!strcmp(value, "allow-any")) {
                arg_missing_action = MA_ALLOW_ANY;
+               fetch_if_missing = 0;
                return 1;
        }
 
        if (!strcmp(value, "print")) {
                arg_missing_action = MA_PRINT;
+               fetch_if_missing = 0;
+               return 1;
+       }
+
+       if (!strcmp(value, "allow-promisor")) {
+               arg_missing_action = MA_ALLOW_PROMISOR;
+               fetch_if_missing = 0;
                return 1;
        }
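
With this hunk rev-list understands three explicit --missing= actions, and each of them also clears fetch_if_missing so that missing objects are handled locally instead of being fetched on demand. A summarizing sketch (the MA_ERROR default is unchanged):

        parse_missing_action_value("allow-any");      /* MA_ALLOW_ANY,      fetch_if_missing = 0 */
        parse_missing_action_value("print");          /* MA_PRINT,          fetch_if_missing = 0 */
        parse_missing_action_value("allow-promisor"); /* MA_ALLOW_PROMISOR, fetch_if_missing = 0 */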
 
@@ -344,6 +371,35 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
        init_revisions(&revs, prefix);
        revs.abbrev = DEFAULT_ABBREV;
        revs.commit_format = CMIT_FMT_UNSPECIFIED;
+
+       /*
+        * Scan the argument list before invoking setup_revisions(), so that we
+        * know if fetch_if_missing needs to be set to 0.
+        *
+        * "--exclude-promisor-objects" acts as a pre-filter on missing objects
+        * by not crossing the boundary from realized objects to promisor
+        * objects.
+        *
+        * Let "--missing" conditionally set fetch_if_missing.
+        */
+       for (i = 1; i < argc; i++) {
+               const char *arg = argv[i];
+               if (!strcmp(arg, "--exclude-promisor-objects")) {
+                       fetch_if_missing = 0;
+                       revs.exclude_promisor_objects = 1;
+                       break;
+               }
+       }
+       for (i = 1; i < argc; i++) {
+               const char *arg = argv[i];
+               if (skip_prefix(arg, "--missing=", &arg)) {
+                       if (revs.exclude_promisor_objects)
+                               die(_("cannot combine --exclude-promisor-objects and --missing"));
+                       if (parse_missing_action_value(arg))
+                               break;
+               }
+       }
+
        argc = setup_revisions(argc, argv, &revs, NULL);
 
        memset(&info, 0, sizeof(info));
@@ -404,7 +460,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
                        continue;
                }
                if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
-                       list_objects_filter_release(&filter_options);
+                       list_objects_filter_set_no_filter(&filter_options);
                        continue;
                }
                if (!strcmp(arg, "--filter-print-omitted")) {
@@ -412,9 +468,10 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
                        continue;
                }
 
-               if (skip_prefix(arg, "--missing=", &arg) &&
-                   parse_missing_action_value(arg))
-                       continue;
+               if (!strcmp(arg, "--exclude-promisor-objects"))
+                       continue; /* already handled above */
+               if (skip_prefix(arg, "--missing=", &arg))
+                       continue; /* already handled above */
 
                usage(rev_list_usage);
 
@@ -479,7 +536,7 @@ int cmd_rev_list(int argc, const char **argv, const char *prefix)
                mark_edges_uninteresting(&revs, show_edge);
 
        if (bisect_list) {
-               int reaches = reaches, all = all;
+               int reaches, all;
 
                find_bisection(&revs.commits, &reaches, &all, bisect_find_all);
 
index 74aa644cbb37b5741c521eba53d0a040aaf1582e..36b208778280e6019d9bc4fb4063dff3d44f08e6 100644 (file)
@@ -159,7 +159,7 @@ static void show_rev(int type, const struct object_id *oid, const char *name)
                }
        }
        else if (abbrev)
-               show_with_type(type, find_unique_abbrev(oid->hash, abbrev));
+               show_with_type(type, find_unique_abbrev(oid, abbrev));
        else
                show_with_type(type, oid_to_hex(oid));
 }
@@ -243,28 +243,28 @@ static int show_file(const char *arg, int output_prefix)
 static int try_difference(const char *arg)
 {
        char *dotdot;
-       struct object_id oid;
-       struct object_id end;
-       const char *next;
-       const char *this;
+       struct object_id start_oid;
+       struct object_id end_oid;
+       const char *end;
+       const char *start;
        int symmetric;
        static const char head_by_default[] = "HEAD";
 
        if (!(dotdot = strstr(arg, "..")))
                return 0;
-       next = dotdot + 2;
-       this = arg;
-       symmetric = (*next == '.');
+       end = dotdot + 2;
+       start = arg;
+       symmetric = (*end == '.');
 
        *dotdot = 0;
-       next += symmetric;
+       end += symmetric;
 
-       if (!*next)
-               next = head_by_default;
+       if (!*end)
+               end = head_by_default;
        if (dotdot == arg)
-               this = head_by_default;
+               start = head_by_default;
 
-       if (this == head_by_default && next == head_by_default &&
+       if (start == head_by_default && end == head_by_default &&
            !symmetric) {
                /*
                 * Just ".."?  That is not a range but the
@@ -274,14 +274,14 @@ static int try_difference(const char *arg)
                return 0;
        }
 
-       if (!get_oid_committish(this, &oid) && !get_oid_committish(next, &end)) {
-               show_rev(NORMAL, &end, next);
-               show_rev(symmetric ? NORMAL : REVERSED, &oid, this);
+       if (!get_oid_committish(start, &start_oid) && !get_oid_committish(end, &end_oid)) {
+               show_rev(NORMAL, &end_oid, end);
+               show_rev(symmetric ? NORMAL : REVERSED, &start_oid, start);
                if (symmetric) {
                        struct commit_list *exclude;
                        struct commit *a, *b;
-                       a = lookup_commit_reference(&oid);
-                       b = lookup_commit_reference(&end);
+                       a = lookup_commit_reference(&start_oid);
+                       b = lookup_commit_reference(&end_oid);
                        exclude = get_merge_bases(a, b);
                        while (exclude) {
                                struct commit *commit = pop_commit(&exclude);
@@ -516,7 +516,7 @@ static int cmd_parseopt(int argc, const char **argv, const char *prefix)
                        PARSE_OPT_SHELL_EVAL);
 
        strbuf_addstr(&parsed, " --");
-       sq_quote_argv(&parsed, argv, 0);
+       sq_quote_argv(&parsed, argv);
        puts(parsed.buf);
        return 0;
 }
@@ -526,7 +526,7 @@ static int cmd_sq_quote(int argc, const char **argv)
        struct strbuf buf = STRBUF_INIT;
 
        if (argc)
-               sq_quote_argv(&buf, argv, 0);
+               sq_quote_argv(&buf, argv);
        printf("%s\n", buf.buf);
        strbuf_release(&buf);
 
index b9d927eb09c9ed87c84681df1396f4e6d9b13c97..76f0a35b074b858ab4cb3e3894bc7c877401b7e8 100644 (file)
@@ -208,7 +208,7 @@ int cmd_revert(int argc, const char **argv, const char *prefix)
        if (isatty(0))
                opts.edit = 1;
        opts.action = REPLAY_REVERT;
-       git_config(git_default_config, NULL);
+       sequencer_init_config(&opts);
        res = run_sequencer(argc, argv, &opts);
        if (res < 0)
                die(_("revert failed"));
@@ -221,7 +221,7 @@ int cmd_cherry_pick(int argc, const char **argv, const char *prefix)
        int res;
 
        opts.action = REPLAY_PICK;
-       git_config(git_default_config, NULL);
+       sequencer_init_config(&opts);
        res = run_sequencer(argc, argv, &opts);
        if (res < 0)
                die(_("cherry-pick failed"));
index 4a2fcca27b3f722ca520c2411b80e6984ecf780a..5b6fc7ee818be4a4f060dc06f12fb45a25a2ea9b 100644 (file)
@@ -178,7 +178,7 @@ static int check_local_mod(struct object_id *head, int index_only)
                 * way as changed from the HEAD.
                 */
                if (no_head
-                    || get_tree_entry(head->hash, name, oid.hash, &mode)
+                    || get_tree_entry(head, name, &oid, &mode)
                     || ce->ce_mode != create_ce_mode(mode)
                     || oidcmp(&ce->oid, &oid))
                        staged_changes = 1;
@@ -242,7 +242,7 @@ static struct option builtin_rm_options[] = {
        OPT__DRY_RUN(&show_only, N_("dry run")),
        OPT__QUIET(&quiet, N_("do not list removed files")),
        OPT_BOOL( 0 , "cached",         &index_only, N_("only remove from the index")),
-       OPT__FORCE(&force, N_("override the up-to-date check")),
+       OPT__FORCE(&force, N_("override the up-to-date check"), PARSE_OPT_NOCOMPLETE),
        OPT_BOOL('r', NULL,             &recursive,  N_("allow recursive removal")),
        OPT_BOOL( 0 , "ignore-unmatch", &ignore_unmatch,
                                N_("exit with a zero status even if nothing matched")),
@@ -385,10 +385,9 @@ int cmd_rm(int argc, const char **argv, const char *prefix)
                        stage_updated_gitmodules(&the_index);
        }
 
-       if (active_cache_changed) {
-               if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
-                       die(_("Unable to write new index file"));
-       }
+       if (write_locked_index(&the_index, &lock_file,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
+               die(_("Unable to write new index file"));
 
        return 0;
 }
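
The SKIP_IF_UNCHANGED flag used above lets write_locked_index() itself skip the write and release the lock when the in-core index was not modified, which is what the removed active_cache_changed guard used to ensure. The resulting idiom, as a sketch:

        if (write_locked_index(&the_index, &lock_file,
                               COMMIT_LOCK | SKIP_IF_UNCHANGED))
                die(_("Unable to write new index file"));
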
index e29875b84389b25237e39e0112eafa8ac34599ee..608d6ba77bdfb4673513444651053d0e8e789020 100644 (file)
@@ -11,7 +11,8 @@
 #include "parse-options.h"
 
 static char const * const shortlog_usage[] = {
-       N_("git shortlog [<options>] [<revision-range>] [[--] [<path>...]]"),
+       N_("git shortlog [<options>] [<revision-range>] [[--] <path>...]"),
+       N_("git log --pretty=short | git shortlog [<options>]"),
        NULL
 };
 
@@ -283,6 +284,7 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
        for (;;) {
                switch (parse_options_step(&ctx, options, shortlog_usage)) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_DONE:
                        goto parse_done;
@@ -292,6 +294,11 @@ int cmd_shortlog(int argc, const char **argv, const char *prefix)
 parse_done:
        argc = parse_options_end(&ctx);
 
+       if (nongit && argc > 1) {
+               error(_("too many arguments given outside repository"));
+               usage_with_options(shortlog_usage, options);
+       }
+
        if (setup_revisions(argc, argv, &rev, NULL) != 1) {
                error(_("unrecognized argument: %s"), argv[1]);
                usage_with_options(shortlog_usage, options);
index e8a4aa40cb4b6cf8787af3dd35d833a92a85bba3..6c2148b71db593af1a4ef8d2d1bdbdfe16661851 100644 (file)
@@ -292,7 +292,7 @@ static void show_one_commit(struct commit *commit, int no_name)
                }
                else
                        printf("[%s] ",
-                              find_unique_abbrev(commit->object.oid.hash,
+                              find_unique_abbrev(&commit->object.oid,
                                                  DEFAULT_ABBREV));
        }
        puts(pretty_str);
index 41e5e71cad660d26ddc90ffeaa383fd7bf10f79f..f2eb1a7724058bb1db237a6199d16e5ff1ef495a 100644 (file)
@@ -29,7 +29,7 @@ static void show_one(const char *refname, const struct object_id *oid)
        if (quiet)
                return;
 
-       hex = find_unique_abbrev(oid->hash, abbrev);
+       hex = find_unique_abbrev(oid, abbrev);
        if (hash_only)
                printf("%s\n", hex);
        else
@@ -39,7 +39,7 @@ static void show_one(const char *refname, const struct object_id *oid)
                return;
 
        if (!peel_ref(refname, &peeled)) {
-               hex = find_unique_abbrev(peeled.hash, abbrev);
+               hex = find_unique_abbrev(&peeled, abbrev);
                printf("%s %s^{}\n", hex, refname);
        }
 }
index a5c4a8a6941d46c78e4dc58d0df47532f0bb2b45..6ba8587b6d3b7b8b1bc7a96451916c60210b093b 100644 (file)
@@ -20,6 +20,7 @@
 #define OPT_QUIET (1 << 0)
 #define OPT_CACHED (1 << 1)
 #define OPT_RECURSIVE (1 << 2)
+#define OPT_FORCE (1 << 3)
 
 typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
                                  void *cb_data);
@@ -50,6 +51,20 @@ static char *get_default_remote(void)
        return ret;
 }
 
+static int print_default_remote(int argc, const char **argv, const char *prefix)
+{
+       const char *remote;
+
+       if (argc != 1)
+               die(_("submodule--helper print-default-remote takes no arguments"));
+
+       remote = get_default_remote();
+       if (remote)
+               printf("%s\n", remote);
+
+       return 0;
+}
+
 static int starts_with_dot_slash(const char *str)
 {
        return str[0] == '.' && is_dir_sep(str[1]);
@@ -358,6 +373,25 @@ static void module_list_active(struct module_list *list)
        *list = active_modules;
 }
 
+static char *get_up_path(const char *path)
+{
+       int i;
+       struct strbuf sb = STRBUF_INIT;
+
+       for (i = count_slashes(path); i; i--)
+               strbuf_addstr(&sb, "../");
+
+       /*
+        * Check whether 'path' ends with a slash, so that
+        * dir/sub_dir and dir/sub_dir/ produce the same
+        * output.
+        */
+       if (!is_dir_sep(path[strlen(path) - 1]))
+               strbuf_addstr(&sb, "../");
+
+       return strbuf_detach(&sb, NULL);
+}
+
 static int module_list(int argc, const char **argv, const char *prefix)
 {
        int i;
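
The get_up_path() helper added above emits one "../" per path component so that sync_submodule() can resolve relative submodule URLs from inside the submodule. A worked example derived from the code (not in the patch):

        get_up_path("dir/sub_dir");   /* -> "../../" */
        get_up_path("dir/sub_dir/");  /* -> "../../", the trailing slash adds nothing extra */
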
@@ -718,6 +752,309 @@ static int module_name(int argc, const char **argv, const char *prefix)
        return 0;
 }
 
+struct sync_cb {
+       const char *prefix;
+       unsigned int flags;
+};
+
+#define SYNC_CB_INIT { NULL, 0 }
+
+static void sync_submodule(const char *path, const char *prefix,
+                          unsigned int flags)
+{
+       const struct submodule *sub;
+       char *remote_key = NULL;
+       char *sub_origin_url, *super_config_url, *displaypath;
+       struct strbuf sb = STRBUF_INIT;
+       struct child_process cp = CHILD_PROCESS_INIT;
+       char *sub_config_path = NULL;
+
+       if (!is_submodule_active(the_repository, path))
+               return;
+
+       sub = submodule_from_path(&null_oid, path);
+
+       if (sub && sub->url) {
+               if (starts_with_dot_dot_slash(sub->url) ||
+                   starts_with_dot_slash(sub->url)) {
+                       char *remote_url, *up_path;
+                       char *remote = get_default_remote();
+                       strbuf_addf(&sb, "remote.%s.url", remote);
+
+                       if (git_config_get_string(sb.buf, &remote_url))
+                               remote_url = xgetcwd();
+
+                       up_path = get_up_path(path);
+                       sub_origin_url = relative_url(remote_url, sub->url, up_path);
+                       super_config_url = relative_url(remote_url, sub->url, NULL);
+
+                       free(remote);
+                       free(up_path);
+                       free(remote_url);
+               } else {
+                       sub_origin_url = xstrdup(sub->url);
+                       super_config_url = xstrdup(sub->url);
+               }
+       } else {
+               sub_origin_url = xstrdup("");
+               super_config_url = xstrdup("");
+       }
+
+       displaypath = get_submodule_displaypath(path, prefix);
+
+       if (!(flags & OPT_QUIET))
+               printf(_("Synchronizing submodule url for '%s'\n"),
+                        displaypath);
+
+       strbuf_reset(&sb);
+       strbuf_addf(&sb, "submodule.%s.url", sub->name);
+       if (git_config_set_gently(sb.buf, super_config_url))
+               die(_("failed to register url for submodule path '%s'"),
+                     displaypath);
+
+       if (!is_submodule_populated_gently(path, NULL))
+               goto cleanup;
+
+       prepare_submodule_repo_env(&cp.env_array);
+       cp.git_cmd = 1;
+       cp.dir = path;
+       argv_array_pushl(&cp.args, "submodule--helper",
+                        "print-default-remote", NULL);
+
+       strbuf_reset(&sb);
+       if (capture_command(&cp, &sb, 0))
+               die(_("failed to get the default remote for submodule '%s'"),
+                     path);
+
+       strbuf_strip_suffix(&sb, "\n");
+       remote_key = xstrfmt("remote.%s.url", sb.buf);
+
+       strbuf_reset(&sb);
+       submodule_to_gitdir(&sb, path);
+       strbuf_addstr(&sb, "/config");
+
+       if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url))
+               die(_("failed to update remote for submodule '%s'"),
+                     path);
+
+       if (flags & OPT_RECURSIVE) {
+               struct child_process cpr = CHILD_PROCESS_INIT;
+
+               cpr.git_cmd = 1;
+               cpr.dir = path;
+               prepare_submodule_repo_env(&cpr.env_array);
+
+               argv_array_push(&cpr.args, "--super-prefix");
+               argv_array_pushf(&cpr.args, "%s/", displaypath);
+               argv_array_pushl(&cpr.args, "submodule--helper", "sync",
+                                "--recursive", NULL);
+
+               if (flags & OPT_QUIET)
+                       argv_array_push(&cpr.args, "--quiet");
+
+               if (run_command(&cpr))
+                       die(_("failed to recurse into submodule '%s'"),
+                             path);
+       }
+
+cleanup:
+       free(super_config_url);
+       free(sub_origin_url);
+       strbuf_release(&sb);
+       free(remote_key);
+       free(displaypath);
+       free(sub_config_path);
+}
+
+static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data)
+{
+       struct sync_cb *info = cb_data;
+       sync_submodule(list_item->name, info->prefix, info->flags);
+
+}
+
+static int module_sync(int argc, const char **argv, const char *prefix)
+{
+       struct sync_cb info = SYNC_CB_INIT;
+       struct pathspec pathspec;
+       struct module_list list = MODULE_LIST_INIT;
+       int quiet = 0;
+       int recursive = 0;
+
+       struct option module_sync_options[] = {
+               OPT__QUIET(&quiet, N_("Suppress output of synchronizing submodule url")),
+               OPT_BOOL(0, "recursive", &recursive,
+                       N_("Recurse into nested submodules")),
+               OPT_END()
+       };
+
+       const char *const git_submodule_helper_usage[] = {
+               N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"),
+               NULL
+       };
+
+       argc = parse_options(argc, argv, prefix, module_sync_options,
+                            git_submodule_helper_usage, 0);
+
+       if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+               return 1;
+
+       info.prefix = prefix;
+       if (quiet)
+               info.flags |= OPT_QUIET;
+       if (recursive)
+               info.flags |= OPT_RECURSIVE;
+
+       for_each_listed_submodule(&list, sync_submodule_cb, &info);
+
+       return 0;
+}
+
+struct deinit_cb {
+       const char *prefix;
+       unsigned int flags;
+};
+#define DEINIT_CB_INIT { NULL, 0 }
+
+static void deinit_submodule(const char *path, const char *prefix,
+                            unsigned int flags)
+{
+       const struct submodule *sub;
+       char *displaypath = NULL;
+       struct child_process cp_config = CHILD_PROCESS_INIT;
+       struct strbuf sb_config = STRBUF_INIT;
+       char *sub_git_dir = xstrfmt("%s/.git", path);
+
+       sub = submodule_from_path(&null_oid, path);
+
+       if (!sub || !sub->name)
+               goto cleanup;
+
+       displaypath = get_submodule_displaypath(path, prefix);
+
+       /* remove the submodule work tree (unless the user already did it) */
+       if (is_directory(path)) {
+               struct strbuf sb_rm = STRBUF_INIT;
+               const char *format;
+
+               /*
+                * protect submodules containing a .git directory
+                * NEEDSWORK: instead of dying, automatically call
+                * absorbgitdirs and (possibly) warn.
+                */
+               if (is_directory(sub_git_dir))
+                       die(_("Submodule work tree '%s' contains a .git "
+                             "directory (use 'rm -rf' if you really want "
+                             "to remove it including all of its history)"),
+                           displaypath);
+
+               if (!(flags & OPT_FORCE)) {
+                       struct child_process cp_rm = CHILD_PROCESS_INIT;
+                       cp_rm.git_cmd = 1;
+                       argv_array_pushl(&cp_rm.args, "rm", "-qn",
+                                        path, NULL);
+
+                       if (run_command(&cp_rm))
+                               die(_("Submodule work tree '%s' contains local "
+                                     "modifications; use '-f' to discard them"),
+                                     displaypath);
+               }
+
+               strbuf_addstr(&sb_rm, path);
+
+               if (!remove_dir_recursively(&sb_rm, 0))
+                       format = _("Cleared directory '%s'\n");
+               else
+                       format = _("Could not remove submodule work tree '%s'\n");
+
+               if (!(flags & OPT_QUIET))
+                       printf(format, displaypath);
+
+               strbuf_release(&sb_rm);
+       }
+
+       if (mkdir(path, 0777))
+               printf(_("could not create empty submodule directory %s"),
+                     displaypath);
+
+       cp_config.git_cmd = 1;
+       argv_array_pushl(&cp_config.args, "config", "--get-regexp", NULL);
+       argv_array_pushf(&cp_config.args, "submodule.%s\\.", sub->name);
+
+       /* remove the .git/config entries (unless the user already did it) */
+       if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) {
+               char *sub_key = xstrfmt("submodule.%s", sub->name);
+               /*
+                * remove the whole section so we have a clean state when
+                * the user later decides to init this submodule again
+                */
+               git_config_rename_section_in_file(NULL, sub_key, NULL);
+               if (!(flags & OPT_QUIET))
+                       printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"),
+                                sub->name, sub->url, displaypath);
+               free(sub_key);
+       }
+
+cleanup:
+       free(displaypath);
+       free(sub_git_dir);
+       strbuf_release(&sb_config);
+}
+
+static void deinit_submodule_cb(const struct cache_entry *list_item,
+                               void *cb_data)
+{
+       struct deinit_cb *info = cb_data;
+       deinit_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_deinit(int argc, const char **argv, const char *prefix)
+{
+       struct deinit_cb info = DEINIT_CB_INIT;
+       struct pathspec pathspec;
+       struct module_list list = MODULE_LIST_INIT;
+       int quiet = 0;
+       int force = 0;
+       int all = 0;
+
+       struct option module_deinit_options[] = {
+               OPT__QUIET(&quiet, N_("Suppress submodule status output")),
+               OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes"), 0),
+               OPT_BOOL(0, "all", &all, N_("Unregister all submodules")),
+               OPT_END()
+       };
+
+       const char *const git_submodule_helper_usage[] = {
+               N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"),
+               NULL
+       };
+
+       argc = parse_options(argc, argv, prefix, module_deinit_options,
+                            git_submodule_helper_usage, 0);
+
+       if (all && argc) {
+               error("pathspec and --all are incompatible");
+               usage_with_options(git_submodule_helper_usage,
+                                  module_deinit_options);
+       }
+
+       if (!argc && !all)
+               die(_("Use '--all' if you really want to deinitialize all submodules"));
+
+       if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+               return 1;
+
+       info.prefix = prefix;
+       if (quiet)
+               info.flags |= OPT_QUIET;
+       if (force)
+               info.flags |= OPT_FORCE;
+
+       for_each_listed_submodule(&list, deinit_submodule_cb, &info);
+
+       return 0;
+}
+
 static int clone_submodule(const char *path, const char *gitdir, const char *url,
                           const char *depth, struct string_list *reference,
                           int quiet, int progress)
@@ -1498,6 +1835,9 @@ static struct cmd_struct commands[] = {
        {"resolve-relative-url-test", resolve_relative_url_test, 0},
        {"init", module_init, SUPPORT_SUPER_PREFIX},
        {"status", module_status, SUPPORT_SUPER_PREFIX},
+       {"print-default-remote", print_default_remote, 0},
+       {"sync", module_sync, SUPPORT_SUPER_PREFIX},
+       {"deinit", module_deinit, 0},
        {"remote-branch", resolve_remote_submodule_branch, 0},
        {"push-check", push_check, 0},
        {"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
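
The three new entries register the helper subcommands that the git-submodule script can delegate to; print-default-remote exists mainly so that sync_submodule() above can ask a submodule which remote's URL to rewrite. A hedged sketch of that call, mirroring the sync hunk:

        struct child_process cp = CHILD_PROCESS_INIT;
        struct strbuf out = STRBUF_INIT;
        prepare_submodule_repo_env(&cp.env_array);
        cp.git_cmd = 1;
        cp.dir = path;  /* the submodule's working tree */
        argv_array_pushl(&cp.args, "submodule--helper", "print-default-remote", NULL);
        if (!capture_command(&cp, &out, 0))
                strbuf_strip_suffix(&out, "\n");  /* out.buf now holds e.g. "origin" */
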
index a7e6a5b0f234a95fb45a71d7e9aa7f0baa2b47f8..8cff6d0b727a572a34adcfe72246953b93c79edd 100644 (file)
@@ -99,7 +99,8 @@ static int delete_tag(const char *name, const char *ref,
 {
        if (delete_ref(NULL, ref, oid, 0))
                return 1;
-       printf(_("Deleted tag '%s' (was %s)\n"), name, find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+       printf(_("Deleted tag '%s' (was %s)\n"), name,
+              find_unique_abbrev(oid, DEFAULT_ABBREV));
        return 0;
 }
 
@@ -167,7 +168,7 @@ static void write_tag_body(int fd, const struct object_id *oid)
        enum object_type type;
        char *buf, *sp;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return;
        /* skip header */
@@ -187,13 +188,14 @@ static int build_tag_object(struct strbuf *buf, int sign, struct object_id *resu
 {
        if (sign && do_sign(buf) < 0)
                return error(_("unable to sign the tag"));
-       if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0)
+       if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
                return error(_("unable to write tag file"));
        return 0;
 }
 
 struct create_tag_options {
        unsigned int message_given:1;
+       unsigned int use_editor:1;
        unsigned int sign;
        enum {
                CLEANUP_NONE,
@@ -210,7 +212,7 @@ static void create_tag(const struct object_id *object, const char *tag,
        struct strbuf header = STRBUF_INIT;
        char *path = NULL;
 
-       type = sha1_object_info(object->hash, NULL);
+       type = oid_object_info(object, NULL);
        if (type <= OBJ_NONE)
            die(_("bad object type."));
 
@@ -220,11 +222,11 @@ static void create_tag(const struct object_id *object, const char *tag,
                    "tag %s\n"
                    "tagger %s\n\n",
                    oid_to_hex(object),
-                   typename(type),
+                   type_name(type),
                    tag,
                    git_committer_info(IDENT_STRICT));
 
-       if (!opt->message_given) {
+       if (!opt->message_given || opt->use_editor) {
                int fd;
 
                /* write the template message before editing: */
@@ -233,7 +235,10 @@ static void create_tag(const struct object_id *object, const char *tag,
                if (fd < 0)
                        die_errno(_("could not create file '%s'"), path);
 
-               if (!is_null_oid(prev)) {
+               if (opt->message_given) {
+                       write_or_die(fd, buf->buf, buf->len);
+                       strbuf_reset(buf);
+               } else if (!is_null_oid(prev)) {
                        write_tag_body(fd, prev);
                } else {
                        struct strbuf buf = STRBUF_INIT;
@@ -289,17 +294,17 @@ static void create_reflog_msg(const struct object_id *oid, struct strbuf *sb)
                strbuf_addstr(sb, rla);
        } else {
                strbuf_addstr(sb, "tag: tagging ");
-               strbuf_add_unique_abbrev(sb, oid->hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(sb, oid, DEFAULT_ABBREV);
        }
 
        strbuf_addstr(sb, " (");
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        switch (type) {
        default:
                strbuf_addstr(sb, "object of unknown type");
                break;
        case OBJ_COMMIT:
-               if ((buf = read_sha1_file(oid->hash, &type, &size)) != NULL) {
+               if ((buf = read_object_file(oid, &type, &size)) != NULL) {
                        subject_len = find_commit_subject(buf, &subject_start);
                        strbuf_insert(sb, sb->len, subject_start, subject_len);
                } else {
@@ -372,6 +377,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
        static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting;
        struct ref_format format = REF_FORMAT_INIT;
        int icase = 0;
+       int edit_flag = 0;
        struct option options[] = {
                OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'),
                { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"),
@@ -386,12 +392,13 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                OPT_CALLBACK('m', "message", &msg, N_("message"),
                             N_("tag message"), parse_msg_arg),
                OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
+               OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
                OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
                OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"),
                        N_("how to strip spaces and #comments from message")),
                OPT_STRING('u', "local-user", &keyid, N_("key-id"),
                                        N_("use another key to sign the tag")),
-               OPT__FORCE(&force, N_("replace the tag if exists")),
+               OPT__FORCE(&force, N_("replace the tag if exists"), 0),
                OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")),
 
                OPT_GROUP(N_("Tag listing options")),
@@ -524,6 +531,7 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                die(_("tag '%s' already exists"), tag);
 
        opt.message_given = msg.given || msgfile;
+       opt.use_editor = edit_flag;
 
        if (!cleanup_arg || !strcmp(cleanup_arg, "strip"))
                opt.cleanup_mode = CLEANUP_ALL;
@@ -551,7 +559,8 @@ int cmd_tag(int argc, const char **argv, const char *prefix)
                die("%s", err.buf);
        ref_transaction_free(transaction);
        if (force && !is_null_oid(&prev) && oidcmp(&prev, &object))
-               printf(_("Updated tag '%s' (was %s)\n"), tag, find_unique_abbrev(prev.hash, DEFAULT_ABBREV));
+               printf(_("Updated tag '%s' (was %s)\n"), tag,
+                      find_unique_abbrev(&prev, DEFAULT_ABBREV));
 
        UNLEAK(buf);
        UNLEAK(ref);
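
The new -e/--edit option added above lets a message supplied with -m or -F be edited before the tag is created: create_tag() now writes the given text into the template file and launches the editor even though a message was given. A condensed sketch of the decision:

        opt.message_given = msg.given || msgfile;
        opt.use_editor    = edit_flag;  /* set by the new -e/--edit option */

        if (!opt.message_given || opt.use_editor) {
                /* seed the template file with the -m/-F text, if any,
                 * then open the editor as for an empty message */
        }
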
index 32e01555774c838e489fd33c675488e754c3e8e2..300eb59657e29cace38798029a9170834cac7c9e 100644 (file)
@@ -9,7 +9,7 @@ static char *create_temp_file(struct object_id *oid)
        unsigned long size;
        int fd;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf || type != OBJ_BLOB)
                die("unable to read blob object %s", oid_to_hex(oid));
 
index 62ea264c46783374d0f1968c19ea7581498a1f87..b7755c6cc5a05a536666d0ae2fd9d0c3171a6cfd 100644 (file)
@@ -21,7 +21,7 @@ static unsigned char buffer[4096];
 static unsigned int offset, len;
 static off_t consumed_bytes;
 static off_t max_input_size;
-static git_SHA_CTX ctx;
+static git_hash_ctx ctx;
 static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
 
 /*
@@ -62,7 +62,7 @@ static void *fill(int min)
        if (min > sizeof(buffer))
                die("cannot fill %d bytes", min);
        if (offset) {
-               git_SHA1_Update(&ctx, buffer, offset);
+               the_hash_algo->update_fn(&ctx, buffer, offset);
                memmove(buffer, buffer + offset, len);
                offset = 0;
        }
@@ -158,6 +158,7 @@ struct obj_info {
        struct object *obj;
 };
 
+/* Remember to update object flag allocation in object.h */
 #define FLAG_OPEN (1u<<20)
 #define FLAG_WRITTEN (1u<<21)
 
@@ -172,7 +173,8 @@ static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
 {
        struct object_id oid;
 
-       if (write_sha1_file(obj_buf->buffer, obj_buf->size, typename(obj->type), oid.hash) < 0)
+       if (write_object_file(obj_buf->buffer, obj_buf->size,
+                             type_name(obj->type), &oid) < 0)
                die("failed to write object %s", oid_to_hex(&obj->oid));
        obj->flags |= FLAG_WRITTEN;
 }
@@ -197,7 +199,7 @@ static int check_object(struct object *obj, int type, void *data, struct fsck_op
 
        if (!(obj->flags & FLAG_OPEN)) {
                unsigned long size;
-               int type = sha1_object_info(obj->oid.hash, &size);
+               int type = oid_object_info(&obj->oid, &size);
                if (type != obj->type || type <= 0)
                        die("object of unexpected type");
                obj->flags |= FLAG_WRITTEN;
@@ -237,14 +239,16 @@ static void write_object(unsigned nr, enum object_type type,
                         void *buf, unsigned long size)
 {
        if (!strict) {
-               if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+               if (write_object_file(buf, size, type_name(type),
+                                     &obj_list[nr].oid) < 0)
                        die("failed to write object");
                added_object(nr, type, buf, size);
                free(buf);
                obj_list[nr].obj = NULL;
        } else if (type == OBJ_BLOB) {
                struct blob *blob;
-               if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+               if (write_object_file(buf, size, type_name(type),
+                                     &obj_list[nr].oid) < 0)
                        die("failed to write object");
                added_object(nr, type, buf, size);
                free(buf);
@@ -258,12 +262,12 @@ static void write_object(unsigned nr, enum object_type type,
        } else {
                struct object *obj;
                int eaten;
-               hash_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash);
+               hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
                added_object(nr, type, buf, size);
                obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
                                          &eaten);
                if (!obj)
-                       die("invalid %s", typename(type));
+                       die("invalid %s", type_name(type));
                add_object_buffer(obj, buf, size);
                obj->flags |= FLAG_OPEN;
                obj_list[nr].obj = obj;
@@ -345,8 +349,8 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
        struct object_id base_oid;
 
        if (type == OBJ_REF_DELTA) {
-               hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ));
-               use(GIT_SHA1_RAWSZ);
+               hashcpy(base_oid.hash, fill(the_hash_algo->rawsz));
+               use(the_hash_algo->rawsz);
                delta_data = get_data(delta_size);
                if (dry_run || !delta_data) {
                        free(delta_data);
@@ -419,7 +423,7 @@ static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
        if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
                return;
 
-       base = read_sha1_file(base_oid.hash, &type, &base_size);
+       base = read_object_file(&base_oid, &type, &base_size);
        if (!base) {
                error("failed to read delta-pack base object %s",
                      oid_to_hex(&base_oid));
@@ -564,15 +568,15 @@ int cmd_unpack_objects(int argc, const char **argv, const char *prefix)
                /* We don't take any non-flag arguments now.. Maybe some day */
                usage(unpack_usage);
        }
-       git_SHA1_Init(&ctx);
+       the_hash_algo->init_fn(&ctx);
        unpack_all();
-       git_SHA1_Update(&ctx, buffer, offset);
-       git_SHA1_Final(oid.hash, &ctx);
+       the_hash_algo->update_fn(&ctx, buffer, offset);
+       the_hash_algo->final_fn(oid.hash, &ctx);
        if (strict)
                write_rest();
-       if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash))
+       if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
                die("final sha1 did not match");
-       use(GIT_SHA1_RAWSZ);
+       use(the_hash_algo->rawsz);
 
        /* Write the last part of the buffer to stdout */
        while (len) {
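
The hunks above drop the hard-coded git_SHA1_Init/Update/Final calls in favour of the pluggable the_hash_algo interface. As a rough sketch (not part of this patch), hashing an in-memory buffer through that interface looks roughly like the helper below; buf and len stand in for whatever data a caller already has, and git_hash_ctx/GIT_MAX_RAWSZ come from the headers this series already touches.

	#include "cache.h"

	/*
	 * Sketch only: hash a buffer with whatever algorithm the_hash_algo
	 * points at, mirroring the init_fn/update_fn/final_fn sequence used
	 * in cmd_unpack_objects() above.
	 */
	static void hash_buffer(const void *buf, size_t len,
				unsigned char hash[GIT_MAX_RAWSZ])
	{
		git_hash_ctx ctx;

		the_hash_algo->init_fn(&ctx);
		the_hash_algo->update_fn(&ctx, buf, len);
		the_hash_algo->final_fn(hash, &ctx);
		/* hash now holds the_hash_algo->rawsz bytes */
	}
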
index 58d1c2d2827d61899d73f1ea7632c5ee219f3ace..10d070a76fb1b0b94c058f60934bb05db37a4164 100644 (file)
@@ -592,7 +592,7 @@ static struct cache_entry *read_one_ent(const char *which,
        int size;
        struct cache_entry *ce;
 
-       if (get_tree_entry(ent->hash, path, oid.hash, &mode)) {
+       if (get_tree_entry(ent, path, &oid, &mode)) {
                if (which)
                        error("%s: not in %s branch.", path, which);
                return NULL;
@@ -1059,6 +1059,7 @@ int cmd_update_index(int argc, const char **argv, const char *prefix)
                        break;
                switch (parseopt_state) {
                case PARSE_OPT_HELP:
+               case PARSE_OPT_ERROR:
                        exit(129);
                case PARSE_OPT_NON_OPTION:
                case PARSE_OPT_DONE:
index 873070e517090cbb21bf53cbc03088da6f85ec1f..4321a344567ed83e6828be41eb150d90c4d7756a 100644 (file)
@@ -12,7 +12,7 @@ int cmd_update_server_info(int argc, const char **argv, const char *prefix)
 {
        int force = 0;
        struct option options[] = {
-               OPT__FORCE(&force, N_("update the info files from scratch")),
+               OPT__FORCE(&force, N_("update the info files from scratch"), 0),
                OPT_END()
        };
 
index ba38ac9b1518884693e2c89ec19cc9e00fce9fa3..dcdaada111071c84b56022d9050ae06ffafbc25f 100644 (file)
@@ -44,12 +44,12 @@ static int verify_commit(const char *name, unsigned flags)
        if (get_oid(name, &oid))
                return error("commit '%s' not found.", name);
 
-       buf = read_sha1_file(oid.hash, &type, &size);
+       buf = read_object_file(&oid, &type, &size);
        if (!buf)
                return error("%s: unable to read file.", name);
        if (type != OBJ_COMMIT)
                return error("%s: cannot verify a non-commit object of type %s.",
-                               name, typename(type));
+                               name, type_name(type));
 
        ret = run_gpg_verify(&oid, buf, size, flags);
 
index 9efdc224661b02192d06c6877e49c5585ddbcdca..40a438ed6ce802e0745495ba611e49c23e2eefce 100644 (file)
@@ -17,7 +17,9 @@ static const char * const worktree_usage[] = {
        N_("git worktree add [<options>] <path> [<commit-ish>]"),
        N_("git worktree list [<options>]"),
        N_("git worktree lock [<options>] <path>"),
+       N_("git worktree move <worktree> <new-path>"),
        N_("git worktree prune [<options>]"),
+       N_("git worktree remove [<options>] <worktree>"),
        N_("git worktree unlock <path>"),
        NULL
 };
@@ -99,16 +101,9 @@ static int prune_worktree(const char *id, struct strbuf *reason)
        }
        path[len] = '\0';
        if (!file_exists(path)) {
-               struct stat st_link;
                free(path);
-               /*
-                * the repo is moved manually and has not been
-                * accessed since?
-                */
-               if (!stat(git_path("worktrees/%s/link", id), &st_link) &&
-                   st_link.st_nlink > 1)
-                       return 0;
-               if (st.st_mtime <= expire) {
+               if (stat(git_path("worktrees/%s/index", id), &st) ||
+                   st.st_mtime <= expire) {
                        strbuf_addf(reason, _("Removing worktrees/%s: gitdir file points to non-existent location"), id);
                        return 1;
                } else {
@@ -345,9 +340,23 @@ static int add_worktree(const char *path, const char *refname,
         * Hook failure does not warrant worktree deletion, so run hook after
         * is_junk is cleared, but do return appropriate code when hook fails.
         */
-       if (!ret && opts->checkout)
-               ret = run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid),
-                                 oid_to_hex(&commit->object.oid), "1", NULL);
+       if (!ret && opts->checkout) {
+               const char *hook = find_hook("post-checkout");
+               if (hook) {
+                       const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL };
+                       cp.git_cmd = 0;
+                       cp.no_stdin = 1;
+                       cp.stdout_to_stderr = 1;
+                       cp.dir = path;
+                       cp.env = env;
+                       cp.argv = NULL;
+                       argv_array_pushl(&cp.args, absolute_path(hook),
+                                        oid_to_hex(&null_oid),
+                                        oid_to_hex(&commit->object.oid),
+                                        "1", NULL);
+                       ret = run_command(&cp);
+               }
+       }
 
        argv_array_clear(&child_env);
        strbuf_release(&sb);
@@ -365,7 +374,9 @@ static int add(int ac, const char **av, const char *prefix)
        const char *branch;
        const char *opt_track = NULL;
        struct option options[] = {
-               OPT__FORCE(&opts.force, N_("checkout <branch> even if already checked out in other worktree")),
+               OPT__FORCE(&opts.force,
+                          N_("checkout <branch> even if already checked out in other worktree"),
+                          PARSE_OPT_NOCOMPLETE),
                OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
                           N_("create a new branch")),
                OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
@@ -484,7 +495,7 @@ static void show_worktree(struct worktree *wt, int path_maxlen, int abbrev_len)
                strbuf_addstr(&sb, "(bare)");
        else {
                strbuf_addf(&sb, "%-*s ", abbrev_len,
-                               find_unique_abbrev(wt->head_oid.hash, DEFAULT_ABBREV));
+                               find_unique_abbrev(&wt->head_oid, DEFAULT_ABBREV));
                if (wt->is_detached)
                        strbuf_addstr(&sb, "(detached HEAD)");
                else if (wt->head_ref) {
@@ -509,7 +520,7 @@ static void measure_widths(struct worktree **wt, int *abbrev, int *maxlen)
 
                if (path_len > *maxlen)
                        *maxlen = path_len;
-               sha1_len = strlen(find_unique_abbrev(wt[i]->head_oid.hash, *abbrev));
+               sha1_len = strlen(find_unique_abbrev(&wt[i]->head_oid, *abbrev));
                if (sha1_len > *abbrev)
                        *abbrev = sha1_len;
        }
@@ -605,6 +616,220 @@ static int unlock_worktree(int ac, const char **av, const char *prefix)
        return ret;
 }
 
+static void validate_no_submodules(const struct worktree *wt)
+{
+       struct index_state istate = { NULL };
+       int i, found_submodules = 0;
+
+       if (read_index_from(&istate, worktree_git_path(wt, "index"),
+                           get_worktree_git_dir(wt)) > 0) {
+               for (i = 0; i < istate.cache_nr; i++) {
+                       struct cache_entry *ce = istate.cache[i];
+
+                       if (S_ISGITLINK(ce->ce_mode)) {
+                               found_submodules = 1;
+                               break;
+                       }
+               }
+       }
+       discard_index(&istate);
+
+       if (found_submodules)
+               die(_("working trees containing submodules cannot be moved or removed"));
+}
+
+static int move_worktree(int ac, const char **av, const char *prefix)
+{
+       struct option options[] = {
+               OPT_END()
+       };
+       struct worktree **worktrees, *wt;
+       struct strbuf dst = STRBUF_INIT;
+       struct strbuf errmsg = STRBUF_INIT;
+       const char *reason;
+       char *path;
+
+       ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+       if (ac != 2)
+               usage_with_options(worktree_usage, options);
+
+       path = prefix_filename(prefix, av[1]);
+       strbuf_addstr(&dst, path);
+       free(path);
+
+       worktrees = get_worktrees(0);
+       wt = find_worktree(worktrees, prefix, av[0]);
+       if (!wt)
+               die(_("'%s' is not a working tree"), av[0]);
+       if (is_main_worktree(wt))
+               die(_("'%s' is a main working tree"), av[0]);
+       if (is_directory(dst.buf)) {
+               const char *sep = find_last_dir_sep(wt->path);
+
+               if (!sep)
+                       die(_("could not figure out destination name from '%s'"),
+                           wt->path);
+               strbuf_trim_trailing_dir_sep(&dst);
+               strbuf_addstr(&dst, sep);
+       }
+       if (file_exists(dst.buf))
+               die(_("target '%s' already exists"), dst.buf);
+
+       validate_no_submodules(wt);
+
+       reason = is_worktree_locked(wt);
+       if (reason) {
+               if (*reason)
+                       die(_("cannot move a locked working tree, lock reason: %s"),
+                           reason);
+               die(_("cannot move a locked working tree"));
+       }
+       if (validate_worktree(wt, &errmsg, 0))
+               die(_("validation failed, cannot move working tree: %s"),
+                   errmsg.buf);
+       strbuf_release(&errmsg);
+
+       if (rename(wt->path, dst.buf) == -1)
+               die_errno(_("failed to move '%s' to '%s'"), wt->path, dst.buf);
+
+       update_worktree_location(wt, dst.buf);
+
+       strbuf_release(&dst);
+       free_worktrees(worktrees);
+       return 0;
+}
+
+/*
+ * Note, "git status --porcelain" is used to determine if it's safe to
+ * delete a whole worktree. "git status" does not ignore user
+ * configuration, so if a normal "git status" shows "clean" for the
+ * user, then it's ok to remove it.
+ *
+ * This assumption may be a bad one. We may want to ignore
+ * (potentially bad) user settings and only delete a worktree when
+ * it's absolutely safe to do so from _our_ point of view because we
+ * know better.
+ */
+static void check_clean_worktree(struct worktree *wt,
+                                const char *original_path)
+{
+       struct argv_array child_env = ARGV_ARRAY_INIT;
+       struct child_process cp;
+       char buf[1];
+       int ret;
+
+       /*
+        * Until we sort this out, all submodules are "dirty" and
+        * will abort this function.
+        */
+       validate_no_submodules(wt);
+
+       argv_array_pushf(&child_env, "%s=%s/.git",
+                        GIT_DIR_ENVIRONMENT, wt->path);
+       argv_array_pushf(&child_env, "%s=%s",
+                        GIT_WORK_TREE_ENVIRONMENT, wt->path);
+       memset(&cp, 0, sizeof(cp));
+       argv_array_pushl(&cp.args, "status",
+                        "--porcelain", "--ignore-submodules=none",
+                        NULL);
+       cp.env = child_env.argv;
+       cp.git_cmd = 1;
+       cp.dir = wt->path;
+       cp.out = -1;
+       ret = start_command(&cp);
+       if (ret)
+               die_errno(_("failed to run 'git status' on '%s'"),
+                         original_path);
+       ret = xread(cp.out, buf, sizeof(buf));
+       if (ret)
+               die(_("'%s' is dirty, use --force to delete it"),
+                   original_path);
+       close(cp.out);
+       ret = finish_command(&cp);
+       if (ret)
+               die_errno(_("failed to run 'git status' on '%s', code %d"),
+                         original_path, ret);
+}
+
+static int delete_git_work_tree(struct worktree *wt)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int ret = 0;
+
+       strbuf_addstr(&sb, wt->path);
+       if (remove_dir_recursively(&sb, 0)) {
+               error_errno(_("failed to delete '%s'"), sb.buf);
+               ret = -1;
+       }
+       strbuf_release(&sb);
+       return ret;
+}
+
+static int delete_git_dir(struct worktree *wt)
+{
+       struct strbuf sb = STRBUF_INIT;
+       int ret = 0;
+
+       strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id));
+       if (remove_dir_recursively(&sb, 0)) {
+               error_errno(_("failed to delete '%s'"), sb.buf);
+               ret = -1;
+       }
+       strbuf_release(&sb);
+       return ret;
+}
+
+static int remove_worktree(int ac, const char **av, const char *prefix)
+{
+       int force = 0;
+       struct option options[] = {
+               OPT_BOOL(0, "force", &force,
+                        N_("force removing even if the worktree is dirty")),
+               OPT_END()
+       };
+       struct worktree **worktrees, *wt;
+       struct strbuf errmsg = STRBUF_INIT;
+       const char *reason;
+       int ret = 0;
+
+       ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+       if (ac != 1)
+               usage_with_options(worktree_usage, options);
+
+       worktrees = get_worktrees(0);
+       wt = find_worktree(worktrees, prefix, av[0]);
+       if (!wt)
+               die(_("'%s' is not a working tree"), av[0]);
+       if (is_main_worktree(wt))
+               die(_("'%s' is a main working tree"), av[0]);
+       reason = is_worktree_locked(wt);
+       if (reason) {
+               if (*reason)
+                       die(_("cannot remove a locked working tree, lock reason: %s"),
+                           reason);
+               die(_("cannot remove a locked working tree"));
+       }
+       if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
+               die(_("validation failed, cannot remove working tree: %s"),
+                   errmsg.buf);
+       strbuf_release(&errmsg);
+
+       if (file_exists(wt->path)) {
+               if (!force)
+                       check_clean_worktree(wt, av[0]);
+
+               ret |= delete_git_work_tree(wt);
+       }
+       /*
+        * continue on even if ret is non-zero, there's no going back
+        * from here.
+        */
+       ret |= delete_git_dir(wt);
+
+       free_worktrees(worktrees);
+       return ret;
+}
+
 int cmd_worktree(int ac, const char **av, const char *prefix)
 {
        struct option options[] = {
@@ -627,5 +852,9 @@ int cmd_worktree(int ac, const char **av, const char *prefix)
                return lock_worktree(ac - 1, av + 1, prefix);
        if (!strcmp(av[1], "unlock"))
                return unlock_worktree(ac - 1, av + 1, prefix);
+       if (!strcmp(av[1], "move"))
+               return move_worktree(ac - 1, av + 1, prefix);
+       if (!strcmp(av[1], "remove"))
+               return remove_worktree(ac - 1, av + 1, prefix);
        usage_with_options(worktree_usage, options);
 }
index bd0a78aa3c56b7e817c7ddf6fcaba703d6d5fecc..c9d3c544e79f46bab9e5fd50079d1bb574b722f2 100644 (file)
@@ -19,7 +19,7 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
 {
        int flags = 0, ret;
        const char *prefix = NULL;
-       unsigned char sha1[20];
+       struct object_id oid;
        const char *me = "git-write-tree";
        struct option write_tree_options[] = {
                OPT_BIT(0, "missing-ok", &flags, N_("allow missing objects"),
@@ -38,10 +38,10 @@ int cmd_write_tree(int argc, const char **argv, const char *unused_prefix)
        argc = parse_options(argc, argv, unused_prefix, write_tree_options,
                             write_tree_usage, 0);
 
-       ret = write_cache_as_tree(sha1, flags, prefix);
+       ret = write_cache_as_tree(&oid, flags, prefix);
        switch (ret) {
        case 0:
-               printf("%s\n", sha1_to_hex(sha1));
+               printf("%s\n", oid_to_hex(&oid));
                break;
        case WRITE_TREE_UNREADABLE_INDEX:
                die("%s: error reading the index", me);
index 3310fd210a151545076169b45f5555b52acbbf9d..e5ce2a795433b5d369cf5a03aaa7d35694ae81fd 100644 (file)
@@ -12,7 +12,7 @@ static struct bulk_checkin_state {
        unsigned plugged:1;
 
        char *pack_tmp_name;
-       struct sha1file *f;
+       struct hashfile *f;
        off_t offset;
        struct pack_idx_option pack_idx_opts;
 
@@ -35,9 +35,9 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
                unlink(state->pack_tmp_name);
                goto clear_exit;
        } else if (state->nr_written == 1) {
-               sha1close(state->f, oid.hash, CSUM_FSYNC);
+               hashclose(state->f, oid.hash, CSUM_FSYNC);
        } else {
-               int fd = sha1close(state->f, oid.hash, 0);
+               int fd = hashclose(state->f, oid.hash, 0);
                fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
                                         state->nr_written, oid.hash,
                                         state->offset);
@@ -60,17 +60,17 @@ static void finish_bulk_checkin(struct bulk_checkin_state *state)
        reprepare_packed_git();
 }
 
-static int already_written(struct bulk_checkin_state *state, unsigned char sha1[])
+static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
 {
        int i;
 
        /* The object may already exist in the repository */
-       if (has_sha1_file(sha1))
+       if (has_sha1_file(oid->hash))
                return 1;
 
        /* Might want to keep the list sorted */
        for (i = 0; i < state->nr_written; i++)
-               if (!hashcmp(state->written[i]->oid.hash, sha1))
+               if (!oidcmp(&state->written[i]->oid, oid))
                        return 1;
 
        /* This is a new object we need to keep */
@@ -93,7 +93,7 @@ static int already_written(struct bulk_checkin_state *state, unsigned char sha1[
  * with a new pack.
  */
 static int stream_to_pack(struct bulk_checkin_state *state,
-                         git_SHA_CTX *ctx, off_t *already_hashed_to,
+                         git_hash_ctx *ctx, off_t *already_hashed_to,
                          int fd, size_t size, enum object_type type,
                          const char *path, unsigned flags)
 {
@@ -127,7 +127,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
                                if (rsize < hsize)
                                        hsize = rsize;
                                if (hsize)
-                                       git_SHA1_Update(ctx, ibuf, hsize);
+                                       the_hash_algo->update_fn(ctx, ibuf, hsize);
                                *already_hashed_to = offset;
                        }
                        s.next_in = ibuf;
@@ -149,7 +149,7 @@ static int stream_to_pack(struct bulk_checkin_state *state,
                                        return -1;
                                }
 
-                               sha1write(state->f, obuf, written);
+                               hashwrite(state->f, obuf, written);
                                state->offset += written;
                        }
                        s.next_out = obuf;
@@ -186,16 +186,16 @@ static void prepare_to_stream(struct bulk_checkin_state *state,
 }
 
 static int deflate_to_pack(struct bulk_checkin_state *state,
-                          unsigned char result_sha1[],
+                          struct object_id *result_oid,
                           int fd, size_t size,
                           enum object_type type, const char *path,
                           unsigned flags)
 {
        off_t seekback, already_hashed_to;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
        unsigned char obuf[16384];
        unsigned header_len;
-       struct sha1file_checkpoint checkpoint;
+       struct hashfile_checkpoint checkpoint;
        struct pack_idx_entry *idx = NULL;
 
        seekback = lseek(fd, 0, SEEK_CUR);
@@ -203,9 +203,9 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
                return error("cannot find the current offset");
 
        header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
-                              typename(type), (uintmax_t)size) + 1;
-       git_SHA1_Init(&ctx);
-       git_SHA1_Update(&ctx, obuf, header_len);
+                              type_name(type), (uintmax_t)size) + 1;
+       the_hash_algo->init_fn(&ctx);
+       the_hash_algo->update_fn(&ctx, obuf, header_len);
 
        /* Note: idx is non-NULL when we are writing */
        if ((flags & HASH_WRITE_OBJECT) != 0)
@@ -216,7 +216,7 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
        while (1) {
                prepare_to_stream(state, flags);
                if (idx) {
-                       sha1file_checkpoint(state->f, &checkpoint);
+                       hashfile_checkpoint(state->f, &checkpoint);
                        idx->offset = state->offset;
                        crc32_begin(state->f);
                }
@@ -230,23 +230,23 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
                 */
                if (!idx)
                        die("BUG: should not happen");
-               sha1file_truncate(state->f, &checkpoint);
+               hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                finish_bulk_checkin(state);
                if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
                        return error("cannot seek back");
        }
-       git_SHA1_Final(result_sha1, &ctx);
+       the_hash_algo->final_fn(result_oid->hash, &ctx);
        if (!idx)
                return 0;
 
        idx->crc32 = crc32_end(state->f);
-       if (already_written(state, result_sha1)) {
-               sha1file_truncate(state->f, &checkpoint);
+       if (already_written(state, result_oid)) {
+               hashfile_truncate(state->f, &checkpoint);
                state->offset = checkpoint.offset;
                free(idx);
        } else {
-               hashcpy(idx->oid.hash, result_sha1);
+               oidcpy(&idx->oid, result_oid);
                ALLOC_GROW(state->written,
                           state->nr_written + 1,
                           state->alloc_written);
@@ -255,11 +255,11 @@ static int deflate_to_pack(struct bulk_checkin_state *state,
        return 0;
 }
 
-int index_bulk_checkin(unsigned char *sha1,
+int index_bulk_checkin(struct object_id *oid,
                       int fd, size_t size, enum object_type type,
                       const char *path, unsigned flags)
 {
-       int status = deflate_to_pack(&state, sha1, fd, size, type,
+       int status = deflate_to_pack(&state, oid, fd, size, type,
                                     path, flags);
        if (!state.plugged)
                finish_bulk_checkin(&state);
index fbd40fc98c955c192a6de698a75b8d56af766f09..a85527318b15b36bb60b0b6b166569b4fcaa9dcf 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef BULK_CHECKIN_H
 #define BULK_CHECKIN_H
 
-extern int index_bulk_checkin(unsigned char sha1[],
+extern int index_bulk_checkin(struct object_id *oid,
                              int fd, size_t size, enum object_type type,
                              const char *path, unsigned flags);
 
index efe547e25fe2a53bd0ef7954cf3bec6d55218365..902c9b54485be2000696a697472fa10d97b36153 100644 (file)
--- a/bundle.c
+++ b/bundle.c
@@ -222,7 +222,7 @@ static int is_tag_in_date_range(struct object *tag, struct rev_info *revs)
        if (revs->max_age == -1 && revs->min_age == -1)
                goto out;
 
-       buf = read_sha1_file(tag->oid.hash, &type, &size);
+       buf = read_object_file(&tag->oid, &type, &size);
        if (!buf)
                goto out;
        line = memmem(buf, size, "\ntagger ", 8);
index 0dd6292a94e55baff0a42df89c7006a757f61065..6a555f4d431f9f6dbf8dad06d75a4ec81a4254fd 100644 (file)
@@ -84,9 +84,8 @@ static struct cache_tree_sub *find_subtree(struct cache_tree *it,
        down->namelen = pathlen;
 
        if (pos < it->subtree_nr)
-               memmove(it->down + pos + 1,
-                       it->down + pos,
-                       sizeof(down) * (it->subtree_nr - pos - 1));
+               MOVE_ARRAY(it->down + pos + 1, it->down + pos,
+                          it->subtree_nr - pos - 1);
        it->down[pos] = down;
        return down;
 }
@@ -321,7 +320,7 @@ static int update_one(struct cache_tree *it,
                struct cache_tree_sub *sub = NULL;
                const char *path, *slash;
                int pathlen, entlen;
-               const unsigned char *sha1;
+               const struct object_id *oid;
                unsigned mode;
                int expected_missing = 0;
                int contains_ita = 0;
@@ -339,7 +338,7 @@ static int update_one(struct cache_tree *it,
                                die("cache-tree.c: '%.*s' in '%s' not found",
                                    entlen, path + baselen, path);
                        i += sub->count;
-                       sha1 = sub->cache_tree->oid.hash;
+                       oid = &sub->cache_tree->oid;
                        mode = S_IFDIR;
                        contains_ita = sub->cache_tree->entry_count < 0;
                        if (contains_ita) {
@@ -348,19 +347,19 @@ static int update_one(struct cache_tree *it,
                        }
                }
                else {
-                       sha1 = ce->oid.hash;
+                       oid = &ce->oid;
                        mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                        i++;
                }
 
-               if (is_null_sha1(sha1) ||
-                   (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1))) {
+               if (is_null_oid(oid) ||
+                   (mode != S_IFGITLINK && !missing_ok && !has_object_file(oid))) {
                        strbuf_release(&buffer);
                        if (expected_missing)
                                return -1;
                        return error("invalid object %06o %s for '%.*s'",
-                               mode, sha1_to_hex(sha1), entlen+baselen, path);
+                               mode, oid_to_hex(oid), entlen+baselen, path);
                }
 
                /*
@@ -386,12 +385,12 @@ static int update_one(struct cache_tree *it,
                /*
                 * "sub" can be an empty tree if all subentries are i-t-a.
                 */
-               if (contains_ita && !hashcmp(sha1, EMPTY_TREE_SHA1_BIN))
+               if (contains_ita && !oidcmp(oid, &empty_tree_oid))
                        continue;
 
                strbuf_grow(&buffer, entlen + 100);
                strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
-               strbuf_add(&buffer, sha1, 20);
+               strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);
 
 #if DEBUG
                fprintf(stderr, "cache-tree update-one %o %.*s\n",
@@ -400,16 +399,16 @@ static int update_one(struct cache_tree *it,
        }
 
        if (repair) {
-               unsigned char sha1[20];
-               hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
-               if (has_sha1_file(sha1))
-                       hashcpy(it->oid.hash, sha1);
+               struct object_id oid;
+               hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+               if (has_object_file(&oid))
+                       oidcpy(&it->oid, &oid);
                else
                        to_invalidate = 1;
-       } else if (dryrun)
-               hash_sha1_file(buffer.buf, buffer.len, tree_type,
-                              it->oid.hash);
-       else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) {
+       } else if (dryrun) {
+               hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+       } else if (write_object_file(buffer.buf, buffer.len, tree_type,
+                                    &it->oid)) {
                strbuf_release(&buffer);
                return -1;
        }
@@ -466,7 +465,7 @@ static void write_one(struct strbuf *buffer, struct cache_tree *it,
 #endif
 
        if (0 <= it->entry_count) {
-               strbuf_add(buffer, it->oid.hash, 20);
+               strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
        }
        for (i = 0; i < it->subtree_nr; i++) {
                struct cache_tree_sub *down = it->down[i];
@@ -493,6 +492,7 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
        char *ep;
        struct cache_tree *it;
        int i, subtree_nr;
+       const unsigned rawsz = the_hash_algo->rawsz;
 
        it = NULL;
        /* skip name, but make sure name exists */
@@ -521,11 +521,11 @@ static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
                goto free_return;
        buf++; size--;
        if (0 <= it->entry_count) {
-               if (size < 20)
+               if (size < rawsz)
                        goto free_return;
-               hashcpy(it->oid.hash, (const unsigned char*)buf);
-               buf += 20;
-               size -= 20;
+               memcpy(it->oid.hash, (const unsigned char*)buf, rawsz);
+               buf += rawsz;
+               size -= rawsz;
        }
 
 #if DEBUG
@@ -600,7 +600,7 @@ static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *pat
        return it;
 }
 
-int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
+int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
 {
        int entries, was_valid;
        struct lock_file lock_file = LOCK_INIT;
@@ -641,19 +641,19 @@ int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, co
                        ret = WRITE_TREE_PREFIX_ERROR;
                        goto out;
                }
-               hashcpy(sha1, subtree->oid.hash);
+               oidcpy(oid, &subtree->oid);
        }
        else
-               hashcpy(sha1, index_state->cache_tree->oid.hash);
+               oidcpy(oid, &index_state->cache_tree->oid);
 
 out:
        rollback_lock_file(&lock_file);
        return ret;
 }
 
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix)
+int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix)
 {
-       return write_index_as_tree(sha1, &the_index, get_index_file(), flags, prefix);
+       return write_index_as_tree(oid, &the_index, get_index_file(), flags, prefix);
 }
 
 static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
index f7b9cab7ee87dd04cecbd25f34101a58fc002014..cfd5328cc93694e23037e15148241e17bd4f3a04 100644 (file)
@@ -47,8 +47,8 @@ int update_main_cache_tree(int);
 #define WRITE_TREE_UNMERGED_INDEX (-2)
 #define WRITE_TREE_PREFIX_ERROR (-3)
 
-int write_index_as_tree(unsigned char *sha1, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
-int write_cache_as_tree(unsigned char *sha1, int flags, const char *prefix);
+int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix);
+int write_cache_as_tree(struct object_id *oid, int flags, const char *prefix);
 void prime_cache_tree(struct index_state *, struct tree *);
 
 extern int cache_tree_matches_traversal(struct cache_tree *, struct name_entry *ent, struct traverse_info *info);
diff --git a/cache.h b/cache.h
index fd755c32cf5daf91115f6f534fdd9029609ee546..6e45c1b5377cfca2e0621d529f69daac304e3f12 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -4,7 +4,7 @@
 #include "git-compat-util.h"
 #include "strbuf.h"
 #include "hashmap.h"
-#include "mru.h"
+#include "list.h"
 #include "advice.h"
 #include "gettext.h"
 #include "convert.h"
 #include "sha1-array.h"
 #include "repository.h"
 
-#ifndef platform_SHA_CTX
-/*
- * platform's underlying implementation of SHA-1; could be OpenSSL,
- * blk_SHA, Apple CommonCrypto, etc...  Note that including
- * SHA1_HEADER may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
- * the default for OpenSSL compatible SHA-1 implementations here.
- */
-#define platform_SHA_CTX       SHA_CTX
-#define platform_SHA1_Init     SHA1_Init
-#define platform_SHA1_Update   SHA1_Update
-#define platform_SHA1_Final            SHA1_Final
-#endif
-
-#define git_SHA_CTX            platform_SHA_CTX
-#define git_SHA1_Init          platform_SHA1_Init
-#define git_SHA1_Update                platform_SHA1_Update
-#define git_SHA1_Final         platform_SHA1_Final
-
-#ifdef SHA1_MAX_BLOCK_SIZE
-#include "compat/sha1-chunked.h"
-#undef git_SHA1_Update
-#define git_SHA1_Update                git_SHA1_Update_Chunked
-#endif
-
 #include <zlib.h>
 typedef struct git_zstream {
        z_stream z;
@@ -484,7 +459,7 @@ static inline enum object_type object_type(unsigned int mode)
  */
 extern const char * const local_repo_env[];
 
-extern void setup_git_env(void);
+extern void setup_git_env(const char *git_dir);
 
 /*
  * Returns true iff we have a configured git repository (either via
@@ -624,6 +599,7 @@ extern int read_index_unmerged(struct index_state *);
 
 /* For use with `write_locked_index()`. */
 #define COMMIT_LOCK            (1 << 0)
+#define SKIP_IF_UNCHANGED      (1 << 1)
 
 /*
  * Write the index while holding an already-taken lock. Close the lock,
@@ -640,6 +616,9 @@ extern int read_index_unmerged(struct index_state *);
  * With `COMMIT_LOCK`, the lock is always committed or rolled back.
  * Without it, the lock is closed, but neither committed nor rolled
  * back.
+ *
+ * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
+ * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
  */
 extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
 
@@ -916,10 +895,13 @@ extern int grafts_replace_parents;
 #define GIT_REPO_VERSION 0
 #define GIT_REPO_VERSION_READ 1
 extern int repository_format_precious_objects;
+extern char *repository_format_partial_clone;
+extern const char *core_partial_clone_filter_default;
 
 struct repository_format {
        int version;
        int precious_objects;
+       char *partial_clone; /* value of extensions.partialclone */
        int is_bare;
        int hash_algo;
        char *work_tree;
@@ -959,12 +941,10 @@ extern void check_repository_format(void);
 #define TYPE_CHANGED    0x0040
 
 /*
- * Return the name of the file in the local object database that would
- * be used to store a loose object with the specified sha1.  The
- * return value is a pointer to a statically allocated buffer that is
- * overwritten each time the function is called.
+ * Put in `buf` the name of the file in the local object database that
+ * would be used to store a loose object with the specified sha1.
  */
-extern const char *sha1_file_name(const unsigned char *sha1);
+extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
 
 /*
  * Return an abbreviated sha1 unique within this repository's object database.
@@ -975,14 +955,14 @@ extern const char *sha1_file_name(const unsigned char *sha1);
  * more calls to find_unique_abbrev are made.
  *
  * The `_r` variant writes to a buffer supplied by the caller, which must be at
- * least `GIT_SHA1_HEXSZ + 1` bytes. The return value is the number of bytes
+ * least `GIT_MAX_HEXSZ + 1` bytes. The return value is the number of bytes
  * written (excluding the NUL terminator).
  *
  * Note that while this version avoids the static buffer, it is not fully
  * reentrant, as it calls into other non-reentrant git code.
  */
-extern const char *find_unique_abbrev(const unsigned char *sha1, int len);
-extern int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len);
+extern const char *find_unique_abbrev(const struct object_id *oid, int len);
+extern int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len);
 
 extern const unsigned char null_sha1[GIT_MAX_RAWSZ];
 extern const struct object_id null_oid;
@@ -1031,7 +1011,7 @@ static inline void hashclr(unsigned char *hash)
 
 static inline void oidclr(struct object_id *oid)
 {
-       hashclr(oid->hash);
+       memset(oid->hash, 0, GIT_MAX_RAWSZ);
 }
 
 
@@ -1049,8 +1029,6 @@ extern const struct object_id empty_tree_oid;
        "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
        "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
 extern const struct object_id empty_blob_oid;
-#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash)
-
 
 static inline int is_empty_blob_sha1(const unsigned char *sha1)
 {
@@ -1211,19 +1189,19 @@ extern char *xdg_config_home(const char *filename);
  */
 extern char *xdg_cache_home(const char *filename);
 
-extern void *read_sha1_file_extended(const unsigned char *sha1,
-                                    enum object_type *type,
-                                    unsigned long *size, int lookup_replace);
-static inline void *read_sha1_file(const unsigned char *sha1, enum object_type *type, unsigned long *size)
+extern void *read_object_file_extended(const struct object_id *oid,
+                                      enum object_type *type,
+                                      unsigned long *size, int lookup_replace);
+static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
 {
-       return read_sha1_file_extended(sha1, type, size, 1);
+       return read_object_file_extended(oid, type, size, 1);
 }
 
 /*
  * This internal function is only declared here for the benefit of
  * lookup_replace_object().  Please do not call it directly.
  */
-extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1);
+extern const struct object_id *do_lookup_replace_object(const struct object_id *oid);
 
 /*
  * If object sha1 should be replaced, return the replacement object's
@@ -1231,39 +1209,50 @@ extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1);
  * either sha1 or a pointer to a permanently-allocated value.  When
  * object replacement is suppressed, always return sha1.
  */
-static inline const unsigned char *lookup_replace_object(const unsigned char *sha1)
+static inline const struct object_id *lookup_replace_object(const struct object_id *oid)
 {
        if (!check_replace_refs)
-               return sha1;
-       return do_lookup_replace_object(sha1);
+               return oid;
+       return do_lookup_replace_object(oid);
 }
 
-/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
-extern int sha1_object_info(const unsigned char *, unsigned long *);
-extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
-extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags);
-extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
-extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+/* Read and unpack an object file into memory, write memory to an object file */
+extern int oid_object_info(const struct object_id *, unsigned long *);
+
+extern int hash_object_file(const void *buf, unsigned long len,
+                           const char *type, struct object_id *oid);
+
+extern int write_object_file(const void *buf, unsigned long len,
+                            const char *type, struct object_id *oid);
+
+extern int hash_object_file_literally(const void *buf, unsigned long len,
+                                     const char *type, struct object_id *oid,
+                                     unsigned flags);
+
+extern int pretend_object_file(void *, unsigned long, enum object_type,
+                              struct object_id *oid);
+
+extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
 extern int git_open_cloexec(const char *name, int flags);
 #define git_open(name) git_open_cloexec(name, O_RDONLY)
 extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
 extern int unpack_sha1_header(git_zstream *stream, unsigned char *map, unsigned long mapsize, void *buffer, unsigned long bufsiz);
 extern int parse_sha1_header(const char *hdr, unsigned long *sizep);
 
-extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type);
+extern int check_object_signature(const struct object_id *oid, void *buf, unsigned long size, const char *type);
 
 extern int finalize_object_file(const char *tmpfile, const char *filename);
 
 /*
- * Open the loose object at path, check its sha1, and return the contents,
+ * Open the loose object at path, check its hash, and return the contents,
  * type, and size. If the object is a blob, then "contents" may return NULL,
  * to allow streaming of large blobs.
  *
  * Returns 0 on success, negative on error (details may be written to stderr).
  */
 int read_loose_object(const char *path,
-                     const unsigned char *expected_sha1,
+                     const struct object_id *expected_oid,
                      enum object_type *type,
                      unsigned long *size,
                      void **contents);
@@ -1290,7 +1279,7 @@ extern int has_object_file_with_flags(const struct object_id *oid, int flags);
  */
 extern int has_loose_object_nonlocal(const unsigned char *sha1);
 
-extern void assert_sha1_type(const unsigned char *sha1, enum object_type expect);
+extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
 
 /* Helper to check and "touch" a file */
 extern int check_and_freshen_file(const char *fn, int freshen);
@@ -1446,10 +1435,10 @@ extern int df_name_compare(const char *name1, int len1, int mode1, const char *n
 extern int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
 extern int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
 
-extern void *read_object_with_reference(const unsigned char *sha1,
+extern void *read_object_with_reference(const struct object_id *oid,
                                        const char *required_type,
                                        unsigned long *size,
-                                       unsigned char *sha1_ret);
+                                       struct object_id *oid_ret);
 
 extern struct object *peel_to_type(const char *name, int namelen,
                                   struct object *o, enum object_type);
@@ -1637,6 +1626,7 @@ struct pack_window {
 
 extern struct packed_git {
        struct packed_git *next;
+       struct list_head mru;
        struct pack_window *windows;
        off_t pack_size;
        const void *index_data;
@@ -1650,7 +1640,8 @@ extern struct packed_git {
        unsigned pack_local:1,
                 pack_keep:1,
                 freshened:1,
-                do_not_close:1;
+                do_not_close:1,
+                pack_promisor:1;
        unsigned char sha1[20];
        struct revindex_entry *revindex;
        /* something like ".git/objects/pack/xxxxx.pack" */
@@ -1658,10 +1649,9 @@ extern struct packed_git {
 } *packed_git;
 
 /*
- * A most-recently-used ordered version of the packed_git list, which can
- * be iterated instead of packed_git (and marked via mru_mark).
+ * A most-recently-used ordered version of the packed_git list.
  */
-extern struct mru packed_git_mru;
+extern struct list_head packed_git_mru;
 
 struct pack_entry {
        off_t offset;
@@ -1675,7 +1665,7 @@ struct pack_entry {
  * usual "XXXXXX" trailer, and the resulting filename is written into the
  * "template" buffer. Returns the open descriptor.
  */
-extern int odb_mkstemp(struct strbuf *template, const char *pattern);
+extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
 
 /*
  * Create a pack .keep file named "name" (which should generally be the output
@@ -1746,7 +1736,7 @@ struct object_info {
        unsigned long *sizep;
        off_t *disk_sizep;
        unsigned char *delta_base_sha1;
-       struct strbuf *typename;
+       struct strbuf *type_name;
        void **contentp;
 
        /* Response */
@@ -1787,7 +1777,17 @@ struct object_info {
 #define OBJECT_INFO_SKIP_CACHED 4
 /* Do not retry packed storage after checking packed and loose storage */
 #define OBJECT_INFO_QUICK 8
-extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/* Do not check loose object */
+#define OBJECT_INFO_IGNORE_LOOSE 16
+extern int oid_object_info_extended(const struct object_id *, struct object_info *, unsigned flags);
+
+/*
+ * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
+ * blobs. This has a difference only if extensions.partialClone is set.
+ *
+ * Its default value is 1.
+ */
+extern int fetch_if_missing;
 
 /* Dumb servers support */
 extern int update_server_info(int);
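
The cache.h hunks above rename the loose-object entry points (read_sha1_file, sha1_object_info, hash_sha1_file, ...) to struct object_id based equivalents. A hedged sketch of a caller on the renamed API, assuming the get_oid()/oid_to_hex() helpers that also appear elsewhere in this series, might look like:

	#include "cache.h"

	/*
	 * Sketch only: resolve a name and print its type and size using the
	 * object_id-based calls declared above. Error handling kept minimal.
	 */
	static int show_object_info(const char *name)
	{
		struct object_id oid;
		enum object_type type;
		unsigned long size;
		void *buf;

		if (get_oid(name, &oid))
			return error("'%s' is not a valid object name", name);
		buf = read_object_file(&oid, &type, &size);
		if (!buf)
			return error("unable to read %s", oid_to_hex(&oid));
		printf("%s %s %lu\n", oid_to_hex(&oid), type_name(type), size);
		free(buf);
		return 0;
	}
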
index 07f27c72700702e745eb0a9d1f9b9aa8ec549f4f..109ef280da9884027f17fe7f233b6b818bd71393 100755 (executable)
@@ -21,8 +21,6 @@ skip_branch_tip_with_tag () {
        fi
 }
 
-good_trees_file="$HOME/travis-cache/good-trees"
-
 # Save some info about the current commit's tree, so we can skip the build
 # job if we encounter the same tree again and can provide a useful info
 # message.
@@ -83,7 +81,10 @@ check_unignored_build_artifacts ()
 # and installing dependencies.
 set -ex
 
-mkdir -p "$HOME/travis-cache"
+cache_dir="$HOME/travis-cache"
+good_trees_file="$cache_dir/good-trees"
+
+mkdir -p "$cache_dir"
 
 skip_branch_tip_with_tag
 skip_good_tree
@@ -96,7 +97,7 @@ fi
 export DEVELOPER=1
 export DEFAULT_TEST_TARGET=prove
 export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save"
-export GIT_TEST_OPTS="--verbose-log"
+export GIT_TEST_OPTS="--verbose-log -x"
 export GIT_TEST_CLONE_2GB=YesPlease
 
 case "$jobname" in
index 3e23e65f9eed88d1251c9ae32cb9a24e28da8ebe..3735ce413f1835b3222fab05ba9ff5ab5205c2c0 100755 (executable)
@@ -5,7 +5,7 @@
 
 . ${0%/*}/lib-travisci.sh
 
-ln -s $HOME/travis-cache/.prove t/.prove
+ln -s "$cache_dir/.prove" t/.prove
 
 make --jobs=2
 make --quiet test
index c19c50c1c9a57c14c07ffe4f5a061bb764e51aeb..2c60d2e70ae56b59b1ff8046a21b9bc4bf87e3e6 100755 (executable)
@@ -3,31 +3,58 @@
 # Build and test Git in a 32-bit environment
 #
 # Usage:
-#   run-linux32-build.sh [host-user-id]
+#   run-linux32-build.sh <host-user-id>
 #
 
-set -x
+set -ex
+
+if test $# -ne 1 || test -z "$1"
+then
+       echo >&2 "usage: run-linux32-build.sh <host-user-id>"
+       exit 1
+fi
 
 # Update packages to the latest available versions
 linux32 --32bit i386 sh -c '
     apt update >/dev/null &&
     apt install -y build-essential libcurl4-openssl-dev libssl-dev \
        libexpat-dev gettext python >/dev/null
-' &&
+'
 
 # If this script runs inside a docker container, then all commands are
 # usually executed as root. Consequently, the host user might not be
 # able to access the test output files.
-# If a host user id is given, then create a user "ci" with the host user
-# id to make everything accessible to the host user.
-HOST_UID=$1 &&
-CI_USER=$USER &&
-test -z $HOST_UID || (CI_USER="ci" && useradd -u $HOST_UID $CI_USER) &&
+# If a non 0 host user id is given, then create a user "ci" with that
+# user id to make everything accessible to the host user.
+HOST_UID=$1
+if test $HOST_UID -eq 0
+then
+       # Just in case someone does want to run the test suite as root.
+       CI_USER=root
+else
+       CI_USER=ci
+       if test "$(id -u $CI_USER 2>/dev/null)" = $HOST_UID
+       then
+               echo "user '$CI_USER' already exists with the requested ID $HOST_UID"
+       else
+               useradd -u $HOST_UID $CI_USER
+       fi
+
+       # Due to a bug the test suite was run as root in the past, so
+       # a prove state file created back then is only accessible by
+       # root.  Now that bug is fixed, the test suite is run as a
+       # regular user, but the prove state file coming from Travis
+       # CI's cache might still be owned by root.
+       # Make sure that this user has rights to any cached files,
+       # including an existing prove state file.
+       test -n "$cache_dir" && chown -R $HOST_UID:$HOST_UID "$cache_dir"
+fi
 
 # Build and test
 linux32 --32bit i386 su -m -l $CI_USER -c '
-    cd /usr/src/git &&
-    ln -s /tmp/travis-cache/.prove t/.prove &&
-    make --jobs=2 &&
-    make --quiet test
+       set -ex
+       cd /usr/src/git
+       test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove
+       make --jobs=2
+       make --quiet test
 '
index 4f191c5bb12185251f98a2d54422640cb19b8be1..21637903ce0790b1689907f64cca302b32285028 100755 (executable)
@@ -9,7 +9,9 @@ docker pull daald/ubuntu32:xenial
 
 # Use the following command to debug the docker build locally:
 # $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
-# root@container:/# /usr/src/git/ci/run-linux32-build.sh
+# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id>
+
+container_cache_dir=/tmp/travis-cache
 
 docker run \
        --interactive \
@@ -18,8 +20,9 @@ docker run \
        --env GIT_PROVE_OPTS \
        --env GIT_TEST_OPTS \
        --env GIT_TEST_CLONE_2GB \
+       --env cache_dir="$container_cache_dir" \
        --volume "${PWD}:/usr/src/git" \
-       --volume "${HOME}/travis-cache:/tmp/travis-cache" \
+       --volume "$cache_dir:$container_cache_dir" \
        daald/ubuntu32:xenial \
        /usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
 
diff --git a/color.c b/color.c
index d48dd947c987cdca13f103fd8b4a2e56eceab238..f277e72e4ce04815f71c949dfdf7c89c9462c5b7 100644 (file)
--- a/color.c
+++ b/color.c
@@ -161,11 +161,6 @@ int color_parse(const char *value, char *dst)
        return color_parse_mem(value, strlen(value), dst);
 }
 
-void color_set(char *dst, const char *color_bytes)
-{
-       xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
-}
-
 /*
  * Write the ANSI color codes for "c" to "out"; the string should
  * already have the ANSI escape code in it. "out" should have enough
@@ -399,8 +394,6 @@ static int color_vfprintf(FILE *fp, const char *color, const char *fmt,
        return r;
 }
 
-
-
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
 {
        va_list args;
diff --git a/color.h b/color.h
index fd2b688dfbccbe4eef4c1f79b680db2a01e2f9ef..cd0bcedd084f3741fad55569b18ec15e12d75cf8 100644 (file)
--- a/color.h
+++ b/color.h
@@ -76,22 +76,46 @@ int git_color_config(const char *var, const char *value, void *cb);
 int git_color_default_config(const char *var, const char *value, void *cb);
 
 /*
- * Set the color buffer (which must be COLOR_MAXLEN bytes)
- * to the raw color bytes; this is useful for initializing
- * default color variables.
+ * Parse a config option, which can be a boolean or one of
+ * "never", "auto", "always". Return a constant of
+ * GIT_COLOR_NEVER for "never" or negative boolean,
+ * GIT_COLOR_ALWAYS for "always" or a positive boolean,
+ * and GIT_COLOR_AUTO for "auto".
  */
-void color_set(char *dst, const char *color_bytes);
-
 int git_config_colorbool(const char *var, const char *value);
+
+/*
+ * Return a boolean whether to use color, where the argument 'var' is
+ * one of GIT_COLOR_UNKNOWN, GIT_COLOR_NEVER, GIT_COLOR_ALWAYS, GIT_COLOR_AUTO.
+ */
 int want_color(int var);
+
+/*
+ * Translate a Git color from 'value' into a string that the terminal can
+ * interpret and store it into 'dst'. The Git color values are of the form
+ * "foreground [background] [attr]" where fore- and background can be a color
+ * name ("red"), a RGB code (#0xFF0000) or a 256-color-mode from the terminal.
+ */
 int color_parse(const char *value, char *dst);
 int color_parse_mem(const char *value, int len, char *dst);
+
+/*
+ * Output the formatted string in the specified color (and then reset to normal
+ * color so subsequent output is uncolored). Omits the color encapsulation if
+ * `color` is NULL. The `color_fprintf_ln` prints a new line after resetting
+ * the color.  The `color_print_strbuf` prints the contents of the given
+ * strbuf (BUG: but only up to its first NUL character).
+ */
 __attribute__((format (printf, 3, 4)))
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
 __attribute__((format (printf, 3, 4)))
 int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
 void color_print_strbuf(FILE *fp, const char *color, const struct strbuf *sb);
 
+/*
+ * Check if the given color is GIT_COLOR_NIL that means "no color selected".
+ * The caller needs to replace the color with the actual desired color.
+ */
 int color_is_nil(const char *color);
 
 #endif /* COLOR_H */
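
The new comments in color.h describe the intended call sequence: parse the configuration value with git_config_colorbool(), gate output on want_color(), and emit text with color_fprintf()/color_fprintf_ln(). A minimal sketch under those assumptions (GIT_COLOR_RED is one of the constants the header already provides; the usual git-compat-util.h include is assumed, as in the rest of the tree):

	#include "color.h"

	/*
	 * Sketch only: report a message in red when color is enabled for the
	 * given configuration value, plain otherwise.
	 */
	static void report_in_red(const char *configured, const char *msg)
	{
		int use_color = git_config_colorbool("color.ui", configured);

		if (want_color(use_color))
			color_fprintf_ln(stderr, GIT_COLOR_RED, "%s", msg);
		else
			fprintf(stderr, "%s\n", msg);
	}
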
index 2505de119a2be37e9dfb313a2e44db68595ad13f..2ef495963fc1cf2e092778d35f1965912d7eaac0 100644 (file)
@@ -162,7 +162,7 @@ enum coalesce_direction { MATCH, BASE, NEW };
 
 /* Coalesce new lines into base by finding LCS */
 static struct lline *coalesce_lines(struct lline *base, int *lenbase,
-                                   struct lline *new, int lennew,
+                                   struct lline *newline, int lennew,
                                    unsigned long parent, long flags)
 {
        int **lcs;
@@ -170,12 +170,12 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
        struct lline *baseend, *newend = NULL;
        int i, j, origbaselen = *lenbase;
 
-       if (new == NULL)
+       if (newline == NULL)
                return base;
 
        if (base == NULL) {
                *lenbase = lennew;
-               return new;
+               return newline;
        }
 
        /*
@@ -200,7 +200,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
                directions[0][j] = NEW;
 
        for (i = 1, baseend = base; i < origbaselen + 1; i++) {
-               for (j = 1, newend = new; j < lennew + 1; j++) {
+               for (j = 1, newend = newline; j < lennew + 1; j++) {
                        if (match_string_spaces(baseend->line, baseend->len,
                                                newend->line, newend->len, flags)) {
                                lcs[i][j] = lcs[i - 1][j - 1] + 1;
@@ -241,7 +241,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
                        if (lline->prev)
                                lline->prev->next = lline->next;
                        else
-                               new = lline->next;
+                               newline = lline->next;
                        if (lline->next)
                                lline->next->prev = lline->prev;
 
@@ -270,7 +270,7 @@ static struct lline *coalesce_lines(struct lline *base, int *lenbase,
                }
        }
 
-       newend = new;
+       newend = newline;
        while (newend) {
                struct lline *lline = newend;
                newend = newend->next;
@@ -306,7 +306,7 @@ static char *grab_blob(const struct object_id *oid, unsigned int mode,
                *size = fill_textconv(textconv, df, &blob);
                free_filespec(df);
        } else {
-               blob = read_sha1_file(oid->hash, &type, size);
+               blob = read_object_file(oid, &type, size);
                if (type != OBJ_BLOB)
                        die("object '%s' is not a blob!", oid_to_hex(oid));
        }
@@ -915,11 +915,11 @@ static void show_combined_header(struct combine_diff_path *elem,
                         "", elem->path, line_prefix, c_meta, c_reset);
        printf("%s%sindex ", line_prefix, c_meta);
        for (i = 0; i < num_parent; i++) {
-               abb = find_unique_abbrev(elem->parent[i].oid.hash,
+               abb = find_unique_abbrev(&elem->parent[i].oid,
                                         abbrev);
                printf("%s%s", i ? "," : "", abb);
        }
-       abb = find_unique_abbrev(elem->oid.hash, abbrev);
+       abb = find_unique_abbrev(&elem->oid, abbrev);
        printf("..%s%s\n", abb, c_reset);
 
        if (mode_differs) {
@@ -1053,7 +1053,7 @@ static void show_patch_diff(struct combine_diff_path *elem, int num_parent,
                        if (is_file) {
                                struct strbuf buf = STRBUF_INIT;
 
-                               if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) {
+                               if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) {
                                        free(result);
                                        result = strbuf_detach(&buf, &len);
                                        result_size = len;
@@ -1438,7 +1438,7 @@ void diff_tree_combined(const struct object_id *oid,
                        opt->flags.follow_renames       ||
                        opt->break_opt != -1    ||
                        opt->detect_rename      ||
-                       opt->pickaxe            ||
+                       (opt->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK)   ||
                        opt->filter;
 
 
index ff51c9f34a975fdd3320f8c185f6e3c8b5176e81..ca474a7c112855a49b77cce7cdfb83937fa17fae 100644 (file)
--- a/commit.c
+++ b/commit.c
@@ -126,10 +126,8 @@ int register_commit_graft(struct commit_graft *graft, int ignore_dups)
        ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc);
        commit_graft_nr++;
        if (pos < commit_graft_nr)
-               memmove(commit_graft + pos + 1,
-                       commit_graft + pos,
-                       (commit_graft_nr - pos - 1) *
-                       sizeof(*commit_graft));
+               MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos,
+                          commit_graft_nr - pos - 1);
        commit_graft[pos] = graft;
        return 0;
 }
@@ -268,13 +266,13 @@ const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
        if (!ret) {
                enum object_type type;
                unsigned long size;
-               ret = read_sha1_file(commit->object.oid.hash, &type, &size);
+               ret = read_object_file(&commit->object.oid, &type, &size);
                if (!ret)
                        die("cannot read commit object %s",
                            oid_to_hex(&commit->object.oid));
                if (type != OBJ_COMMIT)
                        die("expected commit for %s, got %s",
-                           oid_to_hex(&commit->object.oid), typename(type));
+                           oid_to_hex(&commit->object.oid), type_name(type));
                if (sizep)
                        *sizep = size;
        }
@@ -385,7 +383,7 @@ int parse_commit_gently(struct commit *item, int quiet_on_missing)
                return -1;
        if (item->object.parsed)
                return 0;
-       buffer = read_sha1_file(item->object.oid.hash, &type, &size);
+       buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
@@ -861,19 +859,19 @@ struct commit_list *get_octopus_merge_bases(struct commit_list *in)
        commit_list_insert(in->item, &ret);
 
        for (i = in->next; i; i = i->next) {
-               struct commit_list *new = NULL, *end = NULL;
+               struct commit_list *new_commits = NULL, *end = NULL;
 
                for (j = ret; j; j = j->next) {
                        struct commit_list *bases;
                        bases = get_merge_bases(i->item, j->item);
-                       if (!new)
-                               new = bases;
+                       if (!new_commits)
+                               new_commits = bases;
                        else
                                end->next = bases;
                        for (k = bases; k; k = k->next)
                                end = k;
                }
-               ret = new;
+               ret = new_commits;
        }
        return ret;
 }
@@ -1208,7 +1206,7 @@ static void handle_signed_tag(struct commit *parent, struct commit_extra_header
        desc = merge_remote_util(parent);
        if (!desc || !desc->obj)
                return;
-       buf = read_sha1_file(desc->obj->oid.hash, &type, &size);
+       buf = read_object_file(&desc->obj->oid, &type, &size);
        if (!buf || type != OBJ_TAG)
                goto free_return;
        len = parse_signature(buf, size);
@@ -1380,9 +1378,8 @@ void free_commit_extra_headers(struct commit_extra_header *extra)
        }
 }
 
-int commit_tree(const char *msg, size_t msg_len,
-               const unsigned char *tree,
-               struct commit_list *parents, unsigned char *ret,
+int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree,
+               struct commit_list *parents, struct object_id *ret,
                const char *author, const char *sign_commit)
 {
        struct commit_extra_header *extra = NULL, **tail = &extra;
@@ -1511,8 +1508,8 @@ N_("Warning: commit message did not conform to UTF-8.\n"
    "variable i18n.commitencoding to the encoding your project uses.\n");
 
 int commit_tree_extended(const char *msg, size_t msg_len,
-                        const unsigned char *tree,
-                        struct commit_list *parents, unsigned char *ret,
+                        const struct object_id *tree,
+                        struct commit_list *parents, struct object_id *ret,
                         const char *author, const char *sign_commit,
                         struct commit_extra_header *extra)
 {
@@ -1520,7 +1517,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
        int encoding_is_utf8;
        struct strbuf buffer;
 
-       assert_sha1_type(tree, OBJ_TREE);
+       assert_oid_type(tree, OBJ_TREE);
 
        if (memchr(msg, '\0', msg_len))
                return error("a NUL byte in commit log message not allowed.");
@@ -1529,7 +1526,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
        encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
 
        strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */
-       strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree));
+       strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree));
 
        /*
         * NOTE! This ordering means that the same exact tree merged with a
@@ -1568,7 +1565,7 @@ int commit_tree_extended(const char *msg, size_t msg_len,
                goto out;
        }
 
-       result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
+       result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
 out:
        strbuf_release(&buffer);
        return result;
@@ -1617,11 +1614,11 @@ struct commit *get_merge_parent(const char *name)
 struct commit_list **commit_list_append(struct commit *commit,
                                        struct commit_list **next)
 {
-       struct commit_list *new = xmalloc(sizeof(struct commit_list));
-       new->item = commit;
-       *next = new;
-       new->next = NULL;
-       return &new->next;
+       struct commit_list *new_commit = xmalloc(sizeof(struct commit_list));
+       new_commit->item = commit;
+       *next = new_commit;
+       new_commit->next = NULL;
+       return &new_commit->next;
 }
 
 const char *find_commit_header(const char *msg, const char *key, size_t *out_len)
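
The call sites above switch from read_sha1_file(oid->hash, ...) to read_object_file(oid, ...), so the lookup now takes the struct object_id directly rather than its raw hash bytes. A hedged fragment showing the calling convention as it appears in these hunks (the surrounding commit variable and the error messages are illustrative):

	enum object_type type;
	unsigned long size;
	void *buf = read_object_file(&commit->object.oid, &type, &size);

	if (!buf)
		die("cannot read commit object %s",
		    oid_to_hex(&commit->object.oid));
	if (type != OBJ_COMMIT)
		die("expected commit for %s, got %s",
		    oid_to_hex(&commit->object.oid), type_name(type));
	/* ... use buf[0..size) ... */
	free(buf);
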
index 425f4027752fb47190a53ddc72a64ebf9d928633..0fb8271665c6c98ccca803fbe002327bf38fcfb3 100644 (file)
--- a/commit.h
+++ b/commit.h
@@ -262,14 +262,15 @@ extern void append_merge_tag_headers(struct commit_list *parents,
                                     struct commit_extra_header ***tail);
 
 extern int commit_tree(const char *msg, size_t msg_len,
-                      const unsigned char *tree,
-                      struct commit_list *parents, unsigned char *ret,
+                      const struct object_id *tree,
+                      struct commit_list *parents, struct object_id *ret,
                       const char *author, const char *sign_commit);
 
 extern int commit_tree_extended(const char *msg, size_t msg_len,
-                               const unsigned char *tree,
-                               struct commit_list *parents, unsigned char *ret,
-                               const char *author, const char *sign_commit,
+                               const struct object_id *tree,
+                               struct commit_list *parents,
+                               struct object_id *ret, const char *author,
+                               const char *sign_commit,
                                struct commit_extra_header *);
 
 extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
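
With the prototypes above, both the tree being committed and the resulting commit name are passed as struct object_id rather than raw unsigned char hashes. A rough calling sketch under the new signature; the message, the NULL author/sign arguments (taken to mean the defaults, as in existing callers), and the variable names are assumptions:

	struct object_id tree_oid;	/* filled in elsewhere */
	struct object_id commit_oid;
	struct commit_list *parents = NULL;
	const char *msg = "example commit\n";

	if (commit_tree(msg, strlen(msg), &tree_oid, parents, &commit_oid,
			NULL, NULL) < 0)
		die("failed to write commit object");
	printf("new commit %s\n", oid_to_hex(&commit_oid));
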
index 6a689007e7ce3fe08f148e8b82c0a1c618c513a5..7d716d5a5491ba6b3a596f2128c4f08253d1d364 100644 (file)
@@ -34,6 +34,8 @@ int main(int argc, const char **argv)
 
        git_setup_gettext();
 
+       initialize_the_repository();
+
        attr_start();
 
        git_extract_argv0_path(argv[0]);
index 2d44d21aca8d31f67b16cb9a90245b4db526ff76..a67872babf332b7d8177e8477c2ee595d8cbbd3f 100644 (file)
@@ -761,6 +761,17 @@ int mingw_utime (const char *file_name, const struct utimbuf *times)
        return rc;
 }
 
+#undef strftime
+size_t mingw_strftime(char *s, size_t max,
+                     const char *format, const struct tm *tm)
+{
+       size_t ret = strftime(s, max, format, tm);
+
+       if (!ret && errno == EINVAL)
+               die("invalid strftime format: '%s'", format);
+       return ret;
+}
+
 unsigned int sleep (unsigned int seconds)
 {
        Sleep(seconds*1000);
index e03aecfe2e6556e1ef513922104557373eaa9260..571019d0bddceaf3245e15dbcc4ebfb70a501d17 100644 (file)
@@ -361,6 +361,9 @@ int mingw_fstat(int fd, struct stat *buf);
 
 int mingw_utime(const char *file_name, const struct utimbuf *times);
 #define utime mingw_utime
+size_t mingw_strftime(char *s, size_t max,
+                  const char *format, const struct tm *tm);
+#define strftime mingw_strftime
 
 pid_t mingw_spawnvpe(const char *cmd, const char **argv, char **env,
                     const char *dir,
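
The #define above routes every strftime() call in the tree through mingw_strftime(), so a format string the Windows CRT rejects now dies with a clear message instead of quietly returning 0 and leaving the buffer unset. A small sketch of the behavior being relied on; the buffer size and format string are illustrative:

	char buf[64];
	struct tm tm = { 0 };	/* normally filled by localtime() or similar */

	/* expands to mingw_strftime() on MinGW; an unsupported conversion
	 * would die("invalid strftime format: ...") rather than return 0 */
	if (strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm))
		puts(buf);
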
index e617c2018d22b6d389548a3573d2deb2ec608b99..c698988f5e11cd416bef9a77c998d94b9ef87930 100644 (file)
--- a/config.c
+++ b/config.c
@@ -1149,11 +1149,14 @@ static int git_default_core_config(const char *var, const char *value)
        }
 
        if (!strcmp(var, "core.safecrlf")) {
+               int eol_rndtrp_die;
                if (value && !strcasecmp(value, "warn")) {
-                       safe_crlf = SAFE_CRLF_WARN;
+                       global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
                        return 0;
                }
-               safe_crlf = git_config_bool(var, value);
+               eol_rndtrp_die = git_config_bool(var, value);
+               global_conv_flags_eol = eol_rndtrp_die ?
+                       CONV_EOL_RNDTRP_DIE : CONV_EOL_RNDTRP_WARN;
                return 0;
        }
 
@@ -1251,6 +1254,11 @@ static int git_default_core_config(const char *var, const char *value)
                return 0;
        }
 
+       if (!strcmp(var, "core.partialclonefilter")) {
+               return git_config_string(&core_partial_clone_filter_default,
+                                        var, value);
+       }
+
        /* Add other config variables here and to Documentation/config.txt. */
        return 0;
 }
@@ -1480,7 +1488,7 @@ int git_config_from_blob_oid(config_fn_t fn,
        unsigned long size;
        int ret;
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return error("unable to load config blob object '%s'", name);
        if (type != OBJ_BLOB) {
index 7f8415140f309e0522310fac3160b5a01cefc7cd..6f1fd9df35ef7d477ddb75de454a4708176ad634 100644 (file)
@@ -254,25 +254,25 @@ GIT_PARSE_WITH([openssl]))
 # Perl-compatible regular expressions instead of standard or extended
 # POSIX regular expressions.
 #
-# Currently USE_LIBPCRE is a synonym for USE_LIBPCRE1, define
-# USE_LIBPCRE2 instead if you'd like to use version 2 of the PCRE
-# library. The USE_LIBPCRE flag will likely be changed to mean v2 by
-# default in future releases.
+# USE_LIBPCRE is a synonym for USE_LIBPCRE2, define USE_LIBPCRE1
+# instead if you'd like to use the legacy version 1 of the PCRE
+# library. Support for version 1 will likely be removed in some future
+# release of Git, as upstream has all but abandoned it.
 #
 # Define LIBPCREDIR=/foo/bar if your PCRE header and library files are in
 # /foo/bar/include and /foo/bar/lib directories.
 #
 AC_ARG_WITH(libpcre,
-AS_HELP_STRING([--with-libpcre],[synonym for --with-libpcre1]),
+AS_HELP_STRING([--with-libpcre],[synonym for --with-libpcre2]),
     if test "$withval" = "no"; then
-       USE_LIBPCRE1=
+       USE_LIBPCRE2=
     elif test "$withval" = "yes"; then
-       USE_LIBPCRE1=YesPlease
+       USE_LIBPCRE2=YesPlease
     else
-       USE_LIBPCRE1=YesPlease
+       USE_LIBPCRE2=YesPlease
        LIBPCREDIR=$withval
        AC_MSG_NOTICE([Setting LIBPCREDIR to $LIBPCREDIR])
-        dnl USE_LIBPCRE1 can still be modified below, so don't substitute
+        dnl USE_LIBPCRE2 can still be modified below, so don't substitute
         dnl it yet.
        GIT_CONF_SUBST([LIBPCREDIR])
     fi)
@@ -296,6 +296,10 @@ AS_HELP_STRING([],           [ARG can be also prefix for libpcre library and hea
 AC_ARG_WITH(libpcre2,
 AS_HELP_STRING([--with-libpcre2],[support Perl-compatible regexes via libpcre2 (default is NO)])
 AS_HELP_STRING([],           [ARG can be also prefix for libpcre library and headers]),
+    if test -n "$USE_LIBPCRE2"; then
+        AC_MSG_ERROR([Only supply one of --with-libpcre or its synonym --with-libpcre2!])
+    fi
+
     if test -n "$USE_LIBPCRE1"; then
         AC_MSG_ERROR([Only supply one of --with-libpcre1 or --with-libpcre2!])
     fi
@@ -549,8 +553,8 @@ if test -n "$USE_LIBPCRE1"; then
 GIT_STASH_FLAGS($LIBPCREDIR)
 
 AC_CHECK_LIB([pcre], [pcre_version],
-[USE_LIBPCRE=YesPlease],
-[USE_LIBPCRE=])
+[USE_LIBPCRE1=YesPlease],
+[USE_LIBPCRE1=])
 
 GIT_UNSTASH_FLAGS($LIBPCREDIR)
 
index 4a47f332706a332174610ae275b3bcb5afd30f2f..91feb7881545f4143f45b9eb3393de65739cfa92 100644 (file)
@@ -56,6 +56,8 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
        argv_array_push(&rev_list.args,"rev-list");
        argv_array_push(&rev_list.args, "--objects");
        argv_array_push(&rev_list.args, "--stdin");
+       if (repository_format_partial_clone)
+               argv_array_push(&rev_list.args, "--exclude-promisor-objects");
        argv_array_push(&rev_list.args, "--not");
        argv_array_push(&rev_list.args, "--all");
        argv_array_push(&rev_list.args, "--quiet");
index ce2e92c6e95d90d490e958ddf820f44d1f51061e..e34eada1ad52933230c7fcace595677747f9e1d6 100644 (file)
@@ -14,8 +14,9 @@ constant fmt !~ "%";
 
 @@
 expression E1, E2;
+format F =~ "s";
 @@
-- strbuf_addf(E1, "%s", E2);
+- strbuf_addf(E1, "%@F@", E2);
 + strbuf_addstr(E1, E2);
 
 @@
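
The semantic patch above rewrites a strbuf_addf() call whose format amounts to a single %s into the cheaper strbuf_addstr(). In C terms the transformation looks like this; the strbuf and string names are illustrative:

	struct strbuf buf = STRBUF_INIT;
	const char *msg = "hello";

	/* matched by the rule ... */
	strbuf_addf(&buf, "%s", msg);

	/* ... and rewritten to */
	strbuf_addstr(&buf, msg);
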
index 88813e91244d820b4aa2c0fb5e60a44db03c3e59..b09c8a23626b431a0cb97f6f7f930cccce25bf07 100644 (file)
@@ -280,6 +280,43 @@ __gitcomp ()
        esac
 }
 
+# Clear the variables caching builtins' options when (re-)sourcing
+# the completion script.
+unset $(set |sed -ne 's/^\(__gitcomp_builtin_[a-zA-Z0-9_][a-zA-Z0-9_]*\)=.*/\1/p') 2>/dev/null
+
+# This function is equivalent to
+#
+#    __gitcomp "$(git xxx --git-completion-helper) ..."
+#
+# except that the output is cached. Accept 1-3 arguments:
+# 1: the git command to execute, this is also the cache key
+# 2: extra options to be added on top (e.g. negative forms)
+# 3: options to be excluded
+__gitcomp_builtin ()
+{
+       # spaces must be replaced with underscore for multi-word
+       # commands, e.g. "git remote add" becomes remote_add.
+       local cmd="$1"
+       local incl="$2"
+       local excl="$3"
+
+       local var=__gitcomp_builtin_"${cmd/-/_}"
+       local options
+       eval "options=\$$var"
+
+       if [ -z "$options" ]; then
+               # leading and trailing spaces are significant to make
+               # option removal work correctly.
+               options=" $(__git ${cmd/_/ } --git-completion-helper) $incl "
+               for i in $excl; do
+                       options="${options/ $i / }"
+               done
+               eval "$var=\"$options\""
+       fi
+
+       __gitcomp "$options"
+}
+
 # Variation of __gitcomp_nl () that appends to the existing list of
 # completion candidates, COMPREPLY.
 __gitcomp_nl_append ()
@@ -439,7 +476,7 @@ __git_refs ()
                        track=""
                        ;;
                *)
-                       for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do
+                       for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do
                                case "$i" in
                                $match*)
                                        if [ -e "$dir/$i" ]; then
@@ -1072,12 +1109,13 @@ __git_count_arguments ()
 }
 
 __git_whitespacelist="nowarn warn error error-all fix"
+__git_am_inprogress_options="--skip --continue --resolved --abort --quit --show-current-patch"
 
 _git_am ()
 {
        __git_find_repo_path
        if [ -d "$__git_repo_path"/rebase-apply ]; then
-               __gitcomp "--skip --continue --resolved --abort"
+               __gitcomp "$__git_am_inprogress_options"
                return
        fi
        case "$cur" in
@@ -1086,12 +1124,8 @@ _git_am ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --3way --committer-date-is-author-date --ignore-date
-                       --ignore-whitespace --ignore-space-change
-                       --interactive --keep --no-utf8 --signoff --utf8
-                       --whitespace= --scissors
-                       "
+               __gitcomp_builtin am "--no-utf8" \
+                       "$__git_am_inprogress_options"
                return
        esac
 }
@@ -1104,14 +1138,7 @@ _git_apply ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --stat --numstat --summary --check --index
-                       --cached --index-info --reverse --reject --unidiff-zero
-                       --apply --no-add --exclude=
-                       --ignore-whitespace --ignore-space-change
-                       --whitespace= --inaccurate-eof --verbose
-                       --recount --directory=
-                       "
+               __gitcomp_builtin apply
                return
        esac
 }
@@ -1120,10 +1147,7 @@ _git_add ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --interactive --refresh --patch --update --dry-run
-                       --ignore-errors --intent-to-add --force --edit --chmod=
-                       "
+               __gitcomp_builtin add
                return
        esac
 
@@ -1200,12 +1224,8 @@ _git_branch ()
                __git_complete_refs --cur="${cur##--set-upstream-to=}"
                ;;
        --*)
-               __gitcomp "
-                       --color --no-color --verbose --abbrev= --no-abbrev
-                       --track --no-track --contains --no-contains --merged --no-merged
-                       --set-upstream-to= --edit-description --list
-                       --unset-upstream --delete --move --copy --remotes
-                       --column --no-column --sort= --points-at
+               __gitcomp_builtin branch "--no-color --no-abbrev
+                       --no-track --no-column
                        "
                ;;
        *)
@@ -1247,11 +1267,7 @@ _git_checkout ()
                __gitcomp "diff3 merge" "" "${cur##--conflict=}"
                ;;
        --*)
-               __gitcomp "
-                       --quiet --ours --theirs --track --no-track --merge
-                       --conflict= --orphan --patch --detach --ignore-skip-worktree-bits
-                       --recurse-submodules --no-recurse-submodules
-                       "
+               __gitcomp_builtin checkout "--no-track --no-recurse-submodules"
                ;;
        *)
                # check if --track, --no-track, or --no-guess was specified
@@ -1271,16 +1287,19 @@ _git_cherry ()
        __git_complete_refs
 }
 
+__git_cherry_pick_inprogress_options="--continue --quit --abort"
+
 _git_cherry_pick ()
 {
        __git_find_repo_path
        if [ -f "$__git_repo_path"/CHERRY_PICK_HEAD ]; then
-               __gitcomp "--continue --quit --abort"
+               __gitcomp "$__git_cherry_pick_inprogress_options"
                return
        fi
        case "$cur" in
        --*)
-               __gitcomp "--edit --no-commit --signoff --strategy= --mainline"
+               __gitcomp_builtin cherry-pick "" \
+                       "$__git_cherry_pick_inprogress_options"
                ;;
        *)
                __git_complete_refs
@@ -1292,7 +1311,7 @@ _git_clean ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--dry-run --quiet"
+               __gitcomp_builtin clean
                return
                ;;
        esac
@@ -1305,26 +1324,7 @@ _git_clone ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --local
-                       --no-hardlinks
-                       --shared
-                       --reference
-                       --quiet
-                       --no-checkout
-                       --bare
-                       --mirror
-                       --origin
-                       --upload-pack
-                       --template=
-                       --depth
-                       --single-branch
-                       --no-tags
-                       --branch
-                       --recurse-submodules
-                       --no-single-branch
-                       --shallow-submodules
-                       "
+               __gitcomp_builtin clone "--no-single-branch"
                return
                ;;
        esac
@@ -1357,16 +1357,7 @@ _git_commit ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --all --author= --signoff --verify --no-verify
-                       --edit --no-edit
-                       --amend --include --only --interactive
-                       --dry-run --reuse-message= --reedit-message=
-                       --reset-author --file= --message= --template=
-                       --cleanup= --untracked-files --untracked-files=
-                       --verbose --quiet --fixup= --squash=
-                       --patch --short --date --allow-empty
-                       "
+               __gitcomp_builtin commit "--no-edit --verify"
                return
        esac
 
@@ -1382,11 +1373,7 @@ _git_describe ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --all --tags --contains --abbrev= --candidates=
-                       --exact-match --debug --long --match --always --first-parent
-                       --exclude --dirty --broken
-                       "
+               __gitcomp_builtin describe
                return
        esac
        __git_complete_refs
@@ -1411,7 +1398,7 @@ __git_diff_common_options="--stat --numstat --shortstat --summary
                        --dirstat --dirstat= --dirstat-by-file
                        --dirstat-by-file= --cumulative
                        --diff-algorithm=
-                       --submodule --submodule=
+                       --submodule --submodule= --ignore-submodules
 "
 
 _git_diff ()
@@ -1452,11 +1439,11 @@ _git_difftool ()
                return
                ;;
        --*)
-               __gitcomp "--cached --staged --pickaxe-all --pickaxe-regex
-                       --base --ours --theirs
-                       --no-renames --diff-filter= --find-copies-harder
-                       --relative --ignore-submodules
-                       --tool="
+               __gitcomp_builtin difftool "$__git_diff_common_options
+                                       --base --cached --ours --theirs
+                                       --pickaxe-all --pickaxe-regex
+                                       --relative --staged
+                                       "
                return
                ;;
        esac
@@ -1465,12 +1452,6 @@ _git_difftool ()
 
 __git_fetch_recurse_submodules="yes on-demand no"
 
-__git_fetch_options="
-       --quiet --verbose --append --upload-pack --force --keep --depth=
-       --tags --no-tags --all --prune --dry-run --recurse-submodules=
-       --unshallow --update-shallow
-"
-
 _git_fetch ()
 {
        case "$cur" in
@@ -1479,7 +1460,7 @@ _git_fetch ()
                return
                ;;
        --*)
-               __gitcomp "$__git_fetch_options"
+               __gitcomp_builtin fetch "--no-tags"
                return
                ;;
        esac
@@ -1516,10 +1497,7 @@ _git_fsck ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --tags --root --unreachable --cache --no-reflogs --full
-                       --strict --verbose --lost-found --name-objects
-                       "
+               __gitcomp_builtin fsck "--no-reflogs"
                return
                ;;
        esac
@@ -1529,7 +1507,7 @@ _git_gc ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--prune --aggressive"
+               __gitcomp_builtin gc
                return
                ;;
        esac
@@ -1585,21 +1563,7 @@ _git_grep ()
 
        case "$cur" in
        --*)
-               __gitcomp "
-                       --cached
-                       --text --ignore-case --word-regexp --invert-match
-                       --full-name --line-number
-                       --extended-regexp --basic-regexp --fixed-strings
-                       --perl-regexp
-                       --threads
-                       --files-with-matches --name-only
-                       --files-without-match
-                       --max-depth
-                       --count
-                       --and --or --not --all-match
-                       --break --heading --show-function --function-context
-                       --untracked --no-index
-                       "
+               __gitcomp_builtin grep
                return
                ;;
        esac
@@ -1617,7 +1581,7 @@ _git_help ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--all --guides --info --man --web"
+               __gitcomp_builtin help
                return
                ;;
        esac
@@ -1640,7 +1604,7 @@ _git_init ()
                return
                ;;
        --*)
-               __gitcomp "--quiet --bare --template= --shared --shared="
+               __gitcomp_builtin init
                return
                ;;
        esac
@@ -1650,13 +1614,7 @@ _git_ls_files ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--cached --deleted --modified --others --ignored
-                       --stage --directory --no-empty-directory --unmerged
-                       --killed --exclude= --exclude-from=
-                       --exclude-per-directory= --exclude-standard
-                       --error-unmatch --with-tree= --full-name
-                       --abbrev --ignored --exclude-per-directory
-                       "
+               __gitcomp_builtin ls-files "--no-empty-directory"
                return
                ;;
        esac
@@ -1670,7 +1628,7 @@ _git_ls_remote ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--heads --tags --refs --get-url --symref"
+               __gitcomp_builtin ls-remote
                return
                ;;
        esac
@@ -1794,22 +1752,18 @@ _git_log ()
        __git_complete_revlist
 }
 
-# Common merge options shared by git-merge(1) and git-pull(1).
-__git_merge_options="
-       --no-commit --no-stat --log --no-log --squash --strategy
-       --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit
-       --verify-signatures --no-verify-signatures --gpg-sign
-       --quiet --verbose --progress --no-progress
-"
-
 _git_merge ()
 {
        __git_complete_strategy && return
 
        case "$cur" in
        --*)
-               __gitcomp "$__git_merge_options
-                       --rerere-autoupdate --no-rerere-autoupdate --abort --continue"
+               __gitcomp_builtin merge "--no-rerere-autoupdate
+                               --no-commit --no-edit --no-ff
+                               --no-log --no-progress
+                               --no-squash --no-stat
+                               --no-verify-signatures
+                               "
                return
        esac
        __git_complete_refs
@@ -1833,7 +1787,7 @@ _git_merge_base ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--octopus --independent --is-ancestor --fork-point"
+               __gitcomp_builtin merge-base
                return
                ;;
        esac
@@ -1844,7 +1798,7 @@ _git_mv ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--dry-run"
+               __gitcomp_builtin mv
                return
                ;;
        esac
@@ -1860,17 +1814,17 @@ _git_mv ()
 
 _git_name_rev ()
 {
-       __gitcomp "--tags --all --stdin"
+       __gitcomp_builtin name-rev
 }
 
 _git_notes ()
 {
-       local subcommands='add append copy edit list prune remove show'
+       local subcommands='add append copy edit get-ref list merge prune remove show'
        local subcommand="$(__git_find_on_cmdline "$subcommands")"
 
        case "$subcommand,$cur" in
        ,--*)
-               __gitcomp '--ref'
+               __gitcomp_builtin notes
                ;;
        ,*)
                case "$prev" in
@@ -1882,21 +1836,14 @@ _git_notes ()
                        ;;
                esac
                ;;
-       add,--reuse-message=*|append,--reuse-message=*|\
-       add,--reedit-message=*|append,--reedit-message=*)
+       *,--reuse-message=*|*,--reedit-message=*)
                __git_complete_refs --cur="${cur#*=}"
                ;;
-       add,--*|append,--*)
-               __gitcomp '--file= --message= --reedit-message=
-                               --reuse-message='
+       *,--*)
+               __gitcomp_builtin notes_$subcommand
                ;;
-       copy,--*)
-               __gitcomp '--stdin'
-               ;;
-       prune,--*)
-               __gitcomp '--dry-run --verbose'
-               ;;
-       prune,*)
+       prune,*|get-ref,*)
+               # this command does not take a ref, do not complete it
                ;;
        *)
                case "$prev" in
@@ -1920,12 +1867,11 @@ _git_pull ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --rebase --no-rebase
-                       --autostash --no-autostash
-                       $__git_merge_options
-                       $__git_fetch_options
-               "
+               __gitcomp_builtin pull "--no-autostash --no-commit --no-edit
+                                       --no-ff --no-log --no-progress --no-rebase
+                                       --no-squash --no-stat --no-tags
+                                       --no-verify-signatures"
+
                return
                ;;
        esac
@@ -1976,12 +1922,7 @@ _git_push ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --all --mirror --tags --dry-run --force --verbose
-                       --quiet --prune --delete --follow-tags
-                       --receive-pack= --repo= --set-upstream
-                       --force-with-lease --force-with-lease= --recurse-submodules=
-               "
+               __gitcomp_builtin push
                return
                ;;
        esac
@@ -1992,11 +1933,11 @@ _git_rebase ()
 {
        __git_find_repo_path
        if [ -f "$__git_repo_path"/rebase-merge/interactive ]; then
-               __gitcomp "--continue --skip --abort --quit --edit-todo"
+               __gitcomp "--continue --skip --abort --quit --edit-todo --show-current-patch"
                return
        elif [ -d "$__git_repo_path"/rebase-apply ] || \
             [ -d "$__git_repo_path"/rebase-merge ]; then
-               __gitcomp "--continue --skip --abort --quit"
+               __gitcomp "--continue --skip --abort --quit --show-current-patch"
                return
        fi
        __git_complete_strategy && return
@@ -2016,6 +1957,7 @@ _git_rebase ()
                        --autostash --no-autostash
                        --verify --no-verify
                        --keep-empty --root --force-rebase --no-ff
+                       --rerere-autoupdate
                        --exec
                        "
 
@@ -2081,7 +2023,7 @@ _git_send_email ()
                        --compose --confirm= --dry-run --envelope-sender
                        --from --identity
                        --in-reply-to --no-chain-reply-to --no-signed-off-by-cc
-                       --no-suppress-from --no-thread --quiet
+                       --no-suppress-from --no-thread --quiet --reply-to
                        --signed-off-by-cc --smtp-pass --smtp-server
                        --smtp-server-port --smtp-encryption= --smtp-user
                        --subject --suppress-cc= --suppress-from --thread --to
@@ -2119,11 +2061,7 @@ _git_status ()
                return
                ;;
        --*)
-               __gitcomp "
-                       --short --branch --porcelain --long --verbose
-                       --untracked-files= --ignore-submodules= --ignored
-                       --column= --no-column
-                       "
+               __gitcomp_builtin status "--no-column"
                return
                ;;
        esac
@@ -2265,14 +2203,7 @@ _git_config ()
        esac
        case "$cur" in
        --*)
-               __gitcomp "
-                       --system --global --local --file=
-                       --list --replace-all
-                       --get --get-all --get-regexp
-                       --add --unset --unset-all
-                       --remove-section --rename-section
-                       --name-only
-                       "
+               __gitcomp_builtin config
                return
                ;;
        branch.*.*)
@@ -2672,7 +2603,7 @@ _git_remote ()
        if [ -z "$subcommand" ]; then
                case "$cur" in
                --*)
-                       __gitcomp "--verbose"
+                       __gitcomp_builtin remote
                        ;;
                *)
                        __gitcomp "$subcommands"
@@ -2683,33 +2614,33 @@ _git_remote ()
 
        case "$subcommand,$cur" in
        add,--*)
-               __gitcomp "--track --master --fetch --tags --no-tags --mirror="
+               __gitcomp_builtin remote_add "--no-tags"
                ;;
        add,*)
                ;;
        set-head,--*)
-               __gitcomp "--auto --delete"
+               __gitcomp_builtin remote_set-head
                ;;
        set-branches,--*)
-               __gitcomp "--add"
+               __gitcomp_builtin remote_set-branches
                ;;
        set-head,*|set-branches,*)
                __git_complete_remote_or_refspec
                ;;
        update,--*)
-               __gitcomp "--prune"
+               __gitcomp_builtin remote_update
                ;;
        update,*)
                __gitcomp "$(__git_get_config_variables "remotes")"
                ;;
        set-url,--*)
-               __gitcomp "--push --add --delete"
+               __gitcomp_builtin remote_set-url
                ;;
        get-url,--*)
-               __gitcomp "--push --all"
+               __gitcomp_builtin remote_get-url
                ;;
        prune,--*)
-               __gitcomp "--dry-run"
+               __gitcomp_builtin remote_prune
                ;;
        *)
                __gitcomp_nl "$(__git_remotes)"
@@ -2721,7 +2652,7 @@ _git_replace ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--edit --graft --format= --list --delete"
+               __gitcomp_builtin replace
                return
                ;;
        esac
@@ -2745,26 +2676,26 @@ _git_reset ()
 
        case "$cur" in
        --*)
-               __gitcomp "--merge --mixed --hard --soft --patch --keep"
+               __gitcomp_builtin reset
                return
                ;;
        esac
        __git_complete_refs
 }
 
+__git_revert_inprogress_options="--continue --quit --abort"
+
 _git_revert ()
 {
        __git_find_repo_path
        if [ -f "$__git_repo_path"/REVERT_HEAD ]; then
-               __gitcomp "--continue --quit --abort"
+               __gitcomp "$__git_revert_inprogress_options"
                return
        fi
        case "$cur" in
        --*)
-               __gitcomp "
-                       --edit --mainline --no-edit --no-commit --signoff
-                       --strategy= --strategy-option=
-                       "
+               __gitcomp_builtin revert "--no-edit" \
+                       "$__git_revert_inprogress_options"
                return
                ;;
        esac
@@ -2775,7 +2706,7 @@ _git_rm ()
 {
        case "$cur" in
        --*)
-               __gitcomp "--cached --dry-run --ignore-unmatch --quiet"
+               __gitcomp_builtin rm
                return
                ;;
        esac
@@ -2833,12 +2764,7 @@ _git_show_branch ()
 {
        case "$cur" in
        --*)
-               __gitcomp "
-                       --all --remotes --topo-order --date-order --current --more=
-                       --list --independent --merge-base --no-name
-                       --color --no-color
-                       --sha1-name --sparse --topics --reflog
-                       "
+               __gitcomp_builtin show-branch "--no-color"
                return
                ;;
        esac
@@ -3045,7 +2971,7 @@ _git_tag ()
        while [ $c -lt $cword ]; do
                i="${words[c]}"
                case "$i" in
-               -d|-v)
+               -d|--delete|-v|--verify)
                        __gitcomp_direct "$(__git_tags "" "$cur" " ")"
                        return
                        ;;
@@ -3071,11 +2997,7 @@ _git_tag ()
 
        case "$cur" in
        --*)
-               __gitcomp "
-                       --list --delete --verify --annotate --message --file
-                       --sign --cleanup --local-user --force --column --sort=
-                       --contains --no-contains --points-at --merged --no-merged --create-reflog
-                       "
+               __gitcomp_builtin tag
                ;;
        esac
 }
@@ -3087,23 +3009,26 @@ _git_whatchanged ()
 
 _git_worktree ()
 {
-       local subcommands="add list lock prune unlock"
+       local subcommands="add list lock move prune remove unlock"
        local subcommand="$(__git_find_on_cmdline "$subcommands")"
        if [ -z "$subcommand" ]; then
                __gitcomp "$subcommands"
        else
                case "$subcommand,$cur" in
                add,--*)
-                       __gitcomp "--detach"
+                       __gitcomp_builtin worktree_add
                        ;;
                list,--*)
-                       __gitcomp "--porcelain"
+                       __gitcomp_builtin worktree_list
                        ;;
                lock,--*)
-                       __gitcomp "--reason"
+                       __gitcomp_builtin worktree_lock
                        ;;
                prune,--*)
-                       __gitcomp "--dry-run --expire --verbose"
+                       __gitcomp_builtin worktree_prune
+                       ;;
+               remove,--*)
+                       __gitcomp "--force"
                        ;;
                *)
                        ;;
diff --git a/contrib/emacs/.gitignore b/contrib/emacs/.gitignore
deleted file mode 100644 (file)
index c531d98..0000000
+++ /dev/null
@@ -1 +0,0 @@
-*.elc
diff --git a/contrib/emacs/Makefile b/contrib/emacs/Makefile
deleted file mode 100644 (file)
index 24d9312..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-## Build and install stuff
-
-EMACS = emacs
-
-ELC = git.elc git-blame.elc
-INSTALL ?= install
-INSTALL_ELC = $(INSTALL) -m 644
-prefix ?= $(HOME)
-emacsdir = $(prefix)/share/emacs/site-lisp
-RM ?= rm -f
-
-all: $(ELC)
-
-install: all
-       $(INSTALL) -d $(DESTDIR)$(emacsdir)
-       $(INSTALL_ELC) $(ELC:.elc=.el) $(ELC) $(DESTDIR)$(emacsdir)
-
-%.elc: %.el
-       $(EMACS) -batch -f batch-byte-compile $<
-
-clean:; $(RM) $(ELC)
index 82368bdbfff199465ff8e8cbea49d99e5485e1d7..977a16f1e339faca937dfd1a60bb10395bfd59c4 100644 (file)
@@ -1,30 +1,24 @@
-This directory contains various modules for Emacs support.
+This directory used to contain various modules for Emacs support.
 
-To make the modules available to Emacs, you should add this directory
-to your load-path, and then require the modules you want. This can be
-done by adding to your .emacs something like this:
+These were added shortly after Git was first released. Since then
+Emacs's own support for Git got better than what was offered by these
+modes. There are also popular 3rd-party Git modes such as Magit which
+offer replacements for these.
 
-  (add-to-list 'load-path ".../git/contrib/emacs")
-  (require 'git)
-  (require 'git-blame)
-
-
-The following modules are available:
+The following modules were available, and can be dug up from the Git
+history:
 
 * git.el:
 
-  Status manager that displays the state of all the files of the
-  project, and provides easy access to the most frequently used git
-  commands. The user interface is as far as possible compatible with
-  the pcl-cvs mode. It can be started with `M-x git-status'.
+  Wrapper for "git status" that provided access to other git commands.
+
+  Modern alternatives to this include Magit, and VC mode that ships
+  with Emacs.
 
 * git-blame.el:
 
-  Emacs implementation of incremental git-blame.  When you turn it on
-  while viewing a file, the editor buffer will be updated by setting
-  the background of individual lines to a color that reflects which
-  commit it comes from.  And when you move around the buffer, a
-  one-line summary will be shown in the echo area.
+  A wrapper for "git blame" written before Emacs's own vc-annotate
+  mode learned to invoke git-blame, which can be done via C-x v g.
 
 * vc-git.el:
 
diff --git a/contrib/emacs/git-blame.el b/contrib/emacs/git-blame.el
deleted file mode 100644 (file)
index 510e0f7..0000000
+++ /dev/null
@@ -1,483 +0,0 @@
-;;; git-blame.el --- Minor mode for incremental blame for Git  -*- coding: utf-8 -*-
-;;
-;; Copyright (C) 2007  David Kågedal
-;;
-;; Authors:    David Kågedal <davidk@lysator.liu.se>
-;; Created:    31 Jan 2007
-;; Message-ID: <87iren2vqx.fsf@morpheus.local>
-;; License:    GPL
-;; Keywords:   git, version control, release management
-;;
-;; Compatibility: Emacs21, Emacs22 and EmacsCVS
-;;                Git 1.5 and up
-
-;; This file is *NOT* part of GNU Emacs.
-;; This file is distributed under the same terms as GNU Emacs.
-
-;; This program is free software; you can redistribute it and/or
-;; modify it under the terms of the GNU General Public License as
-;; published by the Free Software Foundation; either version 2 of
-;; the License, or (at your option) any later version.
-
-;; This program is distributed in the hope that it will be
-;; useful, but WITHOUT ANY WARRANTY; without even the implied
-;; warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-;; PURPOSE.  See the GNU General Public License for more details.
-
-;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, see
-;; <http://www.gnu.org/licenses/>.
-
-;; http://www.fsf.org/copyleft/gpl.html
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;;; Commentary:
-;;
-;; Here is an Emacs implementation of incremental git-blame.  When you
-;; turn it on while viewing a file, the editor buffer will be updated by
-;; setting the background of individual lines to a color that reflects
-;; which commit it comes from.  And when you move around the buffer, a
-;; one-line summary will be shown in the echo area.
-
-;;; Installation:
-;;
-;; To use this package, put it somewhere in `load-path' (or add
-;; directory with git-blame.el to `load-path'), and add the following
-;; line to your .emacs:
-;;
-;;    (require 'git-blame)
-;;
-;; If you do not want to load this package before it is necessary, you
-;; can make use of the `autoload' feature, e.g. by adding to your .emacs
-;; the following lines
-;;
-;;    (autoload 'git-blame-mode "git-blame"
-;;              "Minor mode for incremental blame for Git." t)
-;;
-;; Then first use of `M-x git-blame-mode' would load the package.
-
-;;; Compatibility:
-;;
-;; It requires GNU Emacs 21 or later and Git 1.5.0 and up
-;;
-;; If you're using Emacs 20, try changing this:
-;;
-;;            (overlay-put ovl 'face (list :background
-;;                                         (cdr (assq 'color (cddddr info)))))
-;;
-;; to
-;;
-;;            (overlay-put ovl 'face (cons 'background-color
-;;                                         (cdr (assq 'color (cddddr info)))))
-
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;;; Code:
-
-(eval-when-compile (require 'cl))                            ; to use `push', `pop'
-(require 'format-spec)
-
-(defface git-blame-prefix-face
-  '((((background dark)) (:foreground "gray"
-                          :background "black"))
-    (((background light)) (:foreground "gray"
-                           :background "white"))
-    (t (:weight bold)))
-  "The face used for the hash prefix."
-  :group 'git-blame)
-
-(defgroup git-blame nil
-  "A minor mode showing Git blame information."
-  :group 'git
-  :link '(function-link git-blame-mode))
-
-
-(defcustom git-blame-use-colors t
-  "Use colors to indicate commits in `git-blame-mode'."
-  :type 'boolean
-  :group 'git-blame)
-
-(defcustom git-blame-prefix-format
-  "%h %20A:"
-  "The format of the prefix added to each line in `git-blame'
-mode. The format is passed to `format-spec' with the following format keys:
-
-  %h - the abbreviated hash
-  %H - the full hash
-  %a - the author name
-  %A - the author email
-  %c - the committer name
-  %C - the committer email
-  %s - the commit summary
-"
-  :group 'git-blame)
-
-(defcustom git-blame-mouseover-format
-  "%h %a %A: %s"
-  "The format of the description shown when pointing at a line in
-`git-blame' mode. The format string is passed to `format-spec'
-with the following format keys:
-
-  %h - the abbreviated hash
-  %H - the full hash
-  %a - the author name
-  %A - the author email
-  %c - the committer name
-  %C - the committer email
-  %s - the commit summary
-"
-  :group 'git-blame)
-
-
-(defun git-blame-color-scale (&rest elements)
-  "Given a list, returns a list of triples formed with each
-elements of the list.
-
-a b => bbb bba bab baa abb aba aaa aab"
-  (let (result)
-    (dolist (a elements)
-      (dolist (b elements)
-        (dolist (c elements)
-          (setq result (cons (format "#%s%s%s" a b c) result)))))
-    result))
-
-;; (git-blame-color-scale "0c" "04" "24" "1c" "2c" "34" "14" "3c") =>
-;; ("#3c3c3c" "#3c3c14" "#3c3c34" "#3c3c2c" "#3c3c1c" "#3c3c24"
-;; "#3c3c04" "#3c3c0c" "#3c143c" "#3c1414" "#3c1434" "#3c142c" ...)
-
-(defmacro git-blame-random-pop (l)
-  "Select a random element from L and returns it. Also remove
-selected element from l."
-  ;; only works on lists with unique elements
-  `(let ((e (elt ,l (random (length ,l)))))
-     (setq ,l (remove e ,l))
-     e))
-
-(defvar git-blame-log-oneline-format
-  "format:[%cr] %cn: %s"
-  "*Formatting option used for describing current line in the minibuffer.
-
-This option is passed to the git log --pretty= command-line option,
-and describes which commit the current line was made in.")
-
-(defvar git-blame-dark-colors
-  (git-blame-color-scale "0c" "04" "24" "1c" "2c" "34" "14" "3c")
-  "*List of colors (format #RGB) to use in a dark environment.
-
-To check out the list, evaluate (list-colors-display git-blame-dark-colors).")
-
-(defvar git-blame-light-colors
-  (git-blame-color-scale "c4" "d4" "cc" "dc" "f4" "e4" "fc" "ec")
-  "*List of colors (format #RGB) to use in a light environment.
-
-To check out the list, evaluate (list-colors-display git-blame-light-colors).")
-
-(defvar git-blame-colors '()
-  "Colors used by git-blame. The list is built once when activating git-blame
-minor mode.")
-
-(defvar git-blame-ancient-color "dark green"
-  "*Color to be used for ancient commit.")
-
-(defvar git-blame-autoupdate t
-  "*Automatically update the blame display while editing")
-
-(defvar git-blame-proc nil
-  "The running git-blame process")
-(make-variable-buffer-local 'git-blame-proc)
-
-(defvar git-blame-overlays nil
-  "The git-blame overlays used in the current buffer.")
-(make-variable-buffer-local 'git-blame-overlays)
-
-(defvar git-blame-cache nil
-  "A cache of git-blame information for the current buffer")
-(make-variable-buffer-local 'git-blame-cache)
-
-(defvar git-blame-idle-timer nil
-  "An idle timer that updates the blame")
-(make-variable-buffer-local 'git-blame-cache)
-
-(defvar git-blame-update-queue nil
-  "A queue of update requests")
-(make-variable-buffer-local 'git-blame-update-queue)
-
-;; FIXME: docstrings
-(defvar git-blame-file nil)
-(defvar git-blame-current nil)
-
-(defvar git-blame-mode nil)
-(make-variable-buffer-local 'git-blame-mode)
-
-(defvar git-blame-mode-line-string " blame"
-  "String to display on the mode line when git-blame is active.")
-
-(or (assq 'git-blame-mode minor-mode-alist)
-    (setq minor-mode-alist
-         (cons '(git-blame-mode git-blame-mode-line-string) minor-mode-alist)))
-
-;;;###autoload
-(defun git-blame-mode (&optional arg)
-  "Toggle minor mode for displaying Git blame
-
-With prefix ARG, turn the mode on if ARG is positive."
-  (interactive "P")
-  (cond
-   ((null arg)
-    (if git-blame-mode (git-blame-mode-off) (git-blame-mode-on)))
-   ((> (prefix-numeric-value arg) 0) (git-blame-mode-on))
-   (t (git-blame-mode-off))))
-
-(defun git-blame-mode-on ()
-  "Turn on git-blame mode.
-
-See also function `git-blame-mode'."
-  (make-local-variable 'git-blame-colors)
-  (if git-blame-autoupdate
-      (add-hook 'after-change-functions 'git-blame-after-change nil t)
-    (remove-hook 'after-change-functions 'git-blame-after-change t))
-  (git-blame-cleanup)
-  (let ((bgmode (cdr (assoc 'background-mode (frame-parameters)))))
-    (if (eq bgmode 'dark)
-       (setq git-blame-colors git-blame-dark-colors)
-      (setq git-blame-colors git-blame-light-colors)))
-  (setq git-blame-cache (make-hash-table :test 'equal))
-  (setq git-blame-mode t)
-  (git-blame-run))
-
-(defun git-blame-mode-off ()
-  "Turn off git-blame mode.
-
-See also function `git-blame-mode'."
-  (git-blame-cleanup)
-  (if git-blame-idle-timer (cancel-timer git-blame-idle-timer))
-  (setq git-blame-mode nil))
-
-;;;###autoload
-(defun git-reblame ()
-  "Recalculate all blame information in the current buffer"
-  (interactive)
-  (unless git-blame-mode
-    (error "Git-blame is not active"))
-
-  (git-blame-cleanup)
-  (git-blame-run))
-
-(defun git-blame-run (&optional startline endline)
-  (if git-blame-proc
-      ;; Should maybe queue up a new run here
-      (message "Already running git blame")
-    (let ((display-buf (current-buffer))
-          (blame-buf (get-buffer-create
-                      (concat " git blame for " (buffer-name))))
-          (args '("--incremental" "--contents" "-")))
-      (if startline
-          (setq args (append args
-                             (list "-L" (format "%d,%d" startline endline)))))
-      (setq args (append args
-                         (list (file-name-nondirectory buffer-file-name))))
-      (setq git-blame-proc
-            (apply 'start-process
-                   "git-blame" blame-buf
-                   "git" "blame"
-                   args))
-      (with-current-buffer blame-buf
-        (erase-buffer)
-        (make-local-variable 'git-blame-file)
-        (make-local-variable 'git-blame-current)
-        (setq git-blame-file display-buf)
-        (setq git-blame-current nil))
-      (set-process-filter git-blame-proc 'git-blame-filter)
-      (set-process-sentinel git-blame-proc 'git-blame-sentinel)
-      (process-send-region git-blame-proc (point-min) (point-max))
-      (process-send-eof git-blame-proc))))
-
-(defun remove-git-blame-text-properties (start end)
-  (let ((modified (buffer-modified-p))
-        (inhibit-read-only t))
-    (remove-text-properties start end '(point-entered nil))
-    (set-buffer-modified-p modified)))
-
-(defun git-blame-cleanup ()
-  "Remove all blame properties"
-    (mapc 'delete-overlay git-blame-overlays)
-    (setq git-blame-overlays nil)
-    (remove-git-blame-text-properties (point-min) (point-max)))
-
-(defun git-blame-update-region (start end)
-  "Rerun blame to get updates between START and END"
-  (let ((overlays (overlays-in start end)))
-    (while overlays
-      (let ((overlay (pop overlays)))
-        (if (< (overlay-start overlay) start)
-            (setq start (overlay-start overlay)))
-        (if (> (overlay-end overlay) end)
-            (setq end (overlay-end overlay)))
-        (setq git-blame-overlays (delete overlay git-blame-overlays))
-        (delete-overlay overlay))))
-  (remove-git-blame-text-properties start end)
-  ;; We can be sure that start and end are at line breaks
-  (git-blame-run (1+ (count-lines (point-min) start))
-                 (count-lines (point-min) end)))
-
-(defun git-blame-sentinel (proc status)
-  (with-current-buffer (process-buffer proc)
-    (with-current-buffer git-blame-file
-      (setq git-blame-proc nil)
-      (if git-blame-update-queue
-          (git-blame-delayed-update))))
-  ;;(kill-buffer (process-buffer proc))
-  ;;(message "git blame finished")
-  )
-
-(defvar in-blame-filter nil)
-
-(defun git-blame-filter (proc str)
-  (with-current-buffer (process-buffer proc)
-    (save-excursion
-      (goto-char (process-mark proc))
-      (insert-before-markers str)
-      (goto-char (point-min))
-      (unless in-blame-filter
-        (let ((more t)
-              (in-blame-filter t))
-          (while more
-            (setq more (git-blame-parse))))))))
-
-(defun git-blame-parse ()
-  (cond ((looking-at "\\([0-9a-f]\\{40\\}\\) \\([0-9]+\\) \\([0-9]+\\) \\([0-9]+\\)\n")
-         (let ((hash (match-string 1))
-               (src-line (string-to-number (match-string 2)))
-               (res-line (string-to-number (match-string 3)))
-               (num-lines (string-to-number (match-string 4))))
-           (delete-region (point) (match-end 0))
-           (setq git-blame-current (list (git-blame-new-commit hash)
-                                         src-line res-line num-lines)))
-         t)
-        ((looking-at "\\([a-z-]+\\) \\(.+\\)\n")
-         (let ((key (match-string 1))
-               (value (match-string 2)))
-           (delete-region (point) (match-end 0))
-           (git-blame-add-info (car git-blame-current) key value)
-           (when (string= key "filename")
-             (git-blame-create-overlay (car git-blame-current)
-                                       (caddr git-blame-current)
-                                       (cadddr git-blame-current))
-             (setq git-blame-current nil)))
-         t)
-        (t
-         nil)))
-
-(defun git-blame-new-commit (hash)
-  (with-current-buffer git-blame-file
-    (or (gethash hash git-blame-cache)
-        ;; Assign a random color to each new commit info
-        ;; Take care not to select the same color multiple times
-        (let* ((color (if git-blame-colors
-                          (git-blame-random-pop git-blame-colors)
-                        git-blame-ancient-color))
-               (info `(,hash (color . ,color))))
-          (puthash hash info git-blame-cache)
-          info))))
-
-(defun git-blame-create-overlay (info start-line num-lines)
-  (with-current-buffer git-blame-file
-    (save-excursion
-      (let ((inhibit-point-motion-hooks t)
-            (inhibit-modification-hooks t))
-        (goto-char (point-min))
-        (forward-line (1- start-line))
-        (let* ((start (point))
-               (end (progn (forward-line num-lines) (point)))
-               (ovl (make-overlay start end))
-               (hash (car info))
-               (spec `((?h . ,(substring hash 0 6))
-                       (?H . ,hash)
-                       (?a . ,(git-blame-get-info info 'author))
-                       (?A . ,(git-blame-get-info info 'author-mail))
-                       (?c . ,(git-blame-get-info info 'committer))
-                       (?C . ,(git-blame-get-info info 'committer-mail))
-                       (?s . ,(git-blame-get-info info 'summary)))))
-          (push ovl git-blame-overlays)
-          (overlay-put ovl 'git-blame info)
-          (overlay-put ovl 'help-echo
-                       (format-spec git-blame-mouseover-format spec))
-          (if git-blame-use-colors
-              (overlay-put ovl 'face (list :background
-                                           (cdr (assq 'color (cdr info))))))
-          (overlay-put ovl 'line-prefix
-                       (propertize (format-spec git-blame-prefix-format spec)
-                                   'face 'git-blame-prefix-face)))))))
-
-(defun git-blame-add-info (info key value)
-  (nconc info (list (cons (intern key) value))))
-
-(defun git-blame-get-info (info key)
-  (cdr (assq key (cdr info))))
-
-(defun git-blame-current-commit ()
-  (let ((info (get-char-property (point) 'git-blame)))
-    (if info
-        (car info)
-      (error "No commit info"))))
-
-(defun git-describe-commit (hash)
-  (with-temp-buffer
-    (call-process "git" nil t nil
-                  "log" "-1"
-                 (concat "--pretty=" git-blame-log-oneline-format)
-                  hash)
-    (buffer-substring (point-min) (point-max))))
-
-(defvar git-blame-last-identification nil)
-(make-variable-buffer-local 'git-blame-last-identification)
-(defun git-blame-identify (&optional hash)
-  (interactive)
-  (let ((info (gethash (or hash (git-blame-current-commit)) git-blame-cache)))
-    (when (and info (not (eq info git-blame-last-identification)))
-      (message "%s" (nth 4 info))
-      (setq git-blame-last-identification info))))
-
-;; (defun git-blame-after-save ()
-;;   (when git-blame-mode
-;;     (git-blame-cleanup)
-;;     (git-blame-run)))
-;; (add-hook 'after-save-hook 'git-blame-after-save)
-
-(defun git-blame-after-change (start end length)
-  (when git-blame-mode
-    (git-blame-enq-update start end)))
-
-(defvar git-blame-last-update nil)
-(make-variable-buffer-local 'git-blame-last-update)
-(defun git-blame-enq-update (start end)
-  "Mark the region between START and END as needing blame update"
-  ;; Try to be smart and avoid multiple callouts for sequential
-  ;; editing
-  (cond ((and git-blame-last-update
-              (= start (cdr git-blame-last-update)))
-         (setcdr git-blame-last-update end))
-        ((and git-blame-last-update
-              (= end (car git-blame-last-update)))
-         (setcar git-blame-last-update start))
-        (t
-         (setq git-blame-last-update (cons start end))
-         (setq git-blame-update-queue (nconc git-blame-update-queue
-                                             (list git-blame-last-update)))))
-  (unless (or git-blame-proc git-blame-idle-timer)
-    (setq git-blame-idle-timer
-          (run-with-idle-timer 0.5 nil 'git-blame-delayed-update))))
-
-(defun git-blame-delayed-update ()
-  (setq git-blame-idle-timer nil)
-  (if git-blame-update-queue
-      (let ((first (pop git-blame-update-queue))
-            (inhibit-point-motion-hooks t))
-        (git-blame-update-region (car first) (cdr first)))))
-
-(provide 'git-blame)
-
-;;; git-blame.el ends here
diff --git a/contrib/emacs/git.el b/contrib/emacs/git.el
deleted file mode 100644 (file)
index 97919f2..0000000
+++ /dev/null
@@ -1,1704 +0,0 @@
-;;; git.el --- A user interface for git
-
-;; Copyright (C) 2005, 2006, 2007, 2008, 2009 Alexandre Julliard <julliard@winehq.org>
-
-;; Version: 1.0
-
-;; This program is free software; you can redistribute it and/or
-;; modify it under the terms of the GNU General Public License as
-;; published by the Free Software Foundation; either version 2 of
-;; the License, or (at your option) any later version.
-;;
-;; This program is distributed in the hope that it will be
-;; useful, but WITHOUT ANY WARRANTY; without even the implied
-;; warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-;; PURPOSE.  See the GNU General Public License for more details.
-;;
-;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, see
-;; <http://www.gnu.org/licenses/>.
-
-;;; Commentary:
-
-;; This file contains an interface for the git version control
-;; system. It provides easy access to the most frequently used git
-;; commands. The user interface is, as far as possible, identical to
-;; that of the PCL-CVS mode.
-;;
-;; To install: put this file on the load-path and place the following
-;; in your .emacs file:
-;;
-;;    (require 'git)
-;;
-;; To start: `M-x git-status'
-;;
-;; TODO
-;;  - diff against other branch
-;;  - renaming files from the status buffer
-;;  - creating tags
-;;  - fetch/pull
-;;  - revlist browser
-;;  - git-show-branch browser
-;;
-
-;;; Compatibility:
-;;
-;; This file works on GNU Emacs 21 or later. It may work on older
-;; versions but this is not guaranteed.
-;;
-;; It may work on XEmacs 21, provided that you first install the ewoc
-;; and log-edit packages.
-;;
-
-(eval-when-compile (require 'cl))
-(require 'ewoc)
-(require 'log-edit)
-(require 'easymenu)
-
-
-;;;; Customizations
-;;;; ------------------------------------------------------------
-
-(defgroup git nil
-  "A user interface for the git versioning system."
-  :group 'tools)
-
-(defcustom git-committer-name nil
-  "User name to use for commits.
-The default is to fall back to the repository config,
-then to `add-log-full-name' and then to `user-full-name'."
-  :group 'git
-  :type '(choice (const :tag "Default" nil)
-                 (string :tag "Name")))
-
-(defcustom git-committer-email nil
-  "Email address to use for commits.
-The default is to fall back to the git repository config,
-then to `add-log-mailing-address' and then to `user-mail-address'."
-  :group 'git
-  :type '(choice (const :tag "Default" nil)
-                 (string :tag "Email")))
-
-(defcustom git-commits-coding-system nil
-  "Default coding system for the log message of git commits."
-  :group 'git
-  :type '(choice (const :tag "From repository config" nil)
-                 (coding-system)))
-
-(defcustom git-append-signed-off-by nil
-  "Whether to append a Signed-off-by line to the commit message before editing."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-reuse-status-buffer t
-  "Whether `git-status' should try to reuse an existing buffer
-if there is already one that displays the same directory."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-per-dir-ignore-file ".gitignore"
-  "Name of the per-directory ignore file."
-  :group 'git
-  :type 'string)
-
-(defcustom git-show-uptodate nil
-  "Whether to display up-to-date files."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-show-ignored nil
-  "Whether to display ignored files."
-  :group 'git
-  :type 'boolean)
-
-(defcustom git-show-unknown t
-  "Whether to display unknown files."
-  :group 'git
-  :type 'boolean)
-
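Since these are ordinary defcustom options, they can be changed through `M-x customize-group RET git RET` or set directly from an init file. A minimal, purely illustrative init-file sketch using the variables defined above:

    (require 'git)
    (setq git-append-signed-off-by t          ; add a Signed-off-by line by default
          git-show-uptodate t                 ; also list files that are up to date
          git-commits-coding-system 'utf-8)   ; use UTF-8 for commit messages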
-
-(defface git-status-face
-  '((((class color) (background light)) (:foreground "purple"))
-    (((class color) (background dark)) (:foreground "salmon")))
-  "Git mode face used to highlight added and modified files."
-  :group 'git)
-
-(defface git-unmerged-face
-  '((((class color) (background light)) (:foreground "red" :bold t))
-    (((class color) (background dark)) (:foreground "red" :bold t)))
-  "Git mode face used to highlight unmerged files."
-  :group 'git)
-
-(defface git-unknown-face
-  '((((class color) (background light)) (:foreground "goldenrod" :bold t))
-    (((class color) (background dark)) (:foreground "goldenrod" :bold t)))
-  "Git mode face used to highlight unknown files."
-  :group 'git)
-
-(defface git-uptodate-face
-  '((((class color) (background light)) (:foreground "grey60"))
-    (((class color) (background dark)) (:foreground "grey40")))
-  "Git mode face used to highlight up-to-date files."
-  :group 'git)
-
-(defface git-ignored-face
-  '((((class color) (background light)) (:foreground "grey60"))
-    (((class color) (background dark)) (:foreground "grey40")))
-  "Git mode face used to highlight ignored files."
-  :group 'git)
-
-(defface git-mark-face
-  '((((class color) (background light)) (:foreground "red" :bold t))
-    (((class color) (background dark)) (:foreground "tomato" :bold t)))
-  "Git mode face used for the file marks."
-  :group 'git)
-
-(defface git-header-face
-  '((((class color) (background light)) (:foreground "blue"))
-    (((class color) (background dark)) (:foreground "blue")))
-  "Git mode face used for commit headers."
-  :group 'git)
-
-(defface git-separator-face
-  '((((class color) (background light)) (:foreground "brown"))
-    (((class color) (background dark)) (:foreground "brown")))
-  "Git mode face used for commit separator."
-  :group 'git)
-
-(defface git-permission-face
-  '((((class color) (background light)) (:foreground "green" :bold t))
-    (((class color) (background dark)) (:foreground "green" :bold t)))
-  "Git mode face used for permission changes."
-  :group 'git)
-
-
-;;;; Utilities
-;;;; ------------------------------------------------------------
-
-(defconst git-log-msg-separator "--- log message follows this line ---")
-
-(defvar git-log-edit-font-lock-keywords
-  `(("^\\(Author:\\|Date:\\|Merge:\\|Signed-off-by:\\)\\(.*\\)$"
-     (1 font-lock-keyword-face)
-     (2 font-lock-function-name-face))
-    (,(concat "^\\(" (regexp-quote git-log-msg-separator) "\\)$")
-     (1 font-lock-comment-face))))
-
-(defun git-get-env-strings (env)
-  "Build a list of NAME=VALUE strings from a list of environment strings."
-  (mapcar (lambda (entry) (concat (car entry) "=" (cdr entry))) env))
-
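Note that the ENV argument here (and in git-run-command-region and git-run-hook below) is an alist of (NAME . VALUE) pairs rather than preformatted strings; for example:

    (git-get-env-strings '(("GIT_AUTHOR_NAME" . "A U Thor")
                           ("GIT_AUTHOR_EMAIL" . "author@example.com")))
    ;; => ("GIT_AUTHOR_NAME=A U Thor" "GIT_AUTHOR_EMAIL=author@example.com")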
-(defun git-call-process (buffer &rest args)
-  "Wrapper for call-process that sets environment strings."
-  (apply #'call-process "git" nil buffer nil args))
-
-(defun git-call-process-display-error (&rest args)
-  "Wrapper for call-process that displays error messages."
-  (let* ((dir default-directory)
-         (buffer (get-buffer-create "*Git Command Output*"))
-         (ok (with-current-buffer buffer
-               (let ((default-directory dir)
-                     (buffer-read-only nil))
-                 (erase-buffer)
-                 (eq 0 (apply #'git-call-process (list buffer t) args))))))
-    (unless ok (display-message-or-buffer buffer))
-    ok))
-
-(defun git-call-process-string (&rest args)
-  "Wrapper for call-process that returns the process output as a string,
-or nil if the git command failed."
-  (with-temp-buffer
-    (and (eq 0 (apply #'git-call-process t args))
-         (buffer-string))))
-
-(defun git-call-process-string-display-error (&rest args)
-  "Wrapper for call-process that displays error message and returns
-the process output as a string, or nil if the git command failed."
-  (with-temp-buffer
-    (if (eq 0 (apply #'git-call-process (list t t) args))
-        (buffer-string)
-      (display-message-or-buffer (current-buffer))
-      nil)))
-
-(defun git-run-process-region (buffer start end program args)
-  "Run a git process with a buffer region as input."
-  (let ((output-buffer (current-buffer))
-        (dir default-directory))
-    (with-current-buffer buffer
-      (cd dir)
-      (apply #'call-process-region start end program
-             nil (list output-buffer t) nil args))))
-
-(defun git-run-command-buffer (buffer-name &rest args)
-  "Run a git command, sending the output to a buffer named BUFFER-NAME."
-  (let ((dir default-directory)
-        (buffer (get-buffer-create buffer-name)))
-    (message "Running git %s..." (car args))
-    (with-current-buffer buffer
-      (let ((default-directory dir)
-            (buffer-read-only nil))
-        (erase-buffer)
-        (apply #'git-call-process buffer args)))
-    (message "Running git %s...done" (car args))
-    buffer))
-
-(defun git-run-command-region (buffer start end env &rest args)
-  "Run a git command with specified buffer region as input."
-  (with-temp-buffer
-    (if (eq 0 (if env
-                  (git-run-process-region
-                   buffer start end "env"
-                   (append (git-get-env-strings env) (list "git") args))
-                (git-run-process-region buffer start end "git" args)))
-        (buffer-string)
-      (display-message-or-buffer (current-buffer))
-      nil)))
-
-(defun git-run-hook (hook env &rest args)
-  "Run a git hook and display its output if any."
-  (let ((dir default-directory)
-        (hook-name (expand-file-name (concat ".git/hooks/" hook))))
-    (or (not (file-executable-p hook-name))
-        (let (status (buffer (get-buffer-create "*Git Hook Output*")))
-          (with-current-buffer buffer
-            (erase-buffer)
-            (cd dir)
-            (setq status
-                  (if env
-                      (apply #'call-process "env" nil (list buffer t) nil
-                             (append (git-get-env-strings env) (list hook-name) args))
-                    (apply #'call-process hook-name nil (list buffer t) nil args))))
-          (display-message-or-buffer buffer)
-          (eq 0 status)))))
-
-(defun git-get-string-sha1 (string)
-  "Read a SHA1 from the specified string."
-  (and string
-       (string-match "[0-9a-f]\\{40\\}" string)
-       (match-string 0 string)))
-
-(defun git-get-committer-name ()
-  "Return the name to use as GIT_COMMITTER_NAME."
-  ; copied from log-edit
-  (or git-committer-name
-      (git-config "user.name")
-      (and (boundp 'add-log-full-name) add-log-full-name)
-      (and (fboundp 'user-full-name) (user-full-name))
-      (and (boundp 'user-full-name) user-full-name)))
-
-(defun git-get-committer-email ()
-  "Return the email address to use as GIT_COMMITTER_EMAIL."
-  ; copied from log-edit
-  (or git-committer-email
-      (git-config "user.email")
-      (and (boundp 'add-log-mailing-address) add-log-mailing-address)
-      (and (fboundp 'user-mail-address) (user-mail-address))
-      (and (boundp 'user-mail-address) user-mail-address)))
-
-(defun git-get-commits-coding-system ()
-  "Return the coding system to use for commits."
-  (let ((repo-config (git-config "i18n.commitencoding")))
-    (or git-commits-coding-system
-        (and repo-config
-             (fboundp 'locale-charset-to-coding-system)
-             (locale-charset-to-coding-system repo-config))
-      'utf-8)))
-
-(defun git-get-logoutput-coding-system ()
-  "Return the coding system used for git-log output."
-  (let ((repo-config (or (git-config "i18n.logoutputencoding")
-                         (git-config "i18n.commitencoding"))))
-    (or git-commits-coding-system
-        (and repo-config
-             (fboundp 'locale-charset-to-coding-system)
-             (locale-charset-to-coding-system repo-config))
-      'utf-8)))
-
-(defun git-escape-file-name (name)
-  "Escape a file name if necessary."
-  (if (string-match "[\n\t\"\\]" name)
-      (concat "\""
-              (mapconcat (lambda (c)
-                   (case c
-                     (?\n "\\n")
-                     (?\t "\\t")
-                     (?\\ "\\\\")
-                     (?\" "\\\"")
-                     (t (char-to-string c))))
-                 name "")
-              "\"")
-    name))
-
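The escaping mirrors the quoting git itself applies to unusual path names; two illustrative evaluations:

    (git-escape-file-name "plain.txt")         ;; => "plain.txt"
    (git-escape-file-name "name\twith tab")    ;; => "\"name\\twith tab\""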
-(defun git-success-message (text files)
-  "Print a success message after having handled FILES."
-  (let ((n (length files)))
-    (if (equal n 1)
-        (message "%s %s" text (car files))
-      (message "%s %d files" text n))))
-
-(defun git-get-top-dir (dir)
-  "Retrieve the top-level directory of a git tree."
-  (let ((cdup (with-output-to-string
-                (with-current-buffer standard-output
-                  (cd dir)
-                  (unless (eq 0 (git-call-process t "rev-parse" "--show-cdup"))
-                    (error "cannot find top-level git tree for %s." dir))))))
-    (expand-file-name (concat (file-name-as-directory dir)
-                              (car (split-string cdup "\n"))))))
-
-;stolen from pcl-cvs
-(defun git-append-to-ignore (file)
-  "Add a file name to the ignore file in its directory."
-  (let* ((fullname (expand-file-name file))
-         (dir (file-name-directory fullname))
-         (name (file-name-nondirectory fullname))
-         (ignore-name (expand-file-name git-per-dir-ignore-file dir))
-         (created (not (file-exists-p ignore-name))))
-  (save-window-excursion
-    (set-buffer (find-file-noselect ignore-name))
-    (goto-char (point-max))
-    (unless (zerop (current-column)) (insert "\n"))
-    (insert "/" name "\n")
-    (sort-lines nil (point-min) (point-max))
-    (save-buffer))
-  (when created
-    (git-call-process nil "update-index" "--add" "--" (file-relative-name ignore-name)))
-  (git-update-status-files (list (file-relative-name ignore-name)))))
-
-; propertize definition for XEmacs, stolen from erc-compat
-(eval-when-compile
-  (unless (fboundp 'propertize)
-    (defun propertize (string &rest props)
-      (let ((string (copy-sequence string)))
-        (while props
-          (put-text-property 0 (length string) (nth 0 props) (nth 1 props) string)
-          (setq props (cddr props)))
-        string))))
-
-;;;; Wrappers for basic git commands
-;;;; ------------------------------------------------------------
-
-(defun git-rev-parse (rev)
-  "Parse a revision name and return its SHA1."
-  (git-get-string-sha1
-   (git-call-process-string "rev-parse" rev)))
-
-(defun git-config (key)
-  "Retrieve the value associated to KEY in the git repository config file."
-  (let ((str (git-call-process-string "config" key)))
-    (and str (car (split-string str "\n")))))
-
-(defun git-symbolic-ref (ref)
-  "Wrapper for the git-symbolic-ref command."
-  (let ((str (git-call-process-string "symbolic-ref" ref)))
-    (and str (car (split-string str "\n")))))
-
-(defun git-update-ref (ref newval &optional oldval reason)
-  "Update a reference by calling git-update-ref."
-  (let ((args (and oldval (list oldval))))
-    (when newval (push newval args))
-    (push ref args)
-    (when reason
-     (push reason args)
-     (push "-m" args))
-    (unless newval (push "-d" args))
-    (apply 'git-call-process-display-error "update-ref" args)))
-
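The argument shuffling above simply builds an ordinary update-ref command line; two hypothetical calls (with new-sha and old-sha bound to the relevant object names) and the commands they run:

    (git-update-ref "HEAD" new-sha old-sha "commit: fix typo")
    ;; runs: git update-ref -m "commit: fix typo" HEAD <new-sha> <old-sha>
    (git-update-ref "refs/heads/topic" nil old-sha "branch: delete topic")
    ;; runs: git update-ref -d -m "branch: delete topic" refs/heads/topic <old-sha>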
-(defun git-for-each-ref (&rest specs)
-  "Return a list of refs using git-for-each-ref.
-Each entry is a cons of (SHORT-NAME . FULL-NAME)."
-  (let (refs)
-    (with-temp-buffer
-      (apply #'git-call-process t "for-each-ref" "--format=%(refname)" specs)
-      (goto-char (point-min))
-      (while (re-search-forward "^[^/\n]+/[^/\n]+/\\(.+\\)$" nil t)
-       (push (cons (match-string 1) (match-string 0)) refs)))
-    (nreverse refs)))
-
-(defun git-read-tree (tree &optional index-file)
-  "Read a tree into the index file."
-  (let ((process-environment
-         (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file))) process-environment)))
-    (apply 'git-call-process-display-error "read-tree" (if tree (list tree)))))
-
-(defun git-write-tree (&optional index-file)
-  "Call git-write-tree and return the resulting tree SHA1 as a string."
-  (let ((process-environment
-         (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file))) process-environment)))
-    (git-get-string-sha1
-     (git-call-process-string-display-error "write-tree"))))
-
-(defun git-commit-tree (buffer tree parent)
-  "Create a commit and possibly update HEAD.
-Create a commit with the message in BUFFER using the tree with hash TREE.
-Use PARENT as the parent of the new commit. If PARENT is the current \"HEAD\",
-update the \"HEAD\" reference to the new commit."
-  (let ((author-name (git-get-committer-name))
-        (author-email (git-get-committer-email))
-        (subject "commit (initial): ")
-        author-date log-start log-end args coding-system-for-write)
-    (when parent
-      (setq subject "commit: ")
-      (push "-p" args)
-      (push parent args))
-    (with-current-buffer buffer
-      (goto-char (point-min))
-      (if
-          (setq log-start (re-search-forward (concat "^" (regexp-quote git-log-msg-separator) "\n") nil t))
-          (save-restriction
-            (narrow-to-region (point-min) log-start)
-            (goto-char (point-min))
-            (when (re-search-forward "^Author: +\\(.*?\\) *<\\(.*\\)> *$" nil t)
-              (setq author-name (match-string 1)
-                    author-email (match-string 2)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Date: +\\(.*\\)$" nil t)
-              (setq author-date (match-string 1)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Merge: +\\(.*\\)" nil t)
-              (setq subject "commit (merge): ")
-              (dolist (parent (split-string (match-string 1) " +" t))
-                (push "-p" args)
-                (push parent args))))
-        (setq log-start (point-min)))
-      (setq log-end (point-max))
-      (goto-char log-start)
-      (when (re-search-forward ".*$" nil t)
-        (setq subject (concat subject (match-string 0))))
-      (setq coding-system-for-write buffer-file-coding-system))
-    (let ((commit
-           (git-get-string-sha1
-            (let ((env `(("GIT_AUTHOR_NAME" . ,author-name)
-                         ("GIT_AUTHOR_EMAIL" . ,author-email)
-                         ("GIT_COMMITTER_NAME" . ,(git-get-committer-name))
-                         ("GIT_COMMITTER_EMAIL" . ,(git-get-committer-email)))))
-              (when author-date (push `("GIT_AUTHOR_DATE" . ,author-date) env))
-              (apply #'git-run-command-region
-                     buffer log-start log-end env
-                     "commit-tree" tree (nreverse args))))))
-      (when commit (git-update-ref "HEAD" commit parent subject))
-      commit)))
-
-(defun git-empty-db-p ()
-  "Check if the git db is empty (no commit done yet)."
-  (not (eq 0 (git-call-process nil "rev-parse" "--verify" "HEAD"))))
-
-(defun git-get-merge-heads ()
-  "Retrieve the merge heads from the MERGE_HEAD file if present."
-  (let (heads)
-    (when (file-readable-p ".git/MERGE_HEAD")
-      (with-temp-buffer
-        (insert-file-contents ".git/MERGE_HEAD" nil nil nil t)
-        (goto-char (point-min))
-        (while (re-search-forward "[0-9a-f]\\{40\\}" nil t)
-          (push (match-string 0) heads))))
-    (nreverse heads)))
-
-(defun git-get-commit-description (commit)
-  "Get a one-line description of COMMIT."
-  (let ((coding-system-for-read (git-get-logoutput-coding-system)))
-    (let ((descr (git-call-process-string "log" "--max-count=1" "--pretty=oneline" commit)))
-      (if (and descr (string-match "\\`\\([0-9a-f]\\{40\\}\\) *\\(.*\\)$" descr))
-          (concat (substring (match-string 1 descr) 0 10) " - " (match-string 2 descr))
-        descr))))
-
-;;;; File info structure
-;;;; ------------------------------------------------------------
-
-; fileinfo structure stolen from pcl-cvs
-(defstruct (git-fileinfo
-            (:copier nil)
-            (:constructor git-create-fileinfo (state name &optional old-perm new-perm rename-state orig-name marked))
-            (:conc-name git-fileinfo->))
-  marked              ;; t/nil
-  state               ;; current state
-  name                ;; file name
-  old-perm new-perm   ;; permission flags
-  rename-state        ;; rename or copy state
-  orig-name           ;; original name for renames or copies
-  needs-update        ;; whether file needs to be updated
-  needs-refresh)      ;; whether file needs to be refreshed
-
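As a quick illustration of the structure above, a status entry for a modified Makefile might be built and inspected like this (values are hypothetical):

    (let ((info (git-create-fileinfo 'modified "Makefile" #o100644 #o100644)))
      (list (git-fileinfo->state info)     ;; => modified
            (git-fileinfo->name info)      ;; => "Makefile"
            (git-fileinfo->marked info)))  ;; => nil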
-(defvar git-status nil)
-
-(defun git-set-fileinfo-state (info state)
-  "Set the state of a file info."
-  (unless (eq (git-fileinfo->state info) state)
-    (setf (git-fileinfo->state info) state
-         (git-fileinfo->new-perm info) (git-fileinfo->old-perm info)
-          (git-fileinfo->rename-state info) nil
-          (git-fileinfo->orig-name info) nil
-          (git-fileinfo->needs-update info) nil
-          (git-fileinfo->needs-refresh info) t)))
-
-(defun git-status-filenames-map (status func files &rest args)
-  "Apply FUNC to the status files names in the FILES list.
-The list must be sorted."
-  (when files
-    (let ((file (pop files))
-          (node (ewoc-nth status 0)))
-      (while (and file node)
-        (let* ((info (ewoc-data node))
-               (name (git-fileinfo->name info)))
-          (if (string-lessp name file)
-              (setq node (ewoc-next status node))
-            (if (string-equal name file)
-                (apply func info args))
-            (setq file (pop files))))))))
-
-(defun git-set-filenames-state (status files state)
-  "Set the state of a list of named files. The list must be sorted"
-  (when files
-    (git-status-filenames-map status #'git-set-fileinfo-state files state)
-    (unless state  ;; delete files whose state has been set to nil
-      (ewoc-filter status (lambda (info) (git-fileinfo->state info))))))
-
-(defun git-state-code (code)
-  "Convert from a string to a added/deleted/modified state."
-  (case (string-to-char code)
-    (?M 'modified)
-    (?? 'unknown)
-    (?A 'added)
-    (?D 'deleted)
-    (?U 'unmerged)
-    (?T 'modified)
-    (t nil)))
-
-(defun git-status-code-as-string (code)
-  "Format a git status code as string."
-  (case code
-    ('modified (propertize "Modified" 'face 'git-status-face))
-    ('unknown  (propertize "Unknown " 'face 'git-unknown-face))
-    ('added    (propertize "Added   " 'face 'git-status-face))
-    ('deleted  (propertize "Deleted " 'face 'git-status-face))
-    ('unmerged (propertize "Unmerged" 'face 'git-unmerged-face))
-    ('uptodate (propertize "Uptodate" 'face 'git-uptodate-face))
-    ('ignored  (propertize "Ignored " 'face 'git-ignored-face))
-    (t "?       ")))
-
-(defun git-file-type-as-string (old-perm new-perm)
-  "Return a string describing the file type based on its permissions."
-  (let* ((old-type (lsh (or old-perm 0) -9))
-        (new-type (lsh (or new-perm 0) -9))
-        (str (case new-type
-               (64  ;; file
-                (case old-type
-                  (64 nil)
-                  (80 "   (type change symlink -> file)")
-                  (112 "   (type change subproject -> file)")))
-                (80  ;; symlink
-                 (case old-type
-                   (64 "   (type change file -> symlink)")
-                   (112 "   (type change subproject -> symlink)")
-                   (t "   (symlink)")))
-                 (112  ;; subproject
-                  (case old-type
-                    (64 "   (type change file -> subproject)")
-                    (80 "   (type change symlink -> subproject)")
-                    (t "   (subproject)")))
-                  (72 nil)  ;; directory (internal, not a real git state)
-                 (0  ;; deleted or unknown
-                  (case old-type
-                    (80 "   (symlink)")
-                    (112 "   (subproject)")))
-                 (t (format "   (unknown type %o)" new-type)))))
-    (cond (str (propertize str 'face 'git-status-face))
-          ((eq new-type 72) "/")
-          (t ""))))
-
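The magic numbers above are git's octal mode bits shifted right by nine, which leaves only the object-type part of the mode; a quick worked check (72, i.e. ?\110, is the internal directory marker set by git-run-ls-files below):

    (lsh #o100644 -9)        ;; => 64   regular file
    (lsh #o120000 -9)        ;; => 80   symlink
    (lsh #o160000 -9)        ;; => 112  subproject (gitlink)
    (lsh (lsh ?\110 9) -9)   ;; => 72   internal directory marker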
-(defun git-rename-as-string (info)
-  "Return a string describing the copy or rename associated with INFO, or an empty string if none."
-  (let ((state (git-fileinfo->rename-state info)))
-    (if state
-        (propertize
-         (concat "   ("
-                 (if (eq state 'copy) "copied from "
-                   (if (eq (git-fileinfo->state info) 'added) "renamed from "
-                     "renamed to "))
-                 (git-escape-file-name (git-fileinfo->orig-name info))
-                 ")") 'face 'git-status-face)
-      "")))
-
-(defun git-permissions-as-string (old-perm new-perm)
-  "Format a permission change as string."
-  (propertize
-   (if (or (not old-perm)
-           (not new-perm)
-           (eq 0 (logand ?\111 (logxor old-perm new-perm))))
-       "  "
-     (if (eq 0 (logand ?\111 old-perm)) "+x" "-x"))
-  'face 'git-permission-face))
-
-(defun git-fileinfo-prettyprint (info)
-  "Pretty-printer for the git-fileinfo structure."
-  (let ((old-perm (git-fileinfo->old-perm info))
-       (new-perm (git-fileinfo->new-perm info)))
-    (insert (concat "   " (if (git-fileinfo->marked info) (propertize "*" 'face 'git-mark-face) " ")
-                   " " (git-status-code-as-string (git-fileinfo->state info))
-                   " " (git-permissions-as-string old-perm new-perm)
-                   "  " (git-escape-file-name (git-fileinfo->name info))
-                   (git-file-type-as-string old-perm new-perm)
-                   (git-rename-as-string info)))))
-
-(defun git-update-node-fileinfo (node info)
-  "Update the fileinfo of the specified node. The names are assumed to match already."
-  (let ((data (ewoc-data node)))
-    (setf
-     ;; preserve the marked flag
-     (git-fileinfo->marked info) (git-fileinfo->marked data)
-     (git-fileinfo->needs-update data) nil)
-    (when (not (equal info data))
-      (setf (git-fileinfo->needs-refresh info) t
-            (ewoc-data node) info))))
-
-(defun git-insert-info-list (status infolist files)
-  "Insert a sorted list of file infos in the status buffer, replacing existing ones if any."
-  (let* ((info (pop infolist))
-         (node (ewoc-nth status 0))
-         (name (and info (git-fileinfo->name info)))
-         remaining)
-    (while info
-      (let ((nodename (and node (git-fileinfo->name (ewoc-data node)))))
-        (while (and files (string-lessp (car files) name))
-          (push (pop files) remaining))
-        (when (and files (string-equal (car files) name))
-          (setq files (cdr files)))
-        (cond ((not nodename)
-               (setq node (ewoc-enter-last status info))
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info))))
-              ((string-lessp nodename name)
-               (setq node (ewoc-next status node)))
-              ((string-equal nodename name)
-               ;; preserve the marked flag
-               (git-update-node-fileinfo node info)
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info))))
-              (t
-               (setq node (ewoc-enter-before status node info))
-               (setq info (pop infolist))
-               (setq name (and info (git-fileinfo->name info)))))))
-    (nconc (nreverse remaining) files)))
-
-(defun git-run-diff-index (status files)
-  "Run git-diff-index on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "diff-index" "-z" "-M" "HEAD" "--" files)
-      (goto-char (point-min))
-      (while (re-search-forward
-             ":\\([0-7]\\{6\\}\\) \\([0-7]\\{6\\}\\) [0-9a-f]\\{40\\} [0-9a-f]\\{40\\} \\(\\([ADMUT]\\)\0\\([^\0]+\\)\\|\\([CR]\\)[0-9]*\0\\([^\0]+\\)\0\\([^\0]+\\)\\)\0"
-              nil t 1)
-        (let ((old-perm (string-to-number (match-string 1) 8))
-              (new-perm (string-to-number (match-string 2) 8))
-              (state (or (match-string 4) (match-string 6)))
-              (name (or (match-string 5) (match-string 7)))
-              (new-name (match-string 8)))
-          (if new-name  ; copy or rename
-              (if (eq ?C (string-to-char state))
-                  (push (git-create-fileinfo 'added new-name old-perm new-perm 'copy name) infolist)
-                (push (git-create-fileinfo 'deleted name 0 0 'rename new-name) infolist)
-                (push (git-create-fileinfo 'added new-name old-perm new-perm 'rename name) infolist))
-            (push (git-create-fileinfo (git-state-code state) name old-perm new-perm) infolist)))))
-    (setq infolist (sort (nreverse infolist)
-                         (lambda (info1 info2)
-                           (string-lessp (git-fileinfo->name info1)
-                                         (git-fileinfo->name info2)))))
-    (git-insert-info-list status infolist files)))
-
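The regexp above matches git's raw diff output as produced by `git diff-index -z -M HEAD`: each record carries the old and new modes, two object names, a status letter (with an optional similarity score for renames and copies) and one or two NUL-terminated paths. Two illustrative records, with \0 standing for NUL and <40-hex sha> standing for the full object names:

    :100644 100644 <40-hex sha> <40-hex sha> M\0Makefile\0
    :100644 100644 <40-hex sha> <40-hex sha> R086\0old-name.c\0new-name.c\0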
-(defun git-find-status-file (status file)
-  "Find a given file in the status ewoc and return its node."
-  (let ((node (ewoc-nth status 0)))
-    (while (and node (not (string= file (git-fileinfo->name (ewoc-data node)))))
-      (setq node (ewoc-next status node)))
-    node))
-
-(defun git-run-ls-files (status files default-state &rest options)
-  "Run git-ls-files on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "ls-files" "-z" (append options (list "--") files))
-      (goto-char (point-min))
-      (while (re-search-forward "\\([^\0]*?\\)\\(/?\\)\0" nil t 1)
-        (let ((name (match-string 1)))
-          (push (git-create-fileinfo default-state name 0
-                                     (if (string-equal "/" (match-string 2)) (lsh ?\110 9) 0))
-                infolist))))
-    (setq infolist (nreverse infolist))  ;; assume it is sorted already
-    (git-insert-info-list status infolist files)))
-
-(defun git-run-ls-files-cached (status files default-state)
-  "Run git-ls-files -c on FILES and parse the results into STATUS.
-Return the list of files that haven't been handled."
-  (let (infolist)
-    (with-temp-buffer
-      (apply #'git-call-process t "ls-files" "-z" "-s" "-c" "--" files)
-      (goto-char (point-min))
-      (while (re-search-forward "\\([0-7]\\{6\\}\\) [0-9a-f]\\{40\\} 0\t\\([^\0]+\\)\0" nil t)
-       (let* ((new-perm (string-to-number (match-string 1) 8))
-              (old-perm (if (eq default-state 'added) 0 new-perm))
-              (name (match-string 2)))
-         (push (git-create-fileinfo default-state name old-perm new-perm) infolist))))
-    (setq infolist (nreverse infolist))  ;; assume it is sorted already
-    (git-insert-info-list status infolist files)))
-
-(defun git-run-ls-unmerged (status files)
-  "Run git-ls-files -u on FILES and parse the results into STATUS."
-  (with-temp-buffer
-    (apply #'git-call-process t "ls-files" "-z" "-u" "--" files)
-    (goto-char (point-min))
-    (let (unmerged-files)
-      (while (re-search-forward "[0-7]\\{6\\} [0-9a-f]\\{40\\} [123]\t\\([^\0]+\\)\0" nil t)
-        (push (match-string 1) unmerged-files))
-      (setq unmerged-files (nreverse unmerged-files))  ;; assume it is sorted already
-      (git-set-filenames-state status unmerged-files 'unmerged))))
-
-(defun git-get-exclude-files ()
-  "Get the list of exclude files to pass to git-ls-files."
-  (let (files
-        (config (git-config "core.excludesfile")))
-    (when (file-readable-p ".git/info/exclude")
-      (push ".git/info/exclude" files))
-    (when (and config (file-readable-p config))
-      (push config files))
-    files))
-
-(defun git-run-ls-files-with-excludes (status files default-state &rest options)
-  "Run git-ls-files on FILES with appropriate --exclude-from options."
-  (let ((exclude-files (git-get-exclude-files)))
-    (apply #'git-run-ls-files status files default-state "--directory" "--no-empty-directory"
-           (concat "--exclude-per-directory=" git-per-dir-ignore-file)
-           (append options (mapcar (lambda (f) (concat "--exclude-from=" f)) exclude-files)))))
-
-(defun git-update-status-files (&optional files mark-files)
-  "Update the status of FILES from the index.
-The FILES list must be sorted."
-  (unless git-status (error "Not in git-status buffer."))
-  ;; set the needs-update flag on existing files
-  (if files
-      (git-status-filenames-map
-       git-status (lambda (info) (setf (git-fileinfo->needs-update info) t)) files)
-    (ewoc-map (lambda (info) (setf (git-fileinfo->needs-update info) t) nil) git-status)
-    (git-call-process nil "update-index" "--refresh")
-    (when git-show-uptodate
-      (git-run-ls-files-cached git-status nil 'uptodate)))
-  (let ((remaining-files
-          (if (git-empty-db-p) ; we need some special handling for an empty db
-             (git-run-ls-files-cached git-status files 'added)
-            (git-run-diff-index git-status files))))
-    (git-run-ls-unmerged git-status files)
-    (when (or remaining-files (and git-show-unknown (not files)))
-      (setq remaining-files (git-run-ls-files-with-excludes git-status remaining-files 'unknown "-o")))
-    (when (or remaining-files (and git-show-ignored (not files)))
-      (setq remaining-files (git-run-ls-files-with-excludes git-status remaining-files 'ignored "-o" "-i")))
-    (unless files
-      (setq remaining-files (git-get-filenames (ewoc-collect git-status #'git-fileinfo->needs-update))))
-    (when remaining-files
-      (setq remaining-files (git-run-ls-files-cached git-status remaining-files 'uptodate)))
-    (git-set-filenames-state git-status remaining-files nil)
-    (when mark-files (git-mark-files git-status files))
-    (git-refresh-files)
-    (git-refresh-ewoc-hf git-status)))
-
-(defun git-mark-files (status files)
-  "Mark all the specified FILES, and unmark the others."
-  (let ((file (and files (pop files)))
-        (node (ewoc-nth status 0)))
-    (while node
-      (let ((info (ewoc-data node)))
-        (if (and file (string-equal (git-fileinfo->name info) file))
-            (progn
-              (unless (git-fileinfo->marked info)
-                (setf (git-fileinfo->marked info) t)
-                (setf (git-fileinfo->needs-refresh info) t))
-              (setq file (pop files))
-              (setq node (ewoc-next status node)))
-          (when (git-fileinfo->marked info)
-            (setf (git-fileinfo->marked info) nil)
-            (setf (git-fileinfo->needs-refresh info) t))
-          (if (and file (string-lessp file (git-fileinfo->name info)))
-              (setq file (pop files))
-            (setq node (ewoc-next status node))))))))
-
-(defun git-marked-files ()
-  "Return a list of all marked files, or if none a list containing just the file at cursor position."
-  (unless git-status (error "Not in git-status buffer."))
-  (or (ewoc-collect git-status (lambda (info) (git-fileinfo->marked info)))
-      (list (ewoc-data (ewoc-locate git-status)))))
-
-(defun git-marked-files-state (&rest states)
-  "Return a sorted list of marked files that are in the specified states."
-  (let ((files (git-marked-files))
-        result)
-    (dolist (info files)
-      (when (memq (git-fileinfo->state info) states)
-        (push info result)))
-    (nreverse result)))
-
-(defun git-refresh-files ()
-  "Refresh all files that need it and clear the needs-refresh flag."
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map
-   (lambda (info)
-     (let ((refresh (git-fileinfo->needs-refresh info)))
-       (setf (git-fileinfo->needs-refresh info) nil)
-       refresh))
-   git-status)
-  ; move back to goal column
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-refresh-ewoc-hf (status)
-  "Refresh the ewoc header and footer."
-  (let ((branch (git-symbolic-ref "HEAD"))
-        (head (if (git-empty-db-p) "Nothing committed yet"
-                (git-get-commit-description "HEAD")))
-        (merge-heads (git-get-merge-heads)))
-    (ewoc-set-hf status
-                 (format "Directory:  %s\nBranch:     %s\nHead:       %s%s\n"
-                         default-directory
-                         (if branch
-                             (if (string-match "^refs/heads/" branch)
-                                 (substring branch (match-end 0))
-                               branch)
-                           "none (detached HEAD)")
-                         head
-                         (if merge-heads
-                             (concat "\nMerging:    "
-                                     (mapconcat (lambda (str) (git-get-commit-description str)) merge-heads "\n            "))
-                           ""))
-                 (if (ewoc-nth status 0) "" "    No changes."))))
-
-(defun git-get-filenames (files)
-  (mapcar (lambda (info) (git-fileinfo->name info)) files))
-
-(defun git-update-index (index-file files)
-  "Run git-update-index on a list of files."
-  (let ((process-environment (append (and index-file (list (concat "GIT_INDEX_FILE=" index-file)))
-                                     process-environment))
-        added deleted modified)
-    (dolist (info files)
-      (case (git-fileinfo->state info)
-        ('added (push info added))
-        ('deleted (push info deleted))
-        ('modified (push info modified))))
-    (and
-     (or (not added) (apply #'git-call-process-display-error "update-index" "--add" "--" (git-get-filenames added)))
-     (or (not deleted) (apply #'git-call-process-display-error "update-index" "--remove" "--" (git-get-filenames deleted)))
-     (or (not modified) (apply #'git-call-process-display-error "update-index" "--" (git-get-filenames modified))))))
-
-(defun git-run-pre-commit-hook ()
-  "Run the pre-commit hook if any."
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((files (git-marked-files-state 'added 'deleted 'modified)))
-    (or (not files)
-        (not (file-executable-p ".git/hooks/pre-commit"))
-        (let ((index-file (make-temp-file "gitidx")))
-          (unwind-protect
-            (let ((head-tree (unless (git-empty-db-p) (git-rev-parse "HEAD^{tree}"))))
-              (git-read-tree head-tree index-file)
-              (git-update-index index-file files)
-              (git-run-hook "pre-commit" `(("GIT_INDEX_FILE" . ,index-file))))
-          (delete-file index-file))))))
-
-(defun git-do-commit ()
-  "Perform the actual commit using the current buffer as log message."
-  (interactive)
-  (let ((buffer (current-buffer))
-        (index-file (make-temp-file "gitidx")))
-    (with-current-buffer log-edit-parent-buffer
-      (if (git-marked-files-state 'unmerged)
-          (message "You cannot commit unmerged files, resolve them first.")
-        (unwind-protect
-            (let ((files (git-marked-files-state 'added 'deleted 'modified))
-                  head tree head-tree)
-              (unless (git-empty-db-p)
-                (setq head (git-rev-parse "HEAD")
-                      head-tree (git-rev-parse "HEAD^{tree}")))
-              (message "Running git commit...")
-              (when
-                  (and
-                   (git-read-tree head-tree index-file)
-                   (git-update-index nil files)         ;update both the default index
-                   (git-update-index index-file files)  ;and the temporary one
-                   (setq tree (git-write-tree index-file)))
-                (if (or (not (string-equal tree head-tree))
-                        (yes-or-no-p "The tree was not modified; do you really want to perform an empty commit? "))
-                    (let ((commit (git-commit-tree buffer tree head)))
-                      (when commit
-                        (condition-case nil (delete-file ".git/MERGE_HEAD") (error nil))
-                        (condition-case nil (delete-file ".git/MERGE_MSG") (error nil))
-                        (with-current-buffer buffer (erase-buffer))
-                        (git-update-status-files (git-get-filenames files))
-                        (git-call-process nil "rerere")
-                        (git-call-process nil "gc" "--auto")
-                        (message "Committed %s." commit)
-                        (git-run-hook "post-commit" nil)))
-                  (message "Commit aborted."))))
-          (delete-file index-file))))))
-
-
-;;;; Interactive functions
-;;;; ------------------------------------------------------------
-
-(defun git-mark-file ()
-  "Mark the file that the cursor is on and move to the next one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) t)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-next git-status 1)))
-
-(defun git-unmark-file ()
-  "Unmark the file that the cursor is on and move to the next one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) nil)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-next git-status 1)))
-
-(defun git-unmark-file-up ()
-  "Unmark the file that the cursor is on and move to the previous one."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((pos (ewoc-locate git-status))
-         (info (ewoc-data pos)))
-    (setf (git-fileinfo->marked info) nil)
-    (ewoc-invalidate git-status pos)
-    (ewoc-goto-prev git-status 1)))
-
-(defun git-mark-all ()
-  "Mark all files."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (unless (git-fileinfo->marked info)
-                             (setf (git-fileinfo->marked info) t))) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-unmark-all ()
-  "Unmark all files."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (when (git-fileinfo->marked info)
-                             (setf (git-fileinfo->marked info) nil)
-                             t)) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-toggle-all-marks ()
-  "Toggle all file marks."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-map (lambda (info) (setf (git-fileinfo->marked info) (not (git-fileinfo->marked info))) t) git-status)
-  ; move back to goal column after invalidate
-  (when goal-column (move-to-column goal-column)))
-
-(defun git-next-file (&optional n)
-  "Move the selection down N files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-goto-next git-status n))
-
-(defun git-prev-file (&optional n)
-  "Move the selection up N files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (ewoc-goto-prev git-status n))
-
-(defun git-next-unmerged-file (&optional n)
-  "Move the selection down N unmerged files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((last (ewoc-locate git-status))
-         (node (ewoc-next git-status last)))
-    (while (and node (> n 0))
-      (when (eq 'unmerged (git-fileinfo->state (ewoc-data node)))
-        (setq n (1- n))
-        (setq last node))
-      (setq node (ewoc-next git-status node)))
-    (ewoc-goto-node git-status last)))
-
-(defun git-prev-unmerged-file (&optional n)
-  "Move the selection up N unmerged files."
-  (interactive "p")
-  (unless git-status (error "Not in git-status buffer."))
-  (let* ((last (ewoc-locate git-status))
-         (node (ewoc-prev git-status last)))
-    (while (and node (> n 0))
-      (when (eq 'unmerged (git-fileinfo->state (ewoc-data node)))
-        (setq n (1- n))
-        (setq last node))
-      (setq node (ewoc-prev git-status node)))
-    (ewoc-goto-node git-status last)))
-
-(defun git-insert-file (file)
-  "Insert file(s) into the git-status buffer."
-  (interactive "fInsert file: ")
-  (git-update-status-files (list (file-relative-name file))))
-
-(defun git-add-file ()
-  "Add marked file(s) to the index cache."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'unknown 'ignored 'unmerged))))
-    ;; FIXME: add support for directories
-    (unless files
-      (push (file-relative-name (read-file-name "File to add: " nil nil t)) files))
-    (when (apply 'git-call-process-display-error "update-index" "--add" "--" files)
-      (git-update-status-files files)
-      (git-success-message "Added" files))))
-
-(defun git-ignore-file ()
-  "Add marked file(s) to the ignore list."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'unknown))))
-    (unless files
-      (push (file-relative-name (read-file-name "File to ignore: " nil nil t)) files))
-    (dolist (f files) (git-append-to-ignore f))
-    (git-update-status-files files)
-    (git-success-message "Ignored" files)))
-
-(defun git-remove-file ()
-  "Remove the marked file(s)."
-  (interactive)
-  (let ((files (git-get-filenames (git-marked-files-state 'added 'modified 'unknown 'uptodate 'ignored))))
-    (unless files
-      (push (file-relative-name (read-file-name "File to remove: " nil nil t)) files))
-    (if (yes-or-no-p
-         (if (cdr files)
-             (format "Remove %d files? " (length files))
-           (format "Remove %s? " (car files))))
-        (progn
-          (dolist (name files)
-            (ignore-errors
-              (if (file-directory-p name)
-                  (delete-directory name)
-                (delete-file name))))
-          (when (apply 'git-call-process-display-error "update-index" "--remove" "--" files)
-            (git-update-status-files files)
-            (git-success-message "Removed" files)))
-      (message "Aborting"))))
-
-(defun git-revert-file ()
-  "Revert changes to the marked file(s)."
-  (interactive)
-  (let ((files (git-marked-files-state 'added 'deleted 'modified 'unmerged))
-        added modified)
-    (when (and files
-               (yes-or-no-p
-                (if (cdr files)
-                    (format "Revert %d files? " (length files))
-                  (format "Revert %s? " (git-fileinfo->name (car files))))))
-      (dolist (info files)
-        (case (git-fileinfo->state info)
-          ('added (push (git-fileinfo->name info) added))
-          ('deleted (push (git-fileinfo->name info) modified))
-          ('unmerged (push (git-fileinfo->name info) modified))
-          ('modified (push (git-fileinfo->name info) modified))))
-      ;; check if a buffer contains one of the files and isn't saved
-      (dolist (file modified)
-        (let ((buffer (get-file-buffer file)))
-          (when (and buffer (buffer-modified-p buffer))
-            (error "Buffer %s is modified. Please kill or save modified buffers before reverting." (buffer-name buffer)))))
-      (let ((ok (and
-                 (or (not added)
-                     (apply 'git-call-process-display-error "update-index" "--force-remove" "--" added))
-                 (or (not modified)
-                     (apply 'git-call-process-display-error "checkout" "HEAD" modified))))
-            (names (git-get-filenames files)))
-        (git-update-status-files names)
-        (when ok
-          (dolist (file modified)
-            (let ((buffer (get-file-buffer file)))
-              (when buffer (with-current-buffer buffer (revert-buffer t t t)))))
-          (git-success-message "Reverted" names))))))
-
-(defun git-remove-handled ()
-  "Remove handled files from the status list."
-  (interactive)
-  (ewoc-filter git-status
-               (lambda (info)
-                 (case (git-fileinfo->state info)
-                   ('ignored git-show-ignored)
-                   ('uptodate git-show-uptodate)
-                   ('unknown git-show-unknown)
-                   (t t))))
-  (unless (ewoc-nth git-status 0)  ; refresh header if list is empty
-    (git-refresh-ewoc-hf git-status)))
-
-(defun git-toggle-show-uptodate ()
-  "Toogle the option for showing up-to-date files."
-  (interactive)
-  (if (setq git-show-uptodate (not git-show-uptodate))
-      (git-refresh-status)
-    (git-remove-handled)))
-
-(defun git-toggle-show-ignored ()
-  "Toogle the option for showing ignored files."
-  (interactive)
-  (if (setq git-show-ignored (not git-show-ignored))
-      (progn
-        (message "Inserting ignored files...")
-        (git-run-ls-files-with-excludes git-status nil 'ignored "-o" "-i")
-        (git-refresh-files)
-        (git-refresh-ewoc-hf git-status)
-        (message "Inserting ignored files...done"))
-    (git-remove-handled)))
-
-(defun git-toggle-show-unknown ()
-  "Toogle the option for showing unknown files."
-  (interactive)
-  (if (setq git-show-unknown (not git-show-unknown))
-      (progn
-        (message "Inserting unknown files...")
-        (git-run-ls-files-with-excludes git-status nil 'unknown "-o")
-        (git-refresh-files)
-        (git-refresh-ewoc-hf git-status)
-        (message "Inserting unknown files...done"))
-    (git-remove-handled)))
-
-(defun git-expand-directory (info)
-  "Expand the directory represented by INFO to list its files."
-  (when (eq (lsh (git-fileinfo->new-perm info) -9) ?\110)
-    (let ((dir (git-fileinfo->name info)))
-      (git-set-filenames-state git-status (list dir) nil)
-      (git-run-ls-files-with-excludes git-status (list (concat dir "/")) 'unknown "-o")
-      (git-refresh-files)
-      (git-refresh-ewoc-hf git-status)
-      t)))
-
-(defun git-setup-diff-buffer (buffer)
-  "Setup a buffer for displaying a diff."
-  (let ((dir default-directory))
-    (with-current-buffer buffer
-      (diff-mode)
-      (goto-char (point-min))
-      (setq default-directory dir)
-      (setq buffer-read-only t)))
-  (display-buffer buffer)
-  ; shrink window only if it displays the status buffer
-  (when (eq (window-buffer) (current-buffer))
-    (shrink-window-if-larger-than-buffer)))
-
-(defun git-diff-file ()
-  "Diff the marked file(s) against HEAD."
-  (interactive)
-  (let ((files (git-marked-files)))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M" "HEAD" "--" (git-get-filenames files)))))
-
-(defun git-diff-file-merge-head (arg)
-  "Diff the marked file(s) against the first merge head (or the nth one with a numeric prefix)."
-  (interactive "p")
-  (let ((files (git-marked-files))
-        (merge-heads (git-get-merge-heads)))
-    (unless merge-heads (error "No merge in progress"))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-index" "-p" "-M"
-            (or (nth (1- arg) merge-heads) "HEAD") "--" (git-get-filenames files)))))
-
-(defun git-diff-unmerged-file (stage)
-  "Diff the marked unmerged file(s) against the specified stage."
-  (let ((files (git-marked-files)))
-    (git-setup-diff-buffer
-     (apply #'git-run-command-buffer "*git-diff*" "diff-files" "-p" stage "--" (git-get-filenames files)))))
-
-(defun git-diff-file-base ()
-  "Diff the marked unmerged file(s) against the common base file."
-  (interactive)
-  (git-diff-unmerged-file "-1"))
-
-(defun git-diff-file-mine ()
-  "Diff the marked unmerged file(s) against my pre-merge version."
-  (interactive)
-  (git-diff-unmerged-file "-2"))
-
-(defun git-diff-file-other ()
-  "Diff the marked unmerged file(s) against the other's pre-merge version."
-  (interactive)
-  (git-diff-unmerged-file "-3"))
-
-(defun git-diff-file-combined ()
-  "Do a combined diff of the marked unmerged file(s)."
-  (interactive)
-  (git-diff-unmerged-file "-c"))
-
-(defun git-diff-file-idiff ()
-  "Perform an interactive diff on the current file."
-  (interactive)
-  (let ((files (git-marked-files-state 'added 'deleted 'modified)))
-    (unless (eq 1 (length files))
-      (error "Cannot perform an interactive diff on multiple files."))
-    (let* ((filename (car (git-get-filenames files)))
-           (buff1 (find-file-noselect filename))
-           (buff2 (git-run-command-buffer (concat filename ".~HEAD~") "cat-file" "blob" (concat "HEAD:" filename))))
-      (ediff-buffers buff1 buff2))))
-
-(defun git-log-file ()
-  "Display a log of changes to the marked file(s)."
-  (interactive)
-  (let* ((files (git-marked-files))
-         (coding-system-for-read git-commits-coding-system)
-         (buffer (apply #'git-run-command-buffer "*git-log*" "rev-list" "--pretty" "HEAD" "--" (git-get-filenames files))))
-    (with-current-buffer buffer
-      ; (git-log-mode)  FIXME: implement log mode
-      (goto-char (point-min))
-      (setq buffer-read-only t))
-    (display-buffer buffer)))
-
-(defun git-log-edit-files ()
-  "Return a list of marked files for use in the log-edit buffer."
-  (with-current-buffer log-edit-parent-buffer
-    (git-get-filenames (git-marked-files-state 'added 'deleted 'modified))))
-
-(defun git-log-edit-diff ()
-  "Run a diff of the current files being committed from a log-edit buffer."
-  (with-current-buffer log-edit-parent-buffer
-    (git-diff-file)))
-
-(defun git-append-sign-off (name email)
-  "Append a Signed-off-by entry to the current buffer, avoiding duplicates."
-  (let ((sign-off (format "Signed-off-by: %s <%s>" name email))
-        (case-fold-search t))
-    (goto-char (point-min))
-    (unless (re-search-forward (concat "^" (regexp-quote sign-off)) nil t)
-      (goto-char (point-min))
-      (unless (re-search-forward "^Signed-off-by: " nil t)
-        (setq sign-off (concat "\n" sign-off)))
-      (goto-char (point-max))
-      (insert sign-off "\n"))))
-
-(defun git-setup-log-buffer (buffer &optional merge-heads author-name author-email subject date msg)
-  "Setup the log buffer for a commit."
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((dir default-directory)
-        (committer-name (git-get-committer-name))
-        (committer-email (git-get-committer-email))
-        (sign-off git-append-signed-off-by))
-    (with-current-buffer buffer
-      (cd dir)
-      (erase-buffer)
-      (insert
-       (propertize
-        (format "Author: %s <%s>\n%s%s"
-                (or author-name committer-name)
-                (or author-email committer-email)
-                (if date (format "Date: %s\n" date) "")
-                (if merge-heads
-                    (format "Merge: %s\n"
-                            (mapconcat 'identity merge-heads " "))
-                  ""))
-        'face 'git-header-face)
-       (propertize git-log-msg-separator 'face 'git-separator-face)
-       "\n")
-      (when subject (insert subject "\n\n"))
-      (cond (msg (insert msg "\n"))
-            ((file-readable-p ".git/rebase-apply/msg")
-             (insert-file-contents ".git/rebase-apply/msg"))
-            ((file-readable-p ".git/MERGE_MSG")
-             (insert-file-contents ".git/MERGE_MSG")))
-      ; delete empty lines at end
-      (goto-char (point-min))
-      (when (re-search-forward "\n+\\'" nil t)
-        (replace-match "\n" t t))
-      (when sign-off (git-append-sign-off committer-name committer-email)))
-    buffer))
-
-(define-derived-mode git-log-edit-mode log-edit-mode "Git-Log-Edit"
-  "Major mode for editing git log messages.
-
-Set up git-specific `font-lock-keywords' for `log-edit-mode'."
-  (set (make-local-variable 'font-lock-defaults)
-       '(git-log-edit-font-lock-keywords t t)))
-
-(defun git-commit-file ()
-  "Commit the marked file(s), asking for a commit message."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (when (git-run-pre-commit-hook)
-    (let ((buffer (get-buffer-create "*git-commit*"))
-          (coding-system (git-get-commits-coding-system))
-          author-name author-email subject date)
-      (when (eq 0 (buffer-size buffer))
-        (when (file-readable-p ".git/rebase-apply/info")
-          (with-temp-buffer
-            (insert-file-contents ".git/rebase-apply/info")
-            (goto-char (point-min))
-            (when (re-search-forward "^Author: \\(.*\\)\nEmail: \\(.*\\)$" nil t)
-              (setq author-name (match-string 1))
-              (setq author-email (match-string 2)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Subject: \\(.*\\)$" nil t)
-              (setq subject (match-string 1)))
-            (goto-char (point-min))
-            (when (re-search-forward "^Date: \\(.*\\)$" nil t)
-              (setq date (match-string 1)))))
-        (git-setup-log-buffer buffer (git-get-merge-heads) author-name author-email subject date))
-      (if (boundp 'log-edit-diff-function)
-         (log-edit 'git-do-commit nil '((log-edit-listfun . git-log-edit-files)
-                                        (log-edit-diff-function . git-log-edit-diff)) buffer 'git-log-edit-mode)
-       (log-edit 'git-do-commit nil 'git-log-edit-files buffer
-                  'git-log-edit-mode))
-      (setq paragraph-separate (concat (regexp-quote git-log-msg-separator) "$\\|Author: \\|Date: \\|Merge: \\|Signed-off-by: \\|\f\\|[        ]*$"))
-      (setq buffer-file-coding-system coding-system)
-      (re-search-forward (regexp-quote (concat git-log-msg-separator "\n")) nil t))))
-
-(defun git-setup-commit-buffer (commit)
-  "Setup the commit buffer with the contents of COMMIT."
-  (let (parents author-name author-email subject date msg)
-    (with-temp-buffer
-      (let ((coding-system (git-get-logoutput-coding-system)))
-        (git-call-process t "log" "-1" "--pretty=medium" "--abbrev=40" commit)
-        (goto-char (point-min))
-        (when (re-search-forward "^Merge: *\\(.*\\)$" nil t)
-          (setq parents (cdr (split-string (match-string 1) " +"))))
-        (when (re-search-forward "^Author: *\\(.*\\) <\\(.*\\)>$" nil t)
-          (setq author-name (match-string 1))
-          (setq author-email (match-string 2)))
-        (when (re-search-forward "^Date: *\\(.*\\)$" nil t)
-          (setq date (match-string 1)))
-        (while (re-search-forward "^    \\(.*\\)$" nil t)
-          (push (match-string 1) msg))
-        (setq msg (nreverse msg))
-        (setq subject (pop msg))
-        (while (and msg (zerop (length (car msg))) (pop msg)))))
-    (git-setup-log-buffer (get-buffer-create "*git-commit*")
-                          parents author-name author-email subject date
-                          (mapconcat #'identity msg "\n"))))
-
-(defun git-get-commit-files (commit)
-  "Retrieve a sorted list of files modified by COMMIT."
-  (let (files)
-    (with-temp-buffer
-      (git-call-process t "diff-tree" "-m" "-r" "-z" "--name-only" "--no-commit-id" "--root" commit)
-      (goto-char (point-min))
-      (while (re-search-forward "\\([^\0]*\\)\0" nil t 1)
-        (push (match-string 1) files)))
-    (sort files #'string-lessp)))
-
-(defun git-read-commit-name (prompt &optional default)
-  "Ask for a commit name, with completion for local branch, remote branch and tag."
-  (completing-read prompt
-                   (list* "HEAD" "ORIG_HEAD" "FETCH_HEAD" (mapcar #'car (git-for-each-ref)))
-                  nil nil nil nil default))
-
-(defun git-checkout (branch &optional merge)
-  "Checkout a branch, tag, or any commit.
-Use a prefix arg if git should merge while checking out."
-  (interactive
-   (list (git-read-commit-name "Checkout: ")
-         current-prefix-arg))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((args (list branch "--")))
-    (when merge (push "-m" args))
-    (when (apply #'git-call-process-display-error "checkout" args)
-      (git-update-status-files))))
-
-(defun git-branch (branch)
-  "Create a branch from the current HEAD and switch to it."
-  (interactive (list (git-read-commit-name "Branch: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (if (git-rev-parse (concat "refs/heads/" branch))
-      (if (yes-or-no-p (format "Branch %s already exists, replace it? " branch))
-          (and (git-call-process-display-error "branch" "-f" branch)
-               (git-call-process-display-error "checkout" branch))
-        (message "Canceled."))
-    (git-call-process-display-error "checkout" "-b" branch))
-    (git-refresh-ewoc-hf git-status))
-
-(defun git-amend-commit ()
-  "Undo the last commit on HEAD, and set things up to commit an
-amended version of it."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (when (git-empty-db-p) (error "No commit to amend."))
-  (let* ((commit (git-rev-parse "HEAD"))
-         (files (git-get-commit-files commit)))
-    (when (if (git-rev-parse "HEAD^")
-              (git-call-process-display-error "reset" "--soft" "HEAD^")
-            (and (git-update-ref "ORIG_HEAD" commit)
-                 (git-update-ref "HEAD" nil commit)))
-      (git-update-status-files files t)
-      (git-setup-commit-buffer commit)
-      (git-commit-file))))
-
-(defun git-cherry-pick-commit (arg)
-  "Cherry-pick a commit."
-  (interactive (list (git-read-commit-name "Cherry-pick commit: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((commit (git-rev-parse (concat arg "^0"))))
-    (unless commit (error "Not a valid commit '%s'." arg))
-    (when (git-rev-parse (concat commit "^2"))
-      (error "Cannot cherry-pick a merge commit."))
-    (let ((files (git-get-commit-files commit))
-          (ok (git-call-process-display-error "cherry-pick" "-n" commit)))
-      (git-update-status-files files ok)
-      (with-current-buffer (git-setup-commit-buffer commit)
-        (goto-char (point-min))
-        (if (re-search-forward "^\n*Signed-off-by:" nil t 1)
-            (goto-char (match-beginning 0))
-          (goto-char (point-max)))
-        (insert "(cherry picked from commit " commit ")\n"))
-      (when ok (git-commit-file)))))
-
-(defun git-revert-commit (arg)
-  "Revert a commit."
-  (interactive (list (git-read-commit-name "Revert commit: ")))
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((commit (git-rev-parse (concat arg "^0"))))
-    (unless commit (error "Not a valid commit '%s'." arg))
-    (when (git-rev-parse (concat commit "^2"))
-      (error "Cannot revert a merge commit."))
-    (let ((files (git-get-commit-files commit))
-          (subject (git-get-commit-description commit))
-          (ok (git-call-process-display-error "revert" "-n" commit)))
-      (git-update-status-files files ok)
-      (when (string-match "^[0-9a-f]+ - \\(.*\\)$" subject)
-        (setq subject (match-string 1 subject)))
-      (git-setup-log-buffer (get-buffer-create "*git-commit*")
-                            (git-get-merge-heads) nil nil (format "Revert \"%s\"" subject) nil
-                            (format "This reverts commit %s.\n" commit))
-      (when ok (git-commit-file)))))
-
-(defun git-find-file ()
-  "Visit the current file in its own buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (unless (git-expand-directory info)
-      (find-file (git-fileinfo->name info))
-      (when (eq 'unmerged (git-fileinfo->state info))
-        (smerge-mode 1)))))
-
-(defun git-find-file-other-window ()
-  "Visit the current file in its own buffer in another window."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (find-file-other-window (git-fileinfo->name info))
-    (when (eq 'unmerged (git-fileinfo->state info))
-      (smerge-mode))))
-
-(defun git-find-file-imerge ()
-  "Visit the current file in interactive merge mode."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (find-file (git-fileinfo->name info))
-    (smerge-ediff)))
-
-(defun git-view-file ()
-  "View the current file in its own buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (let ((info (ewoc-data (ewoc-locate git-status))))
-    (view-file (git-fileinfo->name info))))
-
-(defun git-refresh-status ()
-  "Refresh the git status buffer."
-  (interactive)
-  (unless git-status (error "Not in git-status buffer."))
-  (message "Refreshing git status...")
-  (git-update-status-files)
-  (message "Refreshing git status...done"))
-
-(defun git-status-quit ()
-  "Quit git-status mode."
-  (interactive)
-  (bury-buffer))
-
-;;;; Major Mode
-;;;; ------------------------------------------------------------
-
-(defvar git-status-mode-hook nil
-  "Run after `git-status-mode' is setup.")
-
-(defvar git-status-mode-map nil
-  "Keymap for git major mode.")
-
-(defvar git-status nil
-  "List of all files managed by the git-status mode.")
-
-(unless git-status-mode-map
-  (let ((map (make-keymap))
-        (commit-map (make-sparse-keymap))
-        (diff-map (make-sparse-keymap))
-        (toggle-map (make-sparse-keymap)))
-    (suppress-keymap map)
-    (define-key map "?"   'git-help)
-    (define-key map "h"   'git-help)
-    (define-key map " "   'git-next-file)
-    (define-key map "a"   'git-add-file)
-    (define-key map "c"   'git-commit-file)
-    (define-key map "\C-c" commit-map)
-    (define-key map "d"    diff-map)
-    (define-key map "="   'git-diff-file)
-    (define-key map "f"   'git-find-file)
-    (define-key map "\r"  'git-find-file)
-    (define-key map "g"   'git-refresh-status)
-    (define-key map "i"   'git-ignore-file)
-    (define-key map "I"   'git-insert-file)
-    (define-key map "l"   'git-log-file)
-    (define-key map "m"   'git-mark-file)
-    (define-key map "M"   'git-mark-all)
-    (define-key map "n"   'git-next-file)
-    (define-key map "N"   'git-next-unmerged-file)
-    (define-key map "o"   'git-find-file-other-window)
-    (define-key map "p"   'git-prev-file)
-    (define-key map "P"   'git-prev-unmerged-file)
-    (define-key map "q"   'git-status-quit)
-    (define-key map "r"   'git-remove-file)
-    (define-key map "t"    toggle-map)
-    (define-key map "T"   'git-toggle-all-marks)
-    (define-key map "u"   'git-unmark-file)
-    (define-key map "U"   'git-revert-file)
-    (define-key map "v"   'git-view-file)
-    (define-key map "x"   'git-remove-handled)
-    (define-key map "\C-?" 'git-unmark-file-up)
-    (define-key map "\M-\C-?" 'git-unmark-all)
-    ; the commit submap
-    (define-key commit-map "\C-a" 'git-amend-commit)
-    (define-key commit-map "\C-b" 'git-branch)
-    (define-key commit-map "\C-o" 'git-checkout)
-    (define-key commit-map "\C-p" 'git-cherry-pick-commit)
-    (define-key commit-map "\C-v" 'git-revert-commit)
-    ; the diff submap
-    (define-key diff-map "b" 'git-diff-file-base)
-    (define-key diff-map "c" 'git-diff-file-combined)
-    (define-key diff-map "=" 'git-diff-file)
-    (define-key diff-map "e" 'git-diff-file-idiff)
-    (define-key diff-map "E" 'git-find-file-imerge)
-    (define-key diff-map "h" 'git-diff-file-merge-head)
-    (define-key diff-map "m" 'git-diff-file-mine)
-    (define-key diff-map "o" 'git-diff-file-other)
-    ; the toggle submap
-    (define-key toggle-map "u" 'git-toggle-show-uptodate)
-    (define-key toggle-map "i" 'git-toggle-show-ignored)
-    (define-key toggle-map "k" 'git-toggle-show-unknown)
-    (define-key toggle-map "m" 'git-toggle-all-marks)
-    (setq git-status-mode-map map))
-  (easy-menu-define git-menu git-status-mode-map
-    "Git Menu"
-    `("Git"
-      ["Refresh" git-refresh-status t]
-      ["Commit" git-commit-file t]
-      ["Checkout..." git-checkout t]
-      ["New Branch..." git-branch t]
-      ["Cherry-pick Commit..." git-cherry-pick-commit t]
-      ["Revert Commit..." git-revert-commit t]
-      ("Merge"
-       ["Next Unmerged File" git-next-unmerged-file t]
-       ["Prev Unmerged File" git-prev-unmerged-file t]
-       ["Interactive Merge File" git-find-file-imerge t]
-       ["Diff Against Common Base File" git-diff-file-base t]
-       ["Diff Combined" git-diff-file-combined t]
-       ["Diff Against Merge Head" git-diff-file-merge-head t]
-       ["Diff Against Mine" git-diff-file-mine t]
-       ["Diff Against Other" git-diff-file-other t])
-      "--------"
-      ["Add File" git-add-file t]
-      ["Revert File" git-revert-file t]
-      ["Ignore File" git-ignore-file t]
-      ["Remove File" git-remove-file t]
-      ["Insert File" git-insert-file t]
-      "--------"
-      ["Find File" git-find-file t]
-      ["View File" git-view-file t]
-      ["Diff File" git-diff-file t]
-      ["Interactive Diff File" git-diff-file-idiff t]
-      ["Log" git-log-file t]
-      "--------"
-      ["Mark" git-mark-file t]
-      ["Mark All" git-mark-all t]
-      ["Unmark" git-unmark-file t]
-      ["Unmark All" git-unmark-all t]
-      ["Toggle All Marks" git-toggle-all-marks t]
-      ["Hide Handled Files" git-remove-handled t]
-      "--------"
-      ["Show Uptodate Files" git-toggle-show-uptodate :style toggle :selected git-show-uptodate]
-      ["Show Ignored Files" git-toggle-show-ignored :style toggle :selected git-show-ignored]
-      ["Show Unknown Files" git-toggle-show-unknown :style toggle :selected git-show-unknown]
-      "--------"
-      ["Quit" git-status-quit t])))
-
-
-;; git mode should only run in the *git status* buffer
-(put 'git-status-mode 'mode-class 'special)
-
-(defun git-status-mode ()
-  "Major mode for interacting with Git.
-Commands:
-\\{git-status-mode-map}"
-  (kill-all-local-variables)
-  (buffer-disable-undo)
-  (setq mode-name "git status"
-        major-mode 'git-status-mode
-        goal-column 17
-        buffer-read-only t)
-  (use-local-map git-status-mode-map)
-  (let ((buffer-read-only nil))
-    (erase-buffer)
-  (let ((status (ewoc-create 'git-fileinfo-prettyprint "" "")))
-    (set (make-local-variable 'git-status) status))
-  (set (make-local-variable 'list-buffers-directory) default-directory)
-  (make-local-variable 'git-show-uptodate)
-  (make-local-variable 'git-show-ignored)
-  (make-local-variable 'git-show-unknown)
-  (run-hooks 'git-status-mode-hook)))
-
-(defun git-find-status-buffer (dir)
-  "Find the git status buffer handling a specified directory."
-  (let ((list (buffer-list))
-        (fulldir (expand-file-name dir))
-        found)
-    (while (and list (not found))
-      (let ((buffer (car list)))
-        (with-current-buffer buffer
-          (when (and list-buffers-directory
-                     (string-equal fulldir (expand-file-name list-buffers-directory))
-                    (eq major-mode 'git-status-mode))
-            (setq found buffer))))
-      (setq list (cdr list)))
-    found))
-
-(defun git-status (dir)
-  "Entry point into git-status mode."
-  (interactive "DSelect directory: ")
-  (setq dir (git-get-top-dir dir))
-  (if (file-exists-p (concat (file-name-as-directory dir) ".git"))
-      (let ((buffer (or (and git-reuse-status-buffer (git-find-status-buffer dir))
-                        (create-file-buffer (expand-file-name "*git-status*" dir)))))
-        (switch-to-buffer buffer)
-        (cd dir)
-        (git-status-mode)
-        (git-refresh-status)
-        (goto-char (point-min))
-        (add-hook 'after-save-hook 'git-update-saved-file))
-    (message "%s is not a git working tree." dir)))
-
-(defun git-update-saved-file ()
-  "Update the corresponding git-status buffer when a file is saved.
-Meant to be used in `after-save-hook'."
-  (let* ((file (expand-file-name buffer-file-name))
-         (dir (condition-case nil (git-get-top-dir (file-name-directory file)) (error nil)))
-         (buffer (and dir (git-find-status-buffer dir))))
-    (when buffer
-      (with-current-buffer buffer
-        (let ((filename (file-relative-name file dir)))
-          ; skip files located inside the .git directory
-          (unless (string-match "^\\.git/" filename)
-            (git-call-process nil "add" "--refresh" "--" filename)
-            (git-update-status-files (list filename))))))))
-
-(defun git-help ()
-  "Display help for Git mode."
-  (interactive)
-  (describe-function 'git-status-mode))
-
-(provide 'git)
-;;; git.el ends here
index a3eb19de0418cbb3bccf54ba7e97e331f079e32e..22648c3afb7b8fed6d05235dc33f0482aef672fa 100644 (file)
@@ -15,7 +15,7 @@ static char *get_stdin(void)
 
 static void show_new(enum object_type type, unsigned char *sha1_new)
 {
-       fprintf(stderr, "  %s: %s\n", typename(type),
+       fprintf(stderr, "  %s: %s\n", type_name(type),
                find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
 }
 
index df59bdfe97786b5d9f48777cfad80545ff6bbdc2..b2ea80f9edcafab01de5d4a5339341cd542034f3 100755 (executable)
@@ -13,7 +13,7 @@
 use 5.008;
 use strict;
 use warnings;
-use Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
 use File::Basename qw(dirname);
 use File::Copy;
 use File::Find;
index c414f0d9c7ecfac7074d0e052c19f73cae4344d1..75a43e23b6138754e7dc780aa3a643be38e1ba14 100755 (executable)
@@ -238,7 +238,7 @@ ($)
        my($d) = @_;
        $d =~ m#(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)#
                or die "Unparseable date: $d\n";
-       my $y=$1; $y-=1900 if $y>1900;
+       my $y=$1; $y+=1900 if $y<1000;
        return timegm($6||0,$5,$4,$3,$2-1,$y);
 }
 
index 6a2cdebdb78c5d9d5a507635f748bda7b2be676d..7ba78c4dff6811d4b60b01ec66882ebcd468df14 100755 (executable)
@@ -17,7 +17,7 @@
 # ln -sf /usr/share/git-core/contrib/hooks/pre-auto-gc-battery \
 #      hooks/pre-auto-gc
 
-if test -x /sbin/on_ac_power && /sbin/on_ac_power
+if test -x /sbin/on_ac_power && (/sbin/on_ac_power;test $? -ne 1)
 then
        exit 0
 elif test "$(cat /sys/class/power_supply/AC/online 2>/dev/null)" = 1
index dec085a235f4477c9079dc643c76db166f487761..d3f39a862ac9f6ec39bd08b1c3b48ea787bba04b 100755 (executable)
@@ -297,7 +297,7 @@ find_latest_squash () {
        main=
        sub=
        git log --grep="^git-subtree-dir: $dir/*\$" \
-               --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD |
+               --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD |
        while read a b junk
        do
                debug "$a $b $junk"
@@ -341,7 +341,7 @@ find_existing_splits () {
        main=
        sub=
        git log --grep="^git-subtree-dir: $dir/*\$" \
-               --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs |
+               --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs |
        while read a b junk
        do
                case "$a" in
@@ -382,7 +382,7 @@ copy_commit () {
        # We're going to set some environment vars here, so
        # do it in a subshell to get rid of them safely later
        debug copy_commit "{$1}" "{$2}" "{$3}"
-       git log -1 --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" |
+       git log -1 --no-show-signature --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" |
        (
                read GIT_AUTHOR_NAME
                read GIT_AUTHOR_EMAIL
@@ -462,8 +462,8 @@ squash_msg () {
                oldsub_short=$(git rev-parse --short "$oldsub")
                echo "Squashed '$dir/' changes from $oldsub_short..$newsub_short"
                echo
-               git log --pretty=tformat:'%h %s' "$oldsub..$newsub"
-               git log --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub"
+               git log --no-show-signature --pretty=tformat:'%h %s' "$oldsub..$newsub"
+               git log --no-show-signature --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub"
        else
                echo "Squashed '$dir/' content from commit $newsub_short"
        fi
@@ -475,7 +475,7 @@ squash_msg () {
 
 toptree_for_commit () {
        commit="$1"
-       git log -1 --pretty=format:'%T' "$commit" -- || exit $?
+       git rev-parse --verify "$commit^{tree}" || exit $?
 }
 
 subtree_for_commit () {
index 1a41a48e15efd7a6c3030e7a0d1097cbc08099c1..c480097a2a0cb3d780bdeff252da912c1b8e63b7 100644 (file)
--- a/convert.c
+++ b/convert.c
@@ -193,30 +193,30 @@ static enum eol output_eol(enum crlf_action crlf_action)
        return core_eol;
 }
 
-static void check_safe_crlf(const char *path, enum crlf_action crlf_action,
+static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_action,
                            struct text_stat *old_stats, struct text_stat *new_stats,
-                           enum safe_crlf checksafe)
+                           int conv_flags)
 {
        if (old_stats->crlf && !new_stats->crlf ) {
                /*
                 * CRLFs would not be restored by checkout
                 */
-               if (checksafe == SAFE_CRLF_WARN)
+               if (conv_flags & CONV_EOL_RNDTRP_DIE)
+                       die(_("CRLF would be replaced by LF in %s."), path);
+               else if (conv_flags & CONV_EOL_RNDTRP_WARN)
                        warning(_("CRLF will be replaced by LF in %s.\n"
                                  "The file will have its original line"
                                  " endings in your working directory."), path);
-               else /* i.e. SAFE_CRLF_FAIL */
-                       die(_("CRLF would be replaced by LF in %s."), path);
        } else if (old_stats->lonelf && !new_stats->lonelf ) {
                /*
                 * CRLFs would be added by checkout
                 */
-               if (checksafe == SAFE_CRLF_WARN)
+               if (conv_flags & CONV_EOL_RNDTRP_DIE)
+                       die(_("LF would be replaced by CRLF in %s"), path);
+               else if (conv_flags & CONV_EOL_RNDTRP_WARN)
                        warning(_("LF will be replaced by CRLF in %s.\n"
                                  "The file will have its original line"
                                  " endings in your working directory."), path);
-               else /* i.e. SAFE_CRLF_FAIL */
-                       die(_("LF would be replaced by CRLF in %s"), path);
        }
 }
 
@@ -268,7 +268,7 @@ static int will_convert_lf_to_crlf(size_t len, struct text_stat *stats,
 static int crlf_to_git(const struct index_state *istate,
                       const char *path, const char *src, size_t len,
                       struct strbuf *buf,
-                      enum crlf_action crlf_action, enum safe_crlf checksafe)
+                      enum crlf_action crlf_action, int conv_flags)
 {
        struct text_stat stats;
        char *dst;
@@ -298,12 +298,12 @@ static int crlf_to_git(const struct index_state *istate,
                 * unless we want to renormalize in a merge or
                 * cherry-pick.
                 */
-               if ((checksafe != SAFE_CRLF_RENORMALIZE) &&
+               if ((!(conv_flags & CONV_EOL_RENORMALIZE)) &&
                    has_crlf_in_index(istate, path))
                        convert_crlf_into_lf = 0;
        }
-       if ((checksafe == SAFE_CRLF_WARN ||
-           (checksafe == SAFE_CRLF_FAIL)) && len) {
+       if (((conv_flags & CONV_EOL_RNDTRP_WARN) ||
+            ((conv_flags & CONV_EOL_RNDTRP_DIE) && len))) {
                struct text_stat new_stats;
                memcpy(&new_stats, &stats, sizeof(new_stats));
                /* simulate "git add" */
@@ -316,7 +316,7 @@ static int crlf_to_git(const struct index_state *istate,
                        new_stats.crlf += new_stats.lonelf;
                        new_stats.lonelf = 0;
                }
-               check_safe_crlf(path, crlf_action, &stats, &new_stats, checksafe);
+               check_global_conv_flags_eol(path, crlf_action, &stats, &new_stats, conv_flags);
        }
        if (!convert_crlf_into_lf)
                return 0;
@@ -898,7 +898,7 @@ static int ident_to_git(const char *path, const char *src, size_t len,
 static int ident_to_worktree(const char *path, const char *src, size_t len,
                              struct strbuf *buf, int ident)
 {
-       unsigned char sha1[20];
+       struct object_id oid;
        char *to_free = NULL, *dollar, *spc;
        int cnt;
 
@@ -912,9 +912,9 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
        /* are we "faking" in place editing ? */
        if (src == buf->buf)
                to_free = strbuf_detach(buf, NULL);
-       hash_sha1_file(src, len, "blob", sha1);
+       hash_object_file(src, len, "blob", &oid);
 
-       strbuf_grow(buf, len + cnt * 43);
+       strbuf_grow(buf, len + cnt * (the_hash_algo->hexsz + 3));
        for (;;) {
                /* step 1: run to the next '$' */
                dollar = memchr(src, '$', len);
@@ -969,7 +969,7 @@ static int ident_to_worktree(const char *path, const char *src, size_t len,
 
                /* step 4: substitute */
                strbuf_addstr(buf, "Id: ");
-               strbuf_add(buf, sha1_to_hex(sha1), 40);
+               strbuf_addstr(buf, oid_to_hex(&oid));
                strbuf_addstr(buf, " $");
        }
        strbuf_add(buf, src, len);
@@ -1129,7 +1129,7 @@ const char *get_convert_attr_ascii(const char *path)
 
 int convert_to_git(const struct index_state *istate,
                   const char *path, const char *src, size_t len,
-                   struct strbuf *dst, enum safe_crlf checksafe)
+                  struct strbuf *dst, int conv_flags)
 {
        int ret = 0;
        struct conv_attrs ca;
@@ -1144,8 +1144,8 @@ int convert_to_git(const struct index_state *istate,
                src = dst->buf;
                len = dst->len;
        }
-       if (checksafe != SAFE_CRLF_KEEP_CRLF) {
-               ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, checksafe);
+       if (!(conv_flags & CONV_EOL_KEEP_CRLF)) {
+               ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags);
                if (ret && dst) {
                        src = dst->buf;
                        len = dst->len;
@@ -1156,7 +1156,7 @@ int convert_to_git(const struct index_state *istate,
 
 void convert_to_git_filter_fd(const struct index_state *istate,
                              const char *path, int fd, struct strbuf *dst,
-                             enum safe_crlf checksafe)
+                             int conv_flags)
 {
        struct conv_attrs ca;
        convert_attrs(&ca, path);
@@ -1167,7 +1167,7 @@ void convert_to_git_filter_fd(const struct index_state *istate,
        if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
                die("%s: clean filter '%s' failed", path, ca.drv->name);
 
-       crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
+       crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
        ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
 }
 
@@ -1226,7 +1226,7 @@ int renormalize_buffer(const struct index_state *istate, const char *path,
                src = dst->buf;
                len = dst->len;
        }
-       return ret | convert_to_git(istate, path, src, len, dst, SAFE_CRLF_RENORMALIZE);
+       return ret | convert_to_git(istate, path, src, len, dst, CONV_EOL_RENORMALIZE);
 }
 
 /*****************************************************************
@@ -1510,7 +1510,7 @@ struct ident_filter {
        struct stream_filter filter;
        struct strbuf left;
        int state;
-       char ident[45]; /* ": x40 $" */
+       char ident[GIT_MAX_HEXSZ + 5]; /* ": x40 $" */
 };
 
 static int is_foreign_ident(const char *str)
@@ -1635,12 +1635,12 @@ static struct stream_filter_vtbl ident_vtbl = {
        ident_free_fn,
 };
 
-static struct stream_filter *ident_filter(const unsigned char *sha1)
+static struct stream_filter *ident_filter(const struct object_id *oid)
 {
        struct ident_filter *ident = xmalloc(sizeof(*ident));
 
        xsnprintf(ident->ident, sizeof(ident->ident),
-                 ": %s $", sha1_to_hex(sha1));
+                 ": %s $", oid_to_hex(oid));
        strbuf_init(&ident->left, 0);
        ident->filter.vtbl = &ident_vtbl;
        ident->state = 0;
@@ -1655,7 +1655,7 @@ static struct stream_filter *ident_filter(const unsigned char *sha1)
  * Note that you would be crazy to set CRLF, smuge/clean or ident to a
  * large binary blob you would want us not to slurp into the memory!
  */
-struct stream_filter *get_stream_filter(const char *path, const unsigned char *sha1)
+struct stream_filter *get_stream_filter(const char *path, const struct object_id *oid)
 {
        struct conv_attrs ca;
        struct stream_filter *filter = NULL;
@@ -1668,7 +1668,7 @@ struct stream_filter *get_stream_filter(const char *path, const unsigned char *s
                return NULL;
 
        if (ca.ident)
-               filter = ident_filter(sha1);
+               filter = ident_filter(oid);
 
        if (output_eol(ca.crlf_action) == EOL_CRLF)
                filter = cascade_filter(filter, lf_to_crlf_filter());
index 4f2da225a8926f92e465c7dea27cdc4589864e1f..2e9b4f49cc0acc697bf0304306b96e0b50e30aab 100644 (file)
--- a/convert.h
+++ b/convert.h
@@ -8,15 +8,12 @@
 
 struct index_state;
 
-enum safe_crlf {
-       SAFE_CRLF_FALSE = 0,
-       SAFE_CRLF_FAIL = 1,
-       SAFE_CRLF_WARN = 2,
-       SAFE_CRLF_RENORMALIZE = 3,
-       SAFE_CRLF_KEEP_CRLF = 4
-};
+#define CONV_EOL_RNDTRP_DIE   (1<<0) /* Die if CRLF to LF to CRLF is different */
+#define CONV_EOL_RNDTRP_WARN  (1<<1) /* Warn if CRLF to LF to CRLF is different */
+#define CONV_EOL_RENORMALIZE  (1<<2) /* Convert CRLF to LF */
+#define CONV_EOL_KEEP_CRLF    (1<<3) /* Keep CRLF line endings as is */
 
-extern enum safe_crlf safe_crlf;
+extern int global_conv_flags_eol;
 
 enum auto_crlf {
        AUTO_CRLF_FALSE = 0,
@@ -66,7 +63,7 @@ extern const char *get_convert_attr_ascii(const char *path);
 /* returns 1 if *dst was used */
 extern int convert_to_git(const struct index_state *istate,
                          const char *path, const char *src, size_t len,
-                         struct strbuf *dst, enum safe_crlf checksafe);
+                         struct strbuf *dst, int conv_flags);
 extern int convert_to_working_tree(const char *path, const char *src,
                                   size_t len, struct strbuf *dst);
 extern int async_convert_to_working_tree(const char *path, const char *src,
@@ -85,7 +82,7 @@ static inline int would_convert_to_git(const struct index_state *istate,
 extern void convert_to_git_filter_fd(const struct index_state *istate,
                                     const char *path, int fd,
                                     struct strbuf *dst,
-                                    enum safe_crlf checksafe);
+                                    int conv_flags);
 extern int would_convert_to_git_filter_fd(const char *path);
 
 /*****************************************************************
@@ -96,7 +93,7 @@ extern int would_convert_to_git_filter_fd(const char *path);
 
 struct stream_filter; /* opaque */
 
-extern struct stream_filter *get_stream_filter(const char *path, const unsigned char *);
+extern struct stream_filter *get_stream_filter(const char *path, const struct object_id *);
 extern void free_stream_filter(struct stream_filter *);
 extern int is_null_stream_filter(struct stream_filter *);
 
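
With the safe_crlf enum gone, end-of-line behaviour is expressed as independent bits that callers OR together rather than picking a single value. A minimal sketch of a caller under the declarations above (convert_to_git(), the_index and the CONV_EOL_* flags are taken from the patch; the helper name and its arguments are made up for illustration):

	/*
	 * Hypothetical call site (not part of this patch): renormalize a
	 * buffer while still warning when the CRLF<->LF round trip would
	 * change it.  The two concerns are now separate bits, so they can
	 * simply be combined with '|'.
	 */
	static void stage_renormalized(const char *path, const char *src, size_t len)
	{
		struct strbuf dst = STRBUF_INIT;
		int conv_flags = CONV_EOL_RENORMALIZE | CONV_EOL_RNDTRP_WARN;

		if (convert_to_git(&the_index, path, src, len, &dst, conv_flags)) {
			src = dst.buf;	/* conversion wrote its result into dst */
			len = dst.len;
		}
		/* ... hash or write src/len here, as before ... */
		strbuf_release(&dst);
	}
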
index 2adae04073816a781d01d85433d8d8922baafd7f..5eda7fb6af673ae82989dab871aaac74ec9ff629 100644 (file)
@@ -11,7 +11,7 @@
 #include "progress.h"
 #include "csum-file.h"
 
-static void flush(struct sha1file *f, const void *buf, unsigned int count)
+static void flush(struct hashfile *f, const void *buf, unsigned int count)
 {
        if (0 <= f->check_fd && count)  {
                unsigned char check_buffer[8192];
@@ -42,28 +42,28 @@ static void flush(struct sha1file *f, const void *buf, unsigned int count)
        }
 }
 
-void sha1flush(struct sha1file *f)
+void hashflush(struct hashfile *f)
 {
        unsigned offset = f->offset;
 
        if (offset) {
-               git_SHA1_Update(&f->ctx, f->buffer, offset);
+               the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
                flush(f, f->buffer, offset);
                f->offset = 0;
        }
 }
 
-int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
+int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags)
 {
        int fd;
 
-       sha1flush(f);
-       git_SHA1_Final(f->buffer, &f->ctx);
+       hashflush(f);
+       the_hash_algo->final_fn(f->buffer, &f->ctx);
        if (result)
                hashcpy(result, f->buffer);
        if (flags & (CSUM_CLOSE | CSUM_FSYNC)) {
                /* write checksum and close fd */
-               flush(f, f->buffer, 20);
+               flush(f, f->buffer, the_hash_algo->rawsz);
                if (flags & CSUM_FSYNC)
                        fsync_or_die(f->fd, f->name);
                if (close(f->fd))
@@ -86,7 +86,7 @@ int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
        return fd;
 }
 
-void sha1write(struct sha1file *f, const void *buf, unsigned int count)
+void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
 {
        while (count) {
                unsigned offset = f->offset;
@@ -110,7 +110,7 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count)
                buf = (char *) buf + nr;
                left -= nr;
                if (!left) {
-                       git_SHA1_Update(&f->ctx, data, offset);
+                       the_hash_algo->update_fn(&f->ctx, data, offset);
                        flush(f, data, offset);
                        offset = 0;
                }
@@ -118,15 +118,15 @@ void sha1write(struct sha1file *f, const void *buf, unsigned int count)
        }
 }
 
-struct sha1file *sha1fd(int fd, const char *name)
+struct hashfile *hashfd(int fd, const char *name)
 {
-       return sha1fd_throughput(fd, name, NULL);
+       return hashfd_throughput(fd, name, NULL);
 }
 
-struct sha1file *sha1fd_check(const char *name)
+struct hashfile *hashfd_check(const char *name)
 {
        int sink, check;
-       struct sha1file *f;
+       struct hashfile *f;
 
        sink = open("/dev/null", O_WRONLY);
        if (sink < 0)
@@ -134,14 +134,14 @@ struct sha1file *sha1fd_check(const char *name)
        check = open(name, O_RDONLY);
        if (check < 0)
                die_errno("unable to open '%s'", name);
-       f = sha1fd(sink, name);
+       f = hashfd(sink, name);
        f->check_fd = check;
        return f;
 }
 
-struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp)
+struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
 {
-       struct sha1file *f = xmalloc(sizeof(*f));
+       struct hashfile *f = xmalloc(sizeof(*f));
        f->fd = fd;
        f->check_fd = -1;
        f->offset = 0;
@@ -149,18 +149,18 @@ struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp
        f->tp = tp;
        f->name = name;
        f->do_crc = 0;
-       git_SHA1_Init(&f->ctx);
+       the_hash_algo->init_fn(&f->ctx);
        return f;
 }
 
-void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
 {
-       sha1flush(f);
+       hashflush(f);
        checkpoint->offset = f->total;
        checkpoint->ctx = f->ctx;
 }
 
-int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
 {
        off_t offset = checkpoint->offset;
 
@@ -169,17 +169,17 @@ int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint
                return -1;
        f->total = offset;
        f->ctx = checkpoint->ctx;
-       f->offset = 0; /* sha1flush() was called in checkpoint */
+       f->offset = 0; /* hashflush() was called in checkpoint */
        return 0;
 }
 
-void crc32_begin(struct sha1file *f)
+void crc32_begin(struct hashfile *f)
 {
        f->crc32 = crc32(0, NULL, 0);
        f->do_crc = 1;
 }
 
-uint32_t crc32_end(struct sha1file *f)
+uint32_t crc32_end(struct hashfile *f)
 {
        f->do_crc = 0;
        return f->crc32;
index 7530927d774562f82636aa773481ae93600c1756..992e5c014122d8fed3ee782d400e61de78e55271 100644 (file)
@@ -4,11 +4,11 @@
 struct progress;
 
 /* A SHA1-protected file */
-struct sha1file {
+struct hashfile {
        int fd;
        int check_fd;
        unsigned int offset;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
        off_t total;
        struct progress *tp;
        const char *name;
@@ -18,36 +18,36 @@ struct sha1file {
 };
 
 /* Checkpoint */
-struct sha1file_checkpoint {
+struct hashfile_checkpoint {
        off_t offset;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
 };
 
-extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *);
-extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *);
+extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
+extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
 
-/* sha1close flags */
+/* hashclose flags */
 #define CSUM_CLOSE     1
 #define CSUM_FSYNC     2
 
-extern struct sha1file *sha1fd(int fd, const char *name);
-extern struct sha1file *sha1fd_check(const char *name);
-extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
-extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern void sha1write(struct sha1file *, const void *, unsigned int);
-extern void sha1flush(struct sha1file *f);
-extern void crc32_begin(struct sha1file *);
-extern uint32_t crc32_end(struct sha1file *);
+extern struct hashfile *hashfd(int fd, const char *name);
+extern struct hashfile *hashfd_check(const char *name);
+extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+extern int hashclose(struct hashfile *, unsigned char *, unsigned int);
+extern void hashwrite(struct hashfile *, const void *, unsigned int);
+extern void hashflush(struct hashfile *f);
+extern void crc32_begin(struct hashfile *);
+extern uint32_t crc32_end(struct hashfile *);
 
-static inline void sha1write_u8(struct sha1file *f, uint8_t data)
+static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
 {
-       sha1write(f, &data, sizeof(data));
+       hashwrite(f, &data, sizeof(data));
 }
 
-static inline void sha1write_be32(struct sha1file *f, uint32_t data)
+static inline void hashwrite_be32(struct hashfile *f, uint32_t data)
 {
        data = htonl(data);
-       sha1write(f, &data, sizeof(data));
+       hashwrite(f, &data, sizeof(data));
 }
 
 #endif
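
Because the rename is purely mechanical, code written against the old sha1file API keeps exactly the same shape under the new spellings. A minimal sketch against the header above (hashfd(), hashwrite(), hashwrite_be32(), hashclose() and the CSUM_* flags come from csum-file.h, GIT_MAX_RAWSZ from cache.h; the writer itself is made up for illustration):

	/*
	 * Hypothetical writer (illustration only): stream a payload plus a
	 * big-endian length field to fd, then let csum-file append the
	 * trailing checksum and hand back a copy of it.
	 */
	static void write_with_trailer(int fd, const char *name,
				       const void *data, unsigned int len)
	{
		struct hashfile *f = hashfd(fd, name);
		unsigned char trailer[GIT_MAX_RAWSZ];	/* receives the checksum */

		hashwrite(f, data, len);
		hashwrite_be32(f, len);
		hashclose(f, trailer, CSUM_CLOSE | CSUM_FSYNC);
	}
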
index 72dfeaf6e296323db7baf9e4545e412c01878417..fe833ea7de7685968915c93950bb54768dd04586 100644 (file)
--- a/daemon.c
+++ b/daemon.c
@@ -9,7 +9,12 @@
 #define initgroups(x, y) (0) /* nothing */
 #endif
 
-static int log_syslog;
+static enum log_destination {
+       LOG_DESTINATION_UNSET = -1,
+       LOG_DESTINATION_NONE = 0,
+       LOG_DESTINATION_STDERR = 1,
+       LOG_DESTINATION_SYSLOG = 2,
+} log_destination = LOG_DESTINATION_UNSET;
 static int verbose;
 static int reuseaddr;
 static int informative_errors;
@@ -25,6 +30,7 @@ static const char daemon_usage[] =
 "           [--access-hook=<path>]\n"
 "           [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>]\n"
 "                      [--detach] [--user=<user> [--group=<group>]]\n"
+"           [--log-destination=(stderr|syslog|none)]\n"
 "           [<directory>...]";
 
 /* List of acceptable pathname prefixes */
@@ -74,11 +80,14 @@ static const char *get_ip_address(struct hostinfo *hi)
 
 static void logreport(int priority, const char *err, va_list params)
 {
-       if (log_syslog) {
+       switch (log_destination) {
+       case LOG_DESTINATION_SYSLOG: {
                char buf[1024];
                vsnprintf(buf, sizeof(buf), err, params);
                syslog(priority, "%s", buf);
-       } else {
+               break;
+       }
+       case LOG_DESTINATION_STDERR:
                /*
                 * Since stderr is set to buffered mode, the
                 * logging of different processes will not overlap
@@ -88,6 +97,11 @@ static void logreport(int priority, const char *err, va_list params)
                vfprintf(stderr, err, params);
                fputc('\n', stderr);
                fflush(stderr);
+               break;
+       case LOG_DESTINATION_NONE:
+               break;
+       case LOG_DESTINATION_UNSET:
+               BUG("log destination not initialized correctly");
        }
 }
 
@@ -1286,7 +1300,6 @@ int cmd_main(int argc, const char **argv)
                }
                if (!strcmp(arg, "--inetd")) {
                        inetd_mode = 1;
-                       log_syslog = 1;
                        continue;
                }
                if (!strcmp(arg, "--verbose")) {
@@ -1294,9 +1307,22 @@ int cmd_main(int argc, const char **argv)
                        continue;
                }
                if (!strcmp(arg, "--syslog")) {
-                       log_syslog = 1;
+                       log_destination = LOG_DESTINATION_SYSLOG;
                        continue;
                }
+               if (skip_prefix(arg, "--log-destination=", &v)) {
+                       if (!strcmp(v, "syslog")) {
+                               log_destination = LOG_DESTINATION_SYSLOG;
+                               continue;
+                       } else if (!strcmp(v, "stderr")) {
+                               log_destination = LOG_DESTINATION_STDERR;
+                               continue;
+                       } else if (!strcmp(v, "none")) {
+                               log_destination = LOG_DESTINATION_NONE;
+                               continue;
+                       } else
+                               die("unknown log destination '%s'", v);
+               }
                if (!strcmp(arg, "--export-all")) {
                        export_all_trees = 1;
                        continue;
@@ -1353,7 +1379,6 @@ int cmd_main(int argc, const char **argv)
                }
                if (!strcmp(arg, "--detach")) {
                        detach = 1;
-                       log_syslog = 1;
                        continue;
                }
                if (skip_prefix(arg, "--user=", &v)) {
@@ -1399,7 +1424,14 @@ int cmd_main(int argc, const char **argv)
                usage(daemon_usage);
        }
 
-       if (log_syslog) {
+       if (log_destination == LOG_DESTINATION_UNSET) {
+               if (inetd_mode || detach)
+                       log_destination = LOG_DESTINATION_SYSLOG;
+               else
+                       log_destination = LOG_DESTINATION_STDERR;
+       }
+
+       if (log_destination == LOG_DESTINATION_SYSLOG) {
                openlog("git-daemon", LOG_PID, LOG_DAEMON);
                set_die_routine(daemon_die);
        } else
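
In practice this means git-daemon now defaults to syslog only when --inetd or --detach is in effect and logs to stderr otherwise, and the new --log-destination=(stderr|syslog|none) option overrides that choice; for example, an inetd-spawned service that should not log at all can be started with "git daemon --inetd --log-destination=none".
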
index 8104603a3b36f2fd73761db34efde44d5938f68f..104f954a25700a4eee95507e7ebb7734e94b67c6 100644 (file)
@@ -92,6 +92,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
        int diff_unmerged_stage = revs->max_count;
        unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
                              ? CE_MATCH_RACY_IS_DIRTY : 0);
+       uint64_t start = getnanotime();
 
        diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
 
@@ -246,6 +247,7 @@ int run_diff_files(struct rev_info *revs, unsigned int option)
        }
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
+       trace_performance_since(start, "diff-files");
        return 0;
 }
 
@@ -302,7 +304,7 @@ static int get_stat_data(const struct cache_entry *ce,
 }
 
 static void show_new_file(struct rev_info *revs,
-                         const struct cache_entry *new,
+                         const struct cache_entry *new_file,
                          int cached, int match_missing)
 {
        const struct object_id *oid;
@@ -313,16 +315,16 @@ static void show_new_file(struct rev_info *revs,
         * New file in the index: it might actually be different in
         * the working tree.
         */
-       if (get_stat_data(new, &oid, &mode, cached, match_missing,
+       if (get_stat_data(new_file, &oid, &mode, cached, match_missing,
            &dirty_submodule, &revs->diffopt) < 0)
                return;
 
-       diff_index_show_file(revs, "+", new, oid, !is_null_oid(oid), mode, dirty_submodule);
+       diff_index_show_file(revs, "+", new_file, oid, !is_null_oid(oid), mode, dirty_submodule);
 }
 
 static int show_modified(struct rev_info *revs,
-                        const struct cache_entry *old,
-                        const struct cache_entry *new,
+                        const struct cache_entry *old_entry,
+                        const struct cache_entry *new_entry,
                         int report_missing,
                         int cached, int match_missing)
 {
@@ -330,47 +332,47 @@ static int show_modified(struct rev_info *revs,
        const struct object_id *oid;
        unsigned dirty_submodule = 0;
 
-       if (get_stat_data(new, &oid, &mode, cached, match_missing,
+       if (get_stat_data(new_entry, &oid, &mode, cached, match_missing,
                          &dirty_submodule, &revs->diffopt) < 0) {
                if (report_missing)
-                       diff_index_show_file(revs, "-", old,
-                                            &old->oid, 1, old->ce_mode,
+                       diff_index_show_file(revs, "-", old_entry,
+                                            &old_entry->oid, 1, old_entry->ce_mode,
                                             0);
                return -1;
        }
 
        if (revs->combine_merges && !cached &&
-           (oidcmp(oid, &old->oid) || oidcmp(&old->oid, &new->oid))) {
+           (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) {
                struct combine_diff_path *p;
-               int pathlen = ce_namelen(new);
+               int pathlen = ce_namelen(new_entry);
 
                p = xmalloc(combine_diff_path_size(2, pathlen));
                p->path = (char *) &p->parent[2];
                p->next = NULL;
-               memcpy(p->path, new->name, pathlen);
+               memcpy(p->path, new_entry->name, pathlen);
                p->path[pathlen] = 0;
                p->mode = mode;
                oidclr(&p->oid);
                memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
                p->parent[0].status = DIFF_STATUS_MODIFIED;
-               p->parent[0].mode = new->ce_mode;
-               oidcpy(&p->parent[0].oid, &new->oid);
+               p->parent[0].mode = new_entry->ce_mode;
+               oidcpy(&p->parent[0].oid, &new_entry->oid);
                p->parent[1].status = DIFF_STATUS_MODIFIED;
-               p->parent[1].mode = old->ce_mode;
-               oidcpy(&p->parent[1].oid, &old->oid);
+               p->parent[1].mode = old_entry->ce_mode;
+               oidcpy(&p->parent[1].oid, &old_entry->oid);
                show_combined_diff(p, 2, revs->dense_combined_merges, revs);
                free(p);
                return 0;
        }
 
-       oldmode = old->ce_mode;
-       if (mode == oldmode && !oidcmp(oid, &old->oid) && !dirty_submodule &&
+       oldmode = old_entry->ce_mode;
+       if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule &&
            !revs->diffopt.flags.find_copies_harder)
                return 0;
 
        diff_change(&revs->diffopt, oldmode, mode,
-                   &old->oid, oid, 1, !is_null_oid(oid),
-                   old->name, 0, dirty_submodule);
+                   &old_entry->oid, oid, 1, !is_null_oid(oid),
+                   old_entry->name, 0, dirty_submodule);
        return 0;
 }
 
@@ -512,6 +514,7 @@ static int diff_cache(struct rev_info *revs,
 int run_diff_index(struct rev_info *revs, int cached)
 {
        struct object_array_entry *ent;
+       uint64_t start = getnanotime();
 
        ent = revs->pending.objects;
        if (diff_cache(revs, &ent->item->oid, ent->name, cached))
@@ -521,6 +524,7 @@ int run_diff_index(struct rev_info *revs, int cached)
        diffcore_fix_diff_index(&revs->diffopt);
        diffcore_std(&revs->diffopt);
        diff_flush(&revs->diffopt);
+       trace_performance_since(start, "diff-index");
        return 0;
 }
 
diff --git a/diff.c b/diff.c
index e9d0e38123a59f2ca54674115df4140836b98f18..1289df4b1f9f395010e475073c2c5e5ce43976a7 100644 (file)
--- a/diff.c
+++ b/diff.c
@@ -1504,7 +1504,7 @@ struct diff_words_style_elem {
 
 struct diff_words_style {
        enum diff_words_type type;
-       struct diff_words_style_elem new, old, ctx;
+       struct diff_words_style_elem new_word, old_word, ctx;
        const char *newline;
 };
 
@@ -1655,12 +1655,12 @@ static void fn_out_diff_words_aux(void *priv, char *line, unsigned long len)
        }
        if (minus_begin != minus_end) {
                fn_out_diff_words_write_helper(diff_words->opt,
-                               &style->old, style->newline,
+                               &style->old_word, style->newline,
                                minus_end - minus_begin, minus_begin);
        }
        if (plus_begin != plus_end) {
                fn_out_diff_words_write_helper(diff_words->opt,
-                               &style->new, style->newline,
+                               &style->new_word, style->newline,
                                plus_end - plus_begin, plus_begin);
        }
 
@@ -1758,7 +1758,7 @@ static void diff_words_show(struct diff_words_data *diff_words)
                emit_diff_symbol(diff_words->opt, DIFF_SYMBOL_WORD_DIFF,
                                 line_prefix, strlen(line_prefix), 0);
                fn_out_diff_words_write_helper(diff_words->opt,
-                       &style->old, style->newline,
+                       &style->old_word, style->newline,
                        diff_words->minus.text.size,
                        diff_words->minus.text.ptr);
                diff_words->minus.text.size = 0;
@@ -1883,8 +1883,8 @@ static void init_diff_words_data(struct emit_callback *ecbdata,
        }
        if (want_color(o->use_color)) {
                struct diff_words_style *st = ecbdata->diff_words->style;
-               st->old.color = diff_get_color_opt(o, DIFF_FILE_OLD);
-               st->new.color = diff_get_color_opt(o, DIFF_FILE_NEW);
+               st->old_word.color = diff_get_color_opt(o, DIFF_FILE_OLD);
+               st->new_word.color = diff_get_color_opt(o, DIFF_FILE_NEW);
                st->ctx.color = diff_get_color_opt(o, DIFF_CONTEXT);
        }
 }
@@ -2045,11 +2045,10 @@ static void fn_out_consume(void *priv, char *line, unsigned long len)
        }
 }
 
-static char *pprint_rename(const char *a, const char *b)
+static void pprint_rename(struct strbuf *name, const char *a, const char *b)
 {
-       const char *old = a;
-       const char *new = b;
-       struct strbuf name = STRBUF_INIT;
+       const char *old_name = a;
+       const char *new_name = b;
        int pfx_length, sfx_length;
        int pfx_adjust_for_slash;
        int len_a = strlen(a);
@@ -2059,24 +2058,24 @@ static char *pprint_rename(const char *a, const char *b)
        int qlen_b = quote_c_style(b, NULL, NULL, 0);
 
        if (qlen_a || qlen_b) {
-               quote_c_style(a, &name, NULL, 0);
-               strbuf_addstr(&name, " => ");
-               quote_c_style(b, &name, NULL, 0);
-               return strbuf_detach(&name, NULL);
+               quote_c_style(a, name, NULL, 0);
+               strbuf_addstr(name, " => ");
+               quote_c_style(b, name, NULL, 0);
+               return;
        }
 
        /* Find common prefix */
        pfx_length = 0;
-       while (*old && *new && *old == *new) {
-               if (*old == '/')
-                       pfx_length = old - a + 1;
-               old++;
-               new++;
+       while (*old_name && *new_name && *old_name == *new_name) {
+               if (*old_name == '/')
+                       pfx_length = old_name - a + 1;
+               old_name++;
+               new_name++;
        }
 
        /* Find common suffix */
-       old = a + len_a;
-       new = b + len_b;
+       old_name = a + len_a;
+       new_name = b + len_b;
        sfx_length = 0;
        /*
         * If there is a common prefix, it must end in a slash.  In
@@ -2087,13 +2086,13 @@ static char *pprint_rename(const char *a, const char *b)
         * underrun the input strings.
         */
        pfx_adjust_for_slash = (pfx_length ? 1 : 0);
-       while (a + pfx_length - pfx_adjust_for_slash <= old &&
-              b + pfx_length - pfx_adjust_for_slash <= new &&
-              *old == *new) {
-               if (*old == '/')
-                       sfx_length = len_a - (old - a);
-               old--;
-               new--;
+       while (a + pfx_length - pfx_adjust_for_slash <= old_name &&
+              b + pfx_length - pfx_adjust_for_slash <= new_name &&
+              *old_name == *new_name) {
+               if (*old_name == '/')
+                       sfx_length = len_a - (old_name - a);
+               old_name--;
+               new_name--;
        }
 
        /*
@@ -2109,19 +2108,18 @@ static char *pprint_rename(const char *a, const char *b)
        if (b_midlen < 0)
                b_midlen = 0;
 
-       strbuf_grow(&name, pfx_length + a_midlen + b_midlen + sfx_length + 7);
+       strbuf_grow(name, pfx_length + a_midlen + b_midlen + sfx_length + 7);
        if (pfx_length + sfx_length) {
-               strbuf_add(&name, a, pfx_length);
-               strbuf_addch(&name, '{');
+               strbuf_add(name, a, pfx_length);
+               strbuf_addch(name, '{');
        }
-       strbuf_add(&name, a + pfx_length, a_midlen);
-       strbuf_addstr(&name, " => ");
-       strbuf_add(&name, b + pfx_length, b_midlen);
+       strbuf_add(name, a + pfx_length, a_midlen);
+       strbuf_addstr(name, " => ");
+       strbuf_add(name, b + pfx_length, b_midlen);
        if (pfx_length + sfx_length) {
-               strbuf_addch(&name, '}');
-               strbuf_add(&name, a + len_a - sfx_length, sfx_length);
+               strbuf_addch(name, '}');
+               strbuf_add(name, a + len_a - sfx_length, sfx_length);
        }
-       return strbuf_detach(&name, NULL);
 }
 
 struct diffstat_t {
@@ -2131,6 +2129,7 @@ struct diffstat_t {
                char *from_name;
                char *name;
                char *print_name;
+               const char *comments;
                unsigned is_unmerged:1;
                unsigned is_binary:1;
                unsigned is_renamed:1;
@@ -2197,23 +2196,20 @@ static void show_graph(struct strbuf *out, char ch, int cnt,
 
 static void fill_print_name(struct diffstat_file *file)
 {
-       char *pname;
+       struct strbuf pname = STRBUF_INIT;
 
        if (file->print_name)
                return;
 
-       if (!file->is_renamed) {
-               struct strbuf buf = STRBUF_INIT;
-               if (quote_c_style(file->name, &buf, NULL, 0)) {
-                       pname = strbuf_detach(&buf, NULL);
-               } else {
-                       pname = file->name;
-                       strbuf_release(&buf);
-               }
-       } else {
-               pname = pprint_rename(file->from_name, file->name);
-       }
-       file->print_name = pname;
+       if (file->is_renamed)
+               pprint_rename(&pname, file->from_name, file->name);
+       else
+               quote_c_style(file->name, &pname, NULL, 0);
+
+       if (file->comments)
+               strbuf_addf(&pname, " (%s)", file->comments);
+
+       file->print_name = strbuf_detach(&pname, NULL);
 }
 
 static void print_stat_summary_inserts_deletes(struct diff_options *options,
@@ -2594,14 +2590,14 @@ struct dirstat_dir {
 static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
                unsigned long changed, const char *base, int baselen)
 {
-       unsigned long this_dir = 0;
+       unsigned long sum_changes = 0;
        unsigned int sources = 0;
        const char *line_prefix = diff_line_prefix(opt);
 
        while (dir->nr) {
                struct dirstat_file *f = dir->files;
                int namelen = strlen(f->name);
-               unsigned long this;
+               unsigned long changes;
                char *slash;
 
                if (namelen < baselen)
@@ -2611,15 +2607,15 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
                slash = strchr(f->name + baselen, '/');
                if (slash) {
                        int newbaselen = slash + 1 - f->name;
-                       this = gather_dirstat(opt, dir, changed, f->name, newbaselen);
+                       changes = gather_dirstat(opt, dir, changed, f->name, newbaselen);
                        sources++;
                } else {
-                       this = f->changed;
+                       changes = f->changed;
                        dir->files++;
                        dir->nr--;
                        sources += 2;
                }
-               this_dir += this;
+               sum_changes += changes;
        }
 
        /*
@@ -2629,8 +2625,8 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
         *    under this directory (sources == 1).
         */
        if (baselen && sources != 1) {
-               if (this_dir) {
-                       int permille = this_dir * 1000 / changed;
+               if (sum_changes) {
+                       int permille = sum_changes * 1000 / changed;
                        if (permille >= dir->permille) {
                                fprintf(opt->file, "%s%4d.%01d%% %.*s\n", line_prefix,
                                        permille / 10, permille % 10, baselen, base);
@@ -2639,7 +2635,7 @@ static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
                        }
                }
        }
-       return this_dir;
+       return sum_changes;
 }
 
 static int dirstat_compare(const void *_a, const void *_b)
@@ -2797,8 +2793,7 @@ static void free_diffstat_info(struct diffstat_t *diffstat)
        int i;
        for (i = 0; i < diffstat->nr; i++) {
                struct diffstat_file *f = diffstat->files[i];
-               if (f->name != f->print_name)
-                       free(f->print_name);
+               free(f->print_name);
                free(f->name);
                free(f->from_name);
                free(f);
@@ -3248,6 +3243,32 @@ static void builtin_diff(const char *name_a,
        return;
 }
 
+static char *get_compact_summary(const struct diff_filepair *p, int is_renamed)
+{
+       if (!is_renamed) {
+               if (p->status == DIFF_STATUS_ADDED) {
+                       if (S_ISLNK(p->two->mode))
+                               return "new +l";
+                       else if ((p->two->mode & 0777) == 0755)
+                               return "new +x";
+                       else
+                               return "new";
+               } else if (p->status == DIFF_STATUS_DELETED)
+                       return "gone";
+       }
+       if (S_ISLNK(p->one->mode) && !S_ISLNK(p->two->mode))
+               return "mode -l";
+       else if (!S_ISLNK(p->one->mode) && S_ISLNK(p->two->mode))
+               return "mode +l";
+       else if ((p->one->mode & 0777) == 0644 &&
+                (p->two->mode & 0777) == 0755)
+               return "mode +x";
+       else if ((p->one->mode & 0777) == 0755 &&
+                (p->two->mode & 0777) == 0644)
+               return "mode -x";
+       return NULL;
+}
+
 static void builtin_diffstat(const char *name_a, const char *name_b,
                             struct diff_filespec *one,
                             struct diff_filespec *two,
@@ -3267,6 +3288,8 @@ static void builtin_diffstat(const char *name_a, const char *name_b,
 
        data = diffstat_add(diffstat, name_a, name_b);
        data->is_interesting = p->status != DIFF_STATUS_UNKNOWN;
+       if (o->flags.stat_with_summary)
+               data->comments = get_compact_summary(p, data->is_renamed);
 
        if (!one || !two) {
                data->is_unmerged = 1;
@@ -3520,13 +3543,13 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
 {
        int size_only = flags & CHECK_SIZE_ONLY;
        int err = 0;
+       int conv_flags = global_conv_flags_eol;
        /*
         * demote FAIL to WARN to allow inspecting the situation
         * instead of refusing.
         */
-       enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL
-                                   ? SAFE_CRLF_WARN
-                                   : safe_crlf);
+       if (conv_flags & CONV_EOL_RNDTRP_DIE)
+               conv_flags = CONV_EOL_RNDTRP_WARN;
 
        if (!DIFF_FILE_VALID(s))
                die("internal error: asking to populate invalid file.");
@@ -3603,7 +3626,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
                /*
                 * Convert from working tree format to canonical git format
                 */
-               if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) {
+               if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) {
                        size_t size = 0;
                        munmap(s->data, s->size);
                        s->should_munmap = 0;
@@ -3615,7 +3638,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
        else {
                enum object_type type;
                if (size_only || (flags & CHECK_BINARY)) {
-                       type = sha1_object_info(s->oid.hash, &s->size);
+                       type = oid_object_info(&s->oid, &s->size);
                        if (type < 0)
                                die("unable to read %s",
                                    oid_to_hex(&s->oid));
@@ -3626,7 +3649,7 @@ int diff_populate_filespec(struct diff_filespec *s, unsigned int flags)
                                return 0;
                        }
                }
-               s->data = read_sha1_file(s->oid.hash, &type, &s->size);
+               s->data = read_object_file(&s->oid, &type, &s->size);
                if (!s->data)
                        die("unable to read %s", oid_to_hex(&s->oid));
                s->should_free = 1;
@@ -3660,15 +3683,15 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp,
                           int mode)
 {
        struct strbuf buf = STRBUF_INIT;
-       struct strbuf template = STRBUF_INIT;
+       struct strbuf tempfile = STRBUF_INIT;
        char *path_dup = xstrdup(path);
        const char *base = basename(path_dup);
 
        /* Generate "XXXXXX_basename.ext" */
-       strbuf_addstr(&template, "XXXXXX_");
-       strbuf_addstr(&template, base);
+       strbuf_addstr(&tempfile, "XXXXXX_");
+       strbuf_addstr(&tempfile, base);
 
-       temp->tempfile = mks_tempfile_ts(template.buf, strlen(base) + 1);
+       temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
        if (!temp->tempfile)
                die_errno("unable to create temp-file");
        if (convert_to_working_tree(path,
@@ -3683,7 +3706,7 @@ static void prep_temp_blob(const char *path, struct diff_tempfile *temp,
        oid_to_hex_r(temp->hex, oid);
        xsnprintf(temp->mode, sizeof(temp->mode), "%06o", mode);
        strbuf_release(&buf);
-       strbuf_release(&template);
+       strbuf_release(&tempfile);
        free(path_dup);
 }
 
@@ -3811,7 +3834,7 @@ static int similarity_index(struct diff_filepair *p)
 static const char *diff_abbrev_oid(const struct object_id *oid, int abbrev)
 {
        if (startup_info->have_repository)
-               return find_unique_abbrev(oid->hash, abbrev);
+               return find_unique_abbrev(oid, abbrev);
        else {
                char *hex = oid_to_hex(oid);
                if (abbrev < 0)
@@ -4086,6 +4109,7 @@ void diff_setup(struct diff_options *options)
        options->interhunkcontext = diff_interhunk_context_default;
        options->ws_error_highlight = ws_error_highlight_default;
        options->flags.rename_empty = 1;
+       options->objfind = NULL;
 
        /* pathchange left =NULL by default */
        options->change = diff_change;
@@ -4110,22 +4134,20 @@ void diff_setup(struct diff_options *options)
 
 void diff_setup_done(struct diff_options *options)
 {
-       int count = 0;
+       unsigned check_mask = DIFF_FORMAT_NAME |
+                             DIFF_FORMAT_NAME_STATUS |
+                             DIFF_FORMAT_CHECKDIFF |
+                             DIFF_FORMAT_NO_OUTPUT;
 
        if (options->set_default)
                options->set_default(options);
 
-       if (options->output_format & DIFF_FORMAT_NAME)
-               count++;
-       if (options->output_format & DIFF_FORMAT_NAME_STATUS)
-               count++;
-       if (options->output_format & DIFF_FORMAT_CHECKDIFF)
-               count++;
-       if (options->output_format & DIFF_FORMAT_NO_OUTPUT)
-               count++;
-       if (count > 1)
+       if (HAS_MULTI_BITS(options->output_format & check_mask))
                die(_("--name-only, --name-status, --check and -s are mutually exclusive"));
 
+       if (HAS_MULTI_BITS(options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK))
+               die(_("-G, -S and --find-object are mutually exclusive"));
+
        /*
         * Most of the time we can say "there are changes"
         * only by checking if there are changed paths, but
@@ -4175,7 +4197,7 @@ void diff_setup_done(struct diff_options *options)
        /*
         * Also pickaxe would not work very well if you do not say recursive
         */
-       if (options->pickaxe)
+       if (options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK)
                options->flags.recursive = 1;
        /*
         * When patches are generated, submodules diffed against the work tree
@@ -4489,6 +4511,23 @@ static int parse_ws_error_highlight_opt(struct diff_options *opt, const char *ar
        return 1;
 }
 
+static int parse_objfind_opt(struct diff_options *opt, const char *arg)
+{
+       struct object_id oid;
+
+       if (get_oid(arg, &oid))
+               return error("unable to resolve '%s'", arg);
+
+       if (!opt->objfind)
+               opt->objfind = xcalloc(1, sizeof(*opt->objfind));
+
+       opt->pickaxe_opts |= DIFF_PICKAXE_KIND_OBJFIND;
+       opt->flags.recursive = 1;
+       opt->flags.tree_in_recursive = 1;
+       oidset_insert(opt->objfind, &oid);
+       return 1;
+}
+
 int diff_opt_parse(struct diff_options *options,
                   const char **av, int ac, const char *prefix)
 {
@@ -4537,6 +4576,11 @@ int diff_opt_parse(struct diff_options *options,
        else if (starts_with(arg, "--stat"))
                /* --stat, --stat-width, --stat-name-width, or --stat-count */
                return stat_opt(options, av);
+       else if (!strcmp(arg, "--compact-summary")) {
+                options->flags.stat_with_summary = 1;
+                options->output_format |= DIFF_FORMAT_DIFFSTAT;
+       } else if (!strcmp(arg, "--no-compact-summary"))
+                options->flags.stat_with_summary = 0;
 
        /* renames options */
        else if (starts_with(arg, "-B") ||
@@ -4736,7 +4780,8 @@ int diff_opt_parse(struct diff_options *options,
        else if ((argcount = short_opt('O', av, &optarg))) {
                options->orderfile = prefix_filename(prefix, optarg);
                return argcount;
-       }
+       } else if (skip_prefix(arg, "--find-object=", &arg))
+               return parse_objfind_opt(options, arg);
        else if ((argcount = parse_long_opt("diff-filter", av, &optarg))) {
                int offending = parse_diff_filter_opt(optarg, options);
                if (offending)
@@ -5224,10 +5269,12 @@ static void show_rename_copy(struct diff_options *opt, const char *renamecopy,
                struct diff_filepair *p)
 {
        struct strbuf sb = STRBUF_INIT;
-       char *names = pprint_rename(p->one->path, p->two->path);
+       struct strbuf names = STRBUF_INIT;
+
+       pprint_rename(&names, p->one->path, p->two->path);
        strbuf_addf(&sb, " %s %s (%d%%)\n",
-                       renamecopy, names, similarity_index(p));
-       free(names);
+                   renamecopy, names.buf, similarity_index(p));
+       strbuf_release(&names);
        emit_diff_symbol(opt, DIFF_SYMBOL_SUMMARY,
                                 sb.buf, sb.len, 0);
        show_mode_change(opt, p, 0);
@@ -5784,7 +5831,7 @@ void diffcore_std(struct diff_options *options)
                if (options->break_opt != -1)
                        diffcore_merge_broken();
        }
-       if (options->pickaxe)
+       if (options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK)
                diffcore_pickaxe(options);
        if (options->orderfile)
                diffcore_order(options->orderfile);
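
The new get_compact_summary() above feeds fill_print_name(), which appends the annotation in parentheses after the file name whenever --compact-summary sets flags.stat_with_summary. Below is a standalone sketch of that classification, kept outside git's types on purpose: compact_note() and its two status flags are invented names, and only the mode checks mirror the patch.

#include <stdio.h>
#include <sys/stat.h>

/*
 * Standalone sketch of the classification done by get_compact_summary():
 * old/new modes plus added/deleted status are turned into the short
 * annotations printed by "git diff --compact-summary".
 */
static const char *compact_note(unsigned old_mode, unsigned new_mode,
				int is_new, int is_gone)
{
	if (is_new) {
		if (S_ISLNK(new_mode))
			return "new +l";
		if ((new_mode & 0777) == 0755)
			return "new +x";
		return "new";
	}
	if (is_gone)
		return "gone";
	if (S_ISLNK(old_mode) && !S_ISLNK(new_mode))
		return "mode -l";
	if (!S_ISLNK(old_mode) && S_ISLNK(new_mode))
		return "mode +l";
	if ((old_mode & 0777) == 0644 && (new_mode & 0777) == 0755)
		return "mode +x";
	if ((old_mode & 0777) == 0755 && (new_mode & 0777) == 0644)
		return "mode -x";
	return NULL;
}

int main(void)
{
	/* a regular file that gained the executable bit */
	printf("%s\n", compact_note(0100644, 0100755, 0, 0));
	return 0;
}
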
diff --git a/diff.h b/diff.h
index 7cf276f07733afdf14618a8c47fce33c89a41e24..d29560f822ca0ee4738f769e2feb3939851d7ff1 100644 (file)
--- a/diff.h
+++ b/diff.h
@@ -7,6 +7,7 @@
 #include "tree-walk.h"
 #include "pathspec.h"
 #include "object.h"
+#include "oidset.h"
 
 struct rev_info;
 struct diff_options;
@@ -91,8 +92,8 @@ struct diff_flags {
        unsigned override_submodule_config:1;
        unsigned dirstat_by_line:1;
        unsigned funccontext:1;
-       unsigned pickaxe_ignore_case:1;
        unsigned default_follow_renames:1;
+       unsigned stat_with_summary:1;
 };
 
 static inline void diff_flags_or(struct diff_flags *a,
@@ -146,7 +147,7 @@ struct diff_options {
        int skip_stat_unmatch;
        int line_termination;
        int output_format;
-       int pickaxe_opts;
+       unsigned pickaxe_opts;
        int rename_score;
        int rename_limit;
        int needed_rename_limit;
@@ -178,6 +179,8 @@ struct diff_options {
        enum diff_words_type word_diff;
        enum diff_submodule_format submodule_format;
 
+       struct oidset *objfind;
+
        /* this is set by diffcore for DIFF_FORMAT_PATCH */
        int found_changes;
 
@@ -330,6 +333,13 @@ extern void diff_setup_done(struct diff_options *);
 
 #define DIFF_PICKAXE_KIND_S    4 /* traditional plumbing counter */
 #define DIFF_PICKAXE_KIND_G    8 /* grep in the patch */
+#define DIFF_PICKAXE_KIND_OBJFIND      16 /* specific object IDs */
+
+#define DIFF_PICKAXE_KINDS_MASK (DIFF_PICKAXE_KIND_S | \
+                                DIFF_PICKAXE_KIND_G | \
+                                DIFF_PICKAXE_KIND_OBJFIND)
+
+#define DIFF_PICKAXE_IGNORE_CASE       32
 
 extern void diffcore_std(struct diff_options *);
 extern void diffcore_fix_diff_index(struct diff_options *);
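
The defines above let one unsigned word carry both which pickaxe modes are active (-S, -G, --find-object) and the IGNORE_CASE flag, so diff_setup_done() can reject combined modes with a single mask test. A minimal sketch, assuming HAS_MULTI_BITS is the usual "(v & (v - 1))" check for more than one bit set:

#include <stdio.h>

#define KIND_S        4
#define KIND_G        8
#define KIND_OBJFIND 16
#define KINDS_MASK   (KIND_S | KIND_G | KIND_OBJFIND)
#define IGNORE_CASE  32

#define HAS_MULTI_BITS(i) ((i) & ((i) - 1))

int main(void)
{
	unsigned pickaxe_opts = KIND_S | KIND_OBJFIND | IGNORE_CASE;

	if (pickaxe_opts & KINDS_MASK)
		printf("some pickaxe mode is active\n");
	if (HAS_MULTI_BITS(pickaxe_opts & KINDS_MASK))
		printf("-G, -S and --find-object are mutually exclusive\n");
	return 0;
}
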
index ebe70fb068519be86437a11b980d804be0534c75..c83d45a0470d412c29f8c392e84bb010150dbb2a 100644 (file)
@@ -48,16 +48,16 @@ struct spanhash_top {
 
 static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
 {
-       struct spanhash_top *new;
+       struct spanhash_top *new_spanhash;
        int i;
        int osz = 1 << orig->alloc_log2;
        int sz = osz << 1;
 
-       new = xmalloc(st_add(sizeof(*orig),
+       new_spanhash = xmalloc(st_add(sizeof(*orig),
                             st_mult(sizeof(struct spanhash), sz)));
-       new->alloc_log2 = orig->alloc_log2 + 1;
-       new->free = INITIAL_FREE(new->alloc_log2);
-       memset(new->data, 0, sizeof(struct spanhash) * sz);
+       new_spanhash->alloc_log2 = orig->alloc_log2 + 1;
+       new_spanhash->free = INITIAL_FREE(new_spanhash->alloc_log2);
+       memset(new_spanhash->data, 0, sizeof(struct spanhash) * sz);
        for (i = 0; i < osz; i++) {
                struct spanhash *o = &(orig->data[i]);
                int bucket;
@@ -65,11 +65,11 @@ static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
                        continue;
                bucket = o->hashval & (sz - 1);
                while (1) {
-                       struct spanhash *h = &(new->data[bucket++]);
+                       struct spanhash *h = &(new_spanhash->data[bucket++]);
                        if (!h->cnt) {
                                h->hashval = o->hashval;
                                h->cnt = o->cnt;
-                               new->free--;
+                               new_spanhash->free--;
                                break;
                        }
                        if (sz <= bucket)
@@ -77,7 +77,7 @@ static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
                }
        }
        free(orig);
-       return new;
+       return new_spanhash;
 }
 
 static struct spanhash_top *add_spanhash(struct spanhash_top *top,
index 9476bd21081f456be6ae6412ea591e11dde50686..239ce5122b3a45bec211e0c20113385c0e37805d 100644 (file)
@@ -124,13 +124,20 @@ static int pickaxe_match(struct diff_filepair *p, struct diff_options *o,
        mmfile_t mf1, mf2;
        int ret;
 
-       if (!o->pickaxe[0])
-               return 0;
-
        /* ignore unmerged */
        if (!DIFF_FILE_VALID(p->one) && !DIFF_FILE_VALID(p->two))
                return 0;
 
+       if (o->objfind) {
+               return  (DIFF_FILE_VALID(p->one) &&
+                        oidset_contains(o->objfind, &p->one->oid)) ||
+                       (DIFF_FILE_VALID(p->two) &&
+                        oidset_contains(o->objfind, &p->two->oid));
+       }
+
+       if (!o->pickaxe[0])
+               return 0;
+
        if (o->flags.allow_textconv) {
                textconv_one = get_textconv(p->one);
                textconv_two = get_textconv(p->two);
@@ -222,33 +229,34 @@ void diffcore_pickaxe(struct diff_options *o)
 
        if (opts & (DIFF_PICKAXE_REGEX | DIFF_PICKAXE_KIND_G)) {
                int cflags = REG_EXTENDED | REG_NEWLINE;
-               if (o->flags.pickaxe_ignore_case)
+               if (o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE)
                        cflags |= REG_ICASE;
                regcomp_or_die(&regex, needle, cflags);
                regexp = &regex;
-       } else if (o->flags.pickaxe_ignore_case &&
-                  has_non_ascii(needle)) {
-               struct strbuf sb = STRBUF_INIT;
-               int cflags = REG_NEWLINE | REG_ICASE;
-
-               basic_regex_quote_buf(&sb, needle);
-               regcomp_or_die(&regex, sb.buf, cflags);
-               strbuf_release(&sb);
-               regexp = &regex;
-       } else {
-               kws = kwsalloc(o->flags.pickaxe_ignore_case
-                              ? tolower_trans_tbl : NULL);
-               kwsincr(kws, needle, strlen(needle));
-               kwsprep(kws);
+       } else if (opts & DIFF_PICKAXE_KIND_S) {
+               if (o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE &&
+                   has_non_ascii(needle)) {
+                       struct strbuf sb = STRBUF_INIT;
+                       int cflags = REG_NEWLINE | REG_ICASE;
+
+                       basic_regex_quote_buf(&sb, needle);
+                       regcomp_or_die(&regex, sb.buf, cflags);
+                       strbuf_release(&sb);
+                       regexp = &regex;
+               } else {
+                       kws = kwsalloc(o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE
+                                      ? tolower_trans_tbl : NULL);
+                       kwsincr(kws, needle, strlen(needle));
+                       kwsprep(kws);
+               }
        }
 
-       /* Might want to warn when both S and G are on; I don't care... */
        pickaxe(&diff_queued_diff, o, regexp, kws,
                (opts & DIFF_PICKAXE_KIND_G) ? diff_grep : has_changes);
 
        if (regexp)
                regfree(regexp);
-       else
+       if (kws)
                kwsfree(kws);
        return;
 }
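
The hunk above makes pickaxe_match() answer --find-object before any string or regex machinery runs: a filepair matches when either side is valid and its blob ID is in the requested set. A stand-in sketch of that rule, with a plain array of hex strings in place of git's oidset and invented struct and function names:

#include <stdio.h>
#include <string.h>

/* Invented stand-in for a diff_filespec: validity flag plus a hex id. */
struct side {
	int valid;
	const char *oid;
};

static int in_set(const char **set, int n, const char *oid)
{
	int i;

	for (i = 0; i < n; i++)
		if (!strcmp(set[i], oid))
			return 1;
	return 0;
}

/* Mirrors the --find-object test added to pickaxe_match(): either side
 * being a wanted blob makes the pair interesting. */
static int objfind_match(const char **set, int n,
			 const struct side *one, const struct side *two)
{
	return (one->valid && in_set(set, n, one->oid)) ||
	       (two->valid && in_set(set, n, two->oid));
}

int main(void)
{
	const char *wanted[] = { "59d8" };
	struct side pre = { 1, "13ab" };
	struct side post = { 1, "59d8" };

	printf("match: %d\n", objfind_match(wanted, 1, &pre, &post));
	return 0;
}
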
index 245e999fe5c6e9da62303423570d07aae5a88897..0b7e4989a87214faa22e4f8ec75a719d3fd857ae 100644 (file)
@@ -57,8 +57,8 @@ static int add_rename_dst(struct diff_filespec *two)
        ALLOC_GROW(rename_dst, rename_dst_nr + 1, rename_dst_alloc);
        rename_dst_nr++;
        if (first < rename_dst_nr)
-               memmove(rename_dst + first + 1, rename_dst + first,
-                       (rename_dst_nr - first - 1) * sizeof(*rename_dst));
+               MOVE_ARRAY(rename_dst + first + 1, rename_dst + first,
+                          rename_dst_nr - first - 1);
        rename_dst[first].two = alloc_filespec(two->path);
        fill_filespec(rename_dst[first].two, &two->oid, two->oid_valid,
                      two->mode);
@@ -98,8 +98,8 @@ static struct diff_rename_src *register_rename_src(struct diff_filepair *p)
        ALLOC_GROW(rename_src, rename_src_nr + 1, rename_src_alloc);
        rename_src_nr++;
        if (first < rename_src_nr)
-               memmove(rename_src + first + 1, rename_src + first,
-                       (rename_src_nr - first - 1) * sizeof(*rename_src));
+               MOVE_ARRAY(rename_src + first + 1, rename_src + first,
+                          rename_src_nr - first - 1);
        rename_src[first].p = p;
        rename_src[first].score = score;
        return &(rename_src[first]);
@@ -260,8 +260,8 @@ static unsigned int hash_filespec(struct diff_filespec *filespec)
        if (!filespec->oid_valid) {
                if (diff_populate_filespec(filespec, 0))
                        return 0;
-               hash_sha1_file(filespec->data, filespec->size, "blob",
-                              filespec->oid.hash);
+               hash_object_file(filespec->data, filespec->size, "blob",
+                                &filespec->oid);
        }
        return sha1hash(filespec->oid.hash);
 }
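
The memmove-to-MOVE_ARRAY conversions in add_rename_dst() and register_rename_src() exist so the count is given in elements and the element size comes from the destination pointer. A simplified stand-in (the real macro additionally guards the multiplication against overflow):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for MOVE_ARRAY: element count in, byte math hidden. */
#define MOVE_ARRAY_SKETCH(dst, src, n) \
	memmove((dst), (src), (n) * sizeof(*(dst)))

int main(void)
{
	int arr[5] = { 1, 2, 4, 5, 0 };

	/* open a slot at index 2, the way add_rename_dst() shifts its array */
	MOVE_ARRAY_SKETCH(arr + 3, arr + 2, 2);
	arr[2] = 3;

	printf("%d %d %d %d %d\n", arr[0], arr[1], arr[2], arr[3], arr[4]);
	return 0;
}
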
diff --git a/dir.c b/dir.c
index 7c4b45e30e0ac87527ff0ef3fd8c90670a1e2064..63a917be45db99c278cc86012ff74718043dc63d 100644 (file)
--- a/dir.c
+++ b/dir.c
@@ -231,12 +231,10 @@ int within_depth(const char *name, int namelen,
  *     1 along with { data, size } of the (possibly augmented) buffer
  *       when successful.
  *
- * Optionally updates the given sha1_stat with the given OID (when valid).
+ * Optionally updates the given oid_stat with the given OID (when valid).
  */
-static int do_read_blob(const struct object_id *oid,
-                       struct sha1_stat *sha1_stat,
-                       size_t *size_out,
-                       char **data_out)
+static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
+                       size_t *size_out, char **data_out)
 {
        enum object_type type;
        unsigned long sz;
@@ -245,15 +243,15 @@ static int do_read_blob(const struct object_id *oid,
        *size_out = 0;
        *data_out = NULL;
 
-       data = read_sha1_file(oid->hash, &type, &sz);
+       data = read_object_file(oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return -1;
        }
 
-       if (sha1_stat) {
-               memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat));
-               hashcpy(sha1_stat->sha1, oid->hash);
+       if (oid_stat) {
+               memset(&oid_stat->stat, 0, sizeof(oid_stat->stat));
+               oidcpy(&oid_stat->oid, oid);
        }
 
        if (sz == 0) {
@@ -654,9 +652,8 @@ void add_exclude(const char *string, const char *base,
 
 static int read_skip_worktree_file_from_index(const struct index_state *istate,
                                              const char *path,
-                                             size_t *size_out,
-                                             char **data_out,
-                                             struct sha1_stat *sha1_stat)
+                                             size_t *size_out, char **data_out,
+                                             struct oid_stat *oid_stat)
 {
        int pos, len;
 
@@ -667,7 +664,7 @@ static int read_skip_worktree_file_from_index(const struct index_state *istate,
        if (!ce_skip_worktree(istate->cache[pos]))
                return -1;
 
-       return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out);
+       return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out);
 }
 
 /*
@@ -747,8 +744,8 @@ static struct untracked_cache_dir *lookup_untracked(struct untracked_cache *uc,
        FLEX_ALLOC_MEM(d, name, name, len);
 
        ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
-       memmove(dir->dirs + first + 1, dir->dirs + first,
-               (dir->dirs_nr - first) * sizeof(*dir->dirs));
+       MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first,
+                  dir->dirs_nr - first);
        dir->dirs_nr++;
        dir->dirs[first] = d;
        return d;
@@ -774,7 +771,16 @@ static void invalidate_directory(struct untracked_cache *uc,
                                 struct untracked_cache_dir *dir)
 {
        int i;
-       uc->dir_invalidated++;
+
+       /*
+        * Invalidation increment here is just roughly correct. If
+        * untracked_nr or any of dirs[].recurse is non-zero, we
+        * should increment dir_invalidated too. But that's more
+        * expensive to do.
+        */
+       if (dir->valid)
+               uc->dir_invalidated++;
+
        dir->valid = 0;
        dir->untracked_nr = 0;
        for (i = 0; i < dir->dirs_nr; i++)
@@ -795,9 +801,8 @@ static int add_excludes_from_buffer(char *buf, size_t size,
  * ss_valid is non-zero, "ss" must contain good value as input.
  */
 static int add_excludes(const char *fname, const char *base, int baselen,
-                       struct exclude_list *el,
-                       struct index_state *istate,
-                       struct sha1_stat *sha1_stat)
+                       struct exclude_list *el, struct index_state *istate,
+                       struct oid_stat *oid_stat)
 {
        struct stat st;
        int r;
@@ -815,16 +820,16 @@ static int add_excludes(const char *fname, const char *base, int baselen,
                        return -1;
                r = read_skip_worktree_file_from_index(istate, fname,
                                                       &size, &buf,
-                                                      sha1_stat);
+                                                      oid_stat);
                if (r != 1)
                        return r;
        } else {
                size = xsize_t(st.st_size);
                if (size == 0) {
-                       if (sha1_stat) {
-                               fill_stat_data(&sha1_stat->stat, &st);
-                               hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN);
-                               sha1_stat->valid = 1;
+                       if (oid_stat) {
+                               fill_stat_data(&oid_stat->stat, &st);
+                               oidcpy(&oid_stat->oid, &empty_blob_oid);
+                               oid_stat->valid = 1;
                        }
                        close(fd);
                        return 0;
@@ -837,22 +842,23 @@ static int add_excludes(const char *fname, const char *base, int baselen,
                }
                buf[size++] = '\n';
                close(fd);
-               if (sha1_stat) {
+               if (oid_stat) {
                        int pos;
-                       if (sha1_stat->valid &&
-                           !match_stat_data_racy(istate, &sha1_stat->stat, &st))
+                       if (oid_stat->valid &&
+                           !match_stat_data_racy(istate, &oid_stat->stat, &st))
                                ; /* no content change, ss->sha1 still good */
                        else if (istate &&
                                 (pos = index_name_pos(istate, fname, strlen(fname))) >= 0 &&
                                 !ce_stage(istate->cache[pos]) &&
                                 ce_uptodate(istate->cache[pos]) &&
                                 !would_convert_to_git(istate, fname))
-                               hashcpy(sha1_stat->sha1,
-                                       istate->cache[pos]->oid.hash);
+                               oidcpy(&oid_stat->oid,
+                                      &istate->cache[pos]->oid);
                        else
-                               hash_sha1_file(buf, size, "blob", sha1_stat->sha1);
-                       fill_stat_data(&sha1_stat->stat, &st);
-                       sha1_stat->valid = 1;
+                               hash_object_file(buf, size, "blob",
+                                                &oid_stat->oid);
+                       fill_stat_data(&oid_stat->stat, &st);
+                       oid_stat->valid = 1;
                }
        }
 
@@ -930,7 +936,7 @@ struct exclude_list *add_exclude_list(struct dir_struct *dir,
  * Used to set up core.excludesfile and .git/info/exclude lists.
  */
 static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
-                                    struct sha1_stat *sha1_stat)
+                                    struct oid_stat *oid_stat)
 {
        struct exclude_list *el;
        /*
@@ -941,7 +947,7 @@ static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
        if (!dir->untracked)
                dir->unmanaged_exclude_files++;
        el = add_exclude_list(dir, EXC_FILE, fname);
-       if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0)
+       if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
                die("cannot use %s as an exclude file", fname);
 }
 
@@ -1180,7 +1186,7 @@ static void prep_exclude(struct dir_struct *dir,
 
        while (current < baselen) {
                const char *cp;
-               struct sha1_stat sha1_stat;
+               struct oid_stat oid_stat;
 
                stk = xcalloc(1, sizeof(*stk));
                if (current < 0) {
@@ -1223,8 +1229,8 @@ static void prep_exclude(struct dir_struct *dir,
                }
 
                /* Try to read per-directory file */
-               hashclr(sha1_stat.sha1);
-               sha1_stat.valid = 0;
+               oidclr(&oid_stat.oid);
+               oid_stat.valid = 0;
                if (dir->exclude_per_dir &&
                    /*
                     * If we know that no files have been added in
@@ -1252,7 +1258,7 @@ static void prep_exclude(struct dir_struct *dir,
                        strbuf_addstr(&sb, dir->exclude_per_dir);
                        el->src = strbuf_detach(&sb, NULL);
                        add_excludes(el->src, el->src, stk->baselen, el, istate,
-                                    untracked ? &sha1_stat : NULL);
+                                    untracked ? &oid_stat : NULL);
                }
                /*
                 * NEEDSWORK: when untracked cache is enabled, prep_exclude()
@@ -1269,9 +1275,9 @@ static void prep_exclude(struct dir_struct *dir,
                 * order, though, if you do that.
                 */
                if (untracked &&
-                   hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) {
+                   hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
                        invalidate_gitignore(dir->untracked, untracked);
-                       hashcpy(untracked->exclude_sha1, sha1_stat.sha1);
+                       hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
                }
                dir->exclude_stack = stk;
                current = stk->baselen;
@@ -1773,7 +1779,7 @@ static enum path_treatment treat_path(struct dir_struct *dir,
        if (!de)
                return treat_path_fast(dir, untracked, cdir, istate, path,
                                       baselen, pathspec);
-       if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git"))
+       if (is_dot_or_dotdot(de->d_name) || !fspathcmp(de->d_name, ".git"))
                return path_none;
        strbuf_setlen(path, baselen);
        strbuf_addstr(path, de->d_name);
@@ -1809,24 +1815,19 @@ static int valid_cached_dir(struct dir_struct *dir,
         */
        refresh_fsmonitor(istate);
        if (!(dir->untracked->use_fsmonitor && untracked->valid)) {
-               if (stat(path->len ? path->buf : ".", &st)) {
-                       invalidate_directory(dir->untracked, untracked);
+               if (lstat(path->len ? path->buf : ".", &st)) {
                        memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
                        return 0;
                }
                if (!untracked->valid ||
                        match_stat_data_racy(istate, &untracked->stat_data, &st)) {
-                       if (untracked->valid)
-                               invalidate_directory(dir->untracked, untracked);
                        fill_stat_data(&untracked->stat_data, &st);
                        return 0;
                }
        }
 
-       if (untracked->check_only != !!check_only) {
-               invalidate_directory(dir->untracked, untracked);
+       if (untracked->check_only != !!check_only)
                return 0;
-       }
 
        /*
         * prep_exclude will be called eventually on this directory,
@@ -1853,13 +1854,20 @@ static int open_cached_dir(struct cached_dir *cdir,
                           struct strbuf *path,
                           int check_only)
 {
+       const char *c_path;
+
        memset(cdir, 0, sizeof(*cdir));
        cdir->untracked = untracked;
        if (valid_cached_dir(dir, untracked, istate, path, check_only))
                return 0;
-       cdir->fdir = opendir(path->len ? path->buf : ".");
-       if (dir->untracked)
+       c_path = path->len ? path->buf : ".";
+       cdir->fdir = opendir(c_path);
+       if (!cdir->fdir)
+               warning_errno(_("could not open directory '%s'"), c_path);
+       if (dir->untracked) {
+               invalidate_directory(dir->untracked, untracked);
                dir->untracked->dir_opened++;
+       }
        if (!cdir->fdir)
                return -1;
        return 0;
@@ -2164,8 +2172,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
                                                      const struct pathspec *pathspec)
 {
        struct untracked_cache_dir *root;
+       static int untracked_cache_disabled = -1;
 
-       if (!dir->untracked || getenv("GIT_DISABLE_UNTRACKED_CACHE"))
+       if (!dir->untracked)
+               return NULL;
+       if (untracked_cache_disabled < 0)
+               untracked_cache_disabled = git_env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0);
+       if (untracked_cache_disabled)
                return NULL;
 
        /*
@@ -2228,13 +2241,13 @@ static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *d
 
        /* Validate $GIT_DIR/info/exclude and core.excludesfile */
        root = dir->untracked->root;
-       if (hashcmp(dir->ss_info_exclude.sha1,
-                   dir->untracked->ss_info_exclude.sha1)) {
+       if (oidcmp(&dir->ss_info_exclude.oid,
+                  &dir->untracked->ss_info_exclude.oid)) {
                invalidate_gitignore(dir->untracked, root);
                dir->untracked->ss_info_exclude = dir->ss_info_exclude;
        }
-       if (hashcmp(dir->ss_excludes_file.sha1,
-                   dir->untracked->ss_excludes_file.sha1)) {
+       if (oidcmp(&dir->ss_excludes_file.oid,
+                  &dir->untracked->ss_excludes_file.oid)) {
                invalidate_gitignore(dir->untracked, root);
                dir->untracked->ss_excludes_file = dir->ss_excludes_file;
        }
@@ -2248,6 +2261,7 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                   const char *path, int len, const struct pathspec *pathspec)
 {
        struct untracked_cache_dir *untracked;
+       uint64_t start = getnanotime();
 
        if (has_symlink_leading_path(path, len))
                return dir->nr;
@@ -2286,8 +2300,14 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                dir->nr = i;
        }
 
+       trace_performance_since(start, "read directory %.*s", len, path);
        if (dir->untracked) {
+               static int force_untracked_cache = -1;
                static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
+
+               if (force_untracked_cache < 0)
+                       force_untracked_cache =
+                               git_env_bool("GIT_FORCE_UNTRACKED_CACHE", 0);
                trace_printf_key(&trace_untracked_stats,
                                 "node creation: %u\n"
                                 "gitignore invalidation: %u\n"
@@ -2297,7 +2317,8 @@ int read_directory(struct dir_struct *dir, struct index_state *istate,
                                 dir->untracked->gitignore_invalidated,
                                 dir->untracked->dir_invalidated,
                                 dir->untracked->dir_opened);
-               if (dir->untracked == istate->untracked &&
+               if (force_untracked_cache &&
+                       dir->untracked == istate->untracked &&
                    (dir->untracked->dir_opened ||
                     dir->untracked->gitignore_invalidated ||
                     dir->untracked->dir_invalidated))
@@ -2638,8 +2659,8 @@ void write_untracked_extension(struct strbuf *out, struct untracked_cache *untra
        FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
        stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
        stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
-       hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
-       hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
+       hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
+       hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
        ouc->dir_flags = htonl(untracked->dir_flags);
 
        varint_len = encode_varint(untracked->ident.len, varbuf);
@@ -2816,13 +2837,12 @@ static void read_sha1(size_t pos, void *cb)
        rd->data += 20;
 }
 
-static void load_sha1_stat(struct sha1_stat *sha1_stat,
-                          const unsigned char *data,
-                          const unsigned char *sha1)
+static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
+                         const unsigned char *sha1)
 {
-       stat_data_from_disk(&sha1_stat->stat, data);
-       hashcpy(sha1_stat->sha1, sha1);
-       sha1_stat->valid = 1;
+       stat_data_from_disk(&oid_stat->stat, data);
+       hashcpy(oid_stat->oid.hash, sha1);
+       oid_stat->valid = 1;
 }
 
 struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
@@ -2850,12 +2870,12 @@ struct untracked_cache *read_untracked_extension(const void *data, unsigned long
        uc = xcalloc(1, sizeof(*uc));
        strbuf_init(&uc->ident, ident_len);
        strbuf_add(&uc->ident, ident, ident_len);
-       load_sha1_stat(&uc->ss_info_exclude,
-                      next + ouc_offset(info_exclude_stat),
-                      next + ouc_offset(info_exclude_sha1));
-       load_sha1_stat(&uc->ss_excludes_file,
-                      next + ouc_offset(excludes_file_stat),
-                      next + ouc_offset(excludes_file_sha1));
+       load_oid_stat(&uc->ss_info_exclude,
+                     next + ouc_offset(info_exclude_stat),
+                     next + ouc_offset(info_exclude_sha1));
+       load_oid_stat(&uc->ss_excludes_file,
+                     next + ouc_offset(excludes_file_stat),
+                     next + ouc_offset(excludes_file_sha1));
        uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
        exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
        uc->exclude_per_dir = xstrdup(exclude_per_dir);
@@ -2968,10 +2988,12 @@ static int invalidate_one_component(struct untracked_cache *uc,
 }
 
 void untracked_cache_invalidate_path(struct index_state *istate,
-                                    const char *path)
+                                    const char *path, int safe_path)
 {
        if (!istate->untracked || !istate->untracked->root)
                return;
+       if (!safe_path && !verify_path(path))
+               return;
        invalidate_one_component(istate->untracked, istate->untracked->root,
                                 path, strlen(path));
 }
@@ -2979,13 +3001,13 @@ void untracked_cache_invalidate_path(struct index_state *istate,
 void untracked_cache_remove_from_index(struct index_state *istate,
                                       const char *path)
 {
-       untracked_cache_invalidate_path(istate, path);
+       untracked_cache_invalidate_path(istate, path, 1);
 }
 
 void untracked_cache_add_to_index(struct index_state *istate,
                                  const char *path)
 {
-       untracked_cache_invalidate_path(istate, path);
+       untracked_cache_invalidate_path(istate, path, 1);
 }
 
 /* Update gitfile and core.worktree setting to connect work tree and git dir */
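
Besides the oid_stat conversion, the dir.c changes read the untracked-cache environment switches once and remember the answer in a function-local static (validate_untracked_cache() and read_directory() above). A minimal sketch of that pattern; env_bool() here is a crude stand-in for git_env_bool() and only understands numeric values:

#include <stdio.h>
#include <stdlib.h>

/* Crude stand-in for git_env_bool(): numeric values only. */
static int env_bool(const char *name, int def)
{
	const char *v = getenv(name);

	return v ? strtol(v, NULL, 10) != 0 : def;
}

static int untracked_cache_disabled(void)
{
	static int disabled = -1;	/* -1: not looked up yet */

	if (disabled < 0)
		disabled = env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0);
	return disabled;
}

int main(void)
{
	printf("untracked cache disabled: %d\n", untracked_cache_disabled());
	return 0;
}
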
diff --git a/dir.h b/dir.h
index 11a047ba486b81f624fb021418f06cbbd65da676..b0758b82a20017dd3ce29c54454678f026718078 100644 (file)
--- a/dir.h
+++ b/dir.h
@@ -74,9 +74,9 @@ struct exclude_list_group {
        struct exclude_list *el;
 };
 
-struct sha1_stat {
+struct oid_stat {
        struct stat_data stat;
-       unsigned char sha1[20];
+       struct object_id oid;
        int valid;
 };
 
@@ -124,8 +124,8 @@ struct untracked_cache_dir {
 };
 
 struct untracked_cache {
-       struct sha1_stat ss_info_exclude;
-       struct sha1_stat ss_excludes_file;
+       struct oid_stat ss_info_exclude;
+       struct oid_stat ss_excludes_file;
        const char *exclude_per_dir;
        struct strbuf ident;
        /*
@@ -195,8 +195,8 @@ struct dir_struct {
 
        /* Enable untracked file cache if set */
        struct untracked_cache *untracked;
-       struct sha1_stat ss_info_exclude;
-       struct sha1_stat ss_excludes_file;
+       struct oid_stat ss_info_exclude;
+       struct oid_stat ss_excludes_file;
        unsigned unmanaged_exclude_files;
 };
 
@@ -350,7 +350,7 @@ static inline int dir_path_match(const struct dir_entry *ent,
 int cmp_dir_entry(const void *p1, const void *p2);
 int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in);
 
-void untracked_cache_invalidate_path(struct index_state *, const char *);
+void untracked_cache_invalidate_path(struct index_state *, const char *, int safe_path);
 void untracked_cache_remove_from_index(struct index_state *, const char *);
 void untracked_cache_add_to_index(struct index_state *, const char *);
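
The dir.h hunks swap the bare 20-byte arrays for struct object_id, which is what lets dir.c use oidcpy()/oidcmp() in place of hashcpy()/hashcmp(). Simplified stand-ins for those helpers, to show what the wrapper buys: the compiler catches swapped arguments and the hash length is spelled out exactly once.

#include <stdio.h>
#include <string.h>

#define HASH_LEN 20

/* Invented stand-in for struct object_id. */
struct oid_sketch {
	unsigned char hash[HASH_LEN];
};

static void oidcpy_sketch(struct oid_sketch *dst, const struct oid_sketch *src)
{
	memcpy(dst->hash, src->hash, HASH_LEN);
}

static int oidcmp_sketch(const struct oid_sketch *a, const struct oid_sketch *b)
{
	return memcmp(a->hash, b->hash, HASH_LEN);
}

int main(void)
{
	struct oid_sketch a = { { 0 } }, b;

	oidcpy_sketch(&b, &a);
	printf("equal: %d\n", !oidcmp_sketch(&a, &b));
	return 0;
}
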
 
diff --git a/entry.c b/entry.c
index 30211447ac839899d2c232af232c95312856816c..2101201a111785f449a65e785e9ed5b57b7aa196 100644 (file)
--- a/entry.c
+++ b/entry.c
@@ -85,12 +85,12 @@ static int create_file(const char *path, unsigned int mode)
 static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
 {
        enum object_type type;
-       void *new = read_sha1_file(ce->oid.hash, &type, size);
+       void *blob_data = read_object_file(&ce->oid, &type, size);
 
-       if (new) {
+       if (blob_data) {
                if (type == OBJ_BLOB)
-                       return new;
-               free(new);
+                       return blob_data;
+               free(blob_data);
        }
        return NULL;
 }
@@ -256,7 +256,7 @@ static int write_entry(struct cache_entry *ce,
        unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
        struct delayed_checkout *dco = state->delayed_checkout;
        int fd, ret, fstat_done = 0;
-       char *new;
+       char *new_blob;
        struct strbuf buf = STRBUF_INIT;
        unsigned long size;
        ssize_t wrote;
@@ -266,7 +266,7 @@ static int write_entry(struct cache_entry *ce,
 
        if (ce_mode_s_ifmt == S_IFREG) {
                struct stream_filter *filter = get_stream_filter(ce->name,
-                                                                ce->oid.hash);
+                                                                &ce->oid);
                if (filter &&
                    !streaming_write_entry(ce, path, filter,
                                           state, to_tempfile,
@@ -276,8 +276,8 @@ static int write_entry(struct cache_entry *ce,
 
        switch (ce_mode_s_ifmt) {
        case S_IFLNK:
-               new = read_blob_entry(ce, &size);
-               if (!new)
+               new_blob = read_blob_entry(ce, &size);
+               if (!new_blob)
                        return error("unable to read sha1 file of %s (%s)",
                                     path, oid_to_hex(&ce->oid));
 
@@ -288,8 +288,8 @@ static int write_entry(struct cache_entry *ce,
                if (!has_symlinks || to_tempfile)
                        goto write_file_entry;
 
-               ret = symlink(new, path);
-               free(new);
+               ret = symlink(new_blob, path);
+               free(new_blob);
                if (ret)
                        return error_errno("unable to create symlink %s", path);
                break;
@@ -300,11 +300,11 @@ static int write_entry(struct cache_entry *ce,
                 * bother reading it at all.
                 */
                if (dco && dco->state == CE_RETRY) {
-                       new = NULL;
+                       new_blob = NULL;
                        size = 0;
                } else {
-                       new = read_blob_entry(ce, &size);
-                       if (!new)
+                       new_blob = read_blob_entry(ce, &size);
+                       if (!new_blob)
                                return error("unable to read sha1 file of %s (%s)",
                                             path, oid_to_hex(&ce->oid));
                }
@@ -313,18 +313,18 @@ static int write_entry(struct cache_entry *ce,
                 * Convert from git internal format to working tree format
                 */
                if (dco && dco->state != CE_NO_DELAY) {
-                       ret = async_convert_to_working_tree(ce->name, new,
+                       ret = async_convert_to_working_tree(ce->name, new_blob,
                                                            size, &buf, dco);
                        if (ret && string_list_has_string(&dco->paths, ce->name)) {
-                               free(new);
+                               free(new_blob);
                                goto delayed;
                        }
                } else
-                       ret = convert_to_working_tree(ce->name, new, size, &buf);
+                       ret = convert_to_working_tree(ce->name, new_blob, size, &buf);
 
                if (ret) {
-                       free(new);
-                       new = strbuf_detach(&buf, &newsize);
+                       free(new_blob);
+                       new_blob = strbuf_detach(&buf, &newsize);
                        size = newsize;
                }
                /*
@@ -336,15 +336,15 @@ static int write_entry(struct cache_entry *ce,
        write_file_entry:
                fd = open_output_fd(path, ce, to_tempfile);
                if (fd < 0) {
-                       free(new);
+                       free(new_blob);
                        return error_errno("unable to create file %s", path);
                }
 
-               wrote = write_in_full(fd, new, size);
+               wrote = write_in_full(fd, new_blob, size);
                if (!to_tempfile)
                        fstat_done = fstat_output(fd, state, &st);
                close(fd);
-               free(new);
+               free(new_blob);
                if (wrote < 0)
                        return error("unable to write file %s", path);
                break;
index 63ac38a46f8f01ee3d09ed8e09bd93acba63b21a..21565c3c525c64793282d8c3f029733615a87fc2 100644 (file)
@@ -13,6 +13,7 @@
 #include "refs.h"
 #include "fmt-merge-msg.h"
 #include "commit.h"
+#include "argv-array.h"
 
 int trust_executable_bit = 1;
 int trust_ctime = 1;
@@ -27,6 +28,8 @@ int warn_ambiguous_refs = 1;
 int warn_on_object_refname_ambiguity = 1;
 int ref_paranoia = -1;
 int repository_format_precious_objects;
+char *repository_format_partial_clone;
+const char *core_partial_clone_filter_default;
 const char *git_commit_encoding;
 const char *git_log_output_encoding;
 const char *apply_default_whitespace;
@@ -49,7 +52,7 @@ enum auto_crlf auto_crlf = AUTO_CRLF_FALSE;
 int check_replace_refs = 1;
 char *git_replace_ref_base;
 enum eol core_eol = EOL_UNSET;
-enum safe_crlf safe_crlf = SAFE_CRLF_WARN;
+int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
 unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
 enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
 enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
@@ -98,7 +101,7 @@ int ignore_untracked_cache_config;
 /* This is set by setup_git_dir_gently() and/or git_default_config() */
 char *git_work_tree_cfg;
 
-static char *namespace;
+static char *git_namespace;
 
 static const char *super_prefix;
 
@@ -145,10 +148,35 @@ static char *expand_namespace(const char *raw_namespace)
        return strbuf_detach(&buf, NULL);
 }
 
-void setup_git_env(void)
+/*
+ * Wrapper of getenv() that returns a strdup value. This value is kept
+ * in argv to be freed later.
+ */
+static const char *getenv_safe(struct argv_array *argv, const char *name)
+{
+       const char *value = getenv(name);
+
+       if (!value)
+               return NULL;
+
+       argv_array_push(argv, value);
+       return argv->argv[argv->argc - 1];
+}
+
+void setup_git_env(const char *git_dir)
 {
        const char *shallow_file;
        const char *replace_ref_base;
+       struct set_gitdir_args args = { NULL };
+       struct argv_array to_free = ARGV_ARRAY_INIT;
+
+       args.commondir = getenv_safe(&to_free, GIT_COMMON_DIR_ENVIRONMENT);
+       args.object_dir = getenv_safe(&to_free, DB_ENVIRONMENT);
+       args.graft_file = getenv_safe(&to_free, GRAFT_ENVIRONMENT);
+       args.index_file = getenv_safe(&to_free, INDEX_ENVIRONMENT);
+       args.alternate_db = getenv_safe(&to_free, ALTERNATE_DB_ENVIRONMENT);
+       repo_set_gitdir(the_repository, git_dir, &args);
+       argv_array_clear(&to_free);
 
        if (getenv(NO_REPLACE_OBJECTS_ENVIRONMENT))
                check_replace_refs = 0;
@@ -156,8 +184,8 @@ void setup_git_env(void)
        free(git_replace_ref_base);
        git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
                                                          : "refs/replace/");
-       free(namespace);
-       namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
+       free(git_namespace);
+       git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
        shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
        if (shallow_file)
                set_alternate_shallow_file(shallow_file, 0);
@@ -191,9 +219,9 @@ const char *get_git_common_dir(void)
 
 const char *get_git_namespace(void)
 {
-       if (!namespace)
+       if (!git_namespace)
                BUG("git environment hasn't been setup");
-       return namespace;
+       return git_namespace;
 }
 
 const char *strip_namespace(const char *namespaced_ref)
@@ -247,7 +275,7 @@ char *get_object_directory(void)
        return the_repository->objectdir;
 }
 
-int odb_mkstemp(struct strbuf *template, const char *pattern)
+int odb_mkstemp(struct strbuf *temp_filename, const char *pattern)
 {
        int fd;
        /*
@@ -255,16 +283,16 @@ int odb_mkstemp(struct strbuf *template, const char *pattern)
         * restrictive except to remove write permission.
         */
        int mode = 0444;
-       git_path_buf(template, "objects/%s", pattern);
-       fd = git_mkstemp_mode(template->buf, mode);
+       git_path_buf(temp_filename, "objects/%s", pattern);
+       fd = git_mkstemp_mode(temp_filename->buf, mode);
        if (0 <= fd)
                return fd;
 
        /* slow path */
-       /* some mkstemp implementations erase template on failure */
-       git_path_buf(template, "objects/%s", pattern);
-       safe_create_leading_directories(template->buf);
-       return xmkstemp_mode(template->buf, mode);
+       /* some mkstemp implementations erase temp_filename on failure */
+       git_path_buf(temp_filename, "objects/%s", pattern);
+       safe_create_leading_directories(temp_filename->buf);
+       return xmkstemp_mode(temp_filename->buf, mode);
 }
 
 int odb_pack_keep(const char *name)
@@ -298,8 +326,7 @@ int set_git_dir(const char *path)
 {
        if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
                return error("Could not set GIT_DIR to '%s'", path);
-       repo_set_gitdir(the_repository, path);
-       setup_git_env();
+       setup_git_env(path);
        return 0;
 }
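
setup_git_env() now snapshots the relevant environment variables through getenv_safe() before handing them to repo_set_gitdir(), since a pointer returned by getenv() may be invalidated by later setenv()/putenv() calls. A sketch of the same idea with a fixed-size array of strdup'd copies standing in for git's argv_array (struct and function names invented):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for the argv_array that getenv_safe() pushes onto. */
struct env_copies {
	char *v[16];
	int n;
};

/*
 * Duplicate an environment value so that later setenv()/putenv() calls
 * cannot invalidate it; the copy is owned by env_copies and freed below.
 */
static const char *getenv_copy(struct env_copies *ec, const char *name)
{
	const char *val = getenv(name);

	if (!val)
		return NULL;
	assert(ec->n < 16);
	ec->v[ec->n] = strdup(val);
	return ec->v[ec->n++];
}

static void env_copies_clear(struct env_copies *ec)
{
	while (ec->n)
		free(ec->v[--ec->n]);
}

int main(void)
{
	struct env_copies ec = { { NULL }, 0 };
	const char *common = getenv_copy(&ec, "GIT_COMMON_DIR");

	if (common)
		printf("GIT_COMMON_DIR=%s\n", common);
	env_copies_clear(&ec);	/* safe once the value has been consumed */
	return 0;
}
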
 
index b70ac025e0428b239c9cb8b8a70dda70c0cc1007..a2e8b1d763a30e1bbec97a92da46c82f0f33353d 100644 (file)
@@ -316,7 +316,7 @@ static struct atom_str **atom_table;
 /* The .pack file being generated */
 static struct pack_idx_option pack_idx_opts;
 static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
 static struct packed_git *pack_data;
 static struct packed_git **all_packs;
 static off_t pack_size;
@@ -905,12 +905,12 @@ static void start_packfile(void)
 
        p->pack_fd = pack_fd;
        p->do_not_close = 1;
-       pack_file = sha1fd(pack_fd, p->pack_name);
+       pack_file = hashfd(pack_fd, p->pack_name);
 
        hdr.hdr_signature = htonl(PACK_SIGNATURE);
        hdr.hdr_version = htonl(2);
        hdr.hdr_entries = 0;
-       sha1write(pack_file, &hdr, sizeof(hdr));
+       hashwrite(pack_file, &hdr, sizeof(hdr));
 
        pack_data = p;
        pack_size = sizeof(hdr);
@@ -1016,7 +1016,7 @@ static void end_packfile(void)
                struct tag *t;
 
                close_pack_windows(pack_data);
-               sha1close(pack_file, cur_pack_oid.hash, 0);
+               hashclose(pack_file, cur_pack_oid.hash, 0);
                fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
                                    pack_data->pack_name, object_count,
                                    cur_pack_oid.hash, pack_size);
@@ -1092,15 +1092,15 @@ static int store_object(
        unsigned char hdr[96];
        struct object_id oid;
        unsigned long hdrlen, deltalen;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        git_zstream s;
 
        hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
-                          typename(type), (unsigned long)dat->len) + 1;
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, hdrlen);
-       git_SHA1_Update(&c, dat->buf, dat->len);
-       git_SHA1_Final(oid.hash, &c);
+                          type_name(type), (unsigned long)dat->len) + 1;
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
+       the_hash_algo->update_fn(&c, dat->buf, dat->len);
+       the_hash_algo->final_fn(oid.hash, &c);
        if (oidout)
                oidcpy(oidout, &oid);
 
@@ -1118,11 +1118,13 @@ static int store_object(
                return 1;
        }
 
-       if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+       if (last && last->data.buf && last->depth < max_depth
+               && dat->len > the_hash_algo->rawsz) {
+
                delta_count_attempts_by_type[type]++;
                delta = diff_delta(last->data.buf, last->data.len,
                        dat->buf, dat->len,
-                       &deltalen, dat->len - 20);
+                       &deltalen, dat->len - the_hash_algo->rawsz);
        } else
                delta = NULL;
 
@@ -1180,23 +1182,23 @@ static int store_object(
 
                hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
                                                      OBJ_OFS_DELTA, deltalen);
-               sha1write(pack_file, hdr, hdrlen);
+               hashwrite(pack_file, hdr, hdrlen);
                pack_size += hdrlen;
 
                hdr[pos] = ofs & 127;
                while (ofs >>= 7)
                        hdr[--pos] = 128 | (--ofs & 127);
-               sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+               hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
                pack_size += sizeof(hdr) - pos;
        } else {
                e->depth = 0;
                hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
                                                      type, dat->len);
-               sha1write(pack_file, hdr, hdrlen);
+               hashwrite(pack_file, hdr, hdrlen);
                pack_size += hdrlen;
        }
 
-       sha1write(pack_file, out, s.total_out);
+       hashwrite(pack_file, out, s.total_out);
        pack_size += s.total_out;
 
        e->idx.crc32 = crc32_end(pack_file);
@@ -1215,9 +1217,9 @@ static int store_object(
        return 0;
 }
 
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
 {
-       if (sha1file_truncate(pack_file, checkpoint))
+       if (hashfile_truncate(pack_file, checkpoint))
                die_errno("cannot truncate pack to skip duplicate");
        pack_size = checkpoint->offset;
 }
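The sha1file API becomes hashfile here: stream_blob() below records a hashfile_checkpoint (the current pack offset plus the in-progress hashing state) before streaming a blob into the pack, and truncate_pack() rewinds the pack file to that checkpoint when the streamed blob turns out to be a duplicate, so duplicates do not bloat the pack.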
@@ -1231,9 +1233,9 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
        struct object_id oid;
        unsigned long hdrlen;
        off_t offset;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        git_zstream s;
-       struct sha1file_checkpoint checkpoint;
+       struct hashfile_checkpoint checkpoint;
        int status = Z_OK;
 
        /* Determine if we should auto-checkpoint. */
@@ -1241,13 +1243,13 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                || (pack_size + 60 + len) < pack_size)
                cycle_packfile();
 
-       sha1file_checkpoint(pack_file, &checkpoint);
+       hashfile_checkpoint(pack_file, &checkpoint);
        offset = checkpoint.offset;
 
        hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, out_buf, hdrlen);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, out_buf, hdrlen);
 
        crc32_begin(pack_file);
 
@@ -1265,7 +1267,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                        if (!n && feof(stdin))
                                die("EOF in data (%" PRIuMAX " bytes remaining)", len);
 
-                       git_SHA1_Update(&c, in_buf, n);
+                       the_hash_algo->update_fn(&c, in_buf, n);
                        s.next_in = in_buf;
                        s.avail_in = n;
                        len -= n;
@@ -1275,7 +1277,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
 
                if (!s.avail_out || status == Z_STREAM_END) {
                        size_t n = s.next_out - out_buf;
-                       sha1write(pack_file, out_buf, n);
+                       hashwrite(pack_file, out_buf, n);
                        pack_size += n;
                        s.next_out = out_buf;
                        s.avail_out = out_sz;
@@ -1291,7 +1293,7 @@ static void stream_blob(uintmax_t len, struct object_id *oidout, uintmax_t mark)
                }
        }
        git_deflate_end(&s);
-       git_SHA1_Final(oid.hash, &c);
+       the_hash_algo->final_fn(oid.hash, &c);
 
        if (oidout)
                oidcpy(oidout, &oid);
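Throughout store_object() and stream_blob(), direct git_SHA1_Init/Update/Final calls and the literal 20-byte digest size give way to the_hash_algo, the hash-algorithm abstraction that carries function pointers plus the raw (rawsz) and hex (hexsz) digest lengths. A hedged sketch of the calling pattern, assuming cache.h is included as it is elsewhere in this file:

	#include "cache.h"

	/*
	 * Hash an object header plus payload through whatever hash algorithm
	 * the repository uses instead of hard-coding SHA-1.  oid->hash ends up
	 * holding the_hash_algo->rawsz bytes; its hex form would be
	 * the_hash_algo->hexsz characters long.
	 */
	static void hash_object_data(struct object_id *oid,
				     const char *hdr, size_t hdrlen,
				     const void *data, size_t len)
	{
		git_hash_ctx c;

		the_hash_algo->init_fn(&c);
		the_hash_algo->update_fn(&c, hdr, hdrlen);
		the_hash_algo->update_fn(&c, data, len);
		the_hash_algo->final_fn(oid->hash, &c);
	}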
@@ -1350,25 +1352,25 @@ static void *gfi_unpack_entry(
 {
        enum object_type type;
        struct packed_git *p = all_packs[oe->pack_id];
-       if (p == pack_data && p->pack_size < (pack_size + 20)) {
+       if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
                /* The object is stored in the packfile we are writing to
                 * and we have modified it since the last time we scanned
                 * back to read a previously written object.  If an old
-                * window covered [p->pack_size, p->pack_size + 20) its
+                * window covered [p->pack_size, p->pack_size + rawsz) its
                 * data is stale and is not valid.  Closing all windows
                 * and updating the packfile length ensures we can read
                 * the newly written data.
                 */
                close_pack_windows(p);
-               sha1flush(pack_file);
+               hashflush(pack_file);
 
-               /* We have to offer 20 bytes additional on the end of
+               /* We have to offer rawsz bytes additional on the end of
                 * the packfile as the core unpacker code assumes the
                 * footer is present at the file end and must promise
-                * at least 20 bytes within any window it maps.  But
+                * at least rawsz bytes within any window it maps.  But
                 * we don't actually create the footer here.
                 */
-               p->pack_size = pack_size + 20;
+               p->pack_size = pack_size + the_hash_algo->rawsz;
        }
        return unpack_entry(p, oe->idx.offset, &type, sizep);
 }
@@ -1410,7 +1412,7 @@ static void load_tree(struct tree_entry *root)
                        die("Can't load tree %s", oid_to_hex(oid));
        } else {
                enum object_type type;
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf || type != OBJ_TREE)
                        die("Can't load tree %s", oid_to_hex(oid));
        }
@@ -1911,7 +1913,7 @@ static void read_marks(void)
                        die("corrupt mark line: %s", line);
                e = find_object(&oid);
                if (!e) {
-                       enum object_type type = sha1_object_info(oid.hash, NULL);
+                       enum object_type type = oid_object_info(&oid, NULL);
                        if (type < 0)
                                die("object not found: %s", oid_to_hex(&oid));
                        e = insert_object(&oid);
@@ -2204,7 +2206,7 @@ static void construct_path_with_fanout(const char *hex_sha1,
                unsigned char fanout, char *path)
 {
        unsigned int i = 0, j = 0;
-       if (fanout >= 20)
+       if (fanout >= the_hash_algo->rawsz)
                die("Too large fanout (%u)", fanout);
        while (fanout) {
                path[i++] = hex_sha1[j++];
@@ -2212,8 +2214,8 @@ static void construct_path_with_fanout(const char *hex_sha1,
                path[i++] = '/';
                fanout--;
        }
-       memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
-       path[i + GIT_SHA1_HEXSZ - j] = '\0';
+       memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+       path[i + the_hash_algo->hexsz - j] = '\0';
 }
 
 static uintmax_t do_change_note_fanout(
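construct_path_with_fanout() now derives the tail length from the_hash_algo->hexsz rather than the SHA-1-specific GIT_SHA1_HEXSZ, and rejects fanouts of rawsz or more. For example, with a fanout of 2, an object whose hex id starts "1234abcd..." maps to the notes path "12/34/<remaining hexsz-4 hex digits>": each fanout level consumes one pair of hex digits followed by a slash.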
@@ -2421,7 +2423,7 @@ static void file_change_m(const char *p, struct branch *b)
                else if (oe) {
                        if (oe->type != OBJ_COMMIT)
                                die("Not a commit (actually a %s): %s",
-                                       typename(oe->type), command_buf.buf);
+                                       type_name(oe->type), command_buf.buf);
                }
                /*
                 * Accept the sha1 without checking; it is expected to be in
@@ -2441,14 +2443,14 @@ static void file_change_m(const char *p, struct branch *b)
                enum object_type expected = S_ISDIR(mode) ?
                                                OBJ_TREE: OBJ_BLOB;
                enum object_type type = oe ? oe->type :
-                                       sha1_object_info(oid.hash, NULL);
+                                       oid_object_info(&oid, NULL);
                if (type < 0)
                        die("%s not found: %s",
                                        S_ISDIR(mode) ?  "Tree" : "Blob",
                                        command_buf.buf);
                if (type != expected)
                        die("Not a %s (actually a %s): %s",
-                               typename(expected), typename(type),
+                               type_name(expected), type_name(type),
                                command_buf.buf);
        }
 
@@ -2581,8 +2583,9 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
                oidcpy(&commit_oid, &commit_oe->idx.oid);
        } else if (!get_oid(p, &commit_oid)) {
                unsigned long size;
-               char *buf = read_object_with_reference(commit_oid.hash,
-                       commit_type, &size, commit_oid.hash);
+               char *buf = read_object_with_reference(&commit_oid,
+                                                      commit_type, &size,
+                                                      &commit_oid);
                if (!buf || size < 46)
                        die("Not a valid commit: %s", p);
                free(buf);
@@ -2599,14 +2602,14 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
        } else if (oe) {
                if (oe->type != OBJ_BLOB)
                        die("Not a blob (actually a %s): %s",
-                               typename(oe->type), command_buf.buf);
+                               type_name(oe->type), command_buf.buf);
        } else if (!is_null_oid(&oid)) {
-               enum object_type type = sha1_object_info(oid.hash, NULL);
+               enum object_type type = oid_object_info(&oid, NULL);
                if (type < 0)
                        die("Blob not found: %s", command_buf.buf);
                if (type != OBJ_BLOB)
                        die("Not a blob (actually a %s): %s",
-                           typename(type), command_buf.buf);
+                           type_name(type), command_buf.buf);
        }
 
        construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
@@ -2651,9 +2654,8 @@ static void parse_from_existing(struct branch *b)
                unsigned long size;
                char *buf;
 
-               buf = read_object_with_reference(b->oid.hash,
-                                                commit_type, &size,
-                                                b->oid.hash);
+               buf = read_object_with_reference(&b->oid, commit_type, &size,
+                                                &b->oid);
                parse_from_commit(b, buf, size);
                free(buf);
        }
@@ -2730,8 +2732,9 @@ static struct hash_list *parse_merge(unsigned int *count)
                        oidcpy(&n->oid, &oe->idx.oid);
                } else if (!get_oid(from, &n->oid)) {
                        unsigned long size;
-                       char *buf = read_object_with_reference(n->oid.hash,
-                               commit_type, &size, n->oid.hash);
+                       char *buf = read_object_with_reference(&n->oid,
+                                                              commit_type,
+                                                              &size, &n->oid);
                        if (!buf || size < 46)
                                die("Not a valid commit: %s", from);
                        free(buf);
@@ -2888,7 +2891,7 @@ static void parse_new_tag(const char *arg)
        } else if (!get_oid(from, &oid)) {
                struct object_entry *oe = find_object(&oid);
                if (!oe) {
-                       type = sha1_object_info(oid.hash, NULL);
+                       type = oid_object_info(&oid, NULL);
                        if (type < 0)
                                die("Not a valid object: %s", from);
                } else
@@ -2914,7 +2917,7 @@ static void parse_new_tag(const char *arg)
                    "object %s\n"
                    "type %s\n"
                    "tag %s\n",
-                   oid_to_hex(&oid), typename(type), t->name);
+                   oid_to_hex(&oid), type_name(type), t->name);
        if (tagger)
                strbuf_addf(&new_data,
                            "tagger %s\n", tagger);
@@ -2964,7 +2967,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
        char *buf;
 
        if (!oe || oe->pack_id == MAX_PACK_ID) {
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
        } else {
                type = oe->type;
                buf = gfi_unpack_entry(oe, &size);
@@ -2985,10 +2988,10 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
                die("Can't read object %s", oid_to_hex(oid));
        if (type != OBJ_BLOB)
                die("Object %s is a %s but a blob was expected.",
-                   oid_to_hex(oid), typename(type));
+                   oid_to_hex(oid), type_name(type));
        strbuf_reset(&line);
        strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid),
-                                               typename(type), size);
+                                               type_name(type), size);
        cat_blob_write(line.buf, line.len);
        strbuf_release(&line);
        cat_blob_write(buf, size);
@@ -3003,7 +3006,7 @@ static void cat_blob(struct object_entry *oe, struct object_id *oid)
 
 static void parse_get_mark(const char *p)
 {
-       struct object_entry *oe = oe;
+       struct object_entry *oe;
        char output[GIT_MAX_HEXSZ + 2];
 
        /* get-mark SP <object> LF */
@@ -3020,7 +3023,7 @@ static void parse_get_mark(const char *p)
 
 static void parse_cat_blob(const char *p)
 {
-       struct object_entry *oe = oe;
+       struct object_entry *oe;
        struct object_id oid;
 
        /* cat-blob SP <object> LF */
@@ -3046,7 +3049,7 @@ static struct object_entry *dereference(struct object_entry *oe,
        unsigned long size;
        char *buf = NULL;
        if (!oe) {
-               enum object_type type = sha1_object_info(oid->hash, NULL);
+               enum object_type type = oid_object_info(oid, NULL);
                if (type < 0)
                        die("object not found: %s", oid_to_hex(oid));
                /* cache it! */
@@ -3069,7 +3072,7 @@ static struct object_entry *dereference(struct object_entry *oe,
                buf = gfi_unpack_entry(oe, &size);
        } else {
                enum object_type unused;
-               buf = read_sha1_file(oid->hash, &unused, &size);
+               buf = read_object_file(oid, &unused, &size);
        }
        if (!buf)
                die("Can't load object %s", oid_to_hex(oid));
diff --git a/fetch-object.c b/fetch-object.c
new file mode 100644 (file)
index 0000000..853624f
--- /dev/null
@@ -0,0 +1,45 @@
+#include "cache.h"
+#include "packfile.h"
+#include "pkt-line.h"
+#include "strbuf.h"
+#include "transport.h"
+#include "fetch-object.h"
+
+static void fetch_refs(const char *remote_name, struct ref *ref)
+{
+       struct remote *remote;
+       struct transport *transport;
+       int original_fetch_if_missing = fetch_if_missing;
+
+       fetch_if_missing = 0;
+       remote = remote_get(remote_name);
+       if (!remote->url[0])
+               die(_("Remote with no URL"));
+       transport = transport_get(remote, remote->url[0]);
+
+       transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+       transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1");
+       transport_fetch_refs(transport, ref);
+       fetch_if_missing = original_fetch_if_missing;
+}
+
+void fetch_object(const char *remote_name, const unsigned char *sha1)
+{
+       struct ref *ref = alloc_ref(sha1_to_hex(sha1));
+       hashcpy(ref->old_oid.hash, sha1);
+       fetch_refs(remote_name, ref);
+}
+
+void fetch_objects(const char *remote_name, const struct oid_array *to_fetch)
+{
+       struct ref *ref = NULL;
+       int i;
+
+       for (i = 0; i < to_fetch->nr; i++) {
+               struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i]));
+               oidcpy(&new_ref->old_oid, &to_fetch->oid[i]);
+               new_ref->next = ref;
+               ref = new_ref;
+       }
+       fetch_refs(remote_name, ref);
+}
diff --git a/fetch-object.h b/fetch-object.h
new file mode 100644 (file)
index 0000000..4b269d0
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef FETCH_OBJECT_H
+#define FETCH_OBJECT_H
+
+#include "sha1-array.h"
+
+extern void fetch_object(const char *remote_name, const unsigned char *sha1);
+
+extern void fetch_objects(const char *remote_name,
+                         const struct oid_array *to_fetch);
+
+#endif
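fetch-object.c and fetch-object.h are new: they let callers fetch individual missing objects from a promisor remote. fetch_refs() temporarily clears fetch_if_missing so the fetch cannot recurse into further on-demand fetches, and marks the transport with FROM_PROMISOR and NO_DEPENDENTS so only the named objects are requested. A hypothetical caller could look like this (the remote name "origin" and the wrapper are illustrative assumptions, not code from this series):

	#include "cache.h"
	#include "fetch-object.h"

	/*
	 * Illustration only: make sure one object is present locally,
	 * fetching it on demand from the promisor remote if it is not.
	 */
	static void ensure_object_available(const struct object_id *oid)
	{
		if (!has_object_file_with_flags(oid, OBJECT_INFO_QUICK))
			fetch_object("origin", oid->hash); /* raw hash, per fetch-object.h */
	}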
index 9f6b07ad91f8c2c17e85004a0e71a8b69752d120..52932b37f8dce61296c37d6fa821d564f0f4a38b 100644 (file)
@@ -29,6 +29,7 @@ static int deepen_not_ok;
 static int fetch_fsck_objects = -1;
 static int transfer_fsck_objects = -1;
 static int agent_supported;
+static int server_supports_filtering;
 static struct lock_file shallow_lock;
 static const char *alternate_shallow_file;
 
@@ -260,8 +261,8 @@ static enum ack_type get_ack(int fd, struct object_id *result_oid)
        char *line = packet_read_line(fd, &len);
        const char *arg;
 
-       if (!len)
-               die(_("git fetch-pack: expected ACK/NAK, got EOF"));
+       if (!line)
+               die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
        if (!strcmp(line, "NAK"))
                return NAK;
        if (skip_prefix(line, "ACK ", &arg)) {
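packet_read_line() returns NULL when it reads a flush packet (len is 0 in that case too), so testing the returned pointer rather than the length lets the message describe what actually arrived; a genuine EOF would normally already have been reported by the packet-reading code before reaching this check.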
@@ -379,6 +380,8 @@ static int find_common(struct fetch_pack_args *args,
                        if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
                        if (agent_supported)    strbuf_addf(&c, " agent=%s",
                                                            git_user_agent_sanitized());
+                       if (args->filter_options.choice)
+                               strbuf_addstr(&c, " filter");
                        packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
                        strbuf_release(&c);
                } else
@@ -407,6 +410,9 @@ static int find_common(struct fetch_pack_args *args,
                        packet_buf_write(&req_buf, "deepen-not %s", s->string);
                }
        }
+       if (server_supports_filtering && args->filter_options.choice)
+               packet_buf_write(&req_buf, "filter %s",
+                                args->filter_options.filter_spec);
        packet_buf_flush(&req_buf);
        state_len = req_buf.len;
 
@@ -450,6 +456,8 @@ static int find_common(struct fetch_pack_args *args,
 
        flushes = 0;
        retval = -1;
+       if (args->no_dependents)
+               goto done;
        while ((oid = get_rev())) {
                packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
                print_verbose(args, "have %s", oid_to_hex(oid));
@@ -703,23 +711,61 @@ static void mark_alternate_complete(struct object *obj)
        mark_complete(&obj->oid);
 }
 
+struct loose_object_iter {
+       struct oidset *loose_object_set;
+       struct ref *refs;
+};
+
+/*
+ *  If the number of refs is not larger than the number of loose objects,
+ *  this function stops inserting.
+ */
+static int add_loose_objects_to_set(const struct object_id *oid,
+                                   const char *path,
+                                   void *data)
+{
+       struct loose_object_iter *iter = data;
+       oidset_insert(iter->loose_object_set, oid);
+       if (iter->refs == NULL)
+               return 1;
+
+       iter->refs = iter->refs->next;
+       return 0;
+}
+
 static int everything_local(struct fetch_pack_args *args,
                            struct ref **refs,
                            struct ref **sought, int nr_sought)
 {
        struct ref *ref;
        int retval;
+       int old_save_commit_buffer = save_commit_buffer;
        timestamp_t cutoff = 0;
+       struct oidset loose_oid_set = OIDSET_INIT;
+       int use_oidset = 0;
+       struct loose_object_iter iter = {&loose_oid_set, *refs};
+
+       /* Enumerate all loose objects, or learn that there are more of them than refs. */
+       use_oidset = !for_each_loose_object(add_loose_objects_to_set,
+                                           &iter, 0);
 
        save_commit_buffer = 0;
 
        for (ref = *refs; ref; ref = ref->next) {
                struct object *o;
+               unsigned int flags = OBJECT_INFO_QUICK;
 
-               if (!has_object_file_with_flags(&ref->old_oid,
-                                               OBJECT_INFO_QUICK))
-                       continue;
+               if (use_oidset &&
+                   !oidset_contains(&loose_oid_set, &ref->old_oid)) {
+                       /*
+                        * I know this does not exist in the loose form,
+                        * so check if it exists in a non-loose form.
+                        */
+                       flags |= OBJECT_INFO_IGNORE_LOOSE;
+               }
 
+               if (!has_object_file_with_flags(&ref->old_oid, flags))
+                       continue;
                o = parse_object(&ref->old_oid);
                if (!o)
                        continue;
@@ -735,29 +781,33 @@ static int everything_local(struct fetch_pack_args *args,
                }
        }
 
-       if (!args->deepen) {
-               for_each_ref(mark_complete_oid, NULL);
-               for_each_cached_alternate(mark_alternate_complete);
-               commit_list_sort_by_date(&complete);
-               if (cutoff)
-                       mark_recent_complete_commits(args, cutoff);
-       }
+       oidset_clear(&loose_oid_set);
 
-       /*
-        * Mark all complete remote refs as common refs.
-        * Don't mark them common yet; the server has to be told so first.
-        */
-       for (ref = *refs; ref; ref = ref->next) {
-               struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
-                                            NULL, 0);
+       if (!args->no_dependents) {
+               if (!args->deepen) {
+                       for_each_ref(mark_complete_oid, NULL);
+                       for_each_cached_alternate(mark_alternate_complete);
+                       commit_list_sort_by_date(&complete);
+                       if (cutoff)
+                               mark_recent_complete_commits(args, cutoff);
+               }
 
-               if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
-                       continue;
+               /*
+                * Mark all complete remote refs as common refs.
+                * Don't mark them common yet; the server has to be told so first.
+                */
+               for (ref = *refs; ref; ref = ref->next) {
+                       struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
+                                                    NULL, 0);
+
+                       if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
+                               continue;
 
-               if (!(o->flags & SEEN)) {
-                       rev_list_push((struct commit *)o, COMMON_REF | SEEN);
+                       if (!(o->flags & SEEN)) {
+                               rev_list_push((struct commit *)o, COMMON_REF | SEEN);
 
-                       mark_common((struct commit *)o, 1, 1);
+                               mark_common((struct commit *)o, 1, 1);
+                       }
                }
        }
 
@@ -777,6 +827,9 @@ static int everything_local(struct fetch_pack_args *args,
                print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
                              ref->name);
        }
+
+       save_commit_buffer = old_save_commit_buffer;
+
        return retval;
 }
 
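The reworked everything_local() first tries to enumerate every loose object into an oidset; add_loose_objects_to_set() walks the ref list in step and aborts the enumeration once loose objects outnumber refs, in which case the set is simply not used. When the set is available, a ref whose object is not in it is probed with OBJECT_INFO_IGNORE_LOOSE, skipping a per-ref lookup in the loose-object store. The function now also restores save_commit_buffer on exit and, for no_dependents fetches, skips the complete/common-ref marking entirely, since such fetches deliberately ignore connectivity.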
@@ -833,7 +886,7 @@ static int get_pack(struct fetch_pack_args *args,
                argv_array_push(&cmd.args, alternate_shallow_file);
        }
 
-       if (do_keep) {
+       if (do_keep || args->from_promisor) {
                if (pack_lockfile)
                        cmd.out = -1;
                cmd_name = "index-pack";
@@ -843,7 +896,7 @@ static int get_pack(struct fetch_pack_args *args,
                        argv_array_push(&cmd.args, "-v");
                if (args->use_thin_pack)
                        argv_array_push(&cmd.args, "--fix-thin");
-               if (args->lock_pack || unpack_limit) {
+               if (do_keep && (args->lock_pack || unpack_limit)) {
                        char hostname[HOST_NAME_MAX + 1];
                        if (xgethostname(hostname, sizeof(hostname)))
                                xsnprintf(hostname, sizeof(hostname), "localhost");
@@ -853,6 +906,8 @@ static int get_pack(struct fetch_pack_args *args,
                }
                if (args->check_self_contained_and_connected)
                        argv_array_push(&cmd.args, "--check-self-contained-and-connected");
+               if (args->from_promisor)
+                       argv_array_push(&cmd.args, "--promisor");
        }
        else {
                cmd_name = "unpack-objects";
@@ -870,8 +925,17 @@ static int get_pack(struct fetch_pack_args *args,
            ? fetch_fsck_objects
            : transfer_fsck_objects >= 0
            ? transfer_fsck_objects
-           : 0)
-               argv_array_push(&cmd.args, "--strict");
+           : 0) {
+               if (args->from_promisor)
+                       /*
+                        * We cannot use --strict in index-pack because it
+                        * checks both broken objects and links, but we only
+                        * want to check for broken objects.
+                        */
+                       argv_array_push(&cmd.args, "--fsck-objects");
+               else
+                       argv_array_push(&cmd.args, "--strict");
+       }
 
        cmd.in = demux.out;
        cmd.git_cmd = 1;
@@ -964,6 +1028,13 @@ static struct ref *do_fetch_pack(struct fetch_pack_args *args,
        else
                prefer_ofs_delta = 0;
 
+       if (server_supports("filter")) {
+               server_supports_filtering = 1;
+               print_verbose(args, _("Server supports filter"));
+       } else if (args->filter_options.choice) {
+               warning("filtering not recognized by server, ignoring");
+       }
+
        if ((agent_feature = server_feature_value("agent", &agent_len))) {
                agent_supported = 1;
                if (agent_len)
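Filter support is negotiated: the client advertises a filter only when the server announced the "filter" capability, appending " filter" to the capability list on the first want line and sending a separate "filter <spec>" request line; if a filter was asked for but the server cannot honor it, the request is dropped with a warning. Schematically (object ids and agent string are illustrative, pkt-line length prefixes elided), a filtered request would include lines such as:

	want <object-id> ... agent=git/2.x filter
	filter blob:none
	0000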
index b6aeb43a8e21437581e0e834e77754922f8c6ee9..3e224a18226ec6219b09387704baf178821c4c23 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "string-list.h"
 #include "run-command.h"
+#include "list-objects-filter-options.h"
 
 struct oid_array;
 
@@ -12,6 +13,7 @@ struct fetch_pack_args {
        int depth;
        const char *deepen_since;
        const struct string_list *deepen_not;
+       struct list_objects_filter_options filter_options;
        unsigned deepen_relative:1;
        unsigned quiet:1;
        unsigned keep_pack:1;
@@ -29,6 +31,14 @@ struct fetch_pack_args {
        unsigned cloning:1;
        unsigned update_shallow:1;
        unsigned deepen:1;
+       unsigned from_promisor:1;
+
+       /*
+        * If 1, fetch_pack() will also not modify any object flags.
+        * This allows fetch_pack() to safely be called by any function,
+        * regardless of which object flags it uses (if any).
+        */
+       unsigned no_dependents:1;
 };
 
 /*
diff --git a/fsck.c b/fsck.c
index 032699e9ac2622c7d089523a456db0fa83310e14..9218c2a643b83e68b9f59479bcc1be833ae8be96 100644 (file)
--- a/fsck.c
+++ b/fsck.c
@@ -811,7 +811,7 @@ static int fsck_tag_buffer(struct tag *tag, const char *data,
                enum object_type type;
 
                buffer = to_free =
-                       read_sha1_file(tag->object.oid.hash, &type, &size);
+                       read_object_file(&tag->object.oid, &type, &size);
                if (!buffer)
                        return report(options, &tag->object,
                                FSCK_MSG_MISSING_TAG_OBJECT,
@@ -821,7 +821,7 @@ static int fsck_tag_buffer(struct tag *tag, const char *data,
                        ret = report(options, &tag->object,
                                FSCK_MSG_TAG_OBJECT_NOT_TAG,
                                "expected tag got %s",
-                           typename(type));
+                           type_name(type));
                        goto done;
                }
        }
index 0af7c4edba37fd9f6a8cee83cd2715c3fa08b488..6d7bcd5d0ed8f2d3f5abdea2f26c6be72909b657 100644 (file)
@@ -130,7 +130,7 @@ static void fsmonitor_refresh_callback(struct index_state *istate, const char *n
         * as it could be a new untracked file.
         */
        trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name);
-       untracked_cache_invalidate_path(istate, name);
+       untracked_cache_invalidate_path(istate, name, 0);
 }
 
 void refresh_fsmonitor(struct index_state *istate)
index cd3cc0ccf228c9d601621685042d51368a71c707..65f37436369cd934f2bdb3b627396c5f72a7cb03 100644 (file)
@@ -65,7 +65,7 @@ static inline void mark_fsmonitor_invalid(struct index_state *istate, struct cac
 {
        if (core_fsmonitor) {
                ce->ce_flags &= ~CE_FSMONITOR_VALID;
-               untracked_cache_invalidate_path(istate, ce->name);
+               untracked_cache_invalidate_path(istate, ce->name, 1);
                trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name);
        }
 }
index 964c3a75420db4751cf11125b68b6904112632f1..d190469cd8b5e1dc427b4029d0d1fb937faef584 100755 (executable)
@@ -677,7 +677,7 @@ sub add_untracked_cmd {
 sub run_git_apply {
        my $cmd = shift;
        my $fh;
-       open $fh, '| git ' . $cmd . " --recount --allow-overlap";
+       open $fh, '| git ' . $cmd . " --allow-overlap";
        print $fh @_;
        return close $fh;
 }
@@ -705,6 +705,14 @@ sub parse_diff {
        }
        my (@hunk) = { TEXT => [], DISPLAY => [], TYPE => 'header' };
 
+       if (@colored && @colored != @diff) {
+               print STDERR
+                 "fatal: mismatched output from interactive.diffFilter\n",
+                 "hint: Your filter must maintain a one-to-one correspondence\n",
+                 "hint: between its input and output lines.\n";
+               exit 1;
+       }
+
        for (my $i = 0; $i < @diff; $i++) {
                if ($diff[$i] =~ /^@@ /) {
                        push @hunk, { TEXT => [], DISPLAY => [],
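The new check requires interactive.diffFilter to emit exactly one output line per input line, because hunk selection pairs the plain and filtered diffs line by line: a filter that only recolors lines (contrib's diff-highlight, for example) is fine, while one that adds or drops lines now fails up front with the message above instead of producing garbled hunks.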
@@ -751,6 +759,15 @@ sub parse_hunk_header {
        return ($o_ofs, $o_cnt, $n_ofs, $n_cnt);
 }
 
+sub format_hunk_header {
+       my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) = @_;
+       return ("@@ -$o_ofs" .
+               (($o_cnt != 1) ? ",$o_cnt" : '') .
+               " +$n_ofs" .
+               (($n_cnt != 1) ? ",$n_cnt" : '') .
+               " @@\n");
+}
+
 sub split_hunk {
        my ($text, $display) = @_;
        my @split = ();
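format_hunk_header() factors out the hunk-header formatting that split_hunk() and merge_hunk() previously duplicated; following unified-diff convention, a count of 1 is omitted, so format_hunk_header(10, 3, 12, 1) yields "@@ -10,3 +12 @@". The new "^\\" cases in split_hunk() and merge_hunk() keep "\ No newline at end of file" markers attached to the line they annotate when hunks are split or recombined.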
@@ -784,6 +801,11 @@ sub split_hunk {
                while (++$i < @$text) {
                        my $line = $text->[$i];
                        my $display = $display->[$i];
+                       if ($line =~ /^\\/) {
+                               push @{$this->{TEXT}}, $line;
+                               push @{$this->{DISPLAY}}, $display;
+                               next;
+                       }
                        if ($line =~ /^ /) {
                                if ($this->{ADDDEL} &&
                                    !defined $next_hunk_start) {
@@ -838,11 +860,7 @@ sub split_hunk {
                my $o_cnt = $hunk->{OCNT};
                my $n_cnt = $hunk->{NCNT};
 
-               my $head = ("@@ -$o_ofs" .
-                           (($o_cnt != 1) ? ",$o_cnt" : '') .
-                           " +$n_ofs" .
-                           (($n_cnt != 1) ? ",$n_cnt" : '') .
-                           " @@\n");
+               my $head = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
                my $display_head = $head;
                unshift @{$hunk->{TEXT}}, $head;
                if ($diff_use_color) {
@@ -886,6 +904,9 @@ sub merge_hunk {
                        $n_cnt++;
                        push @line, $line;
                        next;
+               } elsif ($line =~ /^\\/) {
+                       push @line, $line;
+                       next;
                }
 
                last if ($o1_ofs <= $ofs);
@@ -904,6 +925,9 @@ sub merge_hunk {
                        $n_cnt++;
                        push @line, $line;
                        next;
+               } elsif ($line =~ /^\\/) {
+                       push @line, $line;
+                       next;
                }
                $ofs++;
                $o_cnt++;
@@ -912,11 +936,7 @@ sub merge_hunk {
                }
                push @line, $line;
        }
-       my $head = ("@@ -$o0_ofs" .
-                   (($o_cnt != 1) ? ",$o_cnt" : '') .
-                   " +$n0_ofs" .
-                   (($n_cnt != 1) ? ",$n_cnt" : '') .
-                   " @@\n");
+       my $head = format_hunk_header($o0_ofs, $o_cnt, $n0_ofs, $n_cnt);
        @{$prev->{TEXT}} = ($head, @line);
 }
 
@@ -925,14 +945,35 @@ sub coalesce_overlapping_hunks {
        my @out = ();
 
        my ($last_o_ctx, $last_was_dirty);
+       my $ofs_delta = 0;
 
-       for (grep { $_->{USE} } @in) {
+       for (@in) {
                if ($_->{TYPE} ne 'hunk') {
                        push @out, $_;
                        next;
                }
                my $text = $_->{TEXT};
-               my ($o_ofs) = parse_hunk_header($text->[0]);
+               my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) =
+                                               parse_hunk_header($text->[0]);
+               unless ($_->{USE}) {
+                       $ofs_delta += $o_cnt - $n_cnt;
+                       # If this hunk has been edited then subtract
+                       # the delta that is due to the edit.
+                       if ($_->{OFS_DELTA}) {
+                               $ofs_delta -= $_->{OFS_DELTA};
+                       }
+                       next;
+               }
+               if ($ofs_delta) {
+                       $n_ofs += $ofs_delta;
+                       $_->{TEXT}->[0] = format_hunk_header($o_ofs, $o_cnt,
+                                                            $n_ofs, $n_cnt);
+               }
+               # If this hunk was edited then adjust the offset delta
+               # to reflect the edit.
+               if ($_->{OFS_DELTA}) {
+                       $ofs_delta += $_->{OFS_DELTA};
+               }
                if (defined $last_o_ctx &&
                    $o_ofs <= $last_o_ctx &&
                    !$_->{DIRTY} &&
@@ -1004,6 +1045,30 @@ sub color_diff {
 marked for applying."),
 );
 
+sub recount_edited_hunk {
+       local $_;
+       my ($oldtext, $newtext) = @_;
+       my ($o_cnt, $n_cnt) = (0, 0);
+       for (@{$newtext}[1..$#{$newtext}]) {
+               my $mode = substr($_, 0, 1);
+               if ($mode eq '-') {
+                       $o_cnt++;
+               } elsif ($mode eq '+') {
+                       $n_cnt++;
+               } elsif ($mode eq ' ') {
+                       $o_cnt++;
+                       $n_cnt++;
+               }
+       }
+       my ($o_ofs, undef, $n_ofs, undef) =
+                                       parse_hunk_header($newtext->[0]);
+       $newtext->[0] = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
+       my (undef, $orig_o_cnt, undef, $orig_n_cnt) =
+                                       parse_hunk_header($oldtext->[0]);
+       # Return the change in the number of lines inserted by this hunk
+       return $orig_o_cnt - $orig_n_cnt - $o_cnt + $n_cnt;
+}
+
 sub edit_hunk_manually {
        my ($oldtext) = @_;
 
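recount_edited_hunk() recomputes an edited hunk's old and new line counts from its '-', '+' and ' ' lines, rewrites the header accordingly, and returns how many more lines the edit adds than the original hunk did; presumably this is also why run_git_apply() no longer passes --recount. A worked example: a hunk whose header was "@@ -10,5 +10,5 @@", edited to carry two extra '+' lines, gets new counts 5/7, its header becomes "@@ -10,5 +10,7 @@", and the function returns (5 - 5) - 5 + 7 = 2. coalesce_overlapping_hunks() accumulates these deltas (skipped hunks contribute o_cnt - n_cnt, since their changes will never be applied) and shifts the '+' offsets of later hunks so the headers handed to git apply stay self-consistent.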
@@ -1102,25 +1167,32 @@ sub prompt_yesno {
 }
 
 sub edit_hunk_loop {
-       my ($head, $hunk, $ix) = @_;
-       my $text = $hunk->[$ix]->{TEXT};
+       my ($head, $hunks, $ix) = @_;
+       my $hunk = $hunks->[$ix];
+       my $text = $hunk->{TEXT};
 
        while (1) {
-               $text = edit_hunk_manually($text);
-               if (!defined $text) {
+               my $newtext = edit_hunk_manually($text);
+               if (!defined $newtext) {
                        return undef;
                }
                my $newhunk = {
-                       TEXT => $text,
-                       TYPE => $hunk->[$ix]->{TYPE},
+                       TEXT => $newtext,
+                       TYPE => $hunk->{TYPE},
                        USE => 1,
                        DIRTY => 1,
                };
+               $newhunk->{OFS_DELTA} = recount_edited_hunk($text, $newtext);
+               # If this hunk has already been edited then add the
+               # offset delta of the previous edit to get the real
+               # delta from the original unedited hunk.
+               $hunk->{OFS_DELTA} and
+                               $newhunk->{OFS_DELTA} += $hunk->{OFS_DELTA};
                if (diff_applies($head,
-                                @{$hunk}[0..$ix-1],
+                                @{$hunks}[0..$ix-1],
                                 $newhunk,
-                                @{$hunk}[$ix+1..$#{$hunk}])) {
-                       $newhunk->{DISPLAY} = [color_diff(@{$text})];
+                                @{$hunks}[$ix+1..$#{$hunks}])) {
+                       $newhunk->{DISPLAY} = [color_diff(@{$newtext})];
                        return $newhunk;
                }
                else {
@@ -1184,7 +1256,13 @@ sub edit_hunk_loop {
 );
 
 sub help_patch_cmd {
-       print colored $help_color, __($help_patch_modes{$patch_mode}), "\n", __ <<EOF ;
+       local $_;
+       my $other = $_[0] . ",?";
+       print colored $help_color, __($help_patch_modes{$patch_mode}), "\n",
+               map { "$_\n" } grep {
+                       my $c = quotemeta(substr($_, 0, 1));
+                       $other =~ /,$c/
+               } split "\n", __ <<EOF ;
 g - select a hunk to go to
 / - search for a hunk matching the given regex
 j - leave this hunk undecided, see next undecided hunk
@@ -1302,39 +1380,39 @@ sub display_hunks {
 
 my %patch_update_prompt_modes = (
        stage => {
-               mode => N__("Stage mode change [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Stage deletion [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Stage this hunk [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Stage mode change [y,n,q,a,d%s,?]? "),
+               deletion => N__("Stage deletion [y,n,q,a,d%s,?]? "),
+               hunk => N__("Stage this hunk [y,n,q,a,d%s,?]? "),
        },
        stash => {
-               mode => N__("Stash mode change [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Stash deletion [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Stash this hunk [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Stash mode change [y,n,q,a,d%s,?]? "),
+               deletion => N__("Stash deletion [y,n,q,a,d%s,?]? "),
+               hunk => N__("Stash this hunk [y,n,q,a,d%s,?]? "),
        },
        reset_head => {
-               mode => N__("Unstage mode change [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Unstage deletion [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Unstage this hunk [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Unstage mode change [y,n,q,a,d%s,?]? "),
+               deletion => N__("Unstage deletion [y,n,q,a,d%s,?]? "),
+               hunk => N__("Unstage this hunk [y,n,q,a,d%s,?]? "),
        },
        reset_nothead => {
-               mode => N__("Apply mode change to index [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Apply deletion to index [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Apply this hunk to index [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Apply mode change to index [y,n,q,a,d%s,?]? "),
+               deletion => N__("Apply deletion to index [y,n,q,a,d%s,?]? "),
+               hunk => N__("Apply this hunk to index [y,n,q,a,d%s,?]? "),
        },
        checkout_index => {
-               mode => N__("Discard mode change from worktree [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Discard deletion from worktree [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Discard this hunk from worktree [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Discard mode change from worktree [y,n,q,a,d%s,?]? "),
+               deletion => N__("Discard deletion from worktree [y,n,q,a,d%s,?]? "),
+               hunk => N__("Discard this hunk from worktree [y,n,q,a,d%s,?]? "),
        },
        checkout_head => {
-               mode => N__("Discard mode change from index and worktree [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Discard deletion from index and worktree [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Discard mode change from index and worktree [y,n,q,a,d%s,?]? "),
+               deletion => N__("Discard deletion from index and worktree [y,n,q,a,d%s,?]? "),
+               hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d%s,?]? "),
        },
        checkout_nothead => {
-               mode => N__("Apply mode change to index and worktree [y,n,q,a,d,/%s,?]? "),
-               deletion => N__("Apply deletion to index and worktree [y,n,q,a,d,/%s,?]? "),
-               hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d,/%s,?]? "),
+               mode => N__("Apply mode change to index and worktree [y,n,q,a,d%s,?]? "),
+               deletion => N__("Apply deletion to index and worktree [y,n,q,a,d%s,?]? "),
+               hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d%s,?]? "),
        },
 );
 
@@ -1390,7 +1468,7 @@ sub patch_update_file {
                        $other .= ',J';
                }
                if ($num > 1) {
-                       $other .= ',g';
+                       $other .= ',g,/';
                }
                for ($i = 0; $i < $num; $i++) {
                        if (!defined $hunk[$i]{USE}) {
@@ -1431,8 +1509,12 @@ sub patch_update_file {
                                }
                                next;
                        }
-                       elsif ($other =~ /g/ && $line =~ /^g(.*)/) {
+                       elsif ($line =~ /^g(.*)/) {
                                my $response = $1;
+                               unless ($other =~ /g/) {
+                                       error_msg __("No other hunks to goto\n");
+                                       next;
+                               }
                                my $no = $ix > 10 ? $ix - 10 : 0;
                                while ($response eq '') {
                                        $no = display_hunks(\@hunk, $no);
@@ -1478,6 +1560,10 @@ sub patch_update_file {
                        }
                        elsif ($line =~ m|^/(.*)|) {
                                my $regex = $1;
+                               unless ($other =~ m|/|) {
+                                       error_msg __("No other hunks to search\n");
+                                       next;
+                               }
                                if ($1 eq "") {
                                        print colored $prompt_color, __("search for regex? ");
                                        $regex = <STDIN>;
@@ -1546,7 +1632,11 @@ sub patch_update_file {
                                        next;
                                }
                        }
-                       elsif ($other =~ /s/ && $line =~ /^s/) {
+                       elsif ($line =~ /^s/) {
+                               unless ($other =~ /s/) {
+                                       error_msg __("Sorry, cannot split this hunk\n");
+                                       next;
+                               }
                                my @split = split_hunk($hunk[$ix]{TEXT}, $hunk[$ix]{DISPLAY});
                                if (1 < @split) {
                                        print colored $header_color, sprintf(
@@ -1558,7 +1648,11 @@ sub patch_update_file {
                                $num = scalar @hunk;
                                next;
                        }
-                       elsif ($other =~ /e/ && $line =~ /^e/) {
+                       elsif ($line =~ /^e/) {
+                               unless ($other =~ /e/) {
+                                       error_msg __("Sorry, cannot edit this hunk\n");
+                                       next;
+                               }
                                my $newhunk = edit_hunk_loop($head, \@hunk, $ix);
                                if (defined $newhunk) {
                                        splice @hunk, $ix, 1, $newhunk;
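The prompt strings drop the hard-coded ',/' because single-key commands are now offered conditionally: 'g' (goto) and '/' (search) only when there is more than one hunk, 's' and 'e' only when the current hunk can actually be split or edited, and the '?' help text is filtered down to the keys currently on offer. Typing a key that is not available prints a specific explanation ("No other hunks to goto", "Sorry, cannot split this hunk", and so on).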
index 68b2ad531ea6f9cf1127a0e5986c523fd86b0967..07e383257b4985f7400f167d683a5fb692237d93 100644 (file)
@@ -826,8 +826,8 @@ extern ssize_t xpread(int fd, void *buf, size_t len, off_t offset);
 extern int xdup(int fd);
 extern FILE *xfopen(const char *path, const char *mode);
 extern FILE *xfdopen(int fd, const char *mode);
-extern int xmkstemp(char *template);
-extern int xmkstemp_mode(char *template, int mode);
+extern int xmkstemp(char *temp_filename);
+extern int xmkstemp_mode(char *temp_filename, int mode);
 extern char *xgetcwd(void);
 extern FILE *fopen_for_writing(const char *path);
 extern FILE *fopen_or_warn(const char *path, const char *mode);
index 2d8df831722913093a46e9325796b85f67a97073..b31613cb8aa8decd3f808d5d29e047243fa1eac6 100755 (executable)
@@ -601,7 +601,9 @@ ($)
        my ($d) = @_;
        m#(\d{2,4})/(\d\d)/(\d\d)\s(\d\d):(\d\d)(?::(\d\d))?#
                or die "Unparseable date: $d\n";
-       my $y=$1; $y-=1900 if $y>1900;
+       my $y=$1;
+       $y+=100 if $y<70;
+       $y+=1900 if $y<1000;
        return timegm($6||0,$5,$4,$3,$2-1,$y);
 }
 
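The date parser now windows two-digit years instead of only normalizing four-digit ones: a year below 70 first gains 100, and any value still below 1000 then gains 1900, so "05" becomes 2005, "99" becomes 1999, and "2018" passes through unchanged before the fields reach timegm().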
index 1b7e4b2cdbdf36e090c4b1f531dcc48222d8f8b1..2587a01b9b4fa0344c695cb635e6eed26257e9f8 100755 (executable)
@@ -310,7 +310,7 @@ git rev-list --reverse --topo-order --default HEAD \
        die "Could not get the commits"
 commits=$(wc -l <../revs | tr -d " ")
 
-test $commits -eq 0 && die "Found nothing to rewrite"
+test $commits -eq 0 && die_with_status 2 "Found nothing to rewrite"
 
 # Rewrite the commits
 report_progress ()
@@ -627,12 +627,12 @@ then
                                print H "$_:$f\n" or die;
                        }
                        close(H) or die;' || die "Unable to save state")
-       state_tree=$(/bin/echo -e "100644 blob $state_blob\tfilter.map" | git mktree)
+       state_tree=$(printf '100644 blob %s\tfilter.map\n' "$state_blob" | git mktree)
        if test -n "$state_commit"
        then
-               state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" -p "$state_commit")
+               state_commit=$(echo "Sync" | git commit-tree "$state_tree" -p "$state_commit")
        else
-               state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" )
+               state_commit=$(echo "Sync" | git commit-tree "$state_tree" )
        fi
        git update-ref "$state_branch" "$state_commit"
 fi
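Two robustness fixes: dying with status 2 lets callers of git filter-branch tell "found nothing to rewrite" apart from other failures, and printf replaces /bin/echo -e when building the state-branch tree and commits, since echo's treatment of -e and backslash escapes is not portable across shells and platforms.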
index 14c50782e096966b860d49cbaed7658e9f56958f..be3f068922c5a3ba0d1112f3b8b1cf1228567d2e 100644 (file)
@@ -27,6 +27,9 @@ skip)
        move_to_original_branch
        return
        ;;
+show-current-patch)
+       exec git am --show-current-patch
+       ;;
 esac
 
 if test -z "$rebase_root"
@@ -46,6 +49,7 @@ then
        # makes this easy
        git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \
                $allow_rerere_autoupdate --right-only "$revisions" \
+               $allow_empty_message \
                ${restrict_revision+^$restrict_revision}
        ret=$?
 else
index d47bd29593ad8711448293f3b6bf2e059323ff78..331c8dfeac3cac2fd2d9d9287d5c487669fe80a0 100644 (file)
@@ -199,12 +199,14 @@ make_patch () {
 
 die_with_patch () {
        echo "$1" > "$state_dir"/stopped-sha
+       git update-ref REBASE_HEAD "$1"
        make_patch "$1"
        die "$2"
 }
 
 exit_with_patch () {
        echo "$1" > "$state_dir"/stopped-sha
+       git update-ref REBASE_HEAD "$1"
        make_patch $1
        git rev-parse --verify HEAD > "$amend"
        gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
@@ -281,7 +283,7 @@ pick_one () {
 
        test -d "$rewritten" &&
                pick_one_preserving_merges "$@" && return
-       output eval git cherry-pick $allow_rerere_autoupdate \
+       output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
                        ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
                        "$strategy_args" $empty_args $ff "$@"
 
@@ -396,7 +398,7 @@ pick_one_preserving_merges () {
                                        --sq-quote "$gpg_sign_opt")} \
                                $allow_rerere_autoupdate "$merge_args" \
                                "$strategy_args" \
-                               -m $(git rev-parse --sq-quote "$msg_content") \
+                               -m "$(git rev-parse --sq-quote "$msg_content")" \
                                "$new_parents"
                        then
                                printf "%s\n" "$msg_content" > "$GIT_DIR"/MERGE_MSG
@@ -406,6 +408,7 @@ pick_one_preserving_merges () {
                        ;;
                *)
                        output eval git cherry-pick $allow_rerere_autoupdate \
+                               $allow_empty_message \
                                ${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
                                "$strategy_args" "$@" ||
                                die_with_patch $sha1 "$(eval_gettext "Could not pick \$sha1")"
@@ -559,7 +562,8 @@ do_next () {
 
                mark_action_done
                do_pick $sha1 "$rest"
-               git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} || {
+               git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} \
+                       $allow_empty_message || {
                        warn "$(eval_gettext "\
 Could not amend commit after successfully picking \$sha1... \$rest
 This is most likely due to an empty commit message, or the pre-commit hook
@@ -607,7 +611,7 @@ you are able to reword the commit.")"
                        # This is an intermediate commit; its message will only be
                        # used in case of trouble.  So use the long version:
                        do_with_author output git commit --amend --no-verify -F "$squash_msg" \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                               ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                die_failed_squash $sha1 "$rest"
                        ;;
                *)
@@ -615,13 +619,13 @@ you are able to reword the commit.")"
                        if test -f "$fixup_msg"
                        then
                                do_with_author git commit --amend --no-verify -F "$fixup_msg" \
-                                       ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                        die_failed_squash $sha1 "$rest"
                        else
                                cp "$squash_msg" "$GIT_DIR"/SQUASH_MSG || exit
                                rm -f "$GIT_DIR"/MERGE_MSG
                                do_with_author git commit --amend --no-verify -F "$GIT_DIR"/SQUASH_MSG -e \
-                                       ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                                       ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                        die_failed_squash $sha1 "$rest"
                        fi
                        rm -f "$squash_msg" "$fixup_msg"
@@ -754,7 +758,8 @@ case "$action" in
 continue)
        if test ! -d "$rewritten"
        then
-               exec git rebase--helper ${force_rebase:+--no-ff} --continue
+               exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                       --continue
        fi
        # do we have anything to commit?
        if git diff-index --cached --quiet HEAD --
@@ -794,11 +799,11 @@ In both cases, once you're done, continue with:
 You have uncommitted changes in your working tree. Please commit them
 first and then run 'git rebase --continue' again.")"
                        do_with_author git commit --amend --no-verify -F "$msg" -e \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                               ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                die "$(gettext "Could not commit staged changes.")"
                else
                        do_with_author git commit --no-verify -F "$msg" -e \
-                               ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+                               ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
                                die "$(gettext "Could not commit staged changes.")"
                fi
        fi
@@ -817,7 +822,8 @@ skip)
 
        if test ! -d "$rewritten"
        then
-               exec git rebase--helper ${force_rebase:+--no-ff} --continue
+               exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+                       --continue
        fi
        do_rest
        return 0
@@ -840,6 +846,9 @@ To continue rebase after editing, run:
 
        exit
        ;;
+show-current-patch)
+       exec git show REBASE_HEAD --
+       ;;
 esac
 
 comment_for_reflog start
@@ -855,6 +864,7 @@ fi
 
 orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
 mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
 
 : > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
 write_basic_state
@@ -1016,7 +1026,8 @@ checkout_onto
 if test -z "$rebase_root" && test ! -d "$rewritten"
 then
        require_clean_work_tree "rebase"
-       exec git rebase--helper ${force_rebase:+--no-ff} --continue
+       exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+               --continue
 fi
 do_rest
 
index 06a4723d4db3db74ea17ace60d824e83cdee25e9..ceb715453cc9eba0b6e91abfd2ea3863e74f3e05 100644 (file)
@@ -27,7 +27,8 @@ continue_merge () {
        cmt=$(cat "$state_dir/current")
        if ! git diff-index --quiet --ignore-submodules HEAD --
        then
-               if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} --no-verify -C "$cmt"
+               if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \
+                       --no-verify -C "$cmt"
                then
                        echo "Commit failed, please do not call \"git commit\""
                        echo "directly, but instead do one of the following: "
@@ -57,6 +58,7 @@ call_merge () {
        echo "$msgnum" >"$state_dir/msgnum"
        cmt="$(cat "$state_dir/cmt.$msgnum")"
        echo "$cmt" > "$state_dir/current"
+       git update-ref REBASE_HEAD "$cmt"
        hd=$(git rev-parse --verify HEAD)
        cmt_name=$(git symbolic-ref HEAD 2> /dev/null || echo HEAD)
        eval GITHEAD_$cmt='"${cmt_name##refs/heads/}~$(($end - $msgnum))"'
@@ -137,11 +139,15 @@ skip)
        finish_rb_merge
        return
        ;;
+show-current-patch)
+       exec git show REBASE_HEAD --
+       ;;
 esac
 
 mkdir -p "$state_dir"
 echo "$onto_name" > "$state_dir/onto_name"
 write_basic_state
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
 
 msgnum=0
 for cmt in $(git rev-list --reverse --no-merges "$revisions")
index fd72a35c65b43537b292445b87ffb7e682cc076a..a1f6e5de6a3ed1fe9a6217a136611682f3db6582 100755 (executable)
@@ -24,6 +24,7 @@ m,merge!           use merging strategies to rebase
 i,interactive!     let the user edit the list of commits to rebase
 x,exec=!           add exec lines after each commit of the editable list
 k,keep-empty      preserve empty commits during rebase
+allow-empty-message allow rebasing commits with empty messages
 f,force-rebase!    force rebase even if branch is up to date
 X,strategy-option=! pass the argument through to the merge strategy
 stat!              display a diffstat of what changed upstream
@@ -45,6 +46,7 @@ abort!             abort and check out the original branch
 skip!              skip current patch and continue
 edit-todo!         edit the todo list during an interactive rebase
 quit!              abort but keep HEAD where it is
+show-current-patch! show the patch file being applied or merged
 "
 . git-sh-setup
 set_reflog_action rebase
@@ -89,6 +91,7 @@ action=
 preserve_merges=
 autosquash=
 keep_empty=
+allow_empty_message=
 test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
 case "$(git config --bool commit.gpgsign)" in
 true)  gpg_sign_opt=-S ;;
@@ -181,6 +184,7 @@ You can run "git stash pop" or "git stash drop" at any time.
 }
 
 finish_rebase () {
+       rm -f "$(git rev-parse --git-path REBASE_HEAD)"
        apply_autostash &&
        { git gc --auto || true; } &&
        rm -rf "$state_dir"
@@ -245,7 +249,7 @@ do
        --verify)
                ok_to_skip_pre_rebase=
                ;;
-       --continue|--skip|--abort|--quit|--edit-todo)
+       --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
                test $total_argc -eq 2 || usage
                action=${1##--}
                ;;
@@ -262,6 +266,9 @@ do
        --keep-empty)
                keep_empty=yes
                ;;
+       --allow-empty-message)
+               allow_empty_message=--allow-empty-message
+               ;;
        --preserve-merges)
                preserve_merges=t
                test -z "$interactive_rebase" && interactive_rebase=implied
@@ -412,6 +419,10 @@ quit)
 edit-todo)
        run_specific_rebase
        ;;
+show-current-patch)
+       run_specific_rebase
+       die "BUG: run_specific_rebase is not supposed to return here"
+       ;;
 esac
 
 # Make sure no rebase is in progress
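
The rebase script hunks above add two user-visible behaviours: --allow-empty-message is threaded through to every underlying git commit / git rebase--helper invocation, and a new show-current-patch action simply runs "git show REBASE_HEAD --" against the REBASE_HEAD ref the scripts now maintain around each step. An illustrative invocation sketch (branch names are placeholders, not taken from the patch):

    # replay commits without stopping on empty commit messages
    git rebase --allow-empty-message --onto master topic~3 topic

    # while stopped on a conflict, inspect the commit being replayed
    git rebase --show-current-patch    # effectively: git show REBASE_HEAD --
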
index edcc6d34692b28575d9728c25a407782310a39fb..2fa7818ca9a8ac7363d17aef17b864f7361b3eb8 100755 (executable)
 use Term::ANSIColor;
 use File::Temp qw/ tempdir tempfile /;
 use File::Spec::Functions qw(catdir catfile);
-use Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
 use Cwd qw(abs_path cwd);
 use Git;
 use Git::I18N;
+use Net::Domain ();
+use Net::SMTP ();
+use Git::LoadCPAN::Mail::Address;
 
 Getopt::Long::Configure qw/ pass_through /;
 
@@ -56,6 +59,7 @@ sub usage {
     --[no-]cc               <str>  * Email Cc:
     --[no-]bcc              <str>  * Email Bcc:
     --subject               <str>  * Email "Subject:"
+    --reply-to              <str>  * Email "Reply-To:"
     --in-reply-to           <str>  * Email "In-Reply-To:"
     --[no-]xmailer                 * Add "X-Mailer:" header (default).
     --[no-]annotate                * Review each patch that will be sent in an editor.
@@ -166,13 +170,13 @@ sub format_2822_time {
 
 # Variables we fill in automatically, or via prompting:
 my (@to,$no_to,@initial_to,@cc,$no_cc,@initial_cc,@bcclist,$no_bcc,@xh,
-       $initial_reply_to,$initial_subject,@files,
+       $initial_in_reply_to,$reply_to,$initial_subject,@files,
        $author,$sender,$smtp_authpass,$annotate,$use_xmailer,$compose,$time);
 
 my $envelope_sender;
 
 # Example reply to:
-#$initial_reply_to = ''; #<20050203173208.GA23964@foobar.com>';
+#$initial_in_reply_to = ''; #<20050203173208.GA23964@foobar.com>';
 
 my $repo = eval { Git->repository() };
 my @repo = $repo ? ($repo) : ();
@@ -314,7 +318,8 @@ sub signal_handler {
     if !$help and $dump_aliases and @ARGV;
 $rc = GetOptions(
                    "sender|from=s" => \$sender,
-                    "in-reply-to=s" => \$initial_reply_to,
+                    "in-reply-to=s" => \$initial_in_reply_to,
+                   "reply-to=s" => \$reply_to,
                    "subject=s" => \$initial_subject,
                    "to=s" => \@initial_to,
                    "to-cmd=s" => \$to_cmd,
@@ -378,6 +383,10 @@ sub signal_handler {
 die __("Cannot run git format-patch from outside a repository\n")
        if $format_patch and not $repo;
 
+die __("`batch-size` and `relogin` must be specified together " .
+       "(via command-line or configuration option)\n")
+       if defined $relogin_delay and not defined $batch_size;
+
 # Now, let's fill any that aren't set in with defaults:
 
 sub read_config {
@@ -489,7 +498,7 @@ sub read_config {
 ($repocommitter) = Git::ident_person(@repo, 'committer');
 
 sub parse_address_line {
-       return Git::parse_mailboxes($_[0]);
+       return map { $_->format } Mail::Address->parse($_[0]);
 }
 
 sub split_addrs {
@@ -676,7 +685,8 @@ sub get_patch_subject {
 
        my $tpl_sender = $sender || $repoauthor || $repocommitter || '';
        my $tpl_subject = $initial_subject || '';
-       my $tpl_reply_to = $initial_reply_to || '';
+       my $tpl_in_reply_to = $initial_in_reply_to || '';
+       my $tpl_reply_to = $reply_to || '';
 
        print $c <<EOT1, Git::prefix_lines("GIT: ", __ <<EOT2), <<EOT3;
 From $tpl_sender # This line is ignored.
@@ -688,8 +698,9 @@ sub get_patch_subject {
 Clear the body content if you don't wish to send a summary.
 EOT2
 From: $tpl_sender
+Reply-To: $tpl_reply_to
 Subject: $tpl_subject
-In-Reply-To: $tpl_reply_to
+In-Reply-To: $tpl_in_reply_to
 
 EOT3
        for my $f (@files) {
@@ -703,57 +714,73 @@ sub get_patch_subject {
                do_edit($compose_filename);
        }
 
-       open my $c2, ">", $compose_filename . ".final"
-               or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!);
-
        open $c, "<", $compose_filename
                or die sprintf(__("Failed to open %s: %s"), $compose_filename, $!);
 
-       my $need_8bit_cte = file_has_nonascii($compose_filename);
-       my $in_body = 0;
-       my $summary_empty = 1;
        if (!defined $compose_encoding) {
                $compose_encoding = "UTF-8";
        }
-       while(<$c>) {
-               next if m/^GIT:/;
-               if ($in_body) {
-                       $summary_empty = 0 unless (/^\n$/);
-               } elsif (/^\n$/) {
-                       $in_body = 1;
-                       if ($need_8bit_cte) {
-                               print $c2 "MIME-Version: 1.0\n",
-                                        "Content-Type: text/plain; ",
-                                          "charset=$compose_encoding\n",
-                                        "Content-Transfer-Encoding: 8bit\n";
-                       }
-               } elsif (/^MIME-Version:/i) {
-                       $need_8bit_cte = 0;
-               } elsif (/^Subject:\s*(.+)\s*$/i) {
-                       $initial_subject = $1;
-                       my $subject = $initial_subject;
-                       $_ = "Subject: " .
-                               quote_subject($subject, $compose_encoding) .
-                               "\n";
-               } elsif (/^In-Reply-To:\s*(.+)\s*$/i) {
-                       $initial_reply_to = $1;
-                       next;
-               } elsif (/^From:\s*(.+)\s*$/i) {
-                       $sender = $1;
-                       next;
-               } elsif (/^(?:To|Cc|Bcc):/i) {
-                       print __("To/Cc/Bcc fields are not interpreted yet, they have been ignored\n");
-                       next;
+
+       my %parsed_email;
+       while (my $line = <$c>) {
+               next if $line =~ m/^GIT:/;
+               parse_header_line($line, \%parsed_email);
+               if ($line =~ /^$/) {
+                       $parsed_email{'body'} = filter_body($c);
                }
-               print $c2 $_;
        }
        close $c;
-       close $c2;
 
-       if ($summary_empty) {
+       open my $c2, ">", $compose_filename . ".final"
+       or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!);
+
+
+       if ($parsed_email{'From'}) {
+               $sender = delete($parsed_email{'From'});
+       }
+       if ($parsed_email{'In-Reply-To'}) {
+               $initial_in_reply_to = delete($parsed_email{'In-Reply-To'});
+       }
+       if ($parsed_email{'Reply-To'}) {
+               $reply_to = delete($parsed_email{'Reply-To'});
+       }
+       if ($parsed_email{'Subject'}) {
+               $initial_subject = delete($parsed_email{'Subject'});
+               print $c2 "Subject: " .
+                       quote_subject($initial_subject, $compose_encoding) .
+                       "\n";
+       }
+
+       if ($parsed_email{'MIME-Version'}) {
+               print $c2 "MIME-Version: $parsed_email{'MIME-Version'}\n",
+                               "Content-Type: $parsed_email{'Content-Type'};\n",
+                               "Content-Transfer-Encoding: $parsed_email{'Content-Transfer-Encoding'}\n";
+               delete($parsed_email{'MIME-Version'});
+               delete($parsed_email{'Content-Type'});
+               delete($parsed_email{'Content-Transfer-Encoding'});
+       } elsif (file_has_nonascii($compose_filename)) {
+               my $content_type = (delete($parsed_email{'Content-Type'}) or
+                       "text/plain; charset=$compose_encoding");
+               print $c2 "MIME-Version: 1.0\n",
+                       "Content-Type: $content_type\n",
+                       "Content-Transfer-Encoding: 8bit\n";
+       }
+       # Preserve unknown headers
+       foreach my $key (keys %parsed_email) {
+               next if $key eq 'body';
+               print $c2 "$key: $parsed_email{$key}";
+       }
+
+       if ($parsed_email{'body'}) {
+               print $c2 "\n$parsed_email{'body'}\n";
+               delete($parsed_email{'body'});
+       } else {
                print __("Summary email is empty, skipping it\n");
                $compose = -1;
        }
+
+       close $c2;
+
 } elsif ($annotate) {
        do_edit(@files);
 }
@@ -792,6 +819,32 @@ sub ask {
        return;
 }
 
+sub parse_header_line {
+       my $lines = shift;
+       my $parsed_line = shift;
+       my $addr_pat = join "|", qw(To Cc Bcc);
+
+       foreach (split(/\n/, $lines)) {
+               if (/^($addr_pat):\s*(.+)$/i) {
+                       $parsed_line->{$1} = [ parse_address_line($2) ];
+               } elsif (/^([^:]*):\s*(.+)\s*$/i) {
+                       $parsed_line->{$1} = $2;
+               }
+       }
+}
+
+sub filter_body {
+       my $c = shift;
+       my $body = "";
+       while (my $body_line = <$c>) {
+               if ($body_line !~ m/^GIT:/) {
+                       $body .= $body_line;
+               }
+       }
+       return $body;
+}
+
+
 my %broken_encoding;
 
 sub file_declares_8bit_cte {
@@ -872,16 +925,22 @@ sub expand_one_alias {
 @initial_cc = process_address_list(@initial_cc);
 @bcclist = process_address_list(@bcclist);
 
-if ($thread && !defined $initial_reply_to && $prompting) {
-       $initial_reply_to = ask(
+if ($thread && !defined $initial_in_reply_to && $prompting) {
+       $initial_in_reply_to = ask(
                __("Message-ID to be used as In-Reply-To for the first email (if any)? "),
                default => "",
                valid_re => qr/\@.*\./, confirm_only => 1);
 }
-if (defined $initial_reply_to) {
-       $initial_reply_to =~ s/^\s*<?//;
-       $initial_reply_to =~ s/>?\s*$//;
-       $initial_reply_to = "<$initial_reply_to>" if $initial_reply_to ne '';
+if (defined $initial_in_reply_to) {
+       $initial_in_reply_to =~ s/^\s*<?//;
+       $initial_in_reply_to =~ s/>?\s*$//;
+       $initial_in_reply_to = "<$initial_in_reply_to>" if $initial_in_reply_to ne '';
+}
+
+if (defined $reply_to) {
+       $reply_to =~ s/^\s+|\s+$//g;
+       ($reply_to) = expand_aliases($reply_to);
+       $reply_to = sanitize_address($reply_to);
 }
 
 if (!defined $smtp_server) {
@@ -901,7 +960,7 @@ sub expand_one_alias {
 }
 
 # Variables we set as part of the loop over files
-our ($message_id, %mail, $subject, $reply_to, $references, $message,
+our ($message_id, %mail, $subject, $in_reply_to, $references, $message,
        $needs_confirm, $message_num, $ask_default);
 
 sub extract_valid_address {
@@ -1142,10 +1201,8 @@ sub valid_fqdn {
 sub maildomain_net {
        my $maildomain;
 
-       if (eval { require Net::Domain; 1 }) {
-               my $domain = Net::Domain::domainname();
-               $maildomain = $domain if valid_fqdn($domain);
-       }
+       my $domain = Net::Domain::domainname();
+       $maildomain = $domain if valid_fqdn($domain);
 
        return $maildomain;
 }
@@ -1153,17 +1210,15 @@ sub maildomain_net {
 sub maildomain_mta {
        my $maildomain;
 
-       if (eval { require Net::SMTP; 1 }) {
-               for my $host (qw(mailhost localhost)) {
-                       my $smtp = Net::SMTP->new($host);
-                       if (defined $smtp) {
-                               my $domain = $smtp->domain;
-                               $smtp->quit;
+       for my $host (qw(mailhost localhost)) {
+               my $smtp = Net::SMTP->new($host);
+               if (defined $smtp) {
+                       my $domain = $smtp->domain;
+                       $smtp->quit;
 
-                               $maildomain = $domain if valid_fqdn($domain);
+                       $maildomain = $domain if valid_fqdn($domain);
 
-                               last if $maildomain;
-                       }
+                       last if $maildomain;
                }
        }
 
@@ -1310,11 +1365,14 @@ sub send_message {
        if ($use_xmailer) {
                $header .= "X-Mailer: git-send-email $gitversion\n";
        }
-       if ($reply_to) {
+       if ($in_reply_to) {
 
-               $header .= "In-Reply-To: $reply_to\n";
+               $header .= "In-Reply-To: $in_reply_to\n";
                $header .= "References: $references\n";
        }
+       if ($reply_to) {
+               $header .= "Reply-To: $reply_to\n";
+       }
        if (@xh) {
                $header .= join("\n", @xh) . "\n";
        }
@@ -1489,8 +1547,8 @@ sub send_message {
        return 1;
 }
 
-$reply_to = $initial_reply_to;
-$references = $initial_reply_to || '';
+$in_reply_to = $initial_in_reply_to;
+$references = $initial_in_reply_to || '';
 $subject = $initial_subject;
 $message_num = 0;
 
@@ -1700,9 +1758,9 @@ sub send_message {
 
        # set up for the next message
        if ($thread && $message_was_sent &&
-               ($chain_reply_to || !defined $reply_to || length($reply_to) == 0 ||
+               ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 ||
                $message_num == 1)) {
-               $reply_to = $message_id;
+               $in_reply_to = $message_id;
                if (length $references > 0) {
                        $references .= "\n $message_id";
                } else {
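
In git-send-email.perl, the former $reply_to/$initial_reply_to variables are renamed to $in_reply_to/$initial_in_reply_to so that the genuinely new --reply-to option (and the Reply-To: line in the compose template) can coexist with --in-reply-to, and a guard now requires batch-size whenever relogin is given. A hedged usage sketch with placeholder addresses (the In-Reply-To value is the sample Message-ID from the script's own comment):

    # ask recipients to reply to a list address instead of the sender
    git send-email --to=list@example.org \
            --reply-to="Dev List <list@example.org>" \
            --in-reply-to="<20050203173208.GA23964@foobar.com>" \
            outgoing/*.patch
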
index fc8f8ae6401dddcceaa82f9e9c748f5c185536d6..94793c1a913abf569ff9101d935c355b9eb27648 100755 (executable)
@@ -39,7 +39,7 @@ fi
 no_changes () {
        git diff-index --quiet --cached HEAD --ignore-submodules -- "$@" &&
        git diff-files --quiet --ignore-submodules -- "$@" &&
-       (test -z "$untracked" || test -z "$(untracked_files)")
+       (test -z "$untracked" || test -z "$(untracked_files "$@")")
 }
 
 untracked_files () {
@@ -315,16 +315,18 @@ push_stash () {
        if test -z "$patch_mode"
        then
                test "$untracked" = "all" && CLEAN_X_OPTION=-x || CLEAN_X_OPTION=
-               if test -n "$untracked"
+               if test -n "$untracked" && test $# = 0
                then
-                       git clean --force --quiet -d $CLEAN_X_OPTION -- "$@"
+                       git clean --force --quiet -d $CLEAN_X_OPTION
                fi
 
                if test $# != 0
                then
-                       git add -u -- "$@" |
-                       git checkout-index -z --force --stdin
-                       git diff-index -p --cached --binary HEAD -- "$@" | git apply --index -R
+                       test -z "$untracked" && UPDATE_OPTION="-u" || UPDATE_OPTION=
+                       test "$untracked" = "all" && FORCE_OPTION="--force" || FORCE_OPTION=
+                       git add $UPDATE_OPTION $FORCE_OPTION -- "$@"
+                       git diff-index -p --cached --binary HEAD -- "$@" |
+                       git apply --index -R
                else
                        git reset --hard -q
                fi
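
The git-stash.sh hunks make push_stash honour pathspecs consistently: untracked_files now receives "$@", git clean is only used for a whole-tree stash, and a pathspec-limited stash stages the named paths (adding untracked ones when requested) before reverting them with git apply -R. An illustrative invocation, with placeholder paths:

    # stash only the named paths, including their untracked files
    git stash push --include-untracked -- src/ docs/README
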
index 156255a9e56e7a5930f3f652ee38994064db03ea..24914963ca23c837e0cc46ca2dc0fa46bf9886a6 100755 (executable)
@@ -428,60 +428,7 @@ cmd_deinit()
                shift
        done
 
-       if test -n "$deinit_all" && test "$#" -ne 0
-       then
-               echo >&2 "$(eval_gettext "pathspec and --all are incompatible")"
-               usage
-       fi
-       if test $# = 0 && test -z "$deinit_all"
-       then
-               die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")"
-       fi
-
-       {
-               git submodule--helper list --prefix "$wt_prefix" "$@" ||
-               echo "#unmatched" $?
-       } |
-       while read -r mode sha1 stage sm_path
-       do
-               die_if_unmatched "$mode" "$sha1"
-               name=$(git submodule--helper name "$sm_path") || exit
-
-               displaypath=$(git submodule--helper relative-path "$sm_path" "$wt_prefix")
-
-               # Remove the submodule work tree (unless the user already did it)
-               if test -d "$sm_path"
-               then
-                       # Protect submodules containing a .git directory
-                       if test -d "$sm_path/.git"
-                       then
-                               die "$(eval_gettext "\
-Submodule work tree '\$displaypath' contains a .git directory
-(use 'rm -rf' if you really want to remove it including all of its history)")"
-                       fi
-
-                       if test -z "$force"
-                       then
-                               git rm -qn "$sm_path" ||
-                               die "$(eval_gettext "Submodule work tree '\$displaypath' contains local modifications; use '-f' to discard them")"
-                       fi
-                       rm -rf "$sm_path" &&
-                       say "$(eval_gettext "Cleared directory '\$displaypath'")" ||
-                       say "$(eval_gettext "Could not remove submodule work tree '\$displaypath'")"
-               fi
-
-               mkdir "$sm_path" || say "$(eval_gettext "Could not create empty submodule directory '\$displaypath'")"
-
-               # Remove the .git/config entries (unless the user already did it)
-               if test -n "$(git config --get-regexp submodule."$name\.")"
-               then
-                       # Remove the whole section so we have a clean state when
-                       # the user later decides to init this submodule again
-                       url=$(git config submodule."$name".url)
-                       git config --remove-section submodule."$name" 2>/dev/null &&
-                       say "$(eval_gettext "Submodule '\$name' (\$url) unregistered for path '\$displaypath'")"
-               fi
-       done
+       git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@"
 }
 
 is_tip_reachable () (
@@ -1036,63 +983,8 @@ cmd_sync()
                        ;;
                esac
        done
-       cd_to_toplevel
-       {
-               git submodule--helper list --prefix "$wt_prefix" "$@" ||
-               echo "#unmatched" $?
-       } |
-       while read -r mode sha1 stage sm_path
-       do
-               die_if_unmatched "$mode" "$sha1"
-
-               # skip inactive submodules
-               if ! git submodule--helper is-active "$sm_path"
-               then
-                       continue
-               fi
-
-               name=$(git submodule--helper name "$sm_path")
-               url=$(git config -f .gitmodules --get submodule."$name".url)
-
-               # Possibly a url relative to parent
-               case "$url" in
-               ./*|../*)
-                       # rewrite foo/bar as ../.. to find path from
-                       # submodule work tree to superproject work tree
-                       up_path="$(printf '%s\n' "$sm_path" | sed "s/[^/][^/]*/../g")" &&
-                       # guarantee a trailing /
-                       up_path=${up_path%/}/ &&
-                       # path from submodule work tree to submodule origin repo
-                       sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") &&
-                       # path from superproject work tree to submodule origin repo
-                       super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit
-                       ;;
-               *)
-                       sub_origin_url="$url"
-                       super_config_url="$url"
-                       ;;
-               esac
-
-               displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
-               say "$(eval_gettext "Synchronizing submodule url for '\$displaypath'")"
-               git config submodule."$name".url "$super_config_url"
-
-               if test -e "$sm_path"/.git
-               then
-               (
-                       sanitize_submodule_env
-                       cd "$sm_path"
-                       remote=$(get_default_remote)
-                       git config remote."$remote".url "$sub_origin_url"
 
-                       if test -n "$recursive"
-                       then
-                               prefix="$prefix$sm_path/"
-                               eval cmd_sync
-                       fi
-               )
-               fi
-       done
+       git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@"
 }
 
 cmd_absorbgitdirs()
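
cmd_deinit and cmd_sync are reduced above to thin wrappers that delegate to the corresponding git submodule--helper subcommands; the porcelain interface itself is unchanged, for example:

    # same front-end behaviour, now backed by submodule--helper
    git submodule deinit --all --force
    git submodule sync --recursive
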
diff --git a/git.c b/git.c
index c870b9719c21b2db23ea7ab5d56fdeda483cb02e..ceaa58ef40e536f1290cce3ad1223004063e41a6 100644 (file)
--- a/git.c
+++ b/git.c
@@ -5,11 +5,11 @@
 #include "run-command.h"
 
 const char git_usage_string[] =
-       "git [--version] [--help] [-C <path>] [-c name=value]\n"
-       "           [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
-       "           [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
-       "           [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
-       "           <command> [<args>]";
+       N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n"
+          "           [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
+          "           [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
+          "           [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
+          "           <command> [<args>]");
 
 const char git_more_info_string[] =
        N_("'git help -a' and 'git help -g' list available subcommands and some\n"
@@ -92,7 +92,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--git-dir")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No directory given for --git-dir.\n" );
+                               fprintf(stderr, _("no directory given for --git-dir\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_DIR_ENVIRONMENT, (*argv)[1], 1);
@@ -106,7 +106,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--namespace")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No namespace given for --namespace.\n" );
+                               fprintf(stderr, _("no namespace given for --namespace\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_NAMESPACE_ENVIRONMENT, (*argv)[1], 1);
@@ -120,7 +120,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--work-tree")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No directory given for --work-tree.\n" );
+                               fprintf(stderr, _("no directory given for --work-tree\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
@@ -134,7 +134,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "--super-prefix")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No prefix given for --super-prefix.\n" );
+                               fprintf(stderr, _("no prefix given for --super-prefix\n" ));
                                usage(git_usage_string);
                        }
                        setenv(GIT_SUPER_PREFIX_ENVIRONMENT, (*argv)[1], 1);
@@ -156,7 +156,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "-c")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "-c expects a configuration string\n" );
+                               fprintf(stderr, _("-c expects a configuration string\n" ));
                                usage(git_usage_string);
                        }
                        git_config_push_parameter((*argv)[1]);
@@ -194,12 +194,12 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                } else if (!strcmp(cmd, "-C")) {
                        if (*argc < 2) {
-                               fprintf(stderr, "No directory given for -C.\n" );
+                               fprintf(stderr, _("no directory given for -C\n" ));
                                usage(git_usage_string);
                        }
                        if ((*argv)[1][0]) {
                                if (chdir((*argv)[1]))
-                                       die_errno("Cannot change to '%s'", (*argv)[1]);
+                                       die_errno("cannot change to '%s'", (*argv)[1]);
                                if (envchanged)
                                        *envchanged = 1;
                        }
@@ -209,7 +209,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                        list_builtins();
                        exit(0);
                } else {
-                       fprintf(stderr, "Unknown option: %s\n", cmd);
+                       fprintf(stderr, _("unknown option: %s\n"), cmd);
                        usage(git_usage_string);
                }
 
@@ -247,7 +247,7 @@ static int handle_alias(int *argcp, const char ***argv)
                        if (ret >= 0)   /* normal exit */
                                exit(ret);
 
-                       die_errno("While expanding alias '%s': '%s'",
+                       die_errno("while expanding alias '%s': '%s'",
                            alias_command, alias_string + 1);
                }
                count = split_cmdline(alias_string, &new_argv);
@@ -256,8 +256,8 @@ static int handle_alias(int *argcp, const char ***argv)
                            split_cmdline_strerror(count));
                option_count = handle_options(&new_argv, &count, &envchanged);
                if (envchanged)
-                       die("alias '%s' changes environment variables\n"
-                                "You can use '!git' in the alias to do this.",
+                       die("alias '%s' changes environment variables.\n"
+                                "You can use '!git' in the alias to do this",
                                 alias_command);
                memmove(new_argv - option_count, new_argv,
                                count * sizeof(char *));
@@ -389,7 +389,7 @@ static struct cmd_struct commands[] = {
        { "column", cmd_column, RUN_SETUP_GENTLY },
        { "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
        { "commit-tree", cmd_commit_tree, RUN_SETUP },
-       { "config", cmd_config, RUN_SETUP_GENTLY },
+       { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG },
        { "count-objects", cmd_count_objects, RUN_SETUP },
        { "credential", cmd_credential, RUN_SETUP_GENTLY },
        { "describe", cmd_describe, RUN_SETUP },
@@ -684,8 +684,8 @@ int cmd_main(int argc, const char **argv)
                if (errno != ENOENT)
                        break;
                if (was_alias) {
-                       fprintf(stderr, "Expansion of alias '%s' failed; "
-                               "'%s' is not a git command\n",
+                       fprintf(stderr, _("expansion of alias '%s' failed; "
+                                         "'%s' is not a git command\n"),
                                cmd, argv[0]);
                        exit(1);
                }
@@ -696,7 +696,7 @@ int cmd_main(int argc, const char **argv)
                        break;
        }
 
-       fprintf(stderr, "Failed to run command '%s': %s\n",
+       fprintf(stderr, _("failed to run command '%s': %s\n"),
                cmd, strerror(errno));
 
        return 1;
index 408f2859d3c42becc333b8c0e4ee6366de7fdb14..a58e6b3c44b0ef6175df1417a3ed6f7775bd9953 100644 (file)
@@ -29,12 +29,11 @@ Requirements
 ------------
 
  - Core git tools
- - Perl
+ - Perl 5.8
  - Perl modules: CGI, Encode, Fcntl, File::Find, File::Basename.
  - web server
 
 The following optional Perl modules are required for extra features
- - Digest::MD5 - for gravatar support
  - CGI::Fast and FCGI - for running gitweb as FastCGI script
  - HTML::TagCloud - for fancy tag cloud in project list view
  - HTTP::Date or Time::ParseDate - to support If-Modified-Since for feeds
index 2417057f2bc61a98e68dc6c817e456a21bf6044e..2594a4badb3d7b942b28b57ca036650328a1b050 100755 (executable)
@@ -20,6 +20,8 @@
 use File::Find qw();
 use File::Basename qw(basename);
 use Time::HiRes qw(gettimeofday tv_interval);
+use Digest::MD5 qw(md5_hex);
+
 binmode STDOUT, ':utf8';
 
 if (!defined($CGI::VERSION) || $CGI::VERSION < 4.08) {
@@ -490,7 +492,6 @@ sub evaluate_uri {
        # Currently available providers are gravatar and picon.
        # If an unknown provider is specified, the feature is disabled.
 
-       # Gravatar depends on Digest::MD5.
        # Picon currently relies on the indiana.edu database.
 
        # To enable system wide have in $GITWEB_CONFIG
@@ -1166,18 +1167,8 @@ sub configure_gitweb_features {
        our @snapshot_fmts = gitweb_get_feature('snapshot');
        @snapshot_fmts = filter_snapshot_fmts(@snapshot_fmts);
 
-       # check that the avatar feature is set to a known provider name,
-       # and for each provider check if the dependencies are satisfied.
-       # if the provider name is invalid or the dependencies are not met,
-       # reset $git_avatar to the empty string.
        our ($git_avatar) = gitweb_get_feature('avatar');
-       if ($git_avatar eq 'gravatar') {
-               $git_avatar = '' unless (eval { require Digest::MD5; 1; });
-       } elsif ($git_avatar eq 'picon') {
-               # no dependencies
-       } else {
-               $git_avatar = '';
-       }
+       $git_avatar = '' unless $git_avatar =~ /^(?:gravatar|picon)$/s;
 
        our @extra_branch_refs = gitweb_get_feature('extra-branch-refs');
        @extra_branch_refs = filter_and_validate_refs (@extra_branch_refs);
@@ -2167,7 +2158,7 @@ sub gravatar_url {
        my $size = shift;
        $avatar_cache{$email} ||=
                "//www.gravatar.com/avatar/" .
-                       Digest::MD5::md5_hex($email) . "?s=";
+                       md5_hex($email) . "?s=";
        return $avatar_cache{$email} . $size;
 }
 
diff --git a/grep.c b/grep.c
index 3d7cd0e96f1ee160a66dd500c58d4026bf24e34c..65b90c10a38c136d2ce9a42fee5c543cd7d4717d 100644 (file)
--- a/grep.c
+++ b/grep.c
@@ -18,6 +18,11 @@ static void std_output(struct grep_opt *opt, const void *buf, size_t size)
        fwrite(buf, size, 1, stdout);
 }
 
+static void color_set(char *dst, const char *color_bytes)
+{
+       xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
+}
+
 /*
  * Initialize the grep_defaults template with hardcoded defaults.
  * We could let the compiler do this, but without C99 initializers
@@ -2010,7 +2015,7 @@ static int grep_source_load_oid(struct grep_source *gs)
        enum object_type type;
 
        grep_read_lock();
-       gs->buf = read_sha1_file(gs->identifier, &type, &gs->size);
+       gs->buf = read_object_file(gs->identifier, &type, &gs->size);
        grep_read_unlock();
 
        if (!gs->buf)
diff --git a/hash.h b/hash.h
index 7d7a864f5dd43bb53ee6b31d391df3e15cbdbb08..7c8238bc2ebfded778351b58c16a3854617082c6 100644 (file)
--- a/hash.h
+++ b/hash.h
 #include "block-sha1/sha1.h"
 #endif
 
+#ifndef platform_SHA_CTX
+/*
+ * platform's underlying implementation of SHA-1; could be OpenSSL,
+ * blk_SHA, Apple CommonCrypto, etc...  Note that the relevant
+ * SHA-1 header may have already defined platform_SHA_CTX for our
+ * own implementations like block-sha1 and ppc-sha1, so we list
+ * the default for OpenSSL compatible SHA-1 implementations here.
+ */
+#define platform_SHA_CTX       SHA_CTX
+#define platform_SHA1_Init     SHA1_Init
+#define platform_SHA1_Update   SHA1_Update
+#define platform_SHA1_Final            SHA1_Final
+#endif
+
+#define git_SHA_CTX            platform_SHA_CTX
+#define git_SHA1_Init          platform_SHA1_Init
+#define git_SHA1_Update                platform_SHA1_Update
+#define git_SHA1_Final         platform_SHA1_Final
+
+#ifdef SHA1_MAX_BLOCK_SIZE
+#include "compat/sha1-chunked.h"
+#undef git_SHA1_Update
+#define git_SHA1_Update                git_SHA1_Update_Chunked
+#endif
+
 /*
  * Note that these constants are suitable for indexing the hash_algos array and
  * comparing against each other, but are otherwise arbitrary, so they should not
 /* Number of algorithms supported (including unknown). */
 #define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1)
 
-typedef void (*git_hash_init_fn)(void *ctx);
-typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len);
-typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx);
+/* A suitably aligned type for stack allocations of hash contexts. */
+union git_hash_ctx {
+       git_SHA_CTX sha1;
+};
+typedef union git_hash_ctx git_hash_ctx;
+
+typedef void (*git_hash_init_fn)(git_hash_ctx *ctx);
+typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len);
+typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx);
 
 struct git_hash_algo {
        /*
@@ -44,9 +75,6 @@ struct git_hash_algo {
        /* A four-byte version identifier, used in pack indices. */
        uint32_t format_id;
 
-       /* The size of a hash context (e.g. git_SHA_CTX). */
-       size_t ctxsz;
-
        /* The length of the hash in binary. */
        size_t rawsz;
 
index 14435ab65d1bbe3fc73be6a6d774b6ab6ffc8360..ff82b63133fe4b3de046e15f1066ce6df27404ac 100644 (file)
@@ -361,8 +361,8 @@ static void start_put(struct transfer_request *request)
        ssize_t size;
        git_zstream stream;
 
-       unpacked = read_sha1_file(request->obj->oid.hash, &type, &len);
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
+       unpacked = read_object_file(&request->obj->oid, &type, &len);
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
 
        /* Set it up */
        git_deflate_init(&stream, zlib_compression_level);
@@ -915,6 +915,10 @@ static struct remote_lock *lock_remote(const char *path, long timeout)
                                lock->timeout = -1;
                        }
                        XML_ParserFree(parser);
+               } else {
+                       fprintf(stderr,
+                               "error: curl result=%d, HTTP code=%ld\n",
+                               results.curl_result, results.http_code);
                }
        } else {
                fprintf(stderr, "Unable to start LOCK request\n");
index 1ae8363de2bb618df534cc85c86eba3eac9e9c0a..f506f394ac3b2bb6e7cf253f5af8641b0c14664b 100644 (file)
@@ -22,7 +22,7 @@ enum object_request_state {
 
 struct object_request {
        struct walker *walker;
-       unsigned char sha1[20];
+       struct object_id oid;
        struct alt_base *repo;
        enum object_request_state state;
        struct http_object_request *req;
@@ -56,7 +56,7 @@ static void start_object_request(struct walker *walker,
        struct active_request_slot *slot;
        struct http_object_request *req;
 
-       req = new_http_object_request(obj_req->repo->base, obj_req->sha1);
+       req = new_http_object_request(obj_req->repo->base, obj_req->oid.hash);
        if (req == NULL) {
                obj_req->state = ABORTED;
                return;
@@ -82,7 +82,7 @@ static void finish_object_request(struct object_request *obj_req)
                return;
 
        if (obj_req->req->rename == 0)
-               walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1));
+               walker_say(obj_req->walker, "got %s\n", oid_to_hex(&obj_req->oid));
 }
 
 static void process_object_response(void *callback_data)
@@ -129,7 +129,7 @@ static int fill_active_slot(struct walker *walker)
        list_for_each_safe(pos, tmp, head) {
                obj_req = list_entry(pos, struct object_request, node);
                if (obj_req->state == WAITING) {
-                       if (has_sha1_file(obj_req->sha1))
+                       if (has_sha1_file(obj_req->oid.hash))
                                obj_req->state = COMPLETE;
                        else {
                                start_object_request(walker, obj_req);
@@ -148,7 +148,7 @@ static void prefetch(struct walker *walker, unsigned char *sha1)
 
        newreq = xmalloc(sizeof(*newreq));
        newreq->walker = walker;
-       hashcpy(newreq->sha1, sha1);
+       hashcpy(newreq->oid.hash, sha1);
        newreq->repo = data->alt;
        newreq->state = WAITING;
        newreq->req = NULL;
@@ -481,13 +481,13 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
 
        list_for_each(pos, head) {
                obj_req = list_entry(pos, struct object_request, node);
-               if (!hashcmp(obj_req->sha1, sha1))
+               if (!hashcmp(obj_req->oid.hash, sha1))
                        break;
        }
        if (obj_req == NULL)
                return error("Couldn't find request for %s in the queue", hex);
 
-       if (has_sha1_file(obj_req->sha1)) {
+       if (has_sha1_file(obj_req->oid.hash)) {
                if (obj_req->req != NULL)
                        abort_http_object_request(obj_req->req);
                abort_object_request(obj_req);
@@ -541,11 +541,13 @@ static int fetch_object(struct walker *walker, unsigned char *sha1)
        } else if (req->zret != Z_STREAM_END) {
                walker->corrupt_object_found++;
                ret = error("File %s (%s) corrupt", hex, req->url);
-       } else if (hashcmp(obj_req->sha1, req->real_sha1)) {
+       } else if (hashcmp(obj_req->oid.hash, req->real_sha1)) {
                ret = error("File %s has bad hash", hex);
        } else if (req->rename < 0) {
-               ret = error("unable to write sha1 filename %s",
-                           sha1_file_name(req->sha1));
+               struct strbuf buf = STRBUF_INIT;
+               sha1_file_name(&buf, req->sha1);
+               ret = error("unable to write sha1 filename %s", buf.buf);
+               strbuf_release(&buf);
        }
 
        release_http_object_request(req);
diff --git a/http.c b/http.c
index 32a8238955a42a9f6fd86d7cd71d09129ab454a4..a5bd5d62c22c054f82b9971fc1f320c643f1d6fb 100644 (file)
--- a/http.c
+++ b/http.c
@@ -69,6 +69,9 @@ static const char *ssl_key;
 #if LIBCURL_VERSION_NUM >= 0x070908
 static const char *ssl_capath;
 #endif
+#if LIBCURL_VERSION_NUM >= 0x071304
+static const char *curl_no_proxy;
+#endif
 #if LIBCURL_VERSION_NUM >= 0x072c00
 static const char *ssl_pinnedkey;
 #endif
@@ -77,7 +80,6 @@ static long curl_low_speed_limit = -1;
 static long curl_low_speed_time = -1;
 static int curl_ftp_no_epsv;
 static const char *curl_http_proxy;
-static const char *curl_no_proxy;
 static const char *http_proxy_authmethod;
 static struct {
        const char *name;
@@ -1260,14 +1262,14 @@ static struct fill_chain *fill_cfg;
 
 void add_fill_function(void *data, int (*fill)(void *))
 {
-       struct fill_chain *new = xmalloc(sizeof(*new));
+       struct fill_chain *new_fill = xmalloc(sizeof(*new_fill));
        struct fill_chain **linkp = &fill_cfg;
-       new->data = data;
-       new->fill = fill;
-       new->next = NULL;
+       new_fill->data = data;
+       new_fill->fill = fill;
+       new_fill->next = NULL;
        while (*linkp)
                linkp = &(*linkp)->next;
-       *linkp = new;
+       *linkp = new_fill;
 }
 
 void fill_active_slots(void)
@@ -2234,7 +2236,7 @@ struct http_object_request *new_http_object_request(const char *base_url,
        unsigned char *sha1)
 {
        char *hex = sha1_to_hex(sha1);
-       const char *filename;
+       struct strbuf filename = STRBUF_INIT;
        char prevfile[PATH_MAX];
        int prevlocal;
        char prev_buf[PREV_BUF_SIZE];
@@ -2246,14 +2248,15 @@ struct http_object_request *new_http_object_request(const char *base_url,
        hashcpy(freq->sha1, sha1);
        freq->localfile = -1;
 
-       filename = sha1_file_name(sha1);
+       sha1_file_name(&filename, sha1);
        snprintf(freq->tmpfile, sizeof(freq->tmpfile),
-                "%s.temp", filename);
+                "%s.temp", filename.buf);
 
-       snprintf(prevfile, sizeof(prevfile), "%s.prev", filename);
+       snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf);
        unlink_or_warn(prevfile);
        rename(freq->tmpfile, prevfile);
        unlink_or_warn(freq->tmpfile);
+       strbuf_release(&filename);
 
        if (freq->localfile != -1)
                error("fd leakage in start: %d", freq->localfile);
@@ -2368,6 +2371,7 @@ void process_http_object_request(struct http_object_request *freq)
 int finish_http_object_request(struct http_object_request *freq)
 {
        struct stat st;
+       struct strbuf filename = STRBUF_INIT;
 
        close(freq->localfile);
        freq->localfile = -1;
@@ -2393,8 +2397,10 @@ int finish_http_object_request(struct http_object_request *freq)
                unlink_or_warn(freq->tmpfile);
                return -1;
        }
-       freq->rename =
-               finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1));
+
+       sha1_file_name(&filename, freq->sha1);
+       freq->rename = finalize_object_file(freq->tmpfile, filename.buf);
+       strbuf_release(&filename);
 
        return freq->rename;
 }
index 36c7c1b4f6195b2d2f00013611a5d0aa96ecf408..ffb0a6eca8ce632dc448883a47987bf48adea31b 100644 (file)
@@ -1189,11 +1189,11 @@ static struct imap_store *imap_open_store(struct imap_server_conf *srvc, char *f
  */
 static void lf_to_crlf(struct strbuf *msg)
 {
-       char *new;
+       char *new_msg;
        size_t i, j;
        char lastc;
 
-       /* First pass: tally, in j, the size of the new string: */
+       /* First pass: tally, in j, the size of the new_msg string: */
        for (i = j = 0, lastc = '\0'; i < msg->len; i++) {
                if (msg->buf[i] == '\n' && lastc != '\r')
                        j++; /* a CR will need to be added here */
@@ -1201,18 +1201,18 @@ static void lf_to_crlf(struct strbuf *msg)
                j++;
        }
 
-       new = xmallocz(j);
+       new_msg = xmallocz(j);
 
        /*
-        * Second pass: write the new string.  Note that this loop is
+        * Second pass: write the new_msg string.  Note that this loop is
         * otherwise identical to the first pass.
         */
        for (i = j = 0, lastc = '\0'; i < msg->len; i++) {
                if (msg->buf[i] == '\n' && lastc != '\r')
-                       new[j++] = '\r';
-               lastc = new[j++] = msg->buf[i];
+                       new_msg[j++] = '\r';
+               lastc = new_msg[j++] = msg->buf[i];
        }
-       strbuf_attach(msg, new, j, j + 1);
+       strbuf_attach(msg, new_msg, j, j + 1);
 }
 
 /*
index 545ad0f28bce0ac0f92a31019b92f7ef8a668356..ecdce08c4be24cc14109796d9a7c165c53753a88 100644 (file)
@@ -151,29 +151,29 @@ static void range_set_union(struct range_set *out,
 
        assert(out->nr == 0);
        while (i < a->nr || j < b->nr) {
-               struct range *new;
+               struct range *new_range;
                if (i < a->nr && j < b->nr) {
                        if (ra[i].start < rb[j].start)
-                               new = &ra[i++];
+                               new_range = &ra[i++];
                        else if (ra[i].start > rb[j].start)
-                               new = &rb[j++];
+                               new_range = &rb[j++];
                        else if (ra[i].end < rb[j].end)
-                               new = &ra[i++];
+                               new_range = &ra[i++];
                        else
-                               new = &rb[j++];
+                               new_range = &rb[j++];
                } else if (i < a->nr)      /* b exhausted */
-                       new = &ra[i++];
+                       new_range = &ra[i++];
                else                       /* a exhausted */
-                       new = &rb[j++];
-               if (new->start == new->end)
+                       new_range = &rb[j++];
+               if (new_range->start == new_range->end)
                        ; /* empty range */
-               else if (!out->nr || out->ranges[out->nr-1].end < new->start) {
+               else if (!out->nr || out->ranges[out->nr-1].end < new_range->start) {
                        range_set_grow(out, 1);
-                       out->ranges[out->nr].start = new->start;
-                       out->ranges[out->nr].end = new->end;
+                       out->ranges[out->nr].start = new_range->start;
+                       out->ranges[out->nr].end = new_range->end;
                        out->nr++;
-               } else if (out->ranges[out->nr-1].end < new->end) {
-                       out->ranges[out->nr-1].end = new->end;
+               } else if (out->ranges[out->nr-1].end < new_range->end) {
+                       out->ranges[out->nr-1].end = new_range->end;
                }
        }
 }
@@ -501,8 +501,7 @@ static void fill_blob_sha1(struct commit *commit, struct diff_filespec *spec)
        unsigned mode;
        struct object_id oid;
 
-       if (get_tree_entry(commit->object.oid.hash, spec->path,
-                          oid.hash, &mode))
+       if (get_tree_entry(&commit->object.oid, spec->path, &oid, &mode))
                die("There is no path %s in the commit", spec->path);
        fill_filespec(spec, &oid, 1, mode);
 
@@ -696,18 +695,18 @@ static struct line_log_data *line_log_data_merge(struct line_log_data *a,
 static void add_line_range(struct rev_info *revs, struct commit *commit,
                           struct line_log_data *range)
 {
-       struct line_log_data *old = NULL;
-       struct line_log_data *new = NULL;
+       struct line_log_data *old_line = NULL;
+       struct line_log_data *new_line = NULL;
 
-       old = lookup_decoration(&revs->line_log_data, &commit->object);
-       if (old && range) {
-               new = line_log_data_merge(old, range);
-               free_line_log_data(old);
+       old_line = lookup_decoration(&revs->line_log_data, &commit->object);
+       if (old_line && range) {
+               new_line = line_log_data_merge(old_line, range);
+               free_line_log_data(old_line);
        } else if (range)
-               new = line_log_data_copy(range);
+               new_line = line_log_data_copy(range);
 
-       if (new)
-               add_decoration(&revs->line_log_data, &commit->object, new);
+       if (new_line)
+               add_decoration(&revs->line_log_data, &commit->object, new_line);
 }
 
 static void clear_commit_line_range(struct rev_info *revs, struct commit *commit)
@@ -1042,12 +1041,12 @@ static int process_diff_filepair(struct rev_info *rev,
 
 static struct diff_filepair *diff_filepair_dup(struct diff_filepair *pair)
 {
-       struct diff_filepair *new = xmalloc(sizeof(struct diff_filepair));
-       new->one = pair->one;
-       new->two = pair->two;
-       new->one->count++;
-       new->two->count++;
-       return new;
+       struct diff_filepair *new_filepair = xmalloc(sizeof(struct diff_filepair));
+       new_filepair->one = pair->one;
+       new_filepair->two = pair->two;
+       new_filepair->one->count++;
+       new_filepair->two->count++;
+       return new_filepair;
 }
 
 static void free_diffqueues(int n, struct diff_queue_struct *dq)
index 4c5b34e9499433e12b55a34f425aace38d9ecfaa..6a3cc985c45e751dd0c3f37880cf0bfd205ef088 100644 (file)
  * subordinate commands when necessary.  We also "intern" the arg for
  * the convenience of the current command.
  */
-int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
-                             const char *arg)
+static int gently_parse_list_objects_filter(
+       struct list_objects_filter_options *filter_options,
+       const char *arg,
+       struct strbuf *errbuf)
 {
        const char *v0;
 
-       if (filter_options->choice)
-               die(_("multiple object filter types cannot be combined"));
+       if (filter_options->choice) {
+               if (errbuf) {
+                       strbuf_init(errbuf, 0);
+                       strbuf_addstr(
+                               errbuf,
+                               _("multiple filter-specs cannot be combined"));
+               }
+               return 1;
+       }
 
        filter_options->filter_spec = strdup(arg);
 
        if (!strcmp(arg, "blob:none")) {
                filter_options->choice = LOFC_BLOB_NONE;
                return 0;
-       }
 
-       if (skip_prefix(arg, "blob:limit=", &v0)) {
-               if (!git_parse_ulong(v0, &filter_options->blob_limit_value))
-                       die(_("invalid filter-spec expression '%s'"), arg);
-               filter_options->choice = LOFC_BLOB_LIMIT;
-               return 0;
-       }
+       } else if (skip_prefix(arg, "blob:limit=", &v0)) {
+               if (git_parse_ulong(v0, &filter_options->blob_limit_value)) {
+                       filter_options->choice = LOFC_BLOB_LIMIT;
+                       return 0;
+               }
 
-       if (skip_prefix(arg, "sparse:oid=", &v0)) {
+       } else if (skip_prefix(arg, "sparse:oid=", &v0)) {
                struct object_context oc;
                struct object_id sparse_oid;
 
@@ -57,15 +64,27 @@ int parse_list_objects_filter(struct list_objects_filter_options *filter_options
                        filter_options->sparse_oid_value = oiddup(&sparse_oid);
                filter_options->choice = LOFC_SPARSE_OID;
                return 0;
-       }
 
-       if (skip_prefix(arg, "sparse:path=", &v0)) {
+       } else if (skip_prefix(arg, "sparse:path=", &v0)) {
                filter_options->choice = LOFC_SPARSE_PATH;
                filter_options->sparse_path_value = strdup(v0);
                return 0;
        }
 
-       die(_("invalid filter-spec expression '%s'"), arg);
+       if (errbuf) {
+               strbuf_init(errbuf, 0);
+               strbuf_addf(errbuf, "invalid filter-spec '%s'", arg);
+       }
+       memset(filter_options, 0, sizeof(*filter_options));
+       return 1;
+}
+
+int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
+                             const char *arg)
+{
+       struct strbuf buf = STRBUF_INIT;
+       if (gently_parse_list_objects_filter(filter_options, arg, &buf))
+               die("%s", buf.buf);
        return 0;
 }
 
@@ -75,7 +94,7 @@ int opt_parse_list_objects_filter(const struct option *opt,
        struct list_objects_filter_options *filter_options = opt->value;
 
        if (unset || !arg) {
-               list_objects_filter_release(filter_options);
+               list_objects_filter_set_no_filter(filter_options);
                return 0;
        }
 
@@ -90,3 +109,44 @@ void list_objects_filter_release(
        free(filter_options->sparse_path_value);
        memset(filter_options, 0, sizeof(*filter_options));
 }
+
+void partial_clone_register(
+       const char *remote,
+       const struct list_objects_filter_options *filter_options)
+{
+       /*
+        * Record the name of the partial clone remote in the
+        * config and in the global variable -- the latter is
+        * used throughout to indicate that partial clone is
+        * enabled and to expect missing objects.
+        */
+       if (repository_format_partial_clone &&
+           *repository_format_partial_clone &&
+           strcmp(remote, repository_format_partial_clone))
+               die(_("cannot change partial clone promisor remote"));
+
+       git_config_set("core.repositoryformatversion", "1");
+       git_config_set("extensions.partialclone", remote);
+
+       repository_format_partial_clone = xstrdup(remote);
+
+       /*
+        * Record the initial filter-spec in the config as
+        * the default for subsequent fetches from this remote.
+        */
+       core_partial_clone_filter_default =
+               xstrdup(filter_options->filter_spec);
+       git_config_set("core.partialclonefilter",
+                      core_partial_clone_filter_default);
+}
+
+void partial_clone_get_default_filter_spec(
+       struct list_objects_filter_options *filter_options)
+{
+       /*
+        * Parse default value, but silently ignore it if it is invalid.
+        */
+       gently_parse_list_objects_filter(filter_options,
+                                        core_partial_clone_filter_default,
+                                        NULL);
+}
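
The filter-spec parser above becomes reusable: gently_parse_list_objects_filter reports errors through a strbuf (or silently, for the stored core.partialclonefilter default) instead of dying, and partial_clone_register persists the promisor remote plus its default filter in the repository config. The accepted filter-spec forms are exactly the ones matched above: blob:none, blob:limit=<n>, sparse:oid=<oid> and sparse:path=<path>. A hedged sketch of passing such specs on the command line (assuming an object-walk command that accepts --filter, such as rev-list with --objects):

    # list reachable objects while omitting all blobs
    git rev-list --objects --filter=blob:none HEAD

    # omit blobs larger than 1 MiB (git_parse_ulong accepts k/m/g suffixes)
    git rev-list --objects --filter=blob:limit=1m HEAD
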
index eea44a1a51b7c41feba26a688f82f3c67b23667e..0000a61f82d3dcc36e190b5422a13c93d47f15c2 100644 (file)
@@ -30,6 +30,11 @@ struct list_objects_filter_options {
         */
        enum list_objects_filter_choice choice;
 
+       /*
+        * Choice is LOFC_DISABLED because "--no-filter" was requested.
+        */
+       unsigned int no_filter : 1;
+
        /*
         * Parsed values (fields) from within the filter-spec.  These are
         * choice-specific; not all values will be defined for any given
@@ -58,4 +63,17 @@ int opt_parse_list_objects_filter(const struct option *opt,
 void list_objects_filter_release(
        struct list_objects_filter_options *filter_options);
 
+static inline void list_objects_filter_set_no_filter(
+       struct list_objects_filter_options *filter_options)
+{
+       list_objects_filter_release(filter_options);
+       filter_options->no_filter = 1;
+}
+
+void partial_clone_register(
+       const char *remote,
+       const struct list_objects_filter_options *filter_options);
+void partial_clone_get_default_filter_spec(
+       struct list_objects_filter_options *filter_options);
+
 #endif /* LIST_OBJECTS_FILTER_OPTIONS_H */
index 4356c45368e10bd7f41e15f91130dbfd3e4e9281..0ec83aaf1888903be4f1de38c8a94a594ef2130a 100644 (file)
@@ -117,7 +117,7 @@ static enum list_objects_filter_result filter_blobs_limit(
                assert(obj->type == OBJ_BLOB);
                assert((obj->flags & SEEN) == 0);
 
-               t = sha1_object_info(obj->oid.hash, &object_length);
+               t = oid_object_info(&obj->oid, &object_length);
                if (t != OBJ_BLOB) { /* probably OBJ_NONE */
                        /*
                         * We DO NOT have the blob locally, so we cannot
index 0966cdc9fa8dd448aa9955a2fb5324ae8ebbf97d..168bef688a89489a9d88d3e1f773483dbc1c8860 100644 (file)
@@ -9,6 +9,7 @@
 #include "list-objects.h"
 #include "list-objects-filter.h"
 #include "list-objects-filter-options.h"
+#include "packfile.h"
 
 static void process_blob(struct rev_info *revs,
                         struct blob *blob,
@@ -30,6 +31,20 @@ static void process_blob(struct rev_info *revs,
        if (obj->flags & (UNINTERESTING | SEEN))
                return;
 
+       /*
+        * Pre-filter known-missing objects when explicitly requested.
+        * Otherwise, a missing object error message may be reported
+        * later (depending on other filtering criteria).
+        *
+        * Note that this "--exclude-promisor-objects" pre-filtering
+        * may cause the actual filter to report an incomplete list
+        * of missing objects.
+        */
+       if (revs->exclude_promisor_objects &&
+           !has_object_file(&obj->oid) &&
+           is_promisor_object(&obj->oid))
+               return;
+
        pathlen = path->len;
        strbuf_addstr(path, name);
        if (filter_fn)
@@ -91,6 +106,8 @@ static void process_tree(struct rev_info *revs,
                all_entries_interesting: entry_not_interesting;
        int baselen = base->len;
        enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW;
+       int gently = revs->ignore_missing_links ||
+                    revs->exclude_promisor_objects;
 
        if (!revs->tree_objects)
                return;
@@ -98,9 +115,19 @@ static void process_tree(struct rev_info *revs,
                die("bad tree object");
        if (obj->flags & (UNINTERESTING | SEEN))
                return;
-       if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) {
+       if (parse_tree_gently(tree, gently) < 0) {
                if (revs->ignore_missing_links)
                        return;
+
+               /*
+                * Pre-filter known-missing tree objects when explicitly
+                * requested.  This may cause the actual filter to report
+                * an incomplete list of missing objects.
+                */
+               if (revs->exclude_promisor_objects &&
+                   is_promisor_object(&obj->oid))
+                       return;
+
                die("bad tree object %s", oid_to_hex(&obj->oid));
        }
 
index 5c6b09034c5398758cdd33beb630f219768be79c..d1c0bedf244fce0c8894175bf800384c0474b312 100644 (file)
@@ -177,7 +177,7 @@ static void show_parents(struct commit *commit, int abbrev, FILE *file)
        struct commit_list *p;
        for (p = commit->parents; p ; p = p->next) {
                struct commit *parent = p->item;
-               fprintf(file, " %s", find_unique_abbrev(parent->object.oid.hash, abbrev));
+               fprintf(file, " %s", find_unique_abbrev(&parent->object.oid, abbrev));
        }
 }
 
@@ -185,7 +185,7 @@ static void show_children(struct rev_info *opt, struct commit *commit, int abbre
 {
        struct commit_list *p = lookup_decoration(&opt->children, &commit->object);
        for ( ; p; p = p->next) {
-               fprintf(opt->diffopt.file, " %s", find_unique_abbrev(p->item->object.oid.hash, abbrev));
+               fprintf(opt->diffopt.file, " %s", find_unique_abbrev(&p->item->object.oid, abbrev));
        }
 }
 
@@ -499,7 +499,7 @@ static void show_one_mergetag(struct commit *commit,
        int status, nth;
        size_t payload_size, gpg_message_offset;
 
-       hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), oid.hash);
+       hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
        tag = lookup_tag(&oid);
        if (!tag)
                return; /* error message already given */
@@ -558,7 +558,7 @@ void show_log(struct rev_info *opt)
 
                if (!opt->graph)
                        put_revision_mark(opt, commit);
-               fputs(find_unique_abbrev(commit->object.oid.hash, abbrev_commit), opt->diffopt.file);
+               fputs(find_unique_abbrev(&commit->object.oid, abbrev_commit), opt->diffopt.file);
                if (opt->print_parents)
                        show_parents(commit, abbrev_commit, opt->diffopt.file);
                if (opt->children.name)
@@ -620,7 +620,8 @@ void show_log(struct rev_info *opt)
 
                if (!opt->graph)
                        put_revision_mark(opt, commit);
-               fputs(find_unique_abbrev(commit->object.oid.hash, abbrev_commit),
+               fputs(find_unique_abbrev(&commit->object.oid,
+                                        abbrev_commit),
                      opt->diffopt.file);
                if (opt->print_parents)
                        show_parents(commit, abbrev_commit, opt->diffopt.file);
@@ -628,8 +629,7 @@ void show_log(struct rev_info *opt)
                        show_children(opt, commit, abbrev_commit);
                if (parent)
                        fprintf(opt->diffopt.file, " (from %s)",
-                              find_unique_abbrev(parent->object.oid.hash,
-                                                 abbrev_commit));
+                              find_unique_abbrev(&parent->object.oid, abbrev_commit));
                fputs(diff_get_color_opt(&opt->diffopt, DIFF_RESET), opt->diffopt.file);
                show_decorations(opt, commit);
                if (opt->commit_format == CMIT_FMT_ONELINE) {
index cb921b4db676e3db918ee16f419cd2b78e0bf57e..13f0d2884e25edef3fde5bdf72b89a770111e472 100644 (file)
--- a/mailmap.c
+++ b/mailmap.c
@@ -224,7 +224,7 @@ static int read_mailmap_blob(struct string_list *map,
        if (get_oid(name, &oid) < 0)
                return 0;
 
-       buf = read_sha1_file(oid.hash, &type, &size);
+       buf = read_object_file(&oid, &type, &size);
        if (!buf)
                return error("unable to read mailmap object at %s", name);
        if (type != OBJ_BLOB)
index 396b7338df2c53009900fbd3df36dd06a7219493..72cc2baa3f96b2cbfa296335c7f0ff1094b6e96c 100644 (file)
@@ -54,7 +54,7 @@ static void *fill_tree_desc_strict(struct tree_desc *desc,
        enum object_type type;
        unsigned long size;
 
-       buffer = read_sha1_file(hash->hash, &type, &size);
+       buffer = read_object_file(hash, &type, &size);
        if (!buffer)
                die("unable to read tree (%s)", oid_to_hex(hash));
        if (type != OBJ_TREE)
@@ -158,22 +158,20 @@ static void match_trees(const struct object_id *hash1,
 }
 
 /*
- * A tree "hash1" has a subdirectory at "prefix".  Come up with a
- * tree object by replacing it with another tree "hash2".
+ * A tree "oid1" has a subdirectory at "prefix".  Come up with a tree object by
+ * replacing it with another tree "oid2".
  */
-static int splice_tree(const unsigned char *hash1,
-                      const char *prefix,
-                      const unsigned char *hash2,
-                      unsigned char *result)
+static int splice_tree(const struct object_id *oid1, const char *prefix,
+                      const struct object_id *oid2, struct object_id *result)
 {
        char *subpath;
        int toplen;
        char *buf;
        unsigned long sz;
        struct tree_desc desc;
-       unsigned char *rewrite_here;
-       const unsigned char *rewrite_with;
-       unsigned char subtree[20];
+       struct object_id *rewrite_here;
+       const struct object_id *rewrite_with;
+       struct object_id subtree;
        enum object_type type;
        int status;
 
@@ -182,9 +180,9 @@ static int splice_tree(const unsigned char *hash1,
        if (*subpath)
                subpath++;
 
-       buf = read_sha1_file(hash1, &type, &sz);
+       buf = read_object_file(oid1, &type, &sz);
        if (!buf)
-               die("cannot read tree %s", sha1_to_hex(hash1));
+               die("cannot read tree %s", oid_to_hex(oid1));
        init_tree_desc(&desc, buf, sz);
 
        rewrite_here = NULL;
@@ -197,26 +195,26 @@ static int splice_tree(const unsigned char *hash1,
                if (strlen(name) == toplen &&
                    !memcmp(name, prefix, toplen)) {
                        if (!S_ISDIR(mode))
-                               die("entry %s in tree %s is not a tree",
-                                   name, sha1_to_hex(hash1));
-                       rewrite_here = (unsigned char *) oid->hash;
+                               die("entry %s in tree %s is not a tree", name,
+                                   oid_to_hex(oid1));
+                       rewrite_here = (struct object_id *)oid;
                        break;
                }
                update_tree_entry(&desc);
        }
        if (!rewrite_here)
-               die("entry %.*s not found in tree %s",
-                   toplen, prefix, sha1_to_hex(hash1));
+               die("entry %.*s not found in tree %s", toplen, prefix,
+                   oid_to_hex(oid1));
        if (*subpath) {
-               status = splice_tree(rewrite_here, subpath, hash2, subtree);
+               status = splice_tree(rewrite_here, subpath, oid2, &subtree);
                if (status)
                        return status;
-               rewrite_with = subtree;
+               rewrite_with = &subtree;
+       } else {
+               rewrite_with = oid2;
        }
-       else
-               rewrite_with = hash2;
-       hashcpy(rewrite_here, rewrite_with);
-       status = write_sha1_file(buf, sz, tree_type, result);
+       oidcpy(rewrite_here, rewrite_with);
+       status = write_object_file(buf, sz, tree_type, result);
        free(buf);
        return status;
 }
@@ -271,7 +269,7 @@ void shift_tree(const struct object_id *hash1,
                if (!*del_prefix)
                        return;
 
-               if (get_tree_entry(hash2->hash, del_prefix, shifted->hash, &mode))
+               if (get_tree_entry(hash2, del_prefix, shifted, &mode))
                        die("cannot find path %s in tree %s",
                            del_prefix, oid_to_hex(hash2));
                return;
@@ -280,7 +278,7 @@ void shift_tree(const struct object_id *hash1,
        if (!*add_prefix)
                return;
 
-       splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash);
+       splice_tree(hash1, add_prefix, hash2, shifted);
 }
 
 /*
@@ -298,12 +296,12 @@ void shift_tree_by(const struct object_id *hash1,
        unsigned candidate = 0;
 
        /* Can hash2 be a tree at shift_prefix in tree hash1? */
-       if (!get_tree_entry(hash1->hash, shift_prefix, sub1.hash, &mode1) &&
+       if (!get_tree_entry(hash1, shift_prefix, &sub1, &mode1) &&
            S_ISDIR(mode1))
                candidate |= 1;
 
        /* Can hash1 be a tree at shift_prefix in tree hash2? */
-       if (!get_tree_entry(hash2->hash, shift_prefix, sub2.hash, &mode2) &&
+       if (!get_tree_entry(hash2, shift_prefix, &sub2, &mode2) &&
            S_ISDIR(mode2))
                candidate |= 2;
 
@@ -334,7 +332,7 @@ void shift_tree_by(const struct object_id *hash1,
                 * shift tree2 down by adding shift_prefix above it
                 * to match tree1.
                 */
-               splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash);
+               splice_tree(hash1, shift_prefix, hash2, shifted);
        else
                /*
                 * shift tree2 up by removing shift_prefix from it
index 9b6eac22e4256d8f2bf82961b6e4f320d89fdeba..fa49c17287f4120b4bbb75acc6b92b3d339710cd 100644 (file)
@@ -11,7 +11,7 @@ static int fill_mmfile_blob(mmfile_t *f, struct blob *obj)
        unsigned long size;
        enum object_type type;
 
-       buf = read_sha1_file(obj->object.oid.hash, &type, &size);
+       buf = read_object_file(&obj->object.oid, &type, &size);
        if (!buf)
                return -1;
        if (type != OBJ_BLOB) {
@@ -66,7 +66,7 @@ void *merge_blobs(const char *path, struct blob *base, struct blob *our, struct
                        return NULL;
                if (!our)
                        our = their;
-               return read_sha1_file(our->object.oid.hash, &type, size);
+               return read_object_file(&our->object.oid, &type, size);
        }
 
        if (fill_mmfile_blob(&f1, our) < 0)
index cc5fa0a94965fad8821cdf81dfe84737f4dee043..9c05eb7f700eed0dc3e20a4c22f3a60e0aa21488 100644 (file)
@@ -49,6 +49,67 @@ static unsigned int path_hash(const char *path)
        return ignore_case ? strihash(path) : strhash(path);
 }
 
+static struct dir_rename_entry *dir_rename_find_entry(struct hashmap *hashmap,
+                                                     char *dir)
+{
+       struct dir_rename_entry key;
+
+       if (dir == NULL)
+               return NULL;
+       hashmap_entry_init(&key, strhash(dir));
+       key.dir = dir;
+       return hashmap_get(hashmap, &key, NULL);
+}
+
+static int dir_rename_cmp(const void *unused_cmp_data,
+                         const void *entry,
+                         const void *entry_or_key,
+                         const void *unused_keydata)
+{
+       const struct dir_rename_entry *e1 = entry;
+       const struct dir_rename_entry *e2 = entry_or_key;
+
+       return strcmp(e1->dir, e2->dir);
+}
+
+static void dir_rename_init(struct hashmap *map)
+{
+       hashmap_init(map, dir_rename_cmp, NULL, 0);
+}
+
+static void dir_rename_entry_init(struct dir_rename_entry *entry,
+                                 char *directory)
+{
+       hashmap_entry_init(entry, strhash(directory));
+       entry->dir = directory;
+       entry->non_unique_new_dir = 0;
+       strbuf_init(&entry->new_dir, 0);
+       string_list_init(&entry->possible_new_dirs, 0);
+}
+
+static struct collision_entry *collision_find_entry(struct hashmap *hashmap,
+                                                   char *target_file)
+{
+       struct collision_entry key;
+
+       hashmap_entry_init(&key, strhash(target_file));
+       key.target_file = target_file;
+       return hashmap_get(hashmap, &key, NULL);
+}
+
+static int collision_cmp(void *unused_cmp_data,
+                        const struct collision_entry *e1,
+                        const struct collision_entry *e2,
+                        const void *unused_keydata)
+{
+       return strcmp(e1->target_file, e2->target_file);
+}
+
+static void collision_init(struct hashmap *map)
+{
+       hashmap_init(map, (hashmap_cmp_fn) collision_cmp, NULL, 0);
+}
+
 static void flush_output(struct merge_options *o)
 {
        if (o->buffer_output < 2 && o->obuf.len) {
@@ -119,6 +180,7 @@ static int oid_eq(const struct object_id *a, const struct object_id *b)
 
 enum rename_type {
        RENAME_NORMAL = 0,
+       RENAME_DIR,
        RENAME_DELETE,
        RENAME_ONE_FILE_TO_ONE,
        RENAME_ONE_FILE_TO_TWO,
@@ -228,7 +290,7 @@ static void output_commit_title(struct merge_options *o, struct commit *commit)
                strbuf_addf(&o->obuf, "virtual %s\n",
                        merge_remote_util(commit)->name);
        else {
-               strbuf_add_unique_abbrev(&o->obuf, commit->object.oid.hash,
+               strbuf_add_unique_abbrev(&o->obuf, &commit->object.oid,
                                         DEFAULT_ABBREV);
                strbuf_addch(&o->obuf, ' ');
                if (parse_commit(commit) != 0)
@@ -275,32 +337,37 @@ static void init_tree_desc_from_tree(struct tree_desc *desc, struct tree *tree)
        init_tree_desc(desc, tree->buffer, tree->size);
 }
 
-static int git_merge_trees(int index_only,
+static int git_merge_trees(struct merge_options *o,
                           struct tree *common,
                           struct tree *head,
                           struct tree *merge)
 {
        int rc;
        struct tree_desc t[3];
-       struct unpack_trees_options opts;
 
-       memset(&opts, 0, sizeof(opts));
-       if (index_only)
-               opts.index_only = 1;
+       memset(&o->unpack_opts, 0, sizeof(o->unpack_opts));
+       if (o->call_depth)
+               o->unpack_opts.index_only = 1;
        else
-               opts.update = 1;
-       opts.merge = 1;
-       opts.head_idx = 2;
-       opts.fn = threeway_merge;
-       opts.src_index = &the_index;
-       opts.dst_index = &the_index;
-       setup_unpack_trees_porcelain(&opts, "merge");
+               o->unpack_opts.update = 1;
+       o->unpack_opts.merge = 1;
+       o->unpack_opts.head_idx = 2;
+       o->unpack_opts.fn = threeway_merge;
+       o->unpack_opts.src_index = &the_index;
+       o->unpack_opts.dst_index = &the_index;
+       setup_unpack_trees_porcelain(&o->unpack_opts, "merge");
 
        init_tree_desc_from_tree(t+0, common);
        init_tree_desc_from_tree(t+1, head);
        init_tree_desc_from_tree(t+2, merge);
 
-       rc = unpack_trees(3, t, &opts);
+       rc = unpack_trees(3, t, &o->unpack_opts);
+       /*
+        * unpack_trees NULLifies src_index, but it's used in verify_uptodate,
+        * so set it to the new index, which will usually have modification
+        * timestamp info copied over.
+        */
+       o->unpack_opts.src_index = &the_index;
        cache_tree_free(&active_cache_tree);
        return rc;
 }
@@ -335,7 +402,7 @@ struct tree *write_tree_from_memory(struct merge_options *o)
        return result;
 }
 
-static int save_files_dirs(const unsigned char *sha1,
+static int save_files_dirs(const struct object_id *oid,
                struct strbuf *base, const char *path,
                unsigned int mode, int stage, void *context)
 {
@@ -360,6 +427,21 @@ static void get_files_dirs(struct merge_options *o, struct tree *tree)
        read_tree_recursive(tree, "", 0, 0, &match_all, save_files_dirs, o);
 }
 
+static int get_tree_entry_if_blob(struct tree *tree,
+                                 const char *path,
+                                 struct object_id *hashy,
+                                 unsigned int *mode_o)
+{
+       int ret;
+
+       ret = get_tree_entry(&tree->object.oid, path, hashy, mode_o);
+       if (S_ISDIR(*mode_o)) {
+               oidcpy(hashy, &null_oid);
+               *mode_o = 0;
+       }
+       return ret;
+}
+
 /*
  * Returns an index_entry instance which doesn't have to correspond to
  * a real cache entry in Git's index.
@@ -370,12 +452,12 @@ static struct stage_data *insert_stage_data(const char *path,
 {
        struct string_list_item *item;
        struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
-       get_tree_entry(o->object.oid.hash, path,
-                       e->stages[1].oid.hash, &e->stages[1].mode);
-       get_tree_entry(a->object.oid.hash, path,
-                       e->stages[2].oid.hash, &e->stages[2].mode);
-       get_tree_entry(b->object.oid.hash, path,
-                       e->stages[3].oid.hash, &e->stages[3].mode);
+       get_tree_entry_if_blob(o, path,
+                              &e->stages[1].oid, &e->stages[1].mode);
+       get_tree_entry_if_blob(a, path,
+                              &e->stages[2].oid, &e->stages[2].mode);
+       get_tree_entry_if_blob(b, path,
+                              &e->stages[3].oid, &e->stages[3].mode);
        item = string_list_insert(entries, path);
        item->util = e;
        return e;
@@ -513,80 +595,31 @@ static void record_df_conflict_files(struct merge_options *o,
 
 struct rename {
        struct diff_filepair *pair;
+       /*
+        * Purpose of src_entry and dst_entry:
+        *
+        * If 'before' is renamed to 'after' then src_entry will contain
+        * the versions of 'before' from the merge_base, HEAD, and MERGE in
+        * stages 1, 2, and 3; dst_entry will contain the respective
+        * versions of 'after' in corresponding locations.  Thus, we have a
+        * total of six modes and oids, though some will be null.  (Stage 0
+        * is ignored; we're interested in handling conflicts.)
+        *
+        * Since we don't turn on break-rewrites by default, neither
+        * src_entry nor dst_entry can have all three of their stages have
+        * non-null oids, meaning at most four of the six will be non-null.
+        * Also, since this is a rename, both src_entry and dst_entry will
+        * have at least one non-null oid, meaning at least two will be
+        * non-null.  Of the six oids, a typical rename will have three be
+        * non-null.  Only two implies a rename/delete, and four implies a
+        * rename/add.
+        */
        struct stage_data *src_entry;
        struct stage_data *dst_entry;
+       unsigned add_turned_into_rename:1;
        unsigned processed:1;
 };
 
-/*
- * Get information of all renames which occurred between 'o_tree' and
- * 'tree'. We need the three trees in the merge ('o_tree', 'a_tree' and
- * 'b_tree') to be able to associate the correct cache entries with
- * the rename information. 'tree' is always equal to either a_tree or b_tree.
- */
-static struct string_list *get_renames(struct merge_options *o,
-                                      struct tree *tree,
-                                      struct tree *o_tree,
-                                      struct tree *a_tree,
-                                      struct tree *b_tree,
-                                      struct string_list *entries)
-{
-       int i;
-       struct string_list *renames;
-       struct diff_options opts;
-
-       renames = xcalloc(1, sizeof(struct string_list));
-       if (!o->detect_rename)
-               return renames;
-
-       diff_setup(&opts);
-       opts.flags.recursive = 1;
-       opts.flags.rename_empty = 0;
-       opts.detect_rename = DIFF_DETECT_RENAME;
-       opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
-                           o->diff_rename_limit >= 0 ? o->diff_rename_limit :
-                           1000;
-       opts.rename_score = o->rename_score;
-       opts.show_rename_progress = o->show_rename_progress;
-       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
-       diff_setup_done(&opts);
-       diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
-       diffcore_std(&opts);
-       if (opts.needed_rename_limit > o->needed_rename_limit)
-               o->needed_rename_limit = opts.needed_rename_limit;
-       for (i = 0; i < diff_queued_diff.nr; ++i) {
-               struct string_list_item *item;
-               struct rename *re;
-               struct diff_filepair *pair = diff_queued_diff.queue[i];
-               if (pair->status != 'R') {
-                       diff_free_filepair(pair);
-                       continue;
-               }
-               re = xmalloc(sizeof(*re));
-               re->processed = 0;
-               re->pair = pair;
-               item = string_list_lookup(entries, re->pair->one->path);
-               if (!item)
-                       re->src_entry = insert_stage_data(re->pair->one->path,
-                                       o_tree, a_tree, b_tree, entries);
-               else
-                       re->src_entry = item->util;
-
-               item = string_list_lookup(entries, re->pair->two->path);
-               if (!item)
-                       re->dst_entry = insert_stage_data(re->pair->two->path,
-                                       o_tree, a_tree, b_tree, entries);
-               else
-                       re->dst_entry = item->util;
-               item = string_list_insert(renames, pair->one->path);
-               item->util = re;
-       }
-       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
-       diff_queued_diff.nr = 0;
-       diff_flush(&opts);
-       return renames;
-}
-
 static int update_stages(struct merge_options *opt, const char *path,
                         const struct diff_filespec *o,
                         const struct diff_filespec *a,
@@ -618,6 +651,27 @@ static int update_stages(struct merge_options *opt, const char *path,
        return 0;
 }
 
+static int update_stages_for_stage_data(struct merge_options *opt,
+                                       const char *path,
+                                       const struct stage_data *stage_data)
+{
+       struct diff_filespec o, a, b;
+
+       o.mode = stage_data->stages[1].mode;
+       oidcpy(&o.oid, &stage_data->stages[1].oid);
+
+       a.mode = stage_data->stages[2].mode;
+       oidcpy(&a.oid, &stage_data->stages[2].oid);
+
+       b.mode = stage_data->stages[3].mode;
+       oidcpy(&b.oid, &stage_data->stages[3].oid);
+
+       return update_stages(opt, path,
+                            is_null_oid(&o.oid) ? NULL : &o,
+                            is_null_oid(&a.oid) ? NULL : &a,
+                            is_null_oid(&b.oid) ? NULL : &b);
+}
+
 static void update_entry(struct stage_data *entry,
                         struct diff_filespec *o,
                         struct diff_filespec *a,
@@ -746,6 +800,20 @@ static int would_lose_untracked(const char *path)
        return !was_tracked(path) && file_exists(path);
 }
 
+static int was_dirty(struct merge_options *o, const char *path)
+{
+       struct cache_entry *ce;
+       int dirty = 1;
+
+       if (o->call_depth || !was_tracked(path))
+               return !dirty;
+
+       ce = cache_file_exists(path, strlen(path), ignore_case);
+       dirty = (ce->ce_stat_data.sd_mtime.sec > 0 &&
+                verify_uptodate(ce, &o->unpack_opts) != 0);
+       return dirty;
+}
+
 static int make_room_for_path(struct merge_options *o, const char *path)
 {
        int status, i;
@@ -823,7 +891,7 @@ static int update_file_flags(struct merge_options *o,
                        goto update_index;
                }
 
-               buf = read_sha1_file(oid->hash, &type, &size);
+               buf = read_object_file(oid, &type, &size);
                if (!buf)
                        return err(o, _("cannot read object %s '%s'"), oid_to_hex(oid), path);
                if (type != OBJ_BLOB) {
@@ -1009,8 +1077,9 @@ static int merge_file_1(struct merge_options *o,
                        if ((merge_status < 0) || !result_buf.ptr)
                                ret = err(o, _("Failed to execute internal merge"));
 
-                       if (!ret && write_sha1_file(result_buf.ptr, result_buf.size,
-                                                   blob_type, result->oid.hash))
+                       if (!ret &&
+                           write_object_file(result_buf.ptr, result_buf.size,
+                                             blob_type, &result->oid))
                                ret = err(o, _("Unable to add %s to database"),
                                          a->path);
 
@@ -1094,6 +1163,38 @@ static int merge_file_one(struct merge_options *o,
        return merge_file_1(o, &one, &a, &b, branch1, branch2, mfi);
 }
 
+static int conflict_rename_dir(struct merge_options *o,
+                              struct diff_filepair *pair,
+                              const char *rename_branch,
+                              const char *other_branch)
+{
+       const struct diff_filespec *dest = pair->two;
+
+       if (!o->call_depth && would_lose_untracked(dest->path)) {
+               char *alt_path = unique_path(o, dest->path, rename_branch);
+
+               output(o, 1, _("Error: Refusing to lose untracked file at %s; "
+                              "writing to %s instead."),
+                      dest->path, alt_path);
+               /*
+                * Write the file in worktree at alt_path, but not in the
+                * index.  Instead, write to dest->path for the index but
+                * only at the higher appropriate stage.
+                */
+               if (update_file(o, 0, &dest->oid, dest->mode, alt_path))
+                       return -1;
+               free(alt_path);
+               return update_stages(o, dest->path, NULL,
+                                    rename_branch == o->branch1 ? dest : NULL,
+                                    rename_branch == o->branch1 ? NULL : dest);
+       }
+
+       /* Update dest->path both in index and in worktree */
+       if (update_file(o, 1, &dest->oid, dest->mode, dest->path))
+               return -1;
+       return 0;
+}
+
 static int handle_change_delete(struct merge_options *o,
                                 const char *path, const char *old_path,
                                 const struct object_id *o_oid, int o_mode,
@@ -1107,7 +1208,8 @@ static int handle_change_delete(struct merge_options *o,
        const char *update_path = path;
        int ret = 0;
 
-       if (dir_in_way(path, !o->call_depth, 0)) {
+       if (dir_in_way(path, !o->call_depth, 0) ||
+           (!o->call_depth && would_lose_untracked(path))) {
                update_path = alt_path = unique_path(o, path, change_branch);
        }
 
@@ -1222,17 +1324,34 @@ static int handle_file(struct merge_options *o,
 
        add = filespec_from_entry(&other, dst_entry, stage ^ 1);
        if (add) {
+               int ren_src_was_dirty = was_dirty(o, rename->path);
                char *add_name = unique_path(o, rename->path, other_branch);
                if (update_file(o, 0, &add->oid, add->mode, add_name))
                        return -1;
 
-               remove_file(o, 0, rename->path, 0);
+               if (ren_src_was_dirty) {
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              rename->path);
+               }
+               /*
+                * Because the double negatives somehow keep confusing me...
+                *    1) update_wd iff !ren_src_was_dirty.
+                *    2) no_wd iff !update_wd
+                *    3) so, no_wd == !!ren_src_was_dirty == ren_src_was_dirty
+                */
+               remove_file(o, 0, rename->path, ren_src_was_dirty);
                dst_name = unique_path(o, rename->path, cur_branch);
        } else {
                if (dir_in_way(rename->path, !o->call_depth, 0)) {
                        dst_name = unique_path(o, rename->path, cur_branch);
                        output(o, 1, _("%s is a directory in %s adding as %s instead"),
                               rename->path, other_branch, dst_name);
+               } else if (!o->call_depth &&
+                          would_lose_untracked(rename->path)) {
+                       dst_name = unique_path(o, rename->path, cur_branch);
+                       output(o, 1, _("Refusing to lose untracked file at %s; "
+                                      "adding as %s instead"),
+                              rename->path, dst_name);
                }
        }
        if ((ret = update_file(o, 0, &rename->oid, rename->mode, dst_name)))
@@ -1358,11 +1477,43 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
                char *new_path2 = unique_path(o, path, ci->branch2);
                output(o, 1, _("Renaming %s to %s and %s to %s instead"),
                       a->path, new_path1, b->path, new_path2);
-               remove_file(o, 0, path, 0);
+               if (was_dirty(o, path))
+                       output(o, 1, _("Refusing to lose dirty file at %s"),
+                              path);
+               else if (would_lose_untracked(path))
+                       /*
+                        * Only way we get here is if both renames were from
+                        * a directory rename AND user had an untracked file
+                        * at the location where both files end up after the
+                        * two directory renames.  See testcase 10d of t6043.
+                        */
+                       output(o, 1, _("Refusing to lose untracked file at "
+                                      "%s, even though it's in the way."),
+                              path);
+               else
+                       remove_file(o, 0, path, 0);
                ret = update_file(o, 0, &mfi_c1.oid, mfi_c1.mode, new_path1);
                if (!ret)
                        ret = update_file(o, 0, &mfi_c2.oid, mfi_c2.mode,
                                          new_path2);
+               /*
+                * unpack_trees() actually populates the index for us for
+                * "normal" rename/rename(2to1) situations so that the
+                * correct entries are at the higher stages, which would
+                * make the call below to update_stages_for_stage_data
+                * unnecessary.  However, if either of the renames came
+                * from a directory rename, then unpack_trees() will not
+                * have gotten the right data loaded into the index, so we
+                * need to do so now.  (While it'd be tempting to move this
+                * call to update_stages_for_stage_data() to
+                * apply_directory_rename_modifications(), that would break
+                * our intermediate calls to would_lose_untracked() since
+                * those rely on the current in-memory index.  See also the
+                * big "NOTE" in update_stages()).
+                */
+               if (update_stages_for_stage_data(o, path, ci->dst_entry1))
+                       ret = -1;
+
                free(new_path2);
                free(new_path1);
        }
@@ -1370,6 +1521,754 @@ static int conflict_rename_rename_2to1(struct merge_options *o,
        return ret;
 }
 
+/*
+ * Get the diff_filepairs changed between o_tree and tree.
+ */
+static struct diff_queue_struct *get_diffpairs(struct merge_options *o,
+                                              struct tree *o_tree,
+                                              struct tree *tree)
+{
+       struct diff_queue_struct *ret;
+       struct diff_options opts;
+
+       diff_setup(&opts);
+       opts.flags.recursive = 1;
+       opts.flags.rename_empty = 0;
+       opts.detect_rename = DIFF_DETECT_RENAME;
+       opts.rename_limit = o->merge_rename_limit >= 0 ? o->merge_rename_limit :
+                           o->diff_rename_limit >= 0 ? o->diff_rename_limit :
+                           1000;
+       opts.rename_score = o->rename_score;
+       opts.show_rename_progress = o->show_rename_progress;
+       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+       diff_setup_done(&opts);
+       diff_tree_oid(&o_tree->object.oid, &tree->object.oid, "", &opts);
+       diffcore_std(&opts);
+       if (opts.needed_rename_limit > o->needed_rename_limit)
+               o->needed_rename_limit = opts.needed_rename_limit;
+
+       ret = xmalloc(sizeof(*ret));
+       *ret = diff_queued_diff;
+
+       opts.output_format = DIFF_FORMAT_NO_OUTPUT;
+       diff_queued_diff.nr = 0;
+       diff_queued_diff.queue = NULL;
+       diff_flush(&opts);
+       return ret;
+}
+
+static int tree_has_path(struct tree *tree, const char *path)
+{
+       struct object_id hashy;
+       unsigned int mode_o;
+
+       return !get_tree_entry(&tree->object.oid, path,
+                              &hashy, &mode_o);
+}
+
+/*
+ * Return a new string that replaces the beginning portion (which matches
+ * entry->dir), with entry->new_dir.  In perl-speak:
+ *   new_path_name = (old_path =~ s/entry->dir/entry->new_dir/);
+ * NOTE:
+ *   Caller must ensure that old_path starts with entry->dir + '/'.
+ */
+static char *apply_dir_rename(struct dir_rename_entry *entry,
+                             const char *old_path)
+{
+       struct strbuf new_path = STRBUF_INIT;
+       int oldlen, newlen;
+
+       if (entry->non_unique_new_dir)
+               return NULL;
+
+       oldlen = strlen(entry->dir);
+       newlen = entry->new_dir.len + (strlen(old_path) - oldlen) + 1;
+       strbuf_grow(&new_path, newlen);
+       strbuf_addbuf(&new_path, &entry->new_dir);
+       strbuf_addstr(&new_path, &old_path[oldlen]);
+
+       return strbuf_detach(&new_path, NULL);
+}
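
As a standalone sketch (not git code) of the substitution apply_dir_rename() performs, the following minimal program replaces the leading old-directory portion of a path with the new directory. The helper name rename_prefix() and the example paths are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative only: replace the leading old_dir portion of path with
 * new_dir.  As with apply_dir_rename(), the caller must ensure that
 * path starts with old_dir followed by '/'.
 */
static char *rename_prefix(const char *old_dir, const char *new_dir,
                           const char *path)
{
        size_t result_len = strlen(new_dir) + strlen(path) - strlen(old_dir);
        char *result = malloc(result_len + 1);

        strcpy(result, new_dir);
        strcat(result, path + strlen(old_dir)); /* keep the unchanged tail */
        return result;
}

int main(void)
{
        char *p = rename_prefix("a/b/c/d", "a/b/some/thing/else",
                                "a/b/c/d/e/foo.c");
        printf("%s\n", p);      /* prints a/b/some/thing/else/e/foo.c */
        free(p);
        return 0;
}
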
+
+static void get_renamed_dir_portion(const char *old_path, const char *new_path,
+                                   char **old_dir, char **new_dir)
+{
+       char *end_of_old, *end_of_new;
+       int old_len, new_len;
+
+       *old_dir = NULL;
+       *new_dir = NULL;
+
+       /*
+        * For
+        *    "a/b/c/d/e/foo.c" -> "a/b/some/thing/else/e/foo.c"
+        * the "e/foo.c" part is the same, we just want to know that
+        *    "a/b/c/d" was renamed to "a/b/some/thing/else"
+        * so, for this example, this function returns "a/b/c/d" in
+        * *old_dir and "a/b/some/thing/else" in *new_dir.
+        *
+        * Also, if the basename of the file changed, we don't care.  We
+        * want to know which portion of the directory, if any, changed.
+        */
+       end_of_old = strrchr(old_path, '/');
+       end_of_new = strrchr(new_path, '/');
+
+       if (end_of_old == NULL || end_of_new == NULL)
+               return;
+       while (*--end_of_new == *--end_of_old &&
+              end_of_old != old_path &&
+              end_of_new != new_path)
+               ; /* Do nothing; all in the while loop */
+       /*
+        * We've found the first non-matching character in the directory
+        * paths.  That means the current directory we were comparing
+        * represents the rename.  Move end_of_old and end_of_new back
+        * to the full directory name.
+        */
+       if (*end_of_old == '/')
+               end_of_old++;
+       if (*end_of_old != '/')
+               end_of_new++;
+       end_of_old = strchr(end_of_old, '/');
+       end_of_new = strchr(end_of_new, '/');
+
+       /*
+        * It may have been the case that old_path and new_path were the same
+        * directory all along.  Don't claim a rename if they're the same.
+        */
+       old_len = end_of_old - old_path;
+       new_len = end_of_new - new_path;
+
+       if (old_len != new_len || strncmp(old_path, new_path, old_len)) {
+               *old_dir = xstrndup(old_path, old_len);
+               *new_dir = xstrndup(new_path, new_len);
+       }
+}
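
The following is a minimal standalone sketch (not git code) of the idea behind get_renamed_dir_portion(): compare the directory parts of the two paths from the end, snap to a '/' boundary so whole components are kept, and report the differing directory prefixes. All helper names are made up, and the edge-case handling is simplified relative to the real function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_prefix(const char *s, size_t n)
{
        char *r = malloc(n + 1);

        memcpy(r, s, n);
        r[n] = '\0';
        return r;
}

/*
 * Illustrative only: starting at the final '/' of each path (so the
 * basenames are ignored), walk backwards over the identical trailing
 * characters, advance to the next '/' so whole components are kept,
 * and report the differing directory prefixes, or NULL if the
 * directories did not change.
 */
static void renamed_dir_portion(const char *old_path, const char *new_path,
                                char **old_dir, char **new_dir)
{
        const char *end_old = strrchr(old_path, '/');
        const char *end_new = strrchr(new_path, '/');

        *old_dir = *new_dir = NULL;
        if (!end_old || !end_new)
                return; /* at least one file is in the top-level directory */

        while (end_old > old_path && end_new > new_path &&
               end_old[-1] == end_new[-1]) {
                end_old--;
                end_new--;
        }
        end_old = strchr(end_old, '/');
        end_new = strchr(end_new, '/');
        if (!end_old || !end_new)
                return;

        if (end_old - old_path == end_new - new_path &&
            !strncmp(old_path, new_path, end_old - old_path))
                return; /* same directory on both sides; not a dir rename */

        *old_dir = dup_prefix(old_path, end_old - old_path);
        *new_dir = dup_prefix(new_path, end_new - new_path);
}

int main(void)
{
        char *old_dir, *new_dir;

        renamed_dir_portion("a/b/c/d/e/foo.c",
                            "a/b/some/thing/else/e/foo.c",
                            &old_dir, &new_dir);
        if (old_dir)
                printf("%s -> %s\n", old_dir, new_dir); /* a/b/c/d -> a/b/some/thing/else */
        free(old_dir);
        free(new_dir);
        return 0;
}
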
+
+static void remove_hashmap_entries(struct hashmap *dir_renames,
+                                  struct string_list *items_to_remove)
+{
+       int i;
+       struct dir_rename_entry *entry;
+
+       for (i = 0; i < items_to_remove->nr; i++) {
+               entry = items_to_remove->items[i].util;
+               hashmap_remove(dir_renames, entry, NULL);
+       }
+       string_list_clear(items_to_remove, 0);
+}
+
+/*
+ * See if there is a directory rename for path, and if there are any file
+ * level conflicts for the renamed location.  If there is a rename and
+ * there are no conflicts, return the new name.  Otherwise, return NULL.
+ */
+static char *handle_path_level_conflicts(struct merge_options *o,
+                                        const char *path,
+                                        struct dir_rename_entry *entry,
+                                        struct hashmap *collisions,
+                                        struct tree *tree)
+{
+       char *new_path = NULL;
+       struct collision_entry *collision_ent;
+       int clean = 1;
+       struct strbuf collision_paths = STRBUF_INIT;
+
+       /*
+        * entry has the mapping of old directory name to new directory name
+        * that we want to apply to path.
+        */
+       new_path = apply_dir_rename(entry, path);
+
+       if (!new_path) {
+               /* This should only happen when entry->non_unique_new_dir is set */
+               if (!entry->non_unique_new_dir)
+                       BUG("entry->non_unique_new_dir not set and !new_path");
+               output(o, 1, _("CONFLICT (directory rename split): "
+                              "Unclear where to place %s because directory "
+                              "%s was renamed to multiple other directories, "
+                              "with no destination getting a majority of the "
+                              "files."),
+                      path, entry->dir);
+               clean = 0;
+               return NULL;
+       }
+
+       /*
+        * The caller needs to have ensured that it has pre-populated
+        * collisions with all paths that map to new_path.  Do a quick check
+        * to ensure that's the case.
+        */
+       collision_ent = collision_find_entry(collisions, new_path);
+       if (collision_ent == NULL)
+               BUG("collision_ent is NULL");
+
+       /*
+        * Check for one-sided add/add/.../add conflicts, i.e.
+        * where implicit renames from the other side doing
+        * directory rename(s) can affect this side of history
+        * to put multiple paths into the same location.  Warn
+        * and bail on directory renames for such paths.
+        */
+       if (collision_ent->reported_already) {
+               clean = 0;
+       } else if (tree_has_path(tree, new_path)) {
+               collision_ent->reported_already = 1;
+               strbuf_add_separated_string_list(&collision_paths, ", ",
+                                                &collision_ent->source_files);
+               output(o, 1, _("CONFLICT (implicit dir rename): Existing "
+                              "file/dir at %s in the way of implicit "
+                              "directory rename(s) putting the following "
+                              "path(s) there: %s."),
+                      new_path, collision_paths.buf);
+               clean = 0;
+       } else if (collision_ent->source_files.nr > 1) {
+               collision_ent->reported_already = 1;
+               strbuf_add_separated_string_list(&collision_paths, ", ",
+                                                &collision_ent->source_files);
+               output(o, 1, _("CONFLICT (implicit dir rename): Cannot map "
+                              "more than one path to %s; implicit directory "
+                              "renames tried to put these paths there: %s"),
+                      new_path, collision_paths.buf);
+               clean = 0;
+       }
+
+       /* Free memory we no longer need */
+       strbuf_release(&collision_paths);
+       if (!clean && new_path) {
+               free(new_path);
+               return NULL;
+       }
+
+       return new_path;
+}
+
+/*
+ * There are a couple things we want to do at the directory level:
+ *   1. Check for both sides renaming to the same thing, in order to avoid
+ *      implicit renaming of files that should be left in place.  (See
+ *      testcase 6b in t6043 for details.)
+ *   2. Prune directory renames if there are still files left in the
+ *      original directory.  These represent a partial directory rename,
+ *      i.e. a rename where only some of the files within the directory
+ *      were renamed elsewhere.  (Technically, this could be done earlier
+ *      in get_directory_renames(), except that would prevent us from
+ *      doing the previous check and thus cause testcase 6b to fail.)
+ *   3. Check for rename/rename(1to2) conflicts (at the directory level).
+ *      In the future, we could potentially record this info as well and
+ *      omit reporting rename/rename(1to2) conflicts for each path within
+ *      the affected directories, thus cleaning up the merge output.
+ *   NOTE: We do NOT check for rename/rename(2to1) conflicts at the
+ *         directory level, because merging directories is fine.  If it
+ *         causes conflicts for files within those merged directories, then
+ *         that should be detected at the individual path level.
+ */
+static void handle_directory_level_conflicts(struct merge_options *o,
+                                            struct hashmap *dir_re_head,
+                                            struct tree *head,
+                                            struct hashmap *dir_re_merge,
+                                            struct tree *merge)
+{
+       struct hashmap_iter iter;
+       struct dir_rename_entry *head_ent;
+       struct dir_rename_entry *merge_ent;
+
+       struct string_list remove_from_head = STRING_LIST_INIT_NODUP;
+       struct string_list remove_from_merge = STRING_LIST_INIT_NODUP;
+
+       hashmap_iter_init(dir_re_head, &iter);
+       while ((head_ent = hashmap_iter_next(&iter))) {
+               merge_ent = dir_rename_find_entry(dir_re_merge, head_ent->dir);
+               if (merge_ent &&
+                   !head_ent->non_unique_new_dir &&
+                   !merge_ent->non_unique_new_dir &&
+                   !strbuf_cmp(&head_ent->new_dir, &merge_ent->new_dir)) {
+                       /* 1. Renamed identically; remove it from both sides */
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+                       strbuf_release(&merge_ent->new_dir);
+               } else if (tree_has_path(head, head_ent->dir)) {
+                       /* 2. This wasn't a directory rename after all */
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+               }
+       }
+
+       remove_hashmap_entries(dir_re_head, &remove_from_head);
+       remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+
+       hashmap_iter_init(dir_re_merge, &iter);
+       while ((merge_ent = hashmap_iter_next(&iter))) {
+               head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
+               if (tree_has_path(merge, merge_ent->dir)) {
+                       /* 2. This wasn't a directory rename after all */
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+               } else if (head_ent &&
+                          !head_ent->non_unique_new_dir &&
+                          !merge_ent->non_unique_new_dir) {
+                       /* 3. rename/rename(1to2) */
+                       /*
+                        * We can assume it's not rename/rename(1to1) because
+                        * that was case (1), already checked above.  So we
+                        * know that head_ent->new_dir and merge_ent->new_dir
+                        * are different strings.
+                        */
+                       output(o, 1, _("CONFLICT (rename/rename): "
+                                      "Rename directory %s->%s in %s. "
+                                      "Rename directory %s->%s in %s"),
+                              head_ent->dir, head_ent->new_dir.buf, o->branch1,
+                              head_ent->dir, merge_ent->new_dir.buf, o->branch2);
+                       string_list_append(&remove_from_head,
+                                          head_ent->dir)->util = head_ent;
+                       strbuf_release(&head_ent->new_dir);
+                       string_list_append(&remove_from_merge,
+                                          merge_ent->dir)->util = merge_ent;
+                       strbuf_release(&merge_ent->new_dir);
+               }
+       }
+
+       remove_hashmap_entries(dir_re_head, &remove_from_head);
+       remove_hashmap_entries(dir_re_merge, &remove_from_merge);
+}
+
+static struct hashmap *get_directory_renames(struct diff_queue_struct *pairs,
+                                            struct tree *tree)
+{
+       struct hashmap *dir_renames;
+       struct hashmap_iter iter;
+       struct dir_rename_entry *entry;
+       int i;
+
+       /*
+        * Typically, we think of a directory rename as all files from a
+        * certain directory being moved to a target directory.  However,
+        * what if someone first moved two files from the original
+        * directory in one commit, and then renamed the directory
+        * somewhere else in a later commit?  At merge time, we just know
+        * that files from the original directory went to two different
+        * places, and that the bulk of them ended up in the same place.
+        * We want each directory rename to represent where the bulk of the
+        * files from that directory end up; this function exists to find
+        * where the bulk of the files went.
+        *
+        * The first loop below simply iterates through the list of file
+        * renames, finding out how often each directory rename pair
+        * possibility occurs.
+        */
+       dir_renames = xmalloc(sizeof(struct hashmap));
+       dir_rename_init(dir_renames);
+       for (i = 0; i < pairs->nr; ++i) {
+               struct string_list_item *item;
+               int *count;
+               struct diff_filepair *pair = pairs->queue[i];
+               char *old_dir, *new_dir;
+
+               /* File not part of directory rename if it wasn't renamed */
+               if (pair->status != 'R')
+                       continue;
+
+               get_renamed_dir_portion(pair->one->path, pair->two->path,
+                                       &old_dir,        &new_dir);
+               if (!old_dir)
+                       /* Directory didn't change at all; ignore this one. */
+                       continue;
+
+               entry = dir_rename_find_entry(dir_renames, old_dir);
+               if (!entry) {
+                       entry = xmalloc(sizeof(struct dir_rename_entry));
+                       dir_rename_entry_init(entry, old_dir);
+                       hashmap_put(dir_renames, entry);
+               } else {
+                       free(old_dir);
+               }
+               item = string_list_lookup(&entry->possible_new_dirs, new_dir);
+               if (!item) {
+                       item = string_list_insert(&entry->possible_new_dirs,
+                                                 new_dir);
+                       item->util = xcalloc(1, sizeof(int));
+               } else {
+                       free(new_dir);
+               }
+               count = item->util;
+               *count += 1;
+       }
+
+       /*
+        * For each directory with files moved out of it, we find out which
+        * target directory received the most files so we can declare it to
+        * be the "winning" target location for the directory rename.  This
+        * winner gets recorded in new_dir.  If there is no winner
+        * (multiple target directories received the same number of files),
+        * we set non_unique_new_dir.  Once we've determined the winner (or
+        * that there is no winner), we no longer need possible_new_dirs.
+        */
+       hashmap_iter_init(dir_renames, &iter);
+       while ((entry = hashmap_iter_next(&iter))) {
+               int max = 0;
+               int bad_max = 0;
+               char *best = NULL;
+
+               for (i = 0; i < entry->possible_new_dirs.nr; i++) {
+                       int *count = entry->possible_new_dirs.items[i].util;
+
+                       if (*count == max)
+                               bad_max = max;
+                       else if (*count > max) {
+                               max = *count;
+                               best = entry->possible_new_dirs.items[i].string;
+                       }
+               }
+               if (bad_max == max)
+                       entry->non_unique_new_dir = 1;
+               else {
+                       assert(entry->new_dir.len == 0);
+                       strbuf_addstr(&entry->new_dir, best);
+               }
+               /*
+                * The relevant directory sub-portions of the original full
+                * filepaths were xstrndup'ed before inserting into
+                * possible_new_dirs, and instead of manually iterating the
+                * list and free'ing each, just lie and tell
+                * possible_new_dirs that it did the strdup'ing so that it
+                * will free them for us.
+                */
+               entry->possible_new_dirs.strdup_strings = 1;
+               string_list_clear(&entry->possible_new_dirs, 1);
+       }
+
+       return dir_renames;
+}
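
A standalone sketch (not git code) of the "winning target directory" rule described in the comments of get_directory_renames() above: the destination that received the most files wins, and a tie for the maximum means there is no unique winner (the non_unique_new_dir case). The struct and function names here are illustrative only:

#include <stdio.h>

struct dest_count { const char *dir; int count; };

/* Return the unique destination with the highest count, or NULL on a tie. */
static const char *pick_winner(const struct dest_count *dests, int n)
{
        int i, max = 0, bad_max = 0;
        const char *best = NULL;

        for (i = 0; i < n; i++) {
                if (dests[i].count == max)
                        bad_max = max;          /* tie for the current maximum */
                else if (dests[i].count > max) {
                        max = dests[i].count;
                        best = dests[i].dir;
                }
        }
        return (bad_max == max) ? NULL : best;  /* NULL: no unique winner */
}

int main(void)
{
        struct dest_count a[] = { { "new/", 3 }, { "other/", 1 } };
        struct dest_count b[] = { { "x/", 2 }, { "y/", 2 } };

        printf("%s\n", pick_winner(a, 2));                    /* new/ */
        printf("%s\n", pick_winner(b, 2) ? "unique" : "tie"); /* tie  */
        return 0;
}
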
+
+static struct dir_rename_entry *check_dir_renamed(const char *path,
+                                                 struct hashmap *dir_renames)
+{
+       char temp[PATH_MAX];
+       char *end;
+       struct dir_rename_entry *entry;
+
+       strcpy(temp, path);
+       while ((end = strrchr(temp, '/'))) {
+               *end = '\0';
+               entry = dir_rename_find_entry(dir_renames, temp);
+               if (entry)
+                       return entry;
+       }
+       return NULL;
+}
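
A standalone sketch (not git code) of the ancestor walk check_dir_renamed() relies on: repeatedly strip the last path component and test each enclosing directory, deepest first:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char temp[4096];
        char *end;

        strcpy(temp, "a/b/c/d/foo.c");
        while ((end = strrchr(temp, '/'))) {
                *end = '\0';
                /* prints: a/b/c/d, then a/b/c, then a/b, then a */
                printf("check directory: %s\n", temp);
        }
        return 0;
}
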
+
+static void compute_collisions(struct hashmap *collisions,
+                              struct hashmap *dir_renames,
+                              struct diff_queue_struct *pairs)
+{
+       int i;
+
+       /*
+        * Multiple files can be mapped to the same path due to directory
+        * renames done by the other side of history.  Since that other
+        * side of history could have merged multiple directories into one,
+        * if our side of history added the same file basename to each of
+        * those directories, then all N of them would get implicitly
+        * renamed by the directory rename detection into the same path,
+        * and we'd get an add/add/.../add conflict, and all those adds
+        * from *this* side of history.  This is not representable in the
+        * index, and users aren't going to easily be able to make sense of
+        * it.  So we need to provide a good warning about what's
+        * happening, and fall back to no-directory-rename detection
+        * behavior for those paths.
+        *
+        * See testcases 9e and all of section 5 from t6043 for examples.
+        */
+       collision_init(collisions);
+
+       for (i = 0; i < pairs->nr; ++i) {
+               struct dir_rename_entry *dir_rename_ent;
+               struct collision_entry *collision_ent;
+               char *new_path;
+               struct diff_filepair *pair = pairs->queue[i];
+
+               if (pair->status != 'A' && pair->status != 'R')
+                       continue;
+               dir_rename_ent = check_dir_renamed(pair->two->path,
+                                                  dir_renames);
+               if (!dir_rename_ent)
+                       continue;
+
+               new_path = apply_dir_rename(dir_rename_ent, pair->two->path);
+               if (!new_path)
+                       /*
+                        * dir_rename_ent->non_unique_new_dir is true, which
+                        * means there is no directory rename for us to use,
+                        * which means it won't cause us any additional
+                        * collisions.
+                        */
+                       continue;
+               collision_ent = collision_find_entry(collisions, new_path);
+               if (!collision_ent) {
+                       collision_ent = xcalloc(1,
+                                               sizeof(struct collision_entry));
+                       hashmap_entry_init(collision_ent, strhash(new_path));
+                       hashmap_put(collisions, collision_ent);
+                       collision_ent->target_file = new_path;
+               } else {
+                       free(new_path);
+               }
+               string_list_insert(&collision_ent->source_files,
+                                  pair->two->path);
+       }
+}
+
+static char *check_for_directory_rename(struct merge_options *o,
+                                       const char *path,
+                                       struct tree *tree,
+                                       struct hashmap *dir_renames,
+                                       struct hashmap *dir_rename_exclusions,
+                                       struct hashmap *collisions,
+                                       int *clean_merge)
+{
+       char *new_path = NULL;
+       struct dir_rename_entry *entry = check_dir_renamed(path, dir_renames);
+       struct dir_rename_entry *oentry = NULL;
+
+       if (!entry)
+               return new_path;
+
+       /*
+        * This next part is a little weird.  We do not want to do an
+        * implicit rename into a directory we renamed on our side, because
+        * that will result in a spurious rename/rename(1to2) conflict.  An
+        * example:
+        *   Base commit: dumbdir/afile, otherdir/bfile
+        *   Side 1:      smrtdir/afile, otherdir/bfile
+        *   Side 2:      dumbdir/afile, dumbdir/bfile
+        * Here, while working on Side 1, we could notice that otherdir was
+        * renamed/merged to dumbdir, and change the diff_filepair for
+        * otherdir/bfile into a rename into dumbdir/bfile.  However, Side
+        * 2 will notice the rename from dumbdir to smrtdir, and do the
+        * transitive rename to move it from dumbdir/bfile to
+        * smrtdir/bfile.  That gives us bfile in dumbdir vs being in
+        * smrtdir, a rename/rename(1to2) conflict.  We really just want
+        * the file to end up in smrtdir.  And the way to achieve that is
+        * to not let Side1 do the rename to dumbdir, since we know that is
+        * the source of one of our directory renames.
+        *
+        * That's why oentry and dir_rename_exclusions are here.
+        *
+        * As it turns out, this also prevents N-way transient rename
+        * confusion; See testcases 9c and 9d of t6043.
+        */
+       oentry = dir_rename_find_entry(dir_rename_exclusions, entry->new_dir.buf);
+       if (oentry) {
+               output(o, 1, _("WARNING: Avoiding applying %s -> %s rename "
+                              "to %s, because %s itself was renamed."),
+                      entry->dir, entry->new_dir.buf, path, entry->new_dir.buf);
+       } else {
+               new_path = handle_path_level_conflicts(o, path, entry,
+                                                      collisions, tree);
+               *clean_merge &= (new_path != NULL);
+       }
+
+       return new_path;
+}
+
+static void apply_directory_rename_modifications(struct merge_options *o,
+                                                struct diff_filepair *pair,
+                                                char *new_path,
+                                                struct rename *re,
+                                                struct tree *tree,
+                                                struct tree *o_tree,
+                                                struct tree *a_tree,
+                                                struct tree *b_tree,
+                                                struct string_list *entries,
+                                                int *clean)
+{
+       struct string_list_item *item;
+       int stage = (tree == a_tree ? 2 : 3);
+       int update_wd;
+
+       /*
+        * In all cases where we can do directory rename detection,
+        * unpack_trees() will have read pair->two->path into the
+        * index and the working copy.  We need to remove it so that
+        * we can instead place it at new_path.  It is guaranteed to
+        * not be untracked (unpack_trees() would have errored out
+        * saying the file would have been overwritten), but it might
+        * still be dirty.
+        */
+       update_wd = !was_dirty(o, pair->two->path);
+       if (!update_wd)
+               output(o, 1, _("Refusing to lose dirty file at %s"),
+                      pair->two->path);
+       remove_file(o, 1, pair->two->path, !update_wd);
+
+       /* Find or create a new re->dst_entry */
+       item = string_list_lookup(entries, new_path);
+       if (item) {
+               /*
+                * Since we're renaming on this side of history, and it's
+                * due to a directory rename on the other side of history
+                * (which we only allow when the directory in question no
+                * longer exists on the other side of history), the
+                * original entry for re->dst_entry is no longer
+                * necessary...
+                */
+               re->dst_entry->processed = 1;
+
+               /*
+                * ...because we'll be using this new one.
+                */
+               re->dst_entry = item->util;
+       } else {
+               /*
+                * re->dst_entry is for the before-dir-rename path, and we
+                * need it to hold information for the after-dir-rename
+                * path.  Before creating a new entry, we need to mark the
+                * old one as unnecessary (...unless it is shared by
+                * src_entry, i.e. this didn't use to be a rename, in which
+                * case we can just allow the normal processing to happen
+                * for it).
+                */
+               if (pair->status == 'R')
+                       re->dst_entry->processed = 1;
+
+               re->dst_entry = insert_stage_data(new_path,
+                                                 o_tree, a_tree, b_tree,
+                                                 entries);
+               item = string_list_insert(entries, new_path);
+               item->util = re->dst_entry;
+       }
+
+       /*
+        * Update the stage_data with the information about the path we are
+        * moving into place.  That slot will be empty and available for us
+        * to write to because of the collision checks in
+        * handle_path_level_conflicts().  In other words,
+        * re->dst_entry->stages[stage].oid will be the null_oid, so it's
+        * open for us to write to.
+        *
+        * It may be tempting to actually update the index at this point as
+        * well, using update_stages_for_stage_data(), but as per the big
+        * "NOTE" in update_stages(), doing so will modify the current
+        * in-memory index which will break calls to would_lose_untracked()
+        * that we need to make.  Instead, we need to just make sure that
+        * the various conflict_rename_*() functions update the index
+        * explicitly rather than relying on unpack_trees() to have done it.
+        */
+       get_tree_entry(&tree->object.oid,
+                      pair->two->path,
+                      &re->dst_entry->stages[stage].oid,
+                      &re->dst_entry->stages[stage].mode);
+
+       /* Update pair status */
+       if (pair->status == 'A') {
+               /*
+                * Recording rename information for this add makes it look
+                * like a rename/delete conflict.  Make sure we can
+                * correctly handle this as an add that was moved to a new
+                * directory instead of reporting a rename/delete conflict.
+                */
+               re->add_turned_into_rename = 1;
+       }
+       /*
+        * We don't actually look at pair->status again, but it seems
+        * pedagogically correct to adjust it.
+        */
+       pair->status = 'R';
+
+       /*
+        * Finally, record the new location.
+        */
+       pair->two->path = new_path;
+}
+
+/*
+ * Get information about all renames that occurred in 'pairs', making use of
+ * any implicit directory renames inferred from the other side of history.
+ * We need the three trees in the merge ('o_tree', 'a_tree' and 'b_tree')
+ * to be able to associate the correct cache entries with the rename
+ * information; tree is always equal to either a_tree or b_tree.
+ */
+static struct string_list *get_renames(struct merge_options *o,
+                                      struct diff_queue_struct *pairs,
+                                      struct hashmap *dir_renames,
+                                      struct hashmap *dir_rename_exclusions,
+                                      struct tree *tree,
+                                      struct tree *o_tree,
+                                      struct tree *a_tree,
+                                      struct tree *b_tree,
+                                      struct string_list *entries,
+                                      int *clean_merge)
+{
+       int i;
+       struct hashmap collisions;
+       struct hashmap_iter iter;
+       struct collision_entry *e;
+       struct string_list *renames;
+
+       compute_collisions(&collisions, dir_renames, pairs);
+       renames = xcalloc(1, sizeof(struct string_list));
+
+       for (i = 0; i < pairs->nr; ++i) {
+               struct string_list_item *item;
+               struct rename *re;
+               struct diff_filepair *pair = pairs->queue[i];
+               char *new_path; /* non-NULL only with directory renames */
+
+               if (pair->status != 'A' && pair->status != 'R') {
+                       diff_free_filepair(pair);
+                       continue;
+               }
+               new_path = check_for_directory_rename(o, pair->two->path, tree,
+                                                     dir_renames,
+                                                     dir_rename_exclusions,
+                                                     &collisions,
+                                                     clean_merge);
+               if (pair->status != 'R' && !new_path) {
+                       diff_free_filepair(pair);
+                       continue;
+               }
+
+               re = xmalloc(sizeof(*re));
+               re->processed = 0;
+               re->add_turned_into_rename = 0;
+               re->pair = pair;
+               item = string_list_lookup(entries, re->pair->one->path);
+               if (!item)
+                       re->src_entry = insert_stage_data(re->pair->one->path,
+                                       o_tree, a_tree, b_tree, entries);
+               else
+                       re->src_entry = item->util;
+
+               item = string_list_lookup(entries, re->pair->two->path);
+               if (!item)
+                       re->dst_entry = insert_stage_data(re->pair->two->path,
+                                       o_tree, a_tree, b_tree, entries);
+               else
+                       re->dst_entry = item->util;
+               item = string_list_insert(renames, pair->one->path);
+               item->util = re;
+               if (new_path)
+                       apply_directory_rename_modifications(o, pair, new_path,
+                                                            re, tree, o_tree,
+                                                            a_tree, b_tree,
+                                                            entries,
+                                                            clean_merge);
+       }
+
+       hashmap_iter_init(&collisions, &iter);
+       while ((e = hashmap_iter_next(&iter))) {
+               free(e->target_file);
+               string_list_clear(&e->source_files, 0);
+       }
+       hashmap_free(&collisions, 1);
+       return renames;
+}
+
 static int process_renames(struct merge_options *o,
                           struct string_list *a_renames,
                           struct string_list *b_renames)
@@ -1528,7 +2427,19 @@ static int process_renames(struct merge_options *o,
                        dst_other.mode = ren1->dst_entry->stages[other_stage].mode;
                        try_merge = 0;
 
-                       if (oid_eq(&src_other.oid, &null_oid)) {
+                       if (oid_eq(&src_other.oid, &null_oid) &&
+                           ren1->add_turned_into_rename) {
+                               setup_rename_conflict_info(RENAME_DIR,
+                                                          ren1->pair,
+                                                          NULL,
+                                                          branch1,
+                                                          branch2,
+                                                          ren1->dst_entry,
+                                                          NULL,
+                                                          o,
+                                                          NULL,
+                                                          NULL);
+                       } else if (oid_eq(&src_other.oid, &null_oid)) {
                                setup_rename_conflict_info(RENAME_DELETE,
                                                           ren1->pair,
                                                           NULL,
@@ -1625,6 +2536,105 @@ static int process_renames(struct merge_options *o,
        return clean_merge;
 }
 
+struct rename_info {
+       struct string_list *head_renames;
+       struct string_list *merge_renames;
+};
+
+static void initial_cleanup_rename(struct diff_queue_struct *pairs,
+                                  struct hashmap *dir_renames)
+{
+       struct hashmap_iter iter;
+       struct dir_rename_entry *e;
+
+       hashmap_iter_init(dir_renames, &iter);
+       while ((e = hashmap_iter_next(&iter))) {
+               free(e->dir);
+               strbuf_release(&e->new_dir);
+               /* possible_new_dirs already cleared in get_directory_renames */
+       }
+       hashmap_free(dir_renames, 1);
+       free(dir_renames);
+
+       free(pairs->queue);
+       free(pairs);
+}
+
+static int handle_renames(struct merge_options *o,
+                         struct tree *common,
+                         struct tree *head,
+                         struct tree *merge,
+                         struct string_list *entries,
+                         struct rename_info *ri)
+{
+       struct diff_queue_struct *head_pairs, *merge_pairs;
+       struct hashmap *dir_re_head, *dir_re_merge;
+       int clean = 1;
+
+       ri->head_renames = NULL;
+       ri->merge_renames = NULL;
+
+       if (!o->detect_rename)
+               return 1;
+
+       head_pairs = get_diffpairs(o, common, head);
+       merge_pairs = get_diffpairs(o, common, merge);
+
+       dir_re_head = get_directory_renames(head_pairs, head);
+       dir_re_merge = get_directory_renames(merge_pairs, merge);
+
+       handle_directory_level_conflicts(o,
+                                        dir_re_head, head,
+                                        dir_re_merge, merge);
+
+       ri->head_renames  = get_renames(o, head_pairs,
+                                       dir_re_merge, dir_re_head, head,
+                                       common, head, merge, entries,
+                                       &clean);
+       if (clean < 0)
+               goto cleanup;
+       ri->merge_renames = get_renames(o, merge_pairs,
+                                       dir_re_head, dir_re_merge, merge,
+                                       common, head, merge, entries,
+                                       &clean);
+       if (clean < 0)
+               goto cleanup;
+       clean &= process_renames(o, ri->head_renames, ri->merge_renames);
+
+cleanup:
+       /*
+        * Some cleanup is deferred until final_cleanup_renames() because the
+        * data structures are still needed and referenced in
+        * process_entry().  But there are a few things we can free now.
+        */
+       initial_cleanup_rename(head_pairs, dir_re_head);
+       initial_cleanup_rename(merge_pairs, dir_re_merge);
+
+       return clean;
+}
+
+static void final_cleanup_rename(struct string_list *rename)
+{
+       const struct rename *re;
+       int i;
+
+       if (rename == NULL)
+               return;
+
+       for (i = 0; i < rename->nr; i++) {
+               re = rename->items[i].util;
+               diff_free_filepair(re->pair);
+       }
+       string_list_clear(rename, 1);
+       free(rename);
+}
+
+static void final_cleanup_renames(struct rename_info *re_info)
+{
+       final_cleanup_rename(re_info->head_renames);
+       final_cleanup_rename(re_info->merge_renames);
+}
+
 static struct object_id *stage_oid(const struct object_id *oid, unsigned mode)
 {
        return (is_null_oid(oid) || mode == 0) ? NULL: (struct object_id *)oid;
@@ -1636,7 +2646,7 @@ static int read_oid_strbuf(struct merge_options *o,
        void *buf;
        enum object_type type;
        unsigned long size;
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return err(o, _("cannot read object %s"), oid_to_hex(oid));
        if (type != OBJ_BLOB) {
@@ -1715,6 +2725,7 @@ static int handle_modify_delete(struct merge_options *o,
 
 static int merge_content(struct merge_options *o,
                         const char *path,
+                        int file_in_way,
                         struct object_id *o_oid, int o_mode,
                         struct object_id *a_oid, int a_mode,
                         struct object_id *b_oid, int b_mode,
@@ -1762,7 +2773,6 @@ static int merge_content(struct merge_options *o,
 
        if (mfi.clean && !df_conflict_remains &&
            oid_eq(&mfi.oid, a_oid) && mfi.mode == a_mode) {
-               int path_renamed_outside_HEAD;
                output(o, 3, _("Skipped %s (merged same as existing)"), path);
                /*
                 * The content merge resulted in the same file contents we
@@ -1770,8 +2780,7 @@ static int merge_content(struct merge_options *o,
                 * are recorded at the correct path (which may not be true
                 * if the merge involves a rename).
                 */
-               path_renamed_outside_HEAD = !path2 || !strcmp(path, path2);
-               if (!path_renamed_outside_HEAD) {
+               if (was_tracked(path)) {
                        add_cacheinfo(o, mfi.mode, &mfi.oid, path,
                                      0, (!o->call_depth), 0);
                        return mfi.clean;
@@ -1789,7 +2798,7 @@ static int merge_content(struct merge_options *o,
                                return -1;
        }
 
-       if (df_conflict_remains) {
+       if (df_conflict_remains || file_in_way) {
                char *new_path;
                if (o->call_depth) {
                        remove_file_from_cache(path);
@@ -1823,6 +2832,30 @@ static int merge_content(struct merge_options *o,
        return mfi.clean;
 }
 
+static int conflict_rename_normal(struct merge_options *o,
+                                 const char *path,
+                                 struct object_id *o_oid, unsigned int o_mode,
+                                 struct object_id *a_oid, unsigned int a_mode,
+                                 struct object_id *b_oid, unsigned int b_mode,
+                                 struct rename_conflict_info *ci)
+{
+       int clean_merge;
+       int file_in_the_way = 0;
+
+       if (was_dirty(o, path)) {
+               file_in_the_way = 1;
+               output(o, 1, _("Refusing to lose dirty file at %s"), path);
+       }
+
+       /* Merge the content and write it out */
+       clean_merge = merge_content(o, path, file_in_the_way,
+                                   o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
+                                   ci);
+       if (clean_merge > 0 && file_in_the_way)
+               clean_merge = 0;
+       return clean_merge;
+}
+
 /* Per entry merge function */
 static int process_entry(struct merge_options *o,
                         const char *path, struct stage_data *entry)
@@ -1842,9 +2875,20 @@ static int process_entry(struct merge_options *o,
                switch (conflict_info->rename_type) {
                case RENAME_NORMAL:
                case RENAME_ONE_FILE_TO_ONE:
-                       clean_merge = merge_content(o, path,
-                                                   o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
-                                                   conflict_info);
+                       clean_merge = conflict_rename_normal(o,
+                                                            path,
+                                                            o_oid, o_mode,
+                                                            a_oid, a_mode,
+                                                            b_oid, b_mode,
+                                                            conflict_info);
+                       break;
+               case RENAME_DIR:
+                       clean_merge = 1;
+                       if (conflict_rename_dir(o,
+                                               conflict_info->pair1,
+                                               conflict_info->branch1,
+                                               conflict_info->branch2))
+                               clean_merge = -1;
                        break;
                case RENAME_DELETE:
                        clean_merge = 0;
@@ -1932,7 +2976,7 @@ static int process_entry(struct merge_options *o,
        } else if (a_oid && b_oid) {
                /* Case C: Added in both (check for same permissions) and */
                /* case D: Modified in both, but differently. */
-               clean_merge = merge_content(o, path,
+               clean_merge = merge_content(o, path, 0 /* file_in_way */,
                                            o_oid, o_mode, a_oid, a_mode, b_oid, b_mode,
                                            NULL);
        } else if (!o_oid && !a_oid && !b_oid) {
@@ -1973,7 +3017,7 @@ int merge_trees(struct merge_options *o,
                return 1;
        }
 
-       code = git_merge_trees(o->call_depth, common, head, merge);
+       code = git_merge_trees(o, common, head, merge);
 
        if (code != 0) {
                if (show(o, 4) || o->call_depth)
@@ -1984,7 +3028,8 @@ int merge_trees(struct merge_options *o,
        }
 
        if (unmerged_cache()) {
-               struct string_list *entries, *re_head, *re_merge;
+               struct string_list *entries;
+               struct rename_info re_info;
                int i;
                /*
                 * Only need the hashmap while processing entries, so
@@ -1998,10 +3043,9 @@ int merge_trees(struct merge_options *o,
                get_files_dirs(o, merge);
 
                entries = get_unmerged();
+               clean = handle_renames(o, common, head, merge, entries,
+                                      &re_info);
                record_df_conflict_files(o, entries);
-               re_head  = get_renames(o, head, common, head, merge, entries);
-               re_merge = get_renames(o, merge, common, head, merge, entries);
-               clean = process_renames(o, re_head, re_merge);
                if (clean < 0)
                        goto cleanup;
                for (i = entries->nr-1; 0 <= i; i--) {
@@ -2025,16 +3069,13 @@ int merge_trees(struct merge_options *o,
                }
 
 cleanup:
-               string_list_clear(re_merge, 0);
-               string_list_clear(re_head, 0);
+               final_cleanup_renames(&re_info);
+
                string_list_clear(entries, 1);
+               free(entries);
 
                hashmap_free(&o->current_file_dir_set, 1);
 
-               free(re_merge);
-               free(re_head);
-               free(entries);
-
                if (clean < 0)
                        return clean;
        }
@@ -2070,7 +3111,7 @@ int merge_recursive(struct merge_options *o,
 {
        struct commit_list *iter;
        struct commit *merged_common_ancestors;
-       struct tree *mrtree = mrtree;
+       struct tree *mrtree;
        int clean;
 
        if (show(o, 4)) {
@@ -2198,11 +3239,13 @@ int merge_recursive_generic(struct merge_options *o,
        hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
        clean = merge_recursive(o, head_commit, next_commit, ca,
                        result);
-       if (clean < 0)
+       if (clean < 0) {
+               rollback_lock_file(&lock);
                return clean;
+       }
 
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                return err(o, _("Unable to write index."));
 
        return clean ? 0 : 1;
index 80d69d140195cc3ba1054050569e56bfc0277b56..d863cf88676ef321aad85d395f964efe5673a805 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef MERGE_RECURSIVE_H
 #define MERGE_RECURSIVE_H
 
+#include "unpack-trees.h"
 #include "string-list.h"
 
 struct merge_options {
@@ -27,6 +28,32 @@ struct merge_options {
        struct strbuf obuf;
        struct hashmap current_file_dir_set;
        struct string_list df_conflict_file_set;
+       struct unpack_trees_options unpack_opts;
+};
+
+/*
+ * For dir_rename_entry, directory names are stored as a full path from the
+ * toplevel of the repository and do not include a trailing '/'.  Also:
+ *
+ *   dir:                original name of directory being renamed
+ *   non_unique_new_dir: if true, could not determine new_dir
+ *   new_dir:            final name of directory being renamed
+ *   possible_new_dirs:  temporarily used to help determine new_dir; see comments
+ *                       in get_directory_renames() for details
+ */
+struct dir_rename_entry {
+       struct hashmap_entry ent; /* must be the first member! */
+       char *dir;
+       unsigned non_unique_new_dir:1;
+       struct strbuf new_dir;
+       struct string_list possible_new_dirs;
+};
+
+struct collision_entry {
+       struct hashmap_entry ent; /* must be the first member! */
+       char *target_file;
+       struct string_list source_files;
+       unsigned reported_already:1;
 };
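The "must be the first member!" notes above matter because the hashmap hands back pointers to the embedded hashmap_entry and callers cast them to the containing struct; that cast is only valid when the entry sits at offset zero. A minimal standalone illustration of the pattern (stand-in types, not git's hashmap API):

#include <assert.h>
#include <stddef.h>

struct entry { unsigned int hash; };       /* stand-in for hashmap_entry */

struct collision {
        struct entry ent;                  /* first member, offset 0 */
        const char *target_file;
};

int main(void)
{
        struct collision c = { { 42 }, "some/path" };
        struct entry *stored = &c.ent;     /* what a map would keep */
        struct collision *back = (struct collision *)stored;

        /* Casting back is safe only because ent is the first member. */
        assert(back == &c);
        assert(offsetof(struct collision, ent) == 0);
        return 0;
}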
 
 /* merge_trees() but with recursive ancestor consolidation */
diff --git a/merge.c b/merge.c
index 195b5787005ee8f63bc10845f6d788aa4730a61b..f06a4773d4f4093d700c652accc79ae17f161a59 100644 (file)
--- a/merge.c
+++ b/merge.c
@@ -113,17 +113,23 @@ int checkout_fast_forward(const struct object_id *head,
        setup_unpack_trees_porcelain(&opts, "merge");
 
        trees[nr_trees] = parse_tree_indirect(head);
-       if (!trees[nr_trees++])
+       if (!trees[nr_trees++]) {
+               rollback_lock_file(&lock_file);
                return -1;
+       }
        trees[nr_trees] = parse_tree_indirect(remote);
-       if (!trees[nr_trees++])
+       if (!trees[nr_trees++]) {
+               rollback_lock_file(&lock_file);
                return -1;
+       }
        for (i = 0; i < nr_trees; i++) {
                parse_tree(trees[i]);
                init_tree_desc(t+i, trees[i]->buffer, trees[i]->size);
        }
-       if (unpack_trees(nr_trees, t, &opts))
+       if (unpack_trees(nr_trees, t, &opts)) {
+               rollback_lock_file(&lock_file);
                return -1;
+       }
        if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
                return error(_("unable to write new index file"));
        return 0;
diff --git a/mru.c b/mru.c
deleted file mode 100644 (file)
index 9dedae0..0000000
--- a/mru.c
+++ /dev/null
@@ -1,50 +0,0 @@
-#include "cache.h"
-#include "mru.h"
-
-void mru_append(struct mru *mru, void *item)
-{
-       struct mru_entry *cur = xmalloc(sizeof(*cur));
-       cur->item = item;
-       cur->prev = mru->tail;
-       cur->next = NULL;
-
-       if (mru->tail)
-               mru->tail->next = cur;
-       else
-               mru->head = cur;
-       mru->tail = cur;
-}
-
-void mru_mark(struct mru *mru, struct mru_entry *entry)
-{
-       /* If we're already at the front of the list, nothing to do */
-       if (mru->head == entry)
-               return;
-
-       /* Otherwise, remove us from our current slot... */
-       if (entry->prev)
-               entry->prev->next = entry->next;
-       if (entry->next)
-               entry->next->prev = entry->prev;
-       else
-               mru->tail = entry->prev;
-
-       /* And insert us at the beginning. */
-       entry->prev = NULL;
-       entry->next = mru->head;
-       if (mru->head)
-               mru->head->prev = entry;
-       mru->head = entry;
-}
-
-void mru_clear(struct mru *mru)
-{
-       struct mru_entry *p = mru->head;
-
-       while (p) {
-               struct mru_entry *to_free = p;
-               p = p->next;
-               free(to_free);
-       }
-       mru->head = mru->tail = NULL;
-}
diff --git a/mru.h b/mru.h
deleted file mode 100644 (file)
index 42e4aea..0000000
--- a/mru.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef MRU_H
-#define MRU_H
-
-/**
- * A simple most-recently-used cache, backed by a doubly-linked list.
- *
- * Usage is roughly:
- *
- *   // Create a list.  Zero-initialization is required.
- *   static struct mru cache;
- *   mru_append(&cache, item);
- *   ...
- *
- *   // Iterate in MRU order.
- *   struct mru_entry *p;
- *   for (p = cache.head; p; p = p->next) {
- *     if (matches(p->item))
- *             break;
- *   }
- *
- *   // Mark an item as used, moving it to the front of the list.
- *   mru_mark(&cache, p);
- *
- *   // Reset the list to empty, cleaning up all resources.
- *   mru_clear(&cache);
- *
- * Note that you SHOULD NOT call mru_mark() and then continue traversing the
- * list; it reorders the marked item to the front of the list, and therefore
- * you will begin traversing the whole list again.
- */
-
-struct mru_entry {
-       void *item;
-       struct mru_entry *prev, *next;
-};
-
-struct mru {
-       struct mru_entry *head, *tail;
-};
-
-void mru_append(struct mru *mru, void *item);
-void mru_mark(struct mru *mru, struct mru_entry *entry);
-void mru_clear(struct mru *mru);
-
-#endif /* MRU_H */
index 45c98db0a057e93b5f3f84eba2008da86130010d..163849831c9f11316ce97c649b77c32cf2eed276 100644 (file)
@@ -578,6 +578,8 @@ static void threaded_lazy_init_name_hash(
 
 static void lazy_init_name_hash(struct index_state *istate)
 {
+       uint64_t start = getnanotime();
+
        if (istate->name_hash_initialized)
                return;
        hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
@@ -600,6 +602,7 @@ static void lazy_init_name_hash(struct index_state *istate)
        }
 
        istate->name_hash_initialized = 1;
+       trace_performance_since(start, "initialize name hash");
 }
 
 /*
@@ -696,12 +699,12 @@ void adjust_dirname_case(struct index_state *istate, char *name)
                if (*ptr == '/') {
                        struct dir_entry *dir;
 
-                       ptr++;
-                       dir = find_dir_entry(istate, name, ptr - name + 1);
+                       dir = find_dir_entry(istate, name, ptr - name);
                        if (dir) {
                                memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
-                               startPtr = ptr;
+                               startPtr = ptr + 1;
                        }
+                       ptr++;
                }
        }
 }
index 17ee8602b3d2fbc2469a98e6cdeab0f2ff2b7ad3..e61988e503b0b2097afeb6716123d88429c0717d 100644 (file)
@@ -54,10 +54,10 @@ int notes_cache_write(struct notes_cache *c)
        if (!c->tree.dirty)
                return 0;
 
-       if (write_notes_tree(&c->tree, tree_oid.hash))
+       if (write_notes_tree(&c->tree, &tree_oid))
                return -1;
-       if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL,
-                       commit_oid.hash, NULL, NULL) < 0)
+       if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL,
+                       &commit_oid, NULL, NULL) < 0)
                return -1;
        if (update_ref("update notes cache", c->tree.update_ref, &commit_oid,
                       NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
@@ -77,7 +77,7 @@ char *notes_cache_get(struct notes_cache *c, struct object_id *key_oid,
        value_oid = get_note(&c->tree, key_oid);
        if (!value_oid)
                return NULL;
-       value = read_sha1_file(value_oid->hash, &type, &size);
+       value = read_object_file(value_oid, &type, &size);
 
        *outsize = size;
        return value;
@@ -88,7 +88,7 @@ int notes_cache_put(struct notes_cache *c, struct object_id *key_oid,
 {
        struct object_id value_oid;
 
-       if (write_sha1_file(data, size, "blob", value_oid.hash) < 0)
+       if (write_object_file(data, size, "blob", &value_oid) < 0)
                return -1;
        return add_note(&c->tree, key_oid, &value_oid, NULL);
 }
index 0f6573cb17cbbbc2219453bdca424fd7ebc156dc..8e0726a9418e3b24bc8e665057f607e18e7d4206 100644 (file)
@@ -322,7 +322,7 @@ static void write_note_to_worktree(const struct object_id *obj,
 {
        enum object_type type;
        unsigned long size;
-       void *buf = read_sha1_file(note->hash, &type, &size);
+       void *buf = read_object_file(note, &type, &size);
 
        if (!buf)
                die("cannot read note %s for object %s",
@@ -642,9 +642,8 @@ int notes_merge(struct notes_merge_options *o,
                struct commit_list *parents = NULL;
                commit_list_insert(remote, &parents); /* LIFO order */
                commit_list_insert(local, &parents);
-               create_notes_commit(local_tree, parents,
-                                   o->commit_msg.buf, o->commit_msg.len,
-                                   result_oid->hash);
+               create_notes_commit(local_tree, parents, o->commit_msg.buf,
+                                   o->commit_msg.len, result_oid);
        }
 
 found_result:
@@ -718,8 +717,8 @@ int notes_merge_commit(struct notes_merge_options *o,
                strbuf_setlen(&path, baselen);
        }
 
-       create_notes_commit(partial_tree, partial_commit->parents,
-                           msg, strlen(msg), result_oid->hash);
+       create_notes_commit(partial_tree, partial_commit->parents, msg,
+                           strlen(msg), result_oid);
        unuse_commit_buffer(partial_commit, buffer);
        if (o->verbosity >= 4)
                printf("Finalized notes merge commit: %s\n",
index 5c8e70c98fd26cca8bc690bc92653dd578a78c92..02407fe2a7327947dda6bf755405d2778ae450ae 100644 (file)
@@ -6,13 +6,13 @@
 
 void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
                         const char *msg, size_t msg_len,
-                        unsigned char *result_sha1)
+                        struct object_id *result_oid)
 {
        struct object_id tree_oid;
 
        assert(t->initialized);
 
-       if (write_notes_tree(t, tree_oid.hash))
+       if (write_notes_tree(t, &tree_oid))
                die("Failed to write notes tree to database");
 
        if (!parents) {
@@ -27,7 +27,8 @@ void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
                /* else: t->ref points to nothing, assume root/orphan commit */
        }
 
-       if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL))
+       if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL,
+                       NULL))
                die("Failed to commit notes tree to database");
 }
 
@@ -47,7 +48,7 @@ void commit_notes(struct notes_tree *t, const char *msg)
        strbuf_addstr(&buf, msg);
        strbuf_complete_line(&buf);
 
-       create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash);
+       create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid);
        strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
        update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0,
                   UPDATE_REFS_DIE_ON_ERR);
index 11905783989aa0d87be08e0c4311762b4647e9a4..5d79cbef512e3bdbbfafedbd7650036aff695f83 100644 (file)
@@ -15,7 +15,8 @@
  * The resulting commit SHA1 is stored in result_sha1.
  */
 void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
-                        const char *msg, size_t msg_len, unsigned char *result_sha1);
+                        const char *msg, size_t msg_len,
+                        struct object_id *result_oid);
 
 void commit_notes(struct notes_tree *t, const char *msg);
 
diff --git a/notes.c b/notes.c
index c7f21fae441067f7250caf0b4186fcbcbc7630bb..a386d450c4c812ef30d0fc661fe2c03e1d062a83 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -270,8 +270,8 @@ static int note_tree_insert(struct notes_tree *t, struct int_node *tree,
                                if (!oidcmp(&l->val_oid, &entry->val_oid))
                                        return 0;
 
-                               ret = combine_notes(l->val_oid.hash,
-                                                   entry->val_oid.hash);
+                               ret = combine_notes(&l->val_oid,
+                                                   &entry->val_oid);
                                if (!ret && is_null_oid(&l->val_oid))
                                        note_tree_remove(t, tree, n, entry);
                                free(entry);
@@ -667,7 +667,7 @@ static int tree_write_stack_finish_subtree(struct tree_write_stack *tws)
                ret = tree_write_stack_finish_subtree(n);
                if (ret)
                        return ret;
-               ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash);
+               ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s);
                if (ret)
                        return ret;
                strbuf_release(&n->buf);
@@ -786,8 +786,8 @@ static int prune_notes_helper(const struct object_id *object_oid,
        return 0;
 }
 
-int combine_notes_concatenate(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_concatenate(struct object_id *cur_oid,
+                             const struct object_id *new_oid)
 {
        char *cur_msg = NULL, *new_msg = NULL, *buf;
        unsigned long cur_len, new_len, buf_len;
@@ -795,18 +795,18 @@ int combine_notes_concatenate(unsigned char *cur_sha1,
        int ret;
 
        /* read in both note blob objects */
-       if (!is_null_sha1(new_sha1))
-               new_msg = read_sha1_file(new_sha1, &new_type, &new_len);
+       if (!is_null_oid(new_oid))
+               new_msg = read_object_file(new_oid, &new_type, &new_len);
        if (!new_msg || !new_len || new_type != OBJ_BLOB) {
                free(new_msg);
                return 0;
        }
-       if (!is_null_sha1(cur_sha1))
-               cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len);
+       if (!is_null_oid(cur_oid))
+               cur_msg = read_object_file(cur_oid, &cur_type, &cur_len);
        if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
                free(cur_msg);
                free(new_msg);
-               hashcpy(cur_sha1, new_sha1);
+               oidcpy(cur_oid, new_oid);
                return 0;
        }
 
@@ -825,20 +825,20 @@ int combine_notes_concatenate(unsigned char *cur_sha1,
        free(new_msg);
 
        /* create a new blob object from buf */
-       ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1);
+       ret = write_object_file(buf, buf_len, blob_type, cur_oid);
        free(buf);
        return ret;
 }
 
-int combine_notes_overwrite(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_overwrite(struct object_id *cur_oid,
+                           const struct object_id *new_oid)
 {
-       hashcpy(cur_sha1, new_sha1);
+       oidcpy(cur_oid, new_oid);
        return 0;
 }
 
-int combine_notes_ignore(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_ignore(struct object_id *cur_oid,
+                        const struct object_id *new_oid)
 {
        return 0;
 }
@@ -848,17 +848,17 @@ int combine_notes_ignore(unsigned char *cur_sha1,
  * newlines removed.
  */
 static int string_list_add_note_lines(struct string_list *list,
-                                     const unsigned char *sha1)
+                                     const struct object_id *oid)
 {
        char *data;
        unsigned long len;
        enum object_type t;
 
-       if (is_null_sha1(sha1))
+       if (is_null_oid(oid))
                return 0;
 
        /* read_sha1_file NUL-terminates */
-       data = read_sha1_file(sha1, &t, &len);
+       data = read_object_file(oid, &t, &len);
        if (t != OBJ_BLOB || !data || !len) {
                free(data);
                return t != OBJ_BLOB || !data;
@@ -884,17 +884,17 @@ static int string_list_join_lines_helper(struct string_list_item *item,
        return 0;
 }
 
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
-               const unsigned char *new_sha1)
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+                               const struct object_id *new_oid)
 {
        struct string_list sort_uniq_list = STRING_LIST_INIT_DUP;
        struct strbuf buf = STRBUF_INIT;
        int ret = 1;
 
        /* read both note blob objects into unique_lines */
-       if (string_list_add_note_lines(&sort_uniq_list, cur_sha1))
+       if (string_list_add_note_lines(&sort_uniq_list, cur_oid))
                goto out;
-       if (string_list_add_note_lines(&sort_uniq_list, new_sha1))
+       if (string_list_add_note_lines(&sort_uniq_list, new_oid))
                goto out;
        string_list_remove_empty_items(&sort_uniq_list, 0);
        string_list_sort(&sort_uniq_list);
@@ -905,7 +905,7 @@ int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
                                 string_list_join_lines_helper, &buf))
                goto out;
 
-       ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1);
+       ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid);
 
 out:
        strbuf_release(&buf);
@@ -1012,7 +1012,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref,
                return;
        if (flags & NOTES_INIT_WRITABLE && read_ref(notes_ref, &object_oid))
                die("Cannot use notes ref %s", notes_ref);
-       if (get_tree_entry(object_oid.hash, "", oid.hash, &mode))
+       if (get_tree_entry(&object_oid, "", &oid, &mode))
                die("Failed to read notes tree referenced by %s (%s)",
                    notes_ref, oid_to_hex(&object_oid));
 
@@ -1123,11 +1123,12 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
        return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data);
 }
 
-int write_notes_tree(struct notes_tree *t, unsigned char *result)
+int write_notes_tree(struct notes_tree *t, struct object_id *result)
 {
        struct tree_write_stack root;
        struct write_each_note_data cb_data;
        int ret;
+       int flags;
 
        if (!t)
                t = &default_notes_tree;
@@ -1141,12 +1142,12 @@ int write_notes_tree(struct notes_tree *t, unsigned char *result)
        cb_data.next_non_note = t->first_non_note;
 
        /* Write tree objects representing current notes tree */
-       ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
-                               FOR_EACH_NOTE_YIELD_SUBTREES,
-                       write_each_note, &cb_data) ||
-               write_each_non_note_until(NULL, &cb_data) ||
-               tree_write_stack_finish_subtree(&root) ||
-               write_sha1_file(root.buf.buf, root.buf.len, tree_type, result);
+       flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
+               FOR_EACH_NOTE_YIELD_SUBTREES;
+       ret = for_each_note(t, flags, write_each_note, &cb_data) ||
+             write_each_non_note_until(NULL, &cb_data) ||
+             tree_write_stack_finish_subtree(&root) ||
+             write_object_file(root.buf.buf, root.buf.len, tree_type, result);
        strbuf_release(&root.buf);
        return ret;
 }
@@ -1216,7 +1217,7 @@ static void format_note(struct notes_tree *t, const struct object_id *object_oid
        if (!oid)
                return;
 
-       if (!(msg = read_sha1_file(oid->hash, &type, &msglen)) || type != OBJ_BLOB) {
+       if (!(msg = read_object_file(oid, &type, &msglen)) || type != OBJ_BLOB) {
                free(msg);
                return;
        }
diff --git a/notes.h b/notes.h
index 3848c2fb3f03510c1d0e1bd23ef6fbb3060b89c6..0433f45db55b5b807abcd633f507e9c75324d4e6 100644 (file)
--- a/notes.h
+++ b/notes.h
@@ -9,27 +9,32 @@
  * When adding a new note annotating the same object as an existing note, it is
  * up to the caller to decide how to combine the two notes. The decision is
  * made by passing in a function of the following form. The function accepts
- * two SHA1s -- of the existing note and the new note, respectively. The
+ * two object_ids -- of the existing note and the new note, respectively. The
  * function then combines the notes in whatever way it sees fit, and writes the
- * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return
+ * resulting oid into the first argument (cur_oid). A non-zero return
  * value indicates failure.
  *
- * The two given SHA1s shall both be non-NULL and different from each other.
- * Either of them (but not both) may be == null_sha1, which indicates an
- * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1,
+ * The two given object_ids shall both be non-NULL and different from each
+ * other. Either of them (but not both) may be == null_oid, which indicates an
+ * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid,
  * the note will be removed from the notes tree.
  *
  * The default combine_notes function (you get this when passing NULL) is
  * combine_notes_concatenate(), which appends the contents of the new note to
  * the contents of the existing note.
  */
-typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1);
+typedef int (*combine_notes_fn)(struct object_id *cur_oid,
+                               const struct object_id *new_oid);
 
 /* Common notes combinators */
-int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1);
+int combine_notes_concatenate(struct object_id *cur_oid,
+                             const struct object_id *new_oid);
+int combine_notes_overwrite(struct object_id *cur_oid,
+                           const struct object_id *new_oid);
+int combine_notes_ignore(struct object_id *cur_oid,
+                        const struct object_id *new_oid);
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+                               const struct object_id *new_oid);
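As a usage sketch only (a hypothetical caller, not code from this patch): a notes tree selects one of these combinators, or NULL for the default combine_notes_concatenate(), when it is initialized, and add_note() invokes it whenever a second note arrives for an already-annotated object.

static void annotate_object(const struct object_id *object_oid,
                            const struct object_id *note_oid)
{
        struct notes_tree t;

        /* Combine repeated notes by sorting and de-duplicating their lines. */
        init_notes(&t, "refs/notes/commits", combine_notes_cat_sort_uniq,
                   NOTES_INIT_WRITABLE);
        if (add_note(&t, object_oid, note_oid, NULL))
                die("failed to add note for %s", oid_to_hex(object_oid));
        /* The in-memory tree still has to be written out and committed
         * (e.g. commit_notes()) before the new note becomes visible. */
        free_notes(&t);
}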
 
 /*
  * Notes tree object
@@ -212,7 +217,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
  * Write the given notes_tree structure to the object database
  *
  * Creates a new tree object encapsulating the current state of the given
- * notes_tree, and stores its SHA1 into the 'result' argument.
+ * notes_tree, and stores its object id into the 'result' argument.
  *
  * Returns zero on success, non-zero on failure.
  *
@@ -220,7 +225,7 @@ int for_each_note(struct notes_tree *t, int flags, each_note_fn fn,
  * this function has returned zero. Please also remember to create a
  * corresponding commit object, and update the appropriate notes ref.
  */
-int write_notes_tree(struct notes_tree *t, unsigned char *result);
+int write_notes_tree(struct notes_tree *t, struct object_id *result);
 
 /* Flags controlling the operation of prune */
 #define NOTES_PRUNE_VERBOSE 1
index 0afdfd19b784a541ad6d7237add2fbb1c9ec91ab..2c909385a7c4a31e218b9c4e4fb4b1ec5b19de71 100644 (file)
--- a/object.c
+++ b/object.c
@@ -26,7 +26,7 @@ static const char *object_type_strings[] = {
        "tag",          /* OBJ_TAG = 4 */
 };
 
-const char *typename(unsigned int type)
+const char *type_name(unsigned int type)
 {
        if (type >= ARRAY_SIZE(object_type_strings))
                return NULL;
@@ -166,7 +166,7 @@ void *object_as_type(struct object *obj, enum object_type type, int quiet)
                if (!quiet)
                        error("object %s is a %s, not a %s",
                              oid_to_hex(&obj->oid),
-                             typename(obj->type), typename(type));
+                             type_name(obj->type), type_name(type));
                return NULL;
        }
 }
@@ -244,7 +244,7 @@ struct object *parse_object(const struct object_id *oid)
        unsigned long size;
        enum object_type type;
        int eaten;
-       const unsigned char *repl = lookup_replace_object(oid->hash);
+       const struct object_id *repl = lookup_replace_object(oid);
        void *buffer;
        struct object *obj;
 
@@ -252,10 +252,10 @@ struct object *parse_object(const struct object_id *oid)
        if (obj && obj->parsed)
                return obj;
 
-       if ((obj && obj->type == OBJ_BLOB) ||
+       if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
            (!obj && has_object_file(oid) &&
-            sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
-               if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
+            oid_object_info(oid, NULL) == OBJ_BLOB)) {
+               if (check_object_signature(repl, NULL, 0, NULL) < 0) {
                        error("sha1 mismatch %s", oid_to_hex(oid));
                        return NULL;
                }
@@ -263,11 +263,11 @@ struct object *parse_object(const struct object_id *oid)
                return lookup_object(oid->hash);
        }
 
-       buffer = read_sha1_file(oid->hash, &type, &size);
+       buffer = read_object_file(oid, &type, &size);
        if (buffer) {
-               if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
+               if (check_object_signature(repl, buffer, size, type_name(type)) < 0) {
                        free(buffer);
-                       error("sha1 mismatch %s", sha1_to_hex(repl));
+                       error("sha1 mismatch %s", oid_to_hex(repl));
                        return NULL;
                }
 
index 87563d90562b51859da8313b105a3e7785ed9d26..f13f85b2a94e3afc15debfbaf89416b5cda45acb 100644 (file)
--- a/object.h
+++ b/object.h
@@ -28,18 +28,22 @@ struct object_array {
 #define TYPE_BITS   3
 /*
  * object flag allocation:
- * revision.h:      0---------10                                26
- * fetch-pack.c:    0---5
- * walker.c:        0-2
- * upload-pack.c:       4       11----------------19
- * builtin/blame.c:               12-13
- * bisect.c:                               16
- * bundle.c:                               16
- * http-push.c:                            16-----19
- * commit.c:                               16-----19
- * sha1_name.c:                                     20
- * list-objects-filter.c:                             21
- * builtin/fsck.c:  0--3
+ * revision.h:               0---------10                                26
+ * fetch-pack.c:             0----5
+ * walker.c:                 0-2
+ * upload-pack.c:                4       11----------------19
+ * builtin/blame.c:                        12-13
+ * bisect.c:                                        16
+ * bundle.c:                                        16
+ * http-push.c:                                     16-----19
+ * commit.c:                                        16-----19
+ * sha1_name.c:                                              20
+ * list-objects-filter.c:                                      21
+ * builtin/fsck.c:           0--3
+ * builtin/index-pack.c:                                     2021
+ * builtin/pack-objects.c:                                   20
+ * builtin/reflog.c:                   10--12
+ * builtin/unpack-objects.c:                                 2021
  */
 #define FLAG_BITS  27
 
@@ -53,7 +57,7 @@ struct object {
        struct object_id oid;
 };
 
-extern const char *typename(unsigned int type);
+extern const char *type_name(unsigned int type);
 extern int type_from_string_gently(const char *str, ssize_t, int gentle);
 #define type_from_string(str) type_from_string_gently(str, -1, 0)
 
index a8df5ce2ab67bfdb5445e4c02e4ad65032abe9a0..41ae27fb19a94a4c44058d8850a9592c41b82f90 100644 (file)
@@ -73,8 +73,7 @@ void bitmap_writer_build_type_index(struct pack_idx_entry **index,
                        break;
 
                default:
-                       real_type = sha1_object_info(entry->idx.oid.hash,
-                                                    NULL);
+                       real_type = oid_object_info(&entry->idx.oid, NULL);
                        break;
                }
 
@@ -440,19 +439,19 @@ void bitmap_writer_select_commits(struct commit **indexed_commits,
 }
 
 
-static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
+static int hashwrite_ewah_helper(void *f, const void *buf, size_t len)
 {
-       /* sha1write will die on error */
-       sha1write(f, buf, len);
+       /* hashwrite will die on error */
+       hashwrite(f, buf, len);
        return len;
 }
 
 /**
  * Write the bitmap index to disk
  */
-static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap)
+static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap)
 {
-       if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0)
+       if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0)
                die("Failed to write bitmap index");
 }
 
@@ -462,7 +461,7 @@ static const unsigned char *sha1_access(size_t pos, void *table)
        return index[pos]->oid.hash;
 }
 
-static void write_selected_commits_v1(struct sha1file *f,
+static void write_selected_commits_v1(struct hashfile *f,
                                      struct pack_idx_entry **index,
                                      uint32_t index_nr)
 {
@@ -477,15 +476,15 @@ static void write_selected_commits_v1(struct sha1file *f,
                if (commit_pos < 0)
                        die("BUG: trying to write commit not in index");
 
-               sha1write_be32(f, commit_pos);
-               sha1write_u8(f, stored->xor_offset);
-               sha1write_u8(f, stored->flags);
+               hashwrite_be32(f, commit_pos);
+               hashwrite_u8(f, stored->xor_offset);
+               hashwrite_u8(f, stored->flags);
 
                dump_bitmap(f, stored->write_as);
        }
 }
 
-static void write_hash_cache(struct sha1file *f,
+static void write_hash_cache(struct hashfile *f,
                             struct pack_idx_entry **index,
                             uint32_t index_nr)
 {
@@ -494,7 +493,7 @@ static void write_hash_cache(struct sha1file *f,
        for (i = 0; i < index_nr; ++i) {
                struct object_entry *entry = (struct object_entry *)index[i];
                uint32_t hash_value = htonl(entry->hash);
-               sha1write(f, &hash_value, sizeof(hash_value));
+               hashwrite(f, &hash_value, sizeof(hash_value));
        }
 }
 
@@ -511,13 +510,13 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        static uint16_t default_version = 1;
        static uint16_t flags = BITMAP_OPT_FULL_DAG;
        struct strbuf tmp_file = STRBUF_INIT;
-       struct sha1file *f;
+       struct hashfile *f;
 
        struct bitmap_disk_header header;
 
        int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX");
 
-       f = sha1fd(fd, tmp_file.buf);
+       f = hashfd(fd, tmp_file.buf);
 
        memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
        header.version = htons(default_version);
@@ -525,7 +524,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        header.entry_count = htonl(writer.selected_nr);
        hashcpy(header.checksum, writer.pack_checksum);
 
-       sha1write(f, &header, sizeof(header));
+       hashwrite(f, &header, sizeof(header));
        dump_bitmap(f, writer.commits);
        dump_bitmap(f, writer.trees);
        dump_bitmap(f, writer.blobs);
@@ -535,7 +534,7 @@ void bitmap_writer_finish(struct pack_idx_entry **index,
        if (options & BITMAP_OPT_HASH_CACHE)
                write_hash_cache(f, index, index_nr);
 
-       sha1close(f, NULL, CSUM_FSYNC);
+       hashclose(f, NULL, CSUM_FSYNC);
 
        if (adjust_shared_perm(tmp_file.buf))
                die_errno("unable to make temporary bitmap file readable");
index 073c1fbd46dfeeb215c209aa0659768e72b81c3b..d0591dd5e8f88b8b939c5720dac36a939fd1edf4 100644 (file)
@@ -41,7 +41,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
        } while (len);
 
        index_crc = p->index_data;
-       index_crc += 2 + 256 + p->num_objects * (20/4) + nr;
+       index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
 
        return data_crc != ntohl(*index_crc);
 }
@@ -54,7 +54,7 @@ static int verify_packfile(struct packed_git *p,
 {
        off_t index_size = p->index_size;
        const unsigned char *index_base = p->index_data;
-       git_SHA_CTX ctx;
+       git_hash_ctx ctx;
        unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
        off_t offset = 0, pack_sig_ofs = 0;
        uint32_t nr_objects, i;
@@ -64,24 +64,24 @@ static int verify_packfile(struct packed_git *p,
        if (!is_pack_valid(p))
                return error("packfile %s cannot be accessed", p->pack_name);
 
-       git_SHA1_Init(&ctx);
+       the_hash_algo->init_fn(&ctx);
        do {
                unsigned long remaining;
                unsigned char *in = use_pack(p, w_curs, offset, &remaining);
                offset += remaining;
                if (!pack_sig_ofs)
-                       pack_sig_ofs = p->pack_size - 20;
+                       pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
                if (offset > pack_sig_ofs)
                        remaining -= (unsigned int)(offset - pack_sig_ofs);
-               git_SHA1_Update(&ctx, in, remaining);
+               the_hash_algo->update_fn(&ctx, in, remaining);
        } while (offset < pack_sig_ofs);
-       git_SHA1_Final(hash, &ctx);
+       the_hash_algo->final_fn(hash, &ctx);
        pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
        if (hashcmp(hash, pack_sig))
-               err = error("%s SHA1 checksum mismatch",
+               err = error("%s pack checksum mismatch",
                            p->pack_name);
-       if (hashcmp(index_base + index_size - 40, pack_sig))
-               err = error("%s SHA1 does not match its index",
+       if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+               err = error("%s pack checksum does not match its index",
                            p->pack_name);
        unuse_pack(w_curs);
 
@@ -126,7 +126,7 @@ static int verify_packfile(struct packed_git *p,
 
                if (type == OBJ_BLOB && big_file_threshold <= size) {
                        /*
-                        * Let check_sha1_signature() check it with
+                        * Let check_object_signature() check it with
                         * the streaming interface; no point slurping
                         * the data in-core only to discard.
                         */
@@ -141,7 +141,7 @@ static int verify_packfile(struct packed_git *p,
                        err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
                                    oid_to_hex(entries[i].oid.oid), p->pack_name,
                                    (uintmax_t)entries[i].offset);
-               else if (check_sha1_signature(entries[i].oid.hash, data, size, typename(type)))
+               else if (check_object_signature(entries[i].oid.oid, data, size, type_name(type)))
                        err = error("packed %s from %s is corrupt",
                                    oid_to_hex(entries[i].oid.oid), p->pack_name);
                else if (fn) {
@@ -165,8 +165,8 @@ int verify_pack_index(struct packed_git *p)
 {
        off_t index_size;
        const unsigned char *index_base;
-       git_SHA_CTX ctx;
-       unsigned char sha1[20];
+       git_hash_ctx ctx;
+       unsigned char hash[GIT_MAX_RAWSZ];
        int err = 0;
 
        if (open_pack_index(p))
@@ -175,11 +175,11 @@ int verify_pack_index(struct packed_git *p)
        index_base = p->index_data;
 
        /* Verify SHA1 sum of the index file */
-       git_SHA1_Init(&ctx);
-       git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20));
-       git_SHA1_Final(sha1, &ctx);
-       if (hashcmp(sha1, index_base + index_size - 20))
-               err = error("Packfile index for %s SHA1 mismatch",
+       the_hash_algo->init_fn(&ctx);
+       the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+       the_hash_algo->final_fn(hash, &ctx);
+       if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+               err = error("Packfile index for %s hash mismatch",
                            p->pack_name);
        return err;
 }
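
The pack-check.c hunks above replace the direct git_SHA1_* calls and the literal sizes 20 and 40 with the_hash_algo's function pointers and its rawsz/hexsz fields, so the verification code no longer assumes SHA-1. A sketch of the idiom, assuming git's in-tree headers:

    #include "cache.h"

    /* Hash an in-core buffer with whatever hash the repository uses. */
    static void checksum_buffer(const void *buf, size_t len,
                                unsigned char out[GIT_MAX_RAWSZ])
    {
            git_hash_ctx ctx;                            /* was git_SHA_CTX */

            the_hash_algo->init_fn(&ctx);                /* was git_SHA1_Init()   */
            the_hash_algo->update_fn(&ctx, buf, len);    /* was git_SHA1_Update() */
            the_hash_algo->final_fn(out, &ctx);          /* was git_SHA1_Final()  */
            /* the_hash_algo->rawsz and ->hexsz replace the literals 20 and 40 */
    }
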
index 1b7ebd8d7eefbcc646867db3e52536b4aac42208..ff5f62c03326a7f01926c72a19be5029ae4c1a8b 100644 (file)
@@ -134,10 +134,8 @@ static void create_pack_revindex(struct packed_git *p)
                        if (!(off & 0x80000000)) {
                                p->revindex[i].offset = off;
                        } else {
-                               p->revindex[i].offset =
-                                       ((uint64_t)ntohl(*off_64++)) << 32;
-                               p->revindex[i].offset |=
-                                       ntohl(*off_64++);
+                               p->revindex[i].offset = get_be64(off_64);
+                               off_64 += 2;
                        }
                        p->revindex[i].nr = i;
                }
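
The revindex hunk above folds the two open-coded ntohl() reads into a single get_be64() call; the explicit "off_64 += 2" keeps the pointer advancing over the same two 32-bit halves that the old post-increments consumed. A self-contained illustration of the equivalence:

    #include <stdint.h>
    #include <arpa/inet.h>

    static uint64_t be64_by_hand(const uint32_t *p)
    {
            /* the removed lines, folded into one expression ... */
            return ((uint64_t)ntohl(p[0]) << 32) | ntohl(p[1]);
            /* ... which is what get_be64(p) returns in a single call */
    }
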
index fea62841920c9647edfcfba249a59bfb91170d8e..d775c7406dd5a869a1ce4d28f6ef872e08476b77 100644 (file)
@@ -46,7 +46,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                           int nr_objects, const struct pack_idx_option *opts,
                           const unsigned char *sha1)
 {
-       struct sha1file *f;
+       struct hashfile *f;
        struct pack_idx_entry **sorted_by_sha, **list, **last;
        off_t last_obj_offset = 0;
        uint32_t array[256];
@@ -68,7 +68,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
 
        if (opts->flags & WRITE_IDX_VERIFY) {
                assert(index_name);
-               f = sha1fd_check(index_name);
+               f = hashfd_check(index_name);
        } else {
                if (!index_name) {
                        struct strbuf tmp_file = STRBUF_INIT;
@@ -80,7 +80,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                        if (fd < 0)
                                die_errno("unable to create '%s'", index_name);
                }
-               f = sha1fd(fd, index_name);
+               f = hashfd(fd, index_name);
        }
 
        /* if last object's offset is >= 2^31 we should use index V2 */
@@ -91,7 +91,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                struct pack_idx_header hdr;
                hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
                hdr.idx_version = htonl(index_version);
-               sha1write(f, &hdr, sizeof(hdr));
+               hashwrite(f, &hdr, sizeof(hdr));
        }
 
        /*
@@ -110,7 +110,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                array[i] = htonl(next - sorted_by_sha);
                list = next;
        }
-       sha1write(f, array, 256 * 4);
+       hashwrite(f, array, 256 * 4);
 
        /*
         * Write the actual SHA1 entries..
@@ -120,9 +120,9 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                struct pack_idx_entry *obj = *list++;
                if (index_version < 2) {
                        uint32_t offset = htonl(obj->offset);
-                       sha1write(f, &offset, 4);
+                       hashwrite(f, &offset, 4);
                }
-               sha1write(f, obj->oid.hash, 20);
+               hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
                if ((opts->flags & WRITE_IDX_STRICT) &&
                    (i && !oidcmp(&list[-2]->oid, &obj->oid)))
                        die("The same object %s appears twice in the pack",
@@ -137,7 +137,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                for (i = 0; i < nr_objects; i++) {
                        struct pack_idx_entry *obj = *list++;
                        uint32_t crc32_val = htonl(obj->crc32);
-                       sha1write(f, &crc32_val, 4);
+                       hashwrite(f, &crc32_val, 4);
                }
 
                /* write the 32-bit offset table */
@@ -150,7 +150,7 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                                  ? (0x80000000 | nr_large_offset++)
                                  : obj->offset);
                        offset = htonl(offset);
-                       sha1write(f, &offset, 4);
+                       hashwrite(f, &offset, 4);
                }
 
                /* write the large offset table */
@@ -164,25 +164,25 @@ const char *write_idx_file(const char *index_name, struct pack_idx_entry **objec
                                continue;
                        split[0] = htonl(offset >> 32);
                        split[1] = htonl(offset & 0xffffffff);
-                       sha1write(f, split, 8);
+                       hashwrite(f, split, 8);
                        nr_large_offset--;
                }
        }
 
-       sha1write(f, sha1, 20);
-       sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
+       hashwrite(f, sha1, the_hash_algo->rawsz);
+       hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
                            ? CSUM_CLOSE : CSUM_FSYNC));
        return index_name;
 }
 
-off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
+off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
 {
        struct pack_header hdr;
 
        hdr.hdr_signature = htonl(PACK_SIGNATURE);
        hdr.hdr_version = htonl(PACK_VERSION);
        hdr.hdr_entries = htonl(nr_entries);
-       sha1write(f, &hdr, sizeof(hdr));
+       hashwrite(f, &hdr, sizeof(hdr));
        return sizeof(hdr);
 }
 
@@ -203,20 +203,20 @@ off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
  * interested in the resulting SHA1 of pack data above partial_pack_offset.
  */
 void fixup_pack_header_footer(int pack_fd,
-                        unsigned char *new_pack_sha1,
+                        unsigned char *new_pack_hash,
                         const char *pack_name,
                         uint32_t object_count,
-                        unsigned char *partial_pack_sha1,
+                        unsigned char *partial_pack_hash,
                         off_t partial_pack_offset)
 {
        int aligned_sz, buf_sz = 8 * 1024;
-       git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
+       git_hash_ctx old_hash_ctx, new_hash_ctx;
        struct pack_header hdr;
        char *buf;
        ssize_t read_result;
 
-       git_SHA1_Init(&old_sha1_ctx);
-       git_SHA1_Init(&new_sha1_ctx);
+       the_hash_algo->init_fn(&old_hash_ctx);
+       the_hash_algo->init_fn(&new_hash_ctx);
 
        if (lseek(pack_fd, 0, SEEK_SET) != 0)
                die_errno("Failed seeking to start of '%s'", pack_name);
@@ -228,9 +228,9 @@ void fixup_pack_header_footer(int pack_fd,
                          pack_name);
        if (lseek(pack_fd, 0, SEEK_SET) != 0)
                die_errno("Failed seeking to start of '%s'", pack_name);
-       git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
+       the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
        hdr.hdr_entries = htonl(object_count);
-       git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr));
+       the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
        write_or_die(pack_fd, &hdr, sizeof(hdr));
        partial_pack_offset -= sizeof(hdr);
 
@@ -238,28 +238,28 @@ void fixup_pack_header_footer(int pack_fd,
        aligned_sz = buf_sz - sizeof(hdr);
        for (;;) {
                ssize_t m, n;
-               m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ?
+               m = (partial_pack_hash && partial_pack_offset < aligned_sz) ?
                        partial_pack_offset : aligned_sz;
                n = xread(pack_fd, buf, m);
                if (!n)
                        break;
                if (n < 0)
                        die_errno("Failed to checksum '%s'", pack_name);
-               git_SHA1_Update(&new_sha1_ctx, buf, n);
+               the_hash_algo->update_fn(&new_hash_ctx, buf, n);
 
                aligned_sz -= n;
                if (!aligned_sz)
                        aligned_sz = buf_sz;
 
-               if (!partial_pack_sha1)
+               if (!partial_pack_hash)
                        continue;
 
-               git_SHA1_Update(&old_sha1_ctx, buf, n);
+               the_hash_algo->update_fn(&old_hash_ctx, buf, n);
                partial_pack_offset -= n;
                if (partial_pack_offset == 0) {
-                       unsigned char sha1[20];
-                       git_SHA1_Final(sha1, &old_sha1_ctx);
-                       if (hashcmp(sha1, partial_pack_sha1) != 0)
+                       unsigned char hash[GIT_MAX_RAWSZ];
+                       the_hash_algo->final_fn(hash, &old_hash_ctx);
+                       if (hashcmp(hash, partial_pack_hash) != 0)
                                die("Unexpected checksum for %s "
                                    "(disk corruption?)", pack_name);
                        /*
@@ -267,23 +267,24 @@ void fixup_pack_header_footer(int pack_fd,
                         * pack, which also means making partial_pack_offset
                         * big enough not to matter anymore.
                         */
-                       git_SHA1_Init(&old_sha1_ctx);
+                       the_hash_algo->init_fn(&old_hash_ctx);
                        partial_pack_offset = ~partial_pack_offset;
                        partial_pack_offset -= MSB(partial_pack_offset, 1);
                }
        }
        free(buf);
 
-       if (partial_pack_sha1)
-               git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx);
-       git_SHA1_Final(new_pack_sha1, &new_sha1_ctx);
-       write_or_die(pack_fd, new_pack_sha1, 20);
+       if (partial_pack_hash)
+               the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
+       the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
+       write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
        fsync_or_die(pack_fd, pack_name);
 }
 
 char *index_pack_lockfile(int ip_out)
 {
-       char packname[46];
+       char packname[GIT_MAX_HEXSZ + 6];
+       const int len = the_hash_algo->hexsz + 6;
 
        /*
         * The first thing we expect from index-pack's output
@@ -292,9 +293,9 @@ char *index_pack_lockfile(int ip_out)
         * case, we need it to remove the corresponding .keep file
         * later on.  If we don't get that then tough luck with it.
         */
-       if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') {
+       if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') {
                const char *name;
-               packname[45] = 0;
+               packname[len-1] = 0;
                if (skip_prefix(packname, "keep\t", &name))
                        return xstrfmt("%s/pack/pack-%s.keep",
                                       get_object_directory(), name);
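
The index_pack_lockfile() hunk above replaces the magic length 46 with the_hash_algo->hexsz + 6, while the buffer is sized for the largest supported hash (GIT_MAX_HEXSZ + 6). The arithmetic behind the old literal, shown for the SHA-1 case:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            const size_t hexsz = 40;   /* the_hash_algo->hexsz for SHA-1 */
            const size_t len = strlen("keep\t") + hexsz + strlen("\n");

            assert(len == 46);         /* i.e. hexsz + 6, as the new code computes */
            return 0;
    }
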
@@ -332,14 +333,14 @@ int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
        return n;
 }
 
-struct sha1file *create_tmp_packfile(char **pack_tmp_name)
+struct hashfile *create_tmp_packfile(char **pack_tmp_name)
 {
        struct strbuf tmpname = STRBUF_INIT;
        int fd;
 
        fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
        *pack_tmp_name = strbuf_detach(&tmpname, NULL);
-       return sha1fd(fd, *pack_tmp_name);
+       return hashfd(fd, *pack_tmp_name);
 }
 
 void finish_tmp_packfile(struct strbuf *name_buffer,
diff --git a/pack.h b/pack.h
index 8294341af174ad67693006dfcb49a7208732f45b..34a9d458b411927b7c1e121e88387e022841ea2a 100644 (file)
--- a/pack.h
+++ b/pack.h
@@ -81,7 +81,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry
 extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
 extern int verify_pack_index(struct packed_git *);
 extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
-extern off_t write_pack_header(struct sha1file *f, uint32_t);
+extern off_t write_pack_header(struct hashfile *f, uint32_t);
 extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
 extern char *index_pack_lockfile(int fd);
 
@@ -98,7 +98,7 @@ extern int encode_in_pack_object_header(unsigned char *hdr, int hdr_len,
 #define PH_ERROR_PROTOCOL      (-3)
 extern int read_pack_header(int fd, struct pack_header *);
 
-extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
+extern struct hashfile *create_tmp_packfile(char **pack_tmp_name);
 extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
 
 #endif
index 4a5fe7ab1883843a389ce74bf1c7bd89890d8e51..f26395ecabb39470f0ddfd127a417439a09de432 100644 (file)
@@ -1,5 +1,5 @@
 #include "cache.h"
-#include "mru.h"
+#include "list.h"
 #include "pack.h"
 #include "dir.h"
 #include "mergesort.h"
@@ -8,6 +8,11 @@
 #include "list.h"
 #include "streaming.h"
 #include "sha1-lookup.h"
+#include "commit.h"
+#include "object.h"
+#include "tag.h"
+#include "tree-walk.h"
+#include "tree.h"
 
 char *odb_pack_name(struct strbuf *buf,
                    const unsigned char *sha1,
@@ -40,7 +45,7 @@ static unsigned int pack_max_fds;
 static size_t peak_pack_mapped;
 static size_t pack_mapped;
 struct packed_git *packed_git;
-struct mru packed_git_mru;
+LIST_HEAD(packed_git_mru);
 
 #define SZ_FMT PRIuMAX
 static inline uintmax_t sz_fmt(size_t s) { return s; }
@@ -643,10 +648,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
                return NULL;
 
        /*
-        * ".pack" is long enough to hold any suffix we're adding (and
+        * ".promisor" is long enough to hold any suffix we're adding (and
         * the use of xsnprintf double-checks that)
         */
-       alloc = st_add3(path_len, strlen(".pack"), 1);
+       alloc = st_add3(path_len, strlen(".promisor"), 1);
        p = alloc_packed_git(alloc);
        memcpy(p->pack_name, path, path_len);
 
@@ -654,6 +659,10 @@ struct packed_git *add_packed_git(const char *path, size_t path_len, int local)
        if (!access(p->pack_name, F_OK))
                p->pack_keep = 1;
 
+       xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor");
+       if (!access(p->pack_name, F_OK))
+               p->pack_promisor = 1;
+
        xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack");
        if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
                free(p);
@@ -781,7 +790,8 @@ static void prepare_packed_git_one(char *objdir, int local)
                if (ends_with(de->d_name, ".idx") ||
                    ends_with(de->d_name, ".pack") ||
                    ends_with(de->d_name, ".bitmap") ||
-                   ends_with(de->d_name, ".keep"))
+                   ends_with(de->d_name, ".keep") ||
+                   ends_with(de->d_name, ".promisor"))
                        string_list_append(&garbage, path.buf);
                else
                        report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
@@ -866,9 +876,10 @@ static void prepare_packed_git_mru(void)
 {
        struct packed_git *p;
 
-       mru_clear(&packed_git_mru);
+       INIT_LIST_HEAD(&packed_git_mru);
+
        for (p = packed_git; p; p = p->next)
-               mru_append(&packed_git_mru, p);
+               list_add_tail(&p->mru, &packed_git_mru);
 }
 
 static int prepare_packed_git_run_once = 0;
@@ -1084,13 +1095,13 @@ static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
 {
        int type;
        struct revindex_entry *revidx;
-       const unsigned char *sha1;
+       struct object_id oid;
        revidx = find_pack_revindex(p, obj_offset);
        if (!revidx)
                return OBJ_BAD;
-       sha1 = nth_packed_object_sha1(p, revidx->nr);
-       mark_bad_packed_object(p, sha1);
-       type = sha1_object_info(sha1, NULL);
+       nth_packed_object_oid(&oid, p, revidx->nr);
+       mark_bad_packed_object(p, oid.hash);
+       type = oid_object_info(&oid, NULL);
        if (type <= OBJ_NONE)
                return OBJ_BAD;
        return type;
@@ -1350,16 +1361,16 @@ int packed_object_info(struct packed_git *p, off_t obj_offset,
                *oi->disk_sizep = revidx[1].offset - obj_offset;
        }
 
-       if (oi->typep || oi->typename) {
+       if (oi->typep || oi->type_name) {
                enum object_type ptot;
                ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
                                             curpos);
                if (oi->typep)
                        *oi->typep = ptot;
-               if (oi->typename) {
-                       const char *tn = typename(ptot);
+               if (oi->type_name) {
+                       const char *tn = type_name(ptot);
                        if (tn)
-                               strbuf_addstr(oi->typename, tn);
+                               strbuf_addstr(oi->type_name, tn);
                }
                if (ptot < 0) {
                        type = OBJ_BAD;
@@ -1441,7 +1452,7 @@ struct unpack_entry_stack_ent {
        unsigned long size;
 };
 
-static void *read_object(const unsigned char *sha1, enum object_type *type,
+static void *read_object(const struct object_id *oid, enum object_type *type,
                         unsigned long *size)
 {
        struct object_info oi = OBJECT_INFO_INIT;
@@ -1450,7 +1461,7 @@ static void *read_object(const unsigned char *sha1, enum object_type *type,
        oi.sizep = size;
        oi.contentp = &content;
 
-       if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+       if (oid_object_info_extended(oid, &oi, 0) < 0)
                return NULL;
        return content;
 }
@@ -1490,11 +1501,11 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
                        struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
                        off_t len = revidx[1].offset - obj_offset;
                        if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
-                               const unsigned char *sha1 =
-                                       nth_packed_object_sha1(p, revidx->nr);
+                               struct object_id oid;
+                               nth_packed_object_oid(&oid, p, revidx->nr);
                                error("bad packed object CRC for %s",
-                                     sha1_to_hex(sha1));
-                               mark_bad_packed_object(p, sha1);
+                                     oid_to_hex(&oid));
+                               mark_bad_packed_object(p, oid.hash);
                                data = NULL;
                                goto out;
                        }
@@ -1577,16 +1588,16 @@ void *unpack_entry(struct packed_git *p, off_t obj_offset,
                         * of a corrupted pack, and is better than failing outright.
                         */
                        struct revindex_entry *revidx;
-                       const unsigned char *base_sha1;
+                       struct object_id base_oid;
                        revidx = find_pack_revindex(p, obj_offset);
                        if (revidx) {
-                               base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+                               nth_packed_object_oid(&base_oid, p, revidx->nr);
                                error("failed to read delta base object %s"
                                      " at offset %"PRIuMAX" from %s",
-                                     sha1_to_hex(base_sha1), (uintmax_t)obj_offset,
+                                     oid_to_hex(&base_oid), (uintmax_t)obj_offset,
                                      p->pack_name);
-                               mark_bad_packed_object(p, base_sha1);
-                               base = read_object(base_sha1, &type, &base_size);
+                               mark_bad_packed_object(p, base_oid.hash);
+                               base = read_object(&base_oid, &type, &base_size);
                                external_base = base;
                        }
                }
@@ -1702,8 +1713,7 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
                        return off;
                index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
                check_pack_index_ptr(p, index);
-               return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
-                                  ntohl(*((uint32_t *)(index + 4)));
+               return get_be64(index);
        }
 }
 
@@ -1712,11 +1722,8 @@ off_t find_pack_entry_one(const unsigned char *sha1,
 {
        const uint32_t *level1_ofs = p->index_data;
        const unsigned char *index = p->index_data;
-       unsigned hi, lo, stride;
-       static int debug_lookup = -1;
-
-       if (debug_lookup < 0)
-               debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+       unsigned stride;
+       uint32_t result;
 
        if (!index) {
                if (open_pack_index(p))
@@ -1729,8 +1736,6 @@ off_t find_pack_entry_one(const unsigned char *sha1,
                index += 8;
        }
        index += 4 * 256;
-       hi = ntohl(level1_ofs[*sha1]);
-       lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
        if (p->index_version > 1) {
                stride = 20;
        } else {
@@ -1738,24 +1743,8 @@ off_t find_pack_entry_one(const unsigned char *sha1,
                index += 4;
        }
 
-       if (debug_lookup)
-               printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
-                      sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
-       while (lo < hi) {
-               unsigned mi = lo + (hi - lo) / 2;
-               int cmp = hashcmp(index + mi * stride, sha1);
-
-               if (debug_lookup)
-                       printf("lo %u hi %u rg %u mi %u\n",
-                              lo, hi, hi - lo, mi);
-               if (!cmp)
-                       return nth_packed_object_offset(p, mi);
-               if (cmp > 0)
-                       hi = mi;
-               else
-                       lo = mi+1;
-       }
+       if (bsearch_hash(sha1, level1_ofs, index, stride, &result))
+               return nth_packed_object_offset(p, result);
        return 0;
 }
 
@@ -1831,15 +1820,16 @@ static int fill_pack_entry(const unsigned char *sha1,
  */
 int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
 {
-       struct mru_entry *p;
+       struct list_head *pos;
 
        prepare_packed_git();
        if (!packed_git)
                return 0;
 
-       for (p = packed_git_mru.head; p; p = p->next) {
-               if (fill_pack_entry(sha1, e, p->item)) {
-                       mru_mark(&packed_git_mru, p);
+       list_for_each(pos, &packed_git_mru) {
+               struct packed_git *p = list_entry(pos, struct packed_git, mru);
+               if (fill_pack_entry(sha1, e, p)) {
+                       list_move(&p->mru, &packed_git_mru);
                        return 1;
                }
        }
@@ -1889,6 +1879,9 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags)
        for (p = packed_git; p; p = p->next) {
                if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
                        continue;
+               if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
+                   !p->pack_promisor)
+                       continue;
                if (open_pack_index(p)) {
                        pack_errors = 1;
                        continue;
@@ -1899,3 +1892,61 @@ int for_each_packed_object(each_packed_object_fn cb, void *data, unsigned flags)
        }
        return r ? r : pack_errors;
 }
+
+static int add_promisor_object(const struct object_id *oid,
+                              struct packed_git *pack,
+                              uint32_t pos,
+                              void *set_)
+{
+       struct oidset *set = set_;
+       struct object *obj = parse_object(oid);
+       if (!obj)
+               return 1;
+
+       oidset_insert(set, oid);
+
+       /*
+        * If this is a tree, commit, or tag, the objects it refers
+        * to are also promisor objects. (Blobs refer to no objects.)
+        */
+       if (obj->type == OBJ_TREE) {
+               struct tree *tree = (struct tree *)obj;
+               struct tree_desc desc;
+               struct name_entry entry;
+               if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+                       /*
+                        * Error messages are given when packs are
+                        * verified, so do not print any here.
+                        */
+                       return 0;
+               while (tree_entry_gently(&desc, &entry))
+                       oidset_insert(set, entry.oid);
+       } else if (obj->type == OBJ_COMMIT) {
+               struct commit *commit = (struct commit *) obj;
+               struct commit_list *parents = commit->parents;
+
+               oidset_insert(set, &commit->tree->object.oid);
+               for (; parents; parents = parents->next)
+                       oidset_insert(set, &parents->item->object.oid);
+       } else if (obj->type == OBJ_TAG) {
+               struct tag *tag = (struct tag *) obj;
+               oidset_insert(set, &tag->tagged->oid);
+       }
+       return 0;
+}
+
+int is_promisor_object(const struct object_id *oid)
+{
+       static struct oidset promisor_objects;
+       static int promisor_objects_prepared;
+
+       if (!promisor_objects_prepared) {
+               if (repository_format_partial_clone) {
+                       for_each_packed_object(add_promisor_object,
+                                              &promisor_objects,
+                                              FOR_EACH_OBJECT_PROMISOR_ONLY);
+               }
+               promisor_objects_prepared = 1;
+       }
+       return oidset_contains(&promisor_objects, oid);
+}
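
Two independent changes run through the packfile.c hunks above: the pack MRU moves from the bespoke mru API onto a generic list.h node embedded in struct packed_git, and new *.promisor bookkeeping (pack_promisor, FOR_EACH_OBJECT_PROMISOR_ONLY, is_promisor_object) is introduced for partial clones. A sketch of the embedded-list MRU pattern, using git's list.h macros as in the hunks; "struct item" stands in for struct packed_git:

    #include "list.h"

    struct item {
            int id;
            struct list_head mru;                   /* embedded node, like packed_git::mru */
    };

    static LIST_HEAD(mru_head);                     /* like packed_git_mru */

    static void mru_register(struct item *it)
    {
            list_add_tail(&it->mru, &mru_head);     /* as prepare_packed_git_mru() does */
    }

    static struct item *mru_lookup(int id)
    {
            struct list_head *pos;

            list_for_each(pos, &mru_head) {
                    struct item *it = list_entry(pos, struct item, mru);
                    if (it->id == id) {
                            list_move(&it->mru, &mru_head); /* promote the hit to the front */
                            return it;
                    }
            }
            return NULL;
    }
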
index 0cdeb54dcd97a67c38285e8f81412ec71273fd7f..a7fca598d672b73010a5fb99e4507da4634002ff 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef PACKFILE_H
 #define PACKFILE_H
 
+#include "oidset.h"
+
 /*
  * Generate the filename to be used for a pack file with checksum "sha1" and
  * extension "ext". The result is written into the strbuf "buf", overwriting
@@ -124,6 +126,11 @@ extern int has_sha1_pack(const unsigned char *sha1);
 
 extern int has_pack_index(const unsigned char *sha1);
 
+/*
+ * Only iterate over packs obtained from the promisor remote.
+ */
+#define FOR_EACH_OBJECT_PROMISOR_ONLY 2
+
 /*
  * Iterate over packed objects in both the local
  * repository and any alternates repositories (unless the
@@ -135,4 +142,10 @@ typedef int each_packed_object_fn(const struct object_id *oid,
                                  void *data);
 extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
 
+/*
+ * Return 1 if an object in a promisor packfile is or refers to the given
+ * object, 0 otherwise.
+ */
+extern int is_promisor_object(const struct object_id *oid);
+
 #endif
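
is_promisor_object() lazily builds an oidset of every object contained in, or referenced by, packs that have a matching *.promisor file, so callers can ask whether a locally missing object is expected to be fetchable from the promisor remote. A hypothetical caller (the function name is illustrative, not part of the patch):

    #include "cache.h"
    #include "packfile.h"

    static void report_missing(const struct object_id *oid)
    {
            /*
             * In a partial clone an object that is absent locally is tolerated
             * when a promisor pack promises it.
             */
            if (is_promisor_object(oid))
                    return; /* expected to be fetchable from the promisor remote */
            die("missing object %s", oid_to_hex(oid));
    }
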
index fca7159646c82cb9213e72afd94d8debc6bd4c51..0f7059a8ab32a624775026d7dc2289245c87c192 100644 (file)
@@ -317,14 +317,16 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
                return get_value(p, options, all_opts, flags ^ opt_flags);
        }
 
-       if (ambiguous_option)
-               return error("Ambiguous option: %s "
+       if (ambiguous_option) {
+               error("Ambiguous option: %s "
                        "(could be --%s%s or --%s%s)",
                        arg,
                        (ambiguous_flags & OPT_UNSET) ?  "no-" : "",
                        ambiguous_option->long_name,
                        (abbrev_flags & OPT_UNSET) ?  "no-" : "",
                        abbrev_option->long_name);
+               return -3;
+       }
        if (abbrev_option)
                return get_value(p, abbrev_option, all_opts, abbrev_flags);
        return -2;
@@ -425,6 +427,48 @@ void parse_options_start(struct parse_opt_ctx_t *ctx,
        parse_options_check(options);
 }
 
+/*
+ * TODO: we are not completing the --no-XXX form yet because there are
+ * many options that do not suppress it properly.
+ */
+static int show_gitcomp(struct parse_opt_ctx_t *ctx,
+                       const struct option *opts)
+{
+       for (; opts->type != OPTION_END; opts++) {
+               const char *suffix = "";
+
+               if (!opts->long_name)
+                       continue;
+               if (opts->flags & (PARSE_OPT_HIDDEN | PARSE_OPT_NOCOMPLETE))
+                       continue;
+
+               switch (opts->type) {
+               case OPTION_GROUP:
+                       continue;
+               case OPTION_STRING:
+               case OPTION_FILENAME:
+               case OPTION_INTEGER:
+               case OPTION_MAGNITUDE:
+               case OPTION_CALLBACK:
+                       if (opts->flags & PARSE_OPT_NOARG)
+                               break;
+                       if (opts->flags & PARSE_OPT_OPTARG)
+                               break;
+                       if (opts->flags & PARSE_OPT_LASTARG_DEFAULT)
+                               break;
+                       suffix = "=";
+                       break;
+               default:
+                       break;
+               }
+               if (opts->flags & PARSE_OPT_COMP_ARG)
+                       suffix = "=";
+               printf(" --%s%s", opts->long_name, suffix);
+       }
+       fputc('\n', stdout);
+       exit(0);
+}
+
 static int usage_with_options_internal(struct parse_opt_ctx_t *,
                                       const char * const *,
                                       const struct option *, int, int);
@@ -434,7 +478,6 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                       const char * const usagestr[])
 {
        int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP);
-       int err = 0;
 
        /* we must reset ->opt, unknown short option leave it dangling */
        ctx->opt = NULL;
@@ -455,11 +498,15 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                if (internal_help && ctx->total == 1 && !strcmp(arg + 1, "h"))
                        goto show_usage;
 
+               /* a lone --git-completion-helper is requested by git-completion.bash */
+               if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper"))
+                       return show_gitcomp(ctx, options);
+
                if (arg[1] != '-') {
                        ctx->opt = arg + 1;
                        switch (parse_short_opt(ctx, options)) {
                        case -1:
-                               goto show_usage_error;
+                               return PARSE_OPT_ERROR;
                        case -2:
                                if (ctx->opt)
                                        check_typos(arg + 1, options);
@@ -472,7 +519,7 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        while (ctx->opt) {
                                switch (parse_short_opt(ctx, options)) {
                                case -1:
-                                       goto show_usage_error;
+                                       return PARSE_OPT_ERROR;
                                case -2:
                                        if (internal_help && *ctx->opt == 'h')
                                                goto show_usage;
@@ -504,9 +551,11 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                        goto show_usage;
                switch (parse_long_opt(ctx, arg + 2, options)) {
                case -1:
-                       goto show_usage_error;
+                       return PARSE_OPT_ERROR;
                case -2:
                        goto unknown;
+               case -3:
+                       goto show_usage;
                }
                continue;
 unknown:
@@ -517,15 +566,13 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
        }
        return PARSE_OPT_DONE;
 
- show_usage_error:
-       err = 1;
  show_usage:
-       return usage_with_options_internal(ctx, usagestr, options, 0, err);
+       return usage_with_options_internal(ctx, usagestr, options, 0, 0);
 }
 
 int parse_options_end(struct parse_opt_ctx_t *ctx)
 {
-       memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+       MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc);
        ctx->out[ctx->cpidx + ctx->argc] = NULL;
        return ctx->cpidx + ctx->argc;
 }
@@ -539,6 +586,7 @@ int parse_options(int argc, const char **argv, const char *prefix,
        parse_options_start(&ctx, argc, argv, prefix, options, flags);
        switch (parse_options_step(&ctx, options, usagestr)) {
        case PARSE_OPT_HELP:
+       case PARSE_OPT_ERROR:
                exit(129);
        case PARSE_OPT_NON_OPTION:
        case PARSE_OPT_DONE:
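
With the parse-options.c changes above, any parse-options-based command reacts to a lone --git-completion-helper by printing its completable long options (appending "=" where an argument is required) and exiting, so git-completion.bash can generate option lists instead of hard-coding them. A hypothetical option table and the output the handler would produce for it:

    #include "cache.h"
    #include "parse-options.h"

    static int dry_run;
    static const char *message;

    static struct option example_options[] = {
            OPT__DRY_RUN(&dry_run, N_("dry run")),
            OPT_STRING('m', "message", &message, N_("msg"), N_("message")),
            OPT_END()
    };

    /*
     * "git <cmd> --git-completion-helper" would print roughly
     *     " --dry-run --message="
     * and exit(0): long names only, with "=" appended when an argument is required.
     */
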
index af711227ae3aac7af07de0322a364ccac03b819d..dd14911a297a5b10705ecb31243c55a7dc2f193c 100644 (file)
@@ -38,7 +38,9 @@ enum parse_opt_option_flags {
        PARSE_OPT_LASTARG_DEFAULT = 16,
        PARSE_OPT_NODASH = 32,
        PARSE_OPT_LITERAL_ARGHELP = 64,
-       PARSE_OPT_SHELL_EVAL = 256
+       PARSE_OPT_SHELL_EVAL = 256,
+       PARSE_OPT_NOCOMPLETE = 512,
+       PARSE_OPT_COMP_ARG = 1024
 };
 
 struct option;
@@ -89,6 +91,11 @@ typedef int parse_opt_ll_cb(struct parse_opt_ctx_t *ctx,
  *   PARSE_OPT_LITERAL_ARGHELP: says that argh shouldn't be enclosed in brackets
  *                             (i.e. '<argh>') in the help message.
  *                             Useful for options with multiple parameters.
+ *   PARSE_OPT_NOCOMPLETE: by default all visible options are completable
+ *                        by git-completion.bash. This option suppresses that.
+ *   PARSE_OPT_COMP_ARG: this option forces git-completion.bash to
+ *                      complete an option as --name= (not --name) even if
+ *                      the option takes an optional argument.
  *
  * `callback`::
  *   pointer to the callback to use for OPTION_CALLBACK or
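
A hypothetical option table exercising the two flags documented above (OPT__FORCE gains a pass-through flags parameter in a later hunk of this file):

    #include "cache.h"
    #include "parse-options.h"

    static int force;
    static const char *pattern;

    static struct option flag_examples[] = {
            /* visible in --help, but never offered by completion */
            OPT__FORCE(&force, N_("force the operation"), PARSE_OPT_NOCOMPLETE),
            /* optional argument, yet completion should still offer "--pattern=" */
            { OPTION_STRING, 0, "pattern", &pattern, N_("glob"), N_("limit to pattern"),
              PARSE_OPT_OPTARG | PARSE_OPT_COMP_ARG },
            OPT_END()
    };
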
@@ -112,19 +119,24 @@ struct option {
        intptr_t defval;
 };
 
+#define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \
+                                     PARSE_OPT_NOARG|(f), NULL, (b) }
+#define OPT_COUNTUP_F(s, l, v, h, f) { OPTION_COUNTUP, (s), (l), (v), NULL, \
+                                      (h), PARSE_OPT_NOARG|(f) }
+#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \
+                                         (h), PARSE_OPT_NOARG | (f), NULL, (i) }
+#define OPT_BOOL_F(s, l, v, h, f)   OPT_SET_INT_F(s, l, v, h, 1, f)
+
 #define OPT_END()                   { OPTION_END }
 #define OPT_ARGUMENT(l, h)          { OPTION_ARGUMENT, 0, (l), NULL, NULL, \
                                      (h), PARSE_OPT_NOARG}
 #define OPT_GROUP(h)                { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
-#define OPT_BIT(s, l, v, h, b)      { OPTION_BIT, (s), (l), (v), NULL, (h), \
-                                     PARSE_OPT_NOARG, NULL, (b) }
+#define OPT_BIT(s, l, v, h, b)      OPT_BIT_F(s, l, v, h, b, 0)
 #define OPT_NEGBIT(s, l, v, h, b)   { OPTION_NEGBIT, (s), (l), (v), NULL, \
                                      (h), PARSE_OPT_NOARG, NULL, (b) }
-#define OPT_COUNTUP(s, l, v, h)     { OPTION_COUNTUP, (s), (l), (v), NULL, \
-                                     (h), PARSE_OPT_NOARG }
-#define OPT_SET_INT(s, l, v, h, i)  { OPTION_SET_INT, (s), (l), (v), NULL, \
-                                     (h), PARSE_OPT_NOARG, NULL, (i) }
-#define OPT_BOOL(s, l, v, h)        OPT_SET_INT(s, l, v, h, 1)
+#define OPT_COUNTUP(s, l, v, h)     OPT_COUNTUP_F(s, l, v, h, 0)
+#define OPT_SET_INT(s, l, v, h, i)  OPT_SET_INT_F(s, l, v, h, i, 0)
+#define OPT_BOOL(s, l, v, h)        OPT_BOOL_F(s, l, v, h, 0)
 #define OPT_HIDDEN_BOOL(s, l, v, h) { OPTION_SET_INT, (s), (l), (v), NULL, \
                                      (h), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1}
 #define OPT_CMDMODE(s, l, v, h, i)  { OPTION_CMDMODE, (s), (l), (v), NULL, \
@@ -188,6 +200,7 @@ enum {
        PARSE_OPT_HELP = -1,
        PARSE_OPT_DONE,
        PARSE_OPT_NON_OPTION,
+       PARSE_OPT_ERROR,
        PARSE_OPT_UNKNOWN
 };
 
@@ -240,7 +253,7 @@ extern int parse_opt_passthru_argv(const struct option *, const char *, int);
        { OPTION_CALLBACK, 'q', "quiet", (var), NULL, N_("be more quiet"), \
          PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }
 #define OPT__DRY_RUN(var, h)  OPT_BOOL('n', "dry-run", (var), (h))
-#define OPT__FORCE(var, h)    OPT_COUNTUP('f', "force",   (var), (h))
+#define OPT__FORCE(var, h, f) OPT_COUNTUP_F('f', "force",   (var), (h), (f))
 #define OPT__ABBREV(var)  \
        { OPTION_CALLBACK, 0, "abbrev", (var), N_("n"), \
          N_("use <n> digits to display SHA-1s"),       \
index 0f1fc27f862f61015db843f272e61b29c14223af..84c048a73cc2e5dd24f807669eb99b0ce3123195 100644 (file)
@@ -1,8 +1 @@
-perl.mak
-perl.mak.old
-MYMETA.json
-MYMETA.yml
-blib
-blibdirs
-pm_to_blib
-PM.stamp
+/build/
diff --git a/perl/FromCPAN/.gitattributes b/perl/FromCPAN/.gitattributes
new file mode 100644 (file)
index 0000000..8b64fc5
--- /dev/null
@@ -0,0 +1 @@
+/Error.pm whitespace=-blank-at-eof
diff --git a/perl/FromCPAN/Error.pm b/perl/FromCPAN/Error.pm
new file mode 100644 (file)
index 0000000..8b95e2d
--- /dev/null
@@ -0,0 +1,1040 @@
+# Error.pm
+#
+# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
+# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
+#
+# but modified ***significantly***
+
+package Error;
+
+use strict;
+use warnings;
+
+use vars qw($VERSION);
+use 5.004;
+
+$VERSION = "0.17025";
+
+use overload (
+       '""'       =>   'stringify',
+       '0+'       =>   'value',
+       'bool'     =>   sub { return 1; },
+       'fallback' =>   1
+);
+
+$Error::Depth = 0;     # Depth to pass to caller()
+$Error::Debug = 0;     # Generate verbose stack traces
+@Error::STACK = ();    # Clause stack for try
+$Error::THROWN = undef;        # last error thrown, a workaround until die $ref works
+
+my $LAST;              # Last error created
+my %ERROR;             # Last error associated with package
+
+sub _throw_Error_Simple
+{
+    my $args = shift;
+    return Error::Simple->new($args->{'text'});
+}
+
+$Error::ObjectifyCallback = \&_throw_Error_Simple;
+
+
+# Exported subs are defined in Error::subs
+
+use Scalar::Util ();
+
+sub import {
+    shift;
+    my @tags = @_;
+    local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
+
+    @tags = grep {
+       if( $_ eq ':warndie' ) {
+          Error::WarnDie->import();
+          0;
+       }
+       else {
+          1;
+       }
+    } @tags;
+
+    Error::subs->import(@tags);
+}
+
+# I really want to use last for the name of this method, but it is a keyword
+# which prevents the syntax  last Error
+
+sub prior {
+    shift; # ignore
+
+    return $LAST unless @_;
+
+    my $pkg = shift;
+    return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
+       unless ref($pkg);
+
+    my $obj = $pkg;
+    my $err = undef;
+    if($obj->isa('HASH')) {
+       $err = $obj->{'__Error__'}
+           if exists $obj->{'__Error__'};
+    }
+    elsif($obj->isa('GLOB')) {
+       $err = ${*$obj}{'__Error__'}
+           if exists ${*$obj}{'__Error__'};
+    }
+
+    $err;
+}
+
+sub flush {
+    shift; #ignore
+
+    unless (@_) {
+       $LAST = undef;
+       return;
+    }
+
+    my $pkg = shift;
+    return unless ref($pkg);
+
+    undef $ERROR{$pkg} if defined $ERROR{$pkg};
+}
+
+# Return as much information as possible about where the error
+# happened. The -stacktrace element only exists if $Error::Debug
+# was set when the error was created.
+
+sub stacktrace {
+    my $self = shift;
+
+    return $self->{'-stacktrace'}
+       if exists $self->{'-stacktrace'};
+
+    my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
+
+    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+       unless($text =~ /\n$/s);
+
+    $text;
+}
+
+
+sub associate {
+    my $err = shift;
+    my $obj = shift;
+
+    return unless ref($obj);
+
+    if($obj->isa('HASH')) {
+       $obj->{'__Error__'} = $err;
+    }
+    elsif($obj->isa('GLOB')) {
+       ${*$obj}{'__Error__'} = $err;
+    }
+    $obj = ref($obj);
+    $ERROR{ ref($obj) } = $err;
+
+    return;
+}
+
+
+sub new {
+    my $self = shift;
+    my($pkg,$file,$line) = caller($Error::Depth);
+
+    my $err = bless {
+       '-package' => $pkg,
+       '-file'    => $file,
+       '-line'    => $line,
+       @_
+    }, $self;
+
+    $err->associate($err->{'-object'})
+       if(exists $err->{'-object'});
+
+    # To always create a stacktrace would be very inefficient, so
+    # we only do it if $Error::Debug is set
+
+    if($Error::Debug) {
+       require Carp;
+       local $Carp::CarpLevel = $Error::Depth;
+       my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
+       my $trace = Carp::longmess($text);
+       # Remove try calls from the trace
+       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+       $err->{'-stacktrace'} = $trace
+    }
+
+    $@ = $LAST = $ERROR{$pkg} = $err;
+}
+
+# Throw an error. This contains some very gory code.
+
+sub throw {
+    my $self = shift;
+    local $Error::Depth = $Error::Depth + 1;
+
+    # if we are not rethrow-ing then create the object to throw
+    $self = $self->new(@_) unless ref($self);
+
+    die $Error::THROWN = $self;
+}
+
+# syntactic sugar for
+#
+#    die with Error( ... );
+
+sub with {
+    my $self = shift;
+    local $Error::Depth = $Error::Depth + 1;
+
+    $self->new(@_);
+}
+
+# syntactic sugar for
+#
+#    record Error( ... ) and return;
+
+sub record {
+    my $self = shift;
+    local $Error::Depth = $Error::Depth + 1;
+
+    $self->new(@_);
+}
+
+# catch clause for
+#
+# try { ... } catch CLASS with { ... }
+
+sub catch {
+    my $pkg = shift;
+    my $code = shift;
+    my $clauses = shift || {};
+    my $catch = $clauses->{'catch'} ||= [];
+
+    unshift @$catch,  $pkg, $code;
+
+    $clauses;
+}
+
+# Object query methods
+
+sub object {
+    my $self = shift;
+    exists $self->{'-object'} ? $self->{'-object'} : undef;
+}
+
+sub file {
+    my $self = shift;
+    exists $self->{'-file'} ? $self->{'-file'} : undef;
+}
+
+sub line {
+    my $self = shift;
+    exists $self->{'-line'} ? $self->{'-line'} : undef;
+}
+
+sub text {
+    my $self = shift;
+    exists $self->{'-text'} ? $self->{'-text'} : undef;
+}
+
+# overload methods
+
+sub stringify {
+    my $self = shift;
+    defined $self->{'-text'} ? $self->{'-text'} : "Died";
+}
+
+sub value {
+    my $self = shift;
+    exists $self->{'-value'} ? $self->{'-value'} : undef;
+}
+
+package Error::Simple;
+
+use vars qw($VERSION);
+
+$VERSION = "0.17025";
+
+@Error::Simple::ISA = qw(Error);
+
+sub new {
+    my $self  = shift;
+    my $text  = "" . shift;
+    my $value = shift;
+    my(@args) = ();
+
+    local $Error::Depth = $Error::Depth + 1;
+
+    @args = ( -file => $1, -line => $2)
+       if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
+    push(@args, '-value', 0 + $value)
+       if defined($value);
+
+    $self->SUPER::new(-text => $text, @args);
+}
+
+sub stringify {
+    my $self = shift;
+    my $text = $self->SUPER::stringify;
+    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+       unless($text =~ /\n$/s);
+    $text;
+}
+
+##########################################################################
+##########################################################################
+
+# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
+# Peter Seibel <peter@weblogic.com>
+
+package Error::subs;
+
+use Exporter ();
+use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
+
+@EXPORT_OK   = qw(try with finally except otherwise);
+%EXPORT_TAGS = (try => \@EXPORT_OK);
+
+@ISA = qw(Exporter);
+
+sub run_clauses ($$$\@) {
+    my($clauses,$err,$wantarray,$result) = @_;
+    my $code = undef;
+
+    $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
+
+    CATCH: {
+
+       # catch
+       my $catch;
+       if(defined($catch = $clauses->{'catch'})) {
+           my $i = 0;
+
+           CATCHLOOP:
+           for( ; $i < @$catch ; $i += 2) {
+               my $pkg = $catch->[$i];
+               unless(defined $pkg) {
+                   #except
+                   splice(@$catch,$i,2,$catch->[$i+1]->($err));
+                   $i -= 2;
+                   next CATCHLOOP;
+               }
+               elsif(Scalar::Util::blessed($err) && $err->isa($pkg)) {
+                   $code = $catch->[$i+1];
+                   while(1) {
+                       my $more = 0;
+                       local($Error::THROWN, $@);
+                       my $ok = eval {
+                           $@ = $err;
+                           if($wantarray) {
+                               @{$result} = $code->($err,\$more);
+                           }
+                           elsif(defined($wantarray)) {
+                               @{$result} = ();
+                               $result->[0] = $code->($err,\$more);
+                           }
+                           else {
+                               $code->($err,\$more);
+                           }
+                           1;
+                       };
+                       if( $ok ) {
+                           next CATCHLOOP if $more;
+                           undef $err;
+                       }
+                       else {
+                           $err = $@ || $Error::THROWN;
+                               $err = $Error::ObjectifyCallback->({'text' =>$err})
+                                       unless ref($err);
+                       }
+                       last CATCH;
+                   };
+               }
+           }
+       }
+
+       # otherwise
+       my $owise;
+       if(defined($owise = $clauses->{'otherwise'})) {
+           my $code = $clauses->{'otherwise'};
+           my $more = 0;
+        local($Error::THROWN, $@);
+           my $ok = eval {
+               $@ = $err;
+               if($wantarray) {
+                   @{$result} = $code->($err,\$more);
+               }
+               elsif(defined($wantarray)) {
+                   @{$result} = ();
+                   $result->[0] = $code->($err,\$more);
+               }
+               else {
+                   $code->($err,\$more);
+               }
+               1;
+           };
+           if( $ok ) {
+               undef $err;
+           }
+           else {
+               $err = $@ || $Error::THROWN;
+
+               $err = $Error::ObjectifyCallback->({'text' =>$err})
+                       unless ref($err);
+           }
+       }
+    }
+    $err;
+}
+
+sub try (&;$) {
+    my $try = shift;
+    my $clauses = @_ ? shift : {};
+    my $ok = 0;
+    my $err = undef;
+    my @result = ();
+
+    unshift @Error::STACK, $clauses;
+
+    my $wantarray = wantarray();
+
+    do {
+       local $Error::THROWN = undef;
+       local $@ = undef;
+
+       $ok = eval {
+           if($wantarray) {
+               @result = $try->();
+           }
+           elsif(defined $wantarray) {
+               $result[0] = $try->();
+           }
+           else {
+               $try->();
+           }
+           1;
+       };
+
+       $err = $@ || $Error::THROWN
+           unless $ok;
+    };
+
+    shift @Error::STACK;
+
+    $err = run_clauses($clauses,$err,wantarray,@result)
+    unless($ok);
+
+    $clauses->{'finally'}->()
+       if(defined($clauses->{'finally'}));
+
+    if (defined($err))
+    {
+        if (Scalar::Util::blessed($err) && $err->can('throw'))
+        {
+            throw $err;
+        }
+        else
+        {
+            die $err;
+        }
+    }
+
+    wantarray ? @result : $result[0];
+}
+
+# Each clause adds a sub to the list of clauses. The finally clause is
+# always the last, and the otherwise clause is always added just before
+# the finally clause.
+#
+# All clauses, except the finally clause, add a sub which takes one argument;
+# this argument will be the error being thrown. The sub will return a code ref
+# if that clause can handle that error, otherwise undef is returned.
+#
+# The otherwise clause adds a sub which unconditionally returns the user's
+# code reference; this is why it is forced to be last.
+#
+# The catch clause is defined in Error.pm, as the syntax causes it to
+# be called as a method
+
+sub with (&;$) {
+    @_
+}
+
+sub finally (&) {
+    my $code = shift;
+    my $clauses = { 'finally' => $code };
+    $clauses;
+}
+
+# The except clause is a block which returns a hashref or a list of
+# key-value pairs, where the keys are the classes and the values are subs.
+
+sub except (&;$) {
+    my $code = shift;
+    my $clauses = shift || {};
+    my $catch = $clauses->{'catch'} ||= [];
+
+    my $sub = sub {
+       my $ref;
+       my(@array) = $code->($_[0]);
+       if(@array == 1 && ref($array[0])) {
+           $ref = $array[0];
+           $ref = [ %$ref ]
+               if(UNIVERSAL::isa($ref,'HASH'));
+       }
+       else {
+           $ref = \@array;
+       }
+       @$ref
+    };
+
+    unshift @{$catch}, undef, $sub;
+
+    $clauses;
+}
+
+sub otherwise (&;$) {
+    my $code = shift;
+    my $clauses = shift || {};
+
+    if(exists $clauses->{'otherwise'}) {
+       require Carp;
+       Carp::croak("Multiple otherwise clauses");
+    }
+
+    $clauses->{'otherwise'} = $code;
+
+    $clauses;
+}
+
+1;
+
+package Error::WarnDie;
+
+sub gen_callstack($)
+{
+    my ( $start ) = @_;
+
+    require Carp;
+    local $Carp::CarpLevel = $start;
+    my $trace = Carp::longmess("");
+    # Remove try calls from the trace
+    $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+    $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+    my @callstack = split( m/\n/, $trace );
+    return @callstack;
+}
+
+my $old_DIE;
+my $old_WARN;
+
+sub DEATH
+{
+    my ( $e ) = @_;
+
+    local $SIG{__DIE__} = $old_DIE if( defined $old_DIE );
+
+    die @_ if $^S;
+
+    my ( $etype, $message, $location, @callstack );
+    if ( ref($e) && $e->isa( "Error" ) ) {
+        $etype = "exception of type " . ref( $e );
+        $message = $e->text;
+        $location = $e->file . ":" . $e->line;
+        @callstack = split( m/\n/, $e->stacktrace );
+    }
+    else {
+        # Don't apply subsequent layer of message formatting
+        die $e if( $e =~ m/^\nUnhandled perl error caught at toplevel:\n\n/ );
+        $etype = "perl error";
+        my $stackdepth = 0;
+        while( caller( $stackdepth ) =~ m/^Error(?:$|::)/ ) {
+            $stackdepth++
+        }
+
+        @callstack = gen_callstack( $stackdepth + 1 );
+
+        $message = "$e";
+        chomp $message;
+
+        if ( $message =~ s/ at (.*?) line (\d+)\.$// ) {
+            $location = $1 . ":" . $2;
+        }
+        else {
+            my @caller = caller( $stackdepth );
+            $location = $caller[1] . ":" . $caller[2];
+        }
+    }
+
+    shift @callstack;
+    # Do it this way in case there are no elements; we don't print a spurious \n
+    my $callstack = join( "", map { "$_\n"} @callstack );
+
+    die "\nUnhandled $etype caught at toplevel:\n\n  $message\n\nThrown from: $location\n\nFull stack trace:\n\n$callstack\n";
+}
+
+sub TAXES
+{
+    my ( $message ) = @_;
+
+    local $SIG{__WARN__} = $old_WARN if( defined $old_WARN );
+
+    $message =~ s/ at .*? line \d+\.$//;
+    chomp $message;
+
+    my @callstack = gen_callstack( 1 );
+    my $location = shift @callstack;
+
+    # $location already starts in a leading space
+    $message .= $location;
+
+    # Do it this way in case there are no elements; we don't print a spurious \n
+    my $callstack = join( "", map { "$_\n"} @callstack );
+
+    warn "$message:\n$callstack";
+}
+
+sub import
+{
+    $old_DIE  = $SIG{__DIE__};
+    $old_WARN = $SIG{__WARN__};
+
+    $SIG{__DIE__}  = \&DEATH;
+    $SIG{__WARN__} = \&TAXES;
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+Error - Error/exception handling in an OO-ish way
+
+=head1 WARNING
+
+Using the "Error" module is B<no longer recommended> due to the black-magical
+nature of its syntactic sugar, which often tends to break. Its maintainers
+have stopped actively writing code that uses it, and discourage people
+from doing so. See the "SEE ALSO" section below for better recommendations.
+
+=head1 SYNOPSIS
+
+    use Error qw(:try);
+
+    throw Error::Simple( "A simple error");
+
+    sub xyz {
+        ...
+       record Error::Simple("A simple error")
+           and return;
+    }
+
+    unlink($file) or throw Error::Simple("$file: $!",$!);
+
+    try {
+       do_some_stuff();
+       die "error!" if $condition;
+       throw Error::Simple "Oops!" if $other_condition;
+    }
+    catch Error::IO with {
+       my $E = shift;
+       print STDERR "File ", $E->{'-file'}, " had a problem\n";
+    }
+    except {
+       my $E = shift;
+       my $general_handler=sub {send_message $E->{-description}};
+       return {
+           UserException1 => $general_handler,
+           UserException2 => $general_handler
+       };
+    }
+    otherwise {
+       print STDERR "Well I don't know what to say\n";
+    }
+    finally {
+       close_the_garage_door_already(); # Should be reliable
+    }; # Don't forget the trailing ; or you might be surprised
+
+=head1 DESCRIPTION
+
+The C<Error> package provides two interfaces. Firstly C<Error> provides
+a procedural interface to exception handling. Secondly C<Error> is a
+base class for errors/exceptions that can either be thrown, for
+subsequent catch, or can simply be recorded.
+
+Errors in the class C<Error> should not be thrown directly, but the
+user should throw errors from a sub-class of C<Error>.
+
+=head1 PROCEDURAL INTERFACE
+
+C<Error> exports subroutines to perform exception handling. These will
+be exported if the C<:try> tag is used in the C<use> line.
+
+=over 4
+
+=item try BLOCK CLAUSES
+
+C<try> is the main subroutine called by the user. All other subroutines
+exported are clauses to the try subroutine.
+
+The BLOCK will be evaluated and, if no error is thrown, try will return
+the result of the block.
+
+C<CLAUSES> are the subroutines below, which describe what to do in the
+event of an error being thrown within BLOCK.
+
+=item catch CLASS with BLOCK
+
+This clause will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
+to be caught and handled by evaluating C<BLOCK>.
+
+C<BLOCK> will be passed two arguments. The first will be the error
+being thrown. The second is a reference to a scalar variable. If this
+variable is set by the catch block then, on return from the catch
+block, try will continue processing as if the catch block was never
+found. The error will also be available in C<$@>.
+
+To propagate the error the catch block may call C<$err-E<gt>throw>.
+
+If the scalar referenced by the second argument is not set, and the
+error is not thrown again, then the current try block will return with
+the result from the catch block (see the combined example after this
+list).
+
+=item except BLOCK
+
+When C<try> is looking for a handler, if an except clause is found
+C<BLOCK> is evaluated. The return value from this block should be a
+HASHREF or a list of key-value pairs, where the keys are class names
+and the values are CODE references for the handler of errors of that
+type.
+
+=item otherwise BLOCK
+
+Catch any error by executing the code in C<BLOCK>
+
+When evaluated C<BLOCK> will be passed one argument, which will be the
+error being processed. The error will also be available in C<$@>.
+
+Only one otherwise block may be specified per try block
+
+=item finally BLOCK
+
+Execute the code in C<BLOCK> either after the code in the try block has
+successfully completed, or if the try block throws an error then
+C<BLOCK> will be executed after the handler has completed.
+
+If the handler throws an error then the error will be caught, the
+finally block will be executed and the error will be re-thrown.
+
+Only one finally block may be specified per try block
+
+=back
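+
+Putting the clauses together, a sketch of typical usage might look like
+the following (C<do_work> and C<cleanup> are placeholder subs, and
+C<My::Error> a placeholder C<Error> sub-class; none of them are part of
+this module):
+
+    use Error qw(:try);
+
+    my $result = try {
+        do_work();                  # may die or throw
+    }
+    catch My::Error with {
+        my ($E, $more) = @_;
+        warn "handled: ", $E->text, "\n";
+        # set $$more = 1 here to continue as if this catch did not match
+        0;                          # otherwise this becomes try's result
+    }
+    otherwise {
+        warn "unexpected error: $@\n";
+        -1;
+    }
+    finally {
+        cleanup();
+    };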
+
+=head1 COMPATIBILITY
+
+L<Moose> exports a keyword called C<with> which clashes with Error's. This
+example returns a prototype mismatch error:
+
+    package MyTest;
+
+    use warnings;
+    use Moose;
+    use Error qw(:try);
+
+(Thanks to C<maik.hentsche@amd.com> for the report.)
+
+=head1 CLASS INTERFACE
+
+=head2 CONSTRUCTORS
+
+The C<Error> object is implemented as a HASH. This HASH is initialized
+with the arguments that are passed to its constructor. The elements
+that are used by, or are retrievable by, the C<Error> class are listed
+below; other classes may add to these.
+
+       -file
+       -line
+       -text
+       -value
+       -object
+
+If C<-file> or C<-line> are not specified in the constructor arguments
+then these will be initialized with the file name and line number where
+the constructor was called from.
+
+If the error is associated with an object then the object should be
+passed as the C<-object> argument. This will allow the C<Error> package
+to associate the error with the object.
+
+The C<Error> package remembers the last error created, and also the
+last error associated with a package. This could either be the last
+error created by a sub in that package, or the last error which passed
+an object blessed into that package as the C<-object> argument.
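+
+For illustration, a minimal sub-class (here called C<My::Error>, a
+placeholder name) needs nothing more than:
+
+    package My::Error;
+    use base 'Error';
+
+and can then be thrown with explicit constructor arguments, for example:
+
+    My::Error->throw(-text  => "connect failed",
+                     -value => $!);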
+
+=over 4
+
+=item Error->new()
+
+See the Error::Simple documentation.
+
+=item throw ( [ ARGS ] )
+
+Create a new C<Error> object and throw an error, which will be caught
+by a surrounding C<try> block, if there is one. Otherwise it will cause
+the program to exit.
+
+C<throw> may also be called on an existing error to re-throw it.
+
+=item with ( [ ARGS ] )
+
+Create a new C<Error> object and return it. This is defined for
+syntactic sugar, e.g.
+
+    die with Some::Error ( ... );
+
+=item record ( [ ARGS ] )
+
+Create a new C<Error> object and return it. This is defined for
+syntactic sugar, e.g.
+
+    record Some::Error ( ... )
+       and return;
+
+=back
+
+=head2 STATIC METHODS
+
+=over 4
+
+=item prior ( [ PACKAGE ] )
+
+Return the last error created, or the last error associated with
+C<PACKAGE>
+
+=item flush ( [ PACKAGE ] )
+
+Flush the last error created, or the last error associated with
+C<PACKAGE>. It is necessary to clear the error stack before exiting the
+package or uncaught errors generated using C<record> will be reported.
+
+     $Error->flush;
+
+=cut
+
+=back
+
+=head2 OBJECT METHODS
+
+=over 4
+
+=item stacktrace
+
+If the variable C<$Error::Debug> was non-zero when the error was
+created, then C<stacktrace> returns a string created by calling
+C<Carp::longmess>. If the variable was zero, C<stacktrace> returns
+the text of the error appended with the filename and line number of
+where the error was created, providing the text does not end with a
+newline.
+
+=item object
+
+The object this error was associated with
+
+=item file
+
+The file where the constructor of this error was called from
+
+=item line
+
+The line where the constructor of this error was called from
+
+=item text
+
+The text of the error
+
+=item $err->associate($obj)
+
+Associates an error with an object to allow error propagation. I.e.:
+
+    $ber->encode(...) or
+        return Error->prior($ber)->associate($ldap);
+
+=back
+
+=head2 OVERLOAD METHODS
+
+=over 4
+
+=item stringify
+
+A method that converts the object into a string. This method may simply
+return the same as the C<text> method, or it may append more
+information. For example the file name and line number.
+
+By default this method returns the C<-text> argument that was passed to
+the constructor, or the string C<"Died"> if none was given.
+
+=item value
+
+A method that will return a value that can be associated with the
+error. For example if an error was created due to the failure of a
+system call, then this may return the numeric value of C<$!> at the
+time.
+
+By default this method returns the C<-value> argument that was passed
+to the constructor.
+
+=back
+
+=head1 PRE-DEFINED ERROR CLASSES
+
+=head2 Error::Simple
+
+This class can be used to hold simple error strings and values. Its
+constructor takes two arguments. The first is a text value, the second
+is a numeric value. These values are what will be returned by the
+overload methods.
+
+If the text value ends with C<at file line 1> as $@ strings do, then
+this information will be used to set the C<-file> and C<-line> arguments
+of the error object.
+
+This class is used internally if an eval'd block dies with an error
+that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified)
+
+
+=head1 $Error::ObjectifyCallback
+
+This variable holds a reference to a subroutine that converts errors that
+are plain strings to objects. It is used by Error.pm to convert textual
+errors to objects, and can be overridden by the user.
+
+It accepts a single argument which is a hash reference to named parameters.
+Currently the only named parameter passed is C<'text'> which is the text
+of the error, but others may be available in the future.
+
+For example the following code will cause Error.pm to throw objects of the
+class MyError::Bar by default:
+
+    sub throw_MyError_Bar
+    {
+        my $args = shift;
+        my $err = MyError::Bar->new();
+        $err->{'MyBarText'} = $args->{'text'};
+        return $err;
+    }
+
+    {
+        local $Error::ObjectifyCallback = \&throw_MyError_Bar;
+
+        # Error handling here.
+    }
+
+=cut
+
+=head1 MESSAGE HANDLERS
+
+C<Error> also provides handlers to extend the output of the C<warn()> perl
+function, and to handle the printing of a thrown C<Error> that is not caught
+or otherwise handled. These are not installed by default, but are requested
+using the C<:warndie> tag in the C<use> line.
+
+ use Error qw( :warndie );
+
+These new error handlers are installed in C<$SIG{__WARN__}> and
+C<$SIG{__DIE__}>. If these handlers are already defined when the tag is
+imported, the old values are stored and restored while the new handlers run. Thus, to
+arrange for custom handling of warnings and errors, you will need to perform
+something like the following:
+
+ BEGIN {
+   $SIG{__WARN__} = sub {
+     print STDERR "My special warning handler: $_[0]"
+   };
+ }
+
+ use Error qw( :warndie );
+
+Note that setting C<$SIG{__WARN__}> after the C<:warndie> tag has been
+imported will overwrite the handler that C<Error> provides. If this cannot be
+avoided, then the tag can be explicitly C<import>ed later:
+
+ use Error;
+
+ $SIG{__WARN__} = ...;
+
+ import Error qw( :warndie );
+
+=head2 EXAMPLE
+
+The C<__DIE__> handler turns messages such as
+
+ Can't call method "foo" on an undefined value at examples/warndie.pl line 16.
+
+into
+
+ Unhandled perl error caught at toplevel:
+
+   Can't call method "foo" on an undefined value
+
+ Thrown from: examples/warndie.pl:16
+
+ Full stack trace:
+
+         main::inner('undef') called at examples/warndie.pl line 20
+         main::outer('undef') called at examples/warndie.pl line 23
+
+=cut
+
+=head1 SEE ALSO
+
+See L<Exception::Class> for a different module providing Object-Oriented
+exception handling, along with a convenient syntax for declaring hierarchies
+for them. It doesn't provide Error's syntactic sugar of C<try { ... }>,
+C<catch { ... }>, etc. which may be a good thing or a bad thing based
+on what you want. (Because Error's syntactic sugar tends to break.)
+
+L<Error::Exception> aims to combine L<Error> and L<Exception::Class>
+"with correct stringification".
+
+L<TryCatch> and L<Try::Tiny> are similar in concept to Error.pm, only
+providing a syntax that hopefully breaks less.
+
+=head1 KNOWN BUGS
+
+None, but that does not mean there are not any.
+
+=head1 AUTHORS
+
+Graham Barr <gbarr@pobox.com>
+
+The code that inspired me to write this was originally written by
+Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
+<jglick@sig.bsh.com>.
+
+C<:warndie> handlers added by Paul Evans <leonerd@leonerd.org.uk>
+
+=head1 MAINTAINER
+
+Shlomi Fish, L<http://www.shlomifish.org/> .
+
+=head1 PAST MAINTAINERS
+
+Arun Kumar U <u_arunkumar@yahoo.com>
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-8  Graham Barr. All rights reserved.
+This program is free software; you can redistribute it and/or modify it
+under the same terms as Perl itself.
+
+=cut
diff --git a/perl/FromCPAN/Mail/Address.pm b/perl/FromCPAN/Mail/Address.pm
new file mode 100644 (file)
index 0000000..683d490
--- /dev/null
@@ -0,0 +1,280 @@
+# Copyrights 1995-2018 by [Mark Overmeer].
+#  For other contributors see ChangeLog.
+# See the manual pages for details on the licensing terms.
+# Pod stripped from pm file by OODoc 2.02.
+# This code is part of the bundle MailTools.  Meta-POD processed with
+# OODoc into POD and HTML manual-pages.  See README.md for Copyright.
+# Licensed under the same terms as Perl itself.
+
+package Mail::Address;
+use vars '$VERSION';
+$VERSION = '2.20';
+
+use strict;
+
+use Carp;
+
+# use locale;   removed in version 1.78, because it causes taint problems
+
+sub Version { our $VERSION }
+
+
+
+# given a comment, attempt to extract a person's name
+sub _extract_name
+{   # This function can be called as method as well
+    my $self = @_ && ref $_[0] ? shift : undef;
+
+    local $_ = shift
+        or return '';
+
+    # Using encodings, too hard. See Mail::Message::Field::Full.
+    return '' if m/\=\?.*?\?\=/;
+
+    # trim whitespace
+    s/^\s+//;
+    s/\s+$//;
+    s/\s+/ /;
+
+    # Disregard numeric names (e.g. 123456.1234@compuserve.com)
+    return "" if /^[\d ]+$/;
+
+    s/^\((.*)\)$/$1/; # remove outermost parenthesis
+    s/^"(.*)"$/$1/;   # remove outer quotation marks
+    s/\(.*?\)//g;     # remove minimal embedded comments
+    s/\\//g;          # remove all escapes
+    s/^"(.*)"$/$1/;   # remove internal quotation marks
+    s/^([^\s]+) ?, ?(.*)$/$2 $1/; # reverse "Last, First M." if applicable
+    s/,.*//;
+
+    # Change casing only when the name contains only upper or only
+    # lower cased characters.
+    unless( m/[A-Z]/ && m/[a-z]/ )
+    {   # Set the case of the name to first char upper rest lower
+        s/\b(\w+)/\L\u$1/igo;  # Upcase first letter on name
+        s/\bMc(\w)/Mc\u$1/igo; # Scottish names such as 'McLeod'
+        s/\bo'(\w)/O'\u$1/igo; # Irish names such as 'O'Malley, O'Reilly'
+        s/\b(x*(ix)?v*(iv)?i*)\b/\U$1/igo; # Roman numerals, eg 'Level III Support'
+    }
+
+    # some cleanup
+    s/\[[^\]]*\]//g;
+    s/(^[\s'"]+|[\s'"]+$)//g;
+    s/\s{2,}/ /g;
+
+    $_;
+}
+
+sub _tokenise
+{   local $_ = join ',', @_;
+    my (@words,$snippet,$field);
+
+    s/\A\s+//;
+    s/[\r\n]+/ /g;
+
+    while ($_ ne '')
+    {   $field = '';
+        if(s/^\s*\(/(/ )    # (...)
+        {   my $depth = 0;
+
+     PAREN: while(s/^(\(([^\(\)\\]|\\.)*)//)
+            {   $field .= $1;
+                $depth++;
+                while(s/^(([^\(\)\\]|\\.)*\)\s*)//)
+                {   $field .= $1;
+                    last PAREN unless --$depth;
+                   $field .= $1 if s/^(([^\(\)\\]|\\.)+)//;
+                }
+            }
+
+            carp "Unmatched () '$field' '$_'"
+                if $depth;
+
+            $field =~ s/\s+\Z//;
+            push @words, $field;
+
+            next;
+        }
+
+        if( s/^("(?:[^"\\]+|\\.)*")\s*//       # "..."
+         || s/^(\[(?:[^\]\\]+|\\.)*\])\s*//    # [...]
+         || s/^([^\s()<>\@,;:\\".[\]]+)\s*//
+         || s/^([()<>\@,;:\\".[\]])\s*//
+          )
+        {   push @words, $1;
+            next;
+        }
+
+        croak "Unrecognised line: $_";
+    }
+
+    push @words, ",";
+    \@words;
+}
+
+sub _find_next
+{   my ($idx, $tokens, $len) = @_;
+
+    while($idx < $len)
+    {   my $c = $tokens->[$idx];
+        return $c if $c eq ',' || $c eq ';' || $c eq '<';
+        $idx++;
+    }
+
+    "";
+}
+
+sub _complete
+{   my ($class, $phrase, $address, $comment) = @_;
+
+    @$phrase || @$comment || @$address
+       or return undef;
+
+    my $o = $class->new(join(" ",@$phrase), join("",@$address), join(" ",@$comment));
+    @$phrase = @$address = @$comment = ();
+    $o;
+}
+
+#------------
+
+sub new(@)
+{   my $class = shift;
+    bless [@_], $class;
+}
+
+
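+# Parse one or more address header lines into a list of Mail::Address
+# objects, splitting the token stream into phrase, address and comment
+# parts at each ',' or ';' delimiter.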
+sub parse(@)
+{   my $class = shift;
+    my @line  = grep {defined} @_;
+    my $line  = join '', @line;
+
+    my (@phrase, @comment, @address, @objs);
+    my ($depth, $idx) = (0, 0);
+
+    my $tokens  = _tokenise @line;
+    my $len     = @$tokens;
+    my $next    = _find_next $idx, $tokens, $len;
+
+    local $_;
+    for(my $idx = 0; $idx < $len; $idx++)
+    {   $_ = $tokens->[$idx];
+
+        if(substr($_,0,1) eq '(') { push @comment, $_ }
+        elsif($_ eq '<')    { $depth++ }
+        elsif($_ eq '>')    { $depth-- if $depth }
+        elsif($_ eq ',' || $_ eq ';')
+        {   warn "Unmatched '<>' in $line" if $depth;
+            my $o = $class->_complete(\@phrase, \@address, \@comment);
+            push @objs, $o if defined $o;
+            $depth = 0;
+            $next = _find_next $idx+1, $tokens, $len;
+        }
+        elsif($depth)       { push @address, $_ }
+        elsif($next eq '<') { push @phrase,  $_ }
+        elsif( /^[.\@:;]$/ || !@address || $address[-1] =~ /^[.\@:;]$/ )
+        {   push @address, $_ }
+        else
+        {   warn "Unmatched '<>' in $line" if $depth;
+            my $o = $class->_complete(\@phrase, \@address, \@comment);
+            push @objs, $o if defined $o;
+            $depth = 0;
+            push @address, $_;
+        }
+    }
+    @objs;
+}
+
+#------------
+
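+# Positional accessors into the [phrase, address, comment] triple that
+# new() blesses; when given an argument, set_or_get() stores it and
+# returns the previous value.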
+sub phrase  { shift->set_or_get(0, @_) }
+sub address { shift->set_or_get(1, @_) }
+sub comment { shift->set_or_get(2, @_) }
+
+sub set_or_get($)
+{   my ($self, $i) = (shift, shift);
+    @_ or return $self->[$i];
+
+    my $val = $self->[$i];
+    $self->[$i] = shift if @_;
+    $val;
+}
+
+
+my $atext = '[\-\w !#$%&\'*+/=?^`{|}~]';
+sub format
+{   my @addrs;
+
+    foreach (@_)
+    {   my ($phrase, $email, $comment) = @$_;
+        my @addr;
+
+        if(defined $phrase && length $phrase)
+        {   push @addr
+              , $phrase =~ /^(?:\s*$atext\s*)+$/o ? $phrase
+              : $phrase =~ /(?<!\\)"/             ? $phrase
+              :                                    qq("$phrase");
+
+            push @addr, "<$email>"
+                if defined $email && length $email;
+        }
+        elsif(defined $email && length $email)
+        {   push @addr, $email;
+        }
+
+        if(defined $comment && $comment =~ /\S/)
+        {   $comment =~ s/^\s*\(?/(/;
+            $comment =~ s/\)?\s*$/)/;
+        }
+
+        push @addr, $comment
+            if defined $comment && length $comment;
+
+        push @addrs, join(" ", @addr)
+            if @addr;
+    }
+
+    join ", ", @addrs;
+}
+
+#------------
+
+sub name
+{   my $self   = shift;
+    my $phrase = $self->phrase;
+    my $addr   = $self->address;
+
+    $phrase    = $self->comment
+        unless defined $phrase && length $phrase;
+
+    my $name   = $self->_extract_name($phrase);
+
+    # first.last@domain address
+    if($name eq '' && $addr =~ /([^\%\.\@_]+([\._][^\%\.\@_]+)+)[\@\%]/)
+    {   ($name  = $1) =~ s/[\._]+/ /g;
+       $name   = _extract_name $name;
+    }
+
+    if($name eq '' && $addr =~ m#/g=#i)    # X400 style address
+    {   my ($f) = $addr =~ m#g=([^/]*)#i;
+       my ($l) = $addr =~ m#s=([^/]*)#i;
+       $name   = _extract_name "$f $l";
+    }
+
+    length $name ? $name : undef;
+}
+
+
+sub host
+{   my $addr = shift->address || '';
+    my $i    = rindex $addr, '@';
+    $i >= 0 ? substr($addr, $i+1) : undef;
+}
+
+
+sub user
+{   my $addr = shift->address || '';
+    my $i    = rindex $addr, '@';
+    $i >= 0 ? substr($addr,0,$i) : $addr;
+}
+
+1;
index ffa09ace924e0a7b079d039e905363435b08cf9b..16ebcc612ce4acb4fba6511d5b388184934cb22a 100644 (file)
@@ -9,7 +9,10 @@ package Git;
 
 use 5.008;
 use strict;
+use warnings;
 
+use File::Temp ();
+use File::Spec ();
 
 BEGIN {
 
@@ -101,7 +104,7 @@ =head1 DESCRIPTION
 
 
 use Carp qw(carp croak); # but croak is bad - throw instead
-use Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
 use Cwd qw(abs_path cwd);
 use IPC::Open2 qw(open2);
 use Fcntl qw(SEEK_SET SEEK_CUR);
@@ -189,7 +192,6 @@ sub repository {
                };
 
                if ($dir) {
-                       _verify_require();
                        File::Spec->file_name_is_absolute($dir) or $dir = $opts{Directory} . '/' . $dir;
                        $opts{Repository} = abs_path($dir);
 
@@ -534,7 +536,9 @@ sub version {
 sub get_tz_offset {
        # some systems don't handle or mishandle %z, so be creative.
        my $t = shift || time;
-       my $gm = timegm(localtime($t));
+       my @t = localtime($t);
+       $t[5] += 1900;
+       my $gm = timegm(@t);
        my $sign = qw( + + - )[ $gm <=> $t ];
        return sprintf("%s%02d%02d", $sign, (gmtime(abs($t - $gm)))[2,1]);
 }
@@ -880,77 +884,6 @@ sub ident_person {
        return "$ident[0] <$ident[1]>";
 }
 
-=item parse_mailboxes
-
-Return an array of mailboxes extracted from a string.
-
-=cut
-
-# Very close to Mail::Address's parser, but we still have minor
-# differences in some cases (see t9000 for examples).
-sub parse_mailboxes {
-       my $re_comment = qr/\((?:[^)]*)\)/;
-       my $re_quote = qr/"(?:[^\"\\]|\\.)*"/;
-       my $re_word = qr/(?:[^]["\s()<>:;@\\,.]|\\.)+/;
-
-       # divide the string in tokens of the above form
-       my $re_token = qr/(?:$re_quote|$re_word|$re_comment|\S)/;
-       my @tokens = map { $_ =~ /\s*($re_token)\s*/g } @_;
-       my $end_of_addr_seen = 0;
-
-       # add a delimiter to simplify treatment for the last mailbox
-       push @tokens, ",";
-
-       my (@addr_list, @phrase, @address, @comment, @buffer) = ();
-       foreach my $token (@tokens) {
-               if ($token =~ /^[,;]$/) {
-                       # if buffer still contains undeterminated strings
-                       # append it at the end of @address or @phrase
-                       if ($end_of_addr_seen) {
-                               push @phrase, @buffer;
-                       } else {
-                               push @address, @buffer;
-                       }
-
-                       my $str_phrase = join ' ', @phrase;
-                       my $str_address = join '', @address;
-                       my $str_comment = join ' ', @comment;
-
-                       # quote are necessary if phrase contains
-                       # special characters
-                       if ($str_phrase =~ /[][()<>:;@\\,.\000-\037\177]/) {
-                               $str_phrase =~ s/(^|[^\\])"/$1/g;
-                               $str_phrase = qq["$str_phrase"];
-                       }
-
-                       # add "<>" around the address if necessary
-                       if ($str_address ne "" && $str_phrase ne "") {
-                               $str_address = qq[<$str_address>];
-                       }
-
-                       my $str_mailbox = "$str_phrase $str_address $str_comment";
-                       $str_mailbox =~ s/^\s*|\s*$//g;
-                       push @addr_list, $str_mailbox if ($str_mailbox);
-
-                       @phrase = @address = @comment = @buffer = ();
-                       $end_of_addr_seen = 0;
-               } elsif ($token =~ /^\(/) {
-                       push @comment, $token;
-               } elsif ($token eq "<") {
-                       push @phrase, (splice @address), (splice @buffer);
-               } elsif ($token eq ">") {
-                       $end_of_addr_seen = 1;
-                       push @address, (splice @buffer);
-               } elsif ($token eq "@" && !$end_of_addr_seen) {
-                       push @address, (splice @buffer), "@";
-               } else {
-                       push @buffer, $token;
-               }
-       }
-
-       return @addr_list;
-}
-
 =item hash_object ( TYPE, FILENAME )
 
 Compute the SHA1 object id of the given C<FILENAME> considering it is
@@ -1359,8 +1292,6 @@ sub temp_release {
 sub _temp_cache {
        my ($self, $name) = _maybe_self(@_);
 
-       _verify_require();
-
        my $temp_fd = \$TEMP_FILEMAP{$name};
        if (defined $$temp_fd and $$temp_fd->opened) {
                if ($TEMP_FILES{$$temp_fd}{locked}) {
@@ -1394,11 +1325,6 @@ sub _temp_cache {
        $$temp_fd;
 }
 
-sub _verify_require {
-       eval { require File::Temp; require File::Spec; };
-       $@ and throw Error::Simple($@);
-}
-
 =item temp_reset ( FILEHANDLE )
 
 Truncates and resets the position of the C<FILEHANDLE>.
@@ -1763,7 +1689,6 @@ sub DESTROY {
 # Pipe implementation for ActiveState Perl.
 
 package Git::activestate_pipe;
-use strict;
 
 sub TIEHANDLE {
        my ($class, @params) = @_;
index 836a5c23826328f7175faa6f9008b0534a4d1861..dba96fff0aecef6eac83aacdaa54b46806cdb0a4 100644 (file)
@@ -18,7 +18,7 @@ BEGIN
 
 sub __bootstrap_locale_messages {
        our $TEXTDOMAIN = 'git';
-       our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '++LOCALEDIR++';
+       our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@';
 
        require POSIX;
        POSIX->import(qw(setlocale));
diff --git a/perl/Git/LoadCPAN.pm b/perl/Git/LoadCPAN.pm
new file mode 100644 (file)
index 0000000..e5585e7
--- /dev/null
@@ -0,0 +1,104 @@
+package Git::LoadCPAN;
+use 5.008;
+use strict;
+use warnings;
+
+=head1 NAME
+
+Git::LoadCPAN - Wrapper for loading modules from the CPAN (OS) or Git's own copy
+
+=head1 DESCRIPTION
+
+The Perl code in Git depends on some modules from the CPAN, but we
+don't want to make those a hard requirement for anyone building from
+source.
+
+Therefore the L<Git::LoadCPAN> namespace shipped with Git contains
+wrapper modules like C<Git::LoadCPAN::Module::Name> that will first
+attempt to load C<Module::Name> from the OS, and if that doesn't work
+will fall back on C<FromCPAN::Module::Name> shipped with Git itself.
+
+Distributors will usually not ship Git's Git::FromCPAN tree at all,
+excluding it via the C<NO_PERL_CPAN_FALLBACKS> option and preferring
+to use their own packaging of CPAN modules instead.
+
+This module is only intended to be used for code shipping in the
+C<git.git> repository. Use it for anything else at your peril!
+
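+As a sketch of the pattern (C<Some::Module> here is a placeholder, not
+a real module), a wrapper under this namespace is little more than:
+
+    package Git::LoadCPAN::Some::Module;
+    use Git::LoadCPAN (
+        module => 'Some::Module',
+        import => 1,
+    );
+
+    1;
+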
+=cut
+
+# NO_PERL_CPAN_FALLBACKS_STR evades the sed search-replace from the
+# Makefile, and allows for detecting whether the module is loaded from
+# perl/Git as opposed to perl/build/Git, which is useful for one-off
+# testing without having Error.pm et al installed.
+use constant NO_PERL_CPAN_FALLBACKS_STR => '@@' . 'NO_PERL_CPAN_FALLBACKS' . '@@';
+use constant NO_PERL_CPAN_FALLBACKS => (
+       q[@@NO_PERL_CPAN_FALLBACKS@@] ne ''
+       and
+       q[@@NO_PERL_CPAN_FALLBACKS@@] ne NO_PERL_CPAN_FALLBACKS_STR
+);
+
+sub import {
+       shift;
+       my $caller = caller;
+       my %args = @_;
+       my $module = exists $args{module} ? delete $args{module} : die "BUG: Expected 'module' parameter!";
+       my $import = exists $args{import} ? delete $args{import} : die "BUG: Expected 'import' parameter!";
+       die "BUG: Too many arguments!" if keys %args;
+
+       # Foo::Bar to Foo/Bar.pm
+       my $package_pm = $module;
+       $package_pm =~ s[::][/]g;
+       $package_pm .= '.pm';
+
+       eval {
+               require $package_pm;
+               1;
+       } or do {
+               my $error = $@ || "Zombie Error";
+
+               if (NO_PERL_CPAN_FALLBACKS) {
+                       chomp(my $error = sprintf <<'THEY_PROMISED', $module);
+BUG: The '%s' module is not here, but NO_PERL_CPAN_FALLBACKS was set!
+
+Git needs this Perl module from the CPAN, and will by default ship
+with a copy of it. This Git was built with NO_PERL_CPAN_FALLBACKS,
+meaning that whoever built it promised to provide this module.
+
+You're seeing this error because they broke that promise, and we can't
+load our fallback version, since we were asked not to install it.
+
+If you're seeing this error and didn't package Git yourself, the
+package you're using is broken, or your system is broken. This error
+won't appear if Git is built without NO_PERL_CPAN_FALLBACKS (instead
+we'll use our fallback version of the module).
+THEY_PROMISED
+                       die $error;
+               }
+
+               my $Git_LoadCPAN_pm_path = $INC{"Git/LoadCPAN.pm"} || die "BUG: Should have our own path from %INC!";
+
+               require File::Basename;
+               my $Git_LoadCPAN_pm_root = File::Basename::dirname($Git_LoadCPAN_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_LoadCPAN_pm_path'!";
+
+               require File::Spec;
+               my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_LoadCPAN_pm_root, '..', 'FromCPAN');
+               die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" unless -d $Git_pm_FromCPAN_root;
+
+               local @INC = ($Git_pm_FromCPAN_root, @INC);
+               require $package_pm;
+       };
+
+       if ($import) {
+               no strict 'refs';
+               *{"${caller}::import"} = sub {
+                       shift;
+                       use strict 'refs';
+                       unshift @_, $module;
+                       goto &{"${module}::import"};
+               };
+               use strict 'refs';
+       }
+}
+
+1;
diff --git a/perl/Git/LoadCPAN/Error.pm b/perl/Git/LoadCPAN/Error.pm
new file mode 100644 (file)
index 0000000..c6d2c45
--- /dev/null
@@ -0,0 +1,10 @@
+package Git::LoadCPAN::Error;
+use 5.008;
+use strict;
+use warnings;
+use Git::LoadCPAN (
+       module => 'Error',
+       import => 1,
+);
+
+1;
diff --git a/perl/Git/LoadCPAN/Mail/Address.pm b/perl/Git/LoadCPAN/Mail/Address.pm
new file mode 100644 (file)
index 0000000..f70a4f0
--- /dev/null
@@ -0,0 +1,10 @@
+package Git::LoadCPAN::Mail::Address;
+use 5.008;
+use strict;
+use warnings;
+use Git::LoadCPAN (
+       module => 'Mail::Address',
+       import => 0,
+);
+
+1;
index bc4eed3d75461444f8af0e27e2930ccb25663312..991a5885e9230b1f55bd6f3b7f7b53321bf9e562 100644 (file)
@@ -1405,7 +1405,7 @@ sub parse_svn_date {
                $ENV{TZ} = 'UTC';
 
                my $epoch_in_UTC =
-                   Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y - 1900);
+                   Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y);
 
                # Determine our local timezone (including DST) at the
                # time of $epoch_in_UTC.  $Git::SVN::Log::TZ stored the
diff --git a/perl/Makefile b/perl/Makefile
deleted file mode 100644 (file)
index f657de2..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Makefile for perl support modules and routine
-#
-makfile:=perl.mak
-modules =
-
-PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
-prefix_SQ = $(subst ','\'',$(prefix))
-localedir_SQ = $(subst ','\'',$(localedir))
-
-ifndef V
-       QUIET = @
-endif
-
-all install instlibdir: $(makfile)
-       $(QUIET)$(MAKE) -f $(makfile) $@
-
-clean:
-       $(QUIET)test -f $(makfile) && $(MAKE) -f $(makfile) $@ || exit 0
-       $(RM) ppport.h
-       $(RM) $(makfile)
-       $(RM) $(makfile).old
-       $(RM) PM.stamp
-
-$(makfile): PM.stamp
-
-ifdef NO_PERL_MAKEMAKER
-instdir_SQ = $(subst ','\'',$(prefix)/lib)
-
-modules += Git
-modules += Git/I18N
-modules += Git/IndexInfo
-modules += Git/Packet
-modules += Git/SVN
-modules += Git/SVN/Memoize/YAML
-modules += Git/SVN/Fetcher
-modules += Git/SVN/Editor
-modules += Git/SVN/GlobSpec
-modules += Git/SVN/Log
-modules += Git/SVN/Migration
-modules += Git/SVN/Prompt
-modules += Git/SVN/Ra
-modules += Git/SVN/Utils
-
-$(makfile): ../GIT-CFLAGS Makefile
-       echo all: private-Error.pm Git.pm Git/I18N.pm > $@
-       set -e; \
-       for i in $(modules); \
-       do \
-               if test $$i = $${i%/*}; \
-               then \
-                       subdir=; \
-               else \
-                       subdir=/$${i%/*}; \
-               fi; \
-               echo '  $(RM) blib/lib/'$$i'.pm' >> $@; \
-               echo '  mkdir -p blib/lib'$$subdir >> $@; \
-               echo '  cp '$$i'.pm blib/lib/'$$i'.pm' >> $@; \
-       done
-       echo '  $(RM) blib/lib/Error.pm' >> $@
-       '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
-       echo '  cp private-Error.pm blib/lib/Error.pm' >> $@
-       echo install: >> $@
-       set -e; \
-       for i in $(modules); \
-       do \
-               if test $$i = $${i%/*}; \
-               then \
-                       subdir=; \
-               else \
-                       subdir=/$${i%/*}; \
-               fi; \
-               echo '  $(RM) "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
-               echo '  mkdir -p "$$(DESTDIR)$(instdir_SQ)'$$subdir'"' >> $@; \
-               echo '  cp '$$i'.pm "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
-       done
-       echo '  $(RM) "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
-       '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
-       echo '  cp private-Error.pm "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
-       echo instlibdir: >> $@
-       echo '  echo $(instdir_SQ)' >> $@
-else
-$(makfile): Makefile.PL ../GIT-CFLAGS
-       $(PERL_PATH) $< PREFIX='$(prefix_SQ)' INSTALL_BASE='' --localedir='$(localedir_SQ)'
-endif
-
-# this is just added comfort for calling make directly in perl dir
-# (even though GIT-CFLAGS aren't used yet. If ever)
-../GIT-CFLAGS:
-       $(MAKE) -C .. GIT-CFLAGS
diff --git a/perl/Makefile.PL b/perl/Makefile.PL
deleted file mode 100644 (file)
index 3f29ba9..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-use strict;
-use warnings;
-use ExtUtils::MakeMaker;
-use Getopt::Long;
-use File::Find;
-
-# Don't forget to update the perl/Makefile, too.
-# Don't forget to test with NO_PERL_MAKEMAKER=YesPlease
-
-# Sanity: die at first unknown option
-Getopt::Long::Configure qw/ pass_through /;
-
-my $localedir = '';
-GetOptions("localedir=s" => \$localedir);
-
-sub MY::postamble {
-       return <<'MAKE_FRAG';
-instlibdir:
-       @echo '$(INSTALLSITELIB)'
-
-ifneq (,$(DESTDIR))
-ifeq (0,$(shell expr '$(MM_VERSION)' '>' 6.10))
-$(error ExtUtils::MakeMaker version "$(MM_VERSION)" is older than 6.11 and so \
-       is likely incompatible with the DESTDIR mechanism.  Try setting \
-       NO_PERL_MAKEMAKER=1 instead)
-endif
-endif
-
-MAKE_FRAG
-}
-
-# Find all the .pm files in "Git/" and Git.pm
-my %pm;
-find sub {
-       return unless /\.pm$/;
-
-       # sometimes File::Find prepends a ./  Strip it.
-       my $pm_path = $File::Find::name;
-       $pm_path =~ s{^\./}{};
-
-       $pm{$pm_path} = '$(INST_LIBDIR)/'.$pm_path;
-}, "Git", "Git.pm";
-
-
-# We come with our own bundled Error.pm. It's not in the set of default
-# Perl modules so install it if it's not available on the system yet.
-if ( !eval { require Error } || $Error::VERSION < 0.15009) {
-       $pm{'private-Error.pm'} = '$(INST_LIBDIR)/Error.pm';
-}
-
-# redirect stdout, otherwise the message "Writing perl.mak for Git"
-# disrupts the output for the target 'instlibdir'
-open STDOUT, ">&STDERR";
-
-WriteMakefile(
-       NAME            => 'Git',
-       VERSION_FROM    => 'Git.pm',
-       PM              => \%pm,
-       PM_FILTER       => qq[\$(PERL) -pe "s<\\Q++LOCALEDIR++\\E><$localedir>"],
-       MAKEFILE        => 'perl.mak',
-       INSTALLSITEMAN3DIR => '$(SITEPREFIX)/share/man/man3'
-);
diff --git a/perl/private-Error.pm b/perl/private-Error.pm
deleted file mode 100644 (file)
index 6098135..0000000
+++ /dev/null
@@ -1,827 +0,0 @@
-# Error.pm
-#
-# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
-# This program is free software; you can redistribute it and/or
-# modify it under the same terms as Perl itself.
-#
-# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
-# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
-#
-# but modified ***significantly***
-
-package Error;
-
-use strict;
-use vars qw($VERSION);
-use 5.004;
-
-$VERSION = "0.15009";
-
-use overload (
-       '""'       =>   'stringify',
-       '0+'       =>   'value',
-       'bool'     =>   sub { return 1; },
-       'fallback' =>   1
-);
-
-$Error::Depth = 0;     # Depth to pass to caller()
-$Error::Debug = 0;     # Generate verbose stack traces
-@Error::STACK = ();    # Clause stack for try
-$Error::THROWN = undef;        # last error thrown, a workaround until die $ref works
-
-my $LAST;              # Last error created
-my %ERROR;             # Last error associated with package
-
-sub throw_Error_Simple
-{
-    my $args = shift;
-    return Error::Simple->new($args->{'text'});
-}
-
-$Error::ObjectifyCallback = \&throw_Error_Simple;
-
-
-# Exported subs are defined in Error::subs
-
-sub import {
-    shift;
-    local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
-    Error::subs->import(@_);
-}
-
-# I really want to use last for the name of this method, but it is a keyword
-# which prevent the syntax  last Error
-
-sub prior {
-    shift; # ignore
-
-    return $LAST unless @_;
-
-    my $pkg = shift;
-    return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
-       unless ref($pkg);
-
-    my $obj = $pkg;
-    my $err = undef;
-    if($obj->isa('HASH')) {
-       $err = $obj->{'__Error__'}
-           if exists $obj->{'__Error__'};
-    }
-    elsif($obj->isa('GLOB')) {
-       $err = ${*$obj}{'__Error__'}
-           if exists ${*$obj}{'__Error__'};
-    }
-
-    $err;
-}
-
-sub flush {
-    shift; #ignore
-
-    unless (@_) {
-       $LAST = undef;
-       return;
-    }
-
-    my $pkg = shift;
-    return unless ref($pkg);
-
-    undef $ERROR{$pkg} if defined $ERROR{$pkg};
-}
-
-# Return as much information as possible about where the error
-# happened. The -stacktrace element only exists if $Error::DEBUG
-# was set when the error was created
-
-sub stacktrace {
-    my $self = shift;
-
-    return $self->{'-stacktrace'}
-       if exists $self->{'-stacktrace'};
-
-    my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
-
-    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
-       unless($text =~ /\n$/s);
-
-    $text;
-}
-
-# Allow error propagation, ie
-#
-# $ber->encode(...) or
-#    return Error->prior($ber)->associate($ldap);
-
-sub associate {
-    my $err = shift;
-    my $obj = shift;
-
-    return unless ref($obj);
-
-    if($obj->isa('HASH')) {
-       $obj->{'__Error__'} = $err;
-    }
-    elsif($obj->isa('GLOB')) {
-       ${*$obj}{'__Error__'} = $err;
-    }
-    $obj = ref($obj);
-    $ERROR{ ref($obj) } = $err;
-
-    return;
-}
-
-sub new {
-    my $self = shift;
-    my($pkg,$file,$line) = caller($Error::Depth);
-
-    my $err = bless {
-       '-package' => $pkg,
-       '-file'    => $file,
-       '-line'    => $line,
-       @_
-    }, $self;
-
-    $err->associate($err->{'-object'})
-       if(exists $err->{'-object'});
-
-    # To always create a stacktrace would be very inefficient, so
-    # we only do it if $Error::Debug is set
-
-    if($Error::Debug) {
-       require Carp;
-       local $Carp::CarpLevel = $Error::Depth;
-       my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
-       my $trace = Carp::longmess($text);
-       # Remove try calls from the trace
-       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
-       $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
-       $err->{'-stacktrace'} = $trace
-    }
-
-    $@ = $LAST = $ERROR{$pkg} = $err;
-}
-
-# Throw an error. this contains some very gory code.
-
-sub throw {
-    my $self = shift;
-    local $Error::Depth = $Error::Depth + 1;
-
-    # if we are not rethrow-ing then create the object to throw
-    $self = $self->new(@_) unless ref($self);
-
-    die $Error::THROWN = $self;
-}
-
-# syntactic sugar for
-#
-#    die with Error( ... );
-
-sub with {
-    my $self = shift;
-    local $Error::Depth = $Error::Depth + 1;
-
-    $self->new(@_);
-}
-
-# syntactic sugar for
-#
-#    record Error( ... ) and return;
-
-sub record {
-    my $self = shift;
-    local $Error::Depth = $Error::Depth + 1;
-
-    $self->new(@_);
-}
-
-# catch clause for
-#
-# try { ... } catch CLASS with { ... }
-
-sub catch {
-    my $pkg = shift;
-    my $code = shift;
-    my $clauses = shift || {};
-    my $catch = $clauses->{'catch'} ||= [];
-
-    unshift @$catch,  $pkg, $code;
-
-    $clauses;
-}
-
-# Object query methods
-
-sub object {
-    my $self = shift;
-    exists $self->{'-object'} ? $self->{'-object'} : undef;
-}
-
-sub file {
-    my $self = shift;
-    exists $self->{'-file'} ? $self->{'-file'} : undef;
-}
-
-sub line {
-    my $self = shift;
-    exists $self->{'-line'} ? $self->{'-line'} : undef;
-}
-
-sub text {
-    my $self = shift;
-    exists $self->{'-text'} ? $self->{'-text'} : undef;
-}
-
-# overload methods
-
-sub stringify {
-    my $self = shift;
-    defined $self->{'-text'} ? $self->{'-text'} : "Died";
-}
-
-sub value {
-    my $self = shift;
-    exists $self->{'-value'} ? $self->{'-value'} : undef;
-}
-
-package Error::Simple;
-
-@Error::Simple::ISA = qw(Error);
-
-sub new {
-    my $self  = shift;
-    my $text  = "" . shift;
-    my $value = shift;
-    my(@args) = ();
-
-    local $Error::Depth = $Error::Depth + 1;
-
-    @args = ( -file => $1, -line => $2)
-       if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
-    push(@args, '-value', 0 + $value)
-       if defined($value);
-
-    $self->SUPER::new(-text => $text, @args);
-}
-
-sub stringify {
-    my $self = shift;
-    my $text = $self->SUPER::stringify;
-    $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
-       unless($text =~ /\n$/s);
-    $text;
-}
-
-##########################################################################
-##########################################################################
-
-# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
-# Peter Seibel <peter@weblogic.com>
-
-package Error::subs;
-
-use Exporter ();
-use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
-
-@EXPORT_OK   = qw(try with finally except otherwise);
-%EXPORT_TAGS = (try => \@EXPORT_OK);
-
-@ISA = qw(Exporter);
-
-
-sub blessed {
-       my $item = shift;
-       local $@; # don't kill an outer $@
-       ref $item and eval { $item->can('can') };
-}
-
-
-sub run_clauses ($$$\@) {
-    my($clauses,$err,$wantarray,$result) = @_;
-    my $code = undef;
-
-    $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
-
-    CATCH: {
-
-       # catch
-       my $catch;
-       if(defined($catch = $clauses->{'catch'})) {
-           my $i = 0;
-
-           CATCHLOOP:
-           for( ; $i < @$catch ; $i += 2) {
-               my $pkg = $catch->[$i];
-               unless(defined $pkg) {
-                   #except
-                   splice(@$catch,$i,2,$catch->[$i+1]->());
-                   $i -= 2;
-                   next CATCHLOOP;
-               }
-               elsif(blessed($err) && $err->isa($pkg)) {
-                   $code = $catch->[$i+1];
-                   while(1) {
-                       my $more = 0;
-                       local($Error::THROWN);
-                       my $ok = eval {
-                           if($wantarray) {
-                               @{$result} = $code->($err,\$more);
-                           }
-                           elsif(defined($wantarray)) {
-                               @{$result} = ();
-                               $result->[0] = $code->($err,\$more);
-                           }
-                           else {
-                               $code->($err,\$more);
-                           }
-                           1;
-                       };
-                       if( $ok ) {
-                           next CATCHLOOP if $more;
-                           undef $err;
-                       }
-                       else {
-                           $err = defined($Error::THROWN)
-                                   ? $Error::THROWN : $@;
-                $err = $Error::ObjectifyCallback->({'text' =>$err})
-                    unless ref($err);
-                       }
-                       last CATCH;
-                   };
-               }
-           }
-       }
-
-       # otherwise
-       my $owise;
-       if(defined($owise = $clauses->{'otherwise'})) {
-           my $code = $clauses->{'otherwise'};
-           my $more = 0;
-           my $ok = eval {
-               if($wantarray) {
-                   @{$result} = $code->($err,\$more);
-               }
-               elsif(defined($wantarray)) {
-                   @{$result} = ();
-                   $result->[0] = $code->($err,\$more);
-               }
-               else {
-                   $code->($err,\$more);
-               }
-               1;
-           };
-           if( $ok ) {
-               undef $err;
-           }
-           else {
-               $err = defined($Error::THROWN)
-                       ? $Error::THROWN : $@;
-
-        $err = $Error::ObjectifyCallback->({'text' =>$err})
-            unless ref($err);
-           }
-       }
-    }
-    $err;
-}
-
-sub try (&;$) {
-    my $try = shift;
-    my $clauses = @_ ? shift : {};
-    my $ok = 0;
-    my $err = undef;
-    my @result = ();
-
-    unshift @Error::STACK, $clauses;
-
-    my $wantarray = wantarray();
-
-    do {
-       local $Error::THROWN = undef;
-    local $@ = undef;
-
-       $ok = eval {
-           if($wantarray) {
-               @result = $try->();
-           }
-           elsif(defined $wantarray) {
-               $result[0] = $try->();
-           }
-           else {
-               $try->();
-           }
-           1;
-       };
-
-       $err = defined($Error::THROWN) ? $Error::THROWN : $@
-           unless $ok;
-    };
-
-    shift @Error::STACK;
-
-    $err = run_clauses($clauses,$err,wantarray,@result)
-       unless($ok);
-
-    $clauses->{'finally'}->()
-       if(defined($clauses->{'finally'}));
-
-    if (defined($err))
-    {
-        if (blessed($err) && $err->can('throw'))
-        {
-            throw $err;
-        }
-        else
-        {
-            die $err;
-        }
-    }
-
-    wantarray ? @result : $result[0];
-}
-
-# Each clause adds a sub to the list of clauses. The finally clause is
-# always the last, and the otherwise clause is always added just before
-# the finally clause.
-#
-# All clauses, except the finally clause, add a sub which takes one argument
-# this argument will be the error being thrown. The sub will return a code ref
-# if that clause can handle that error, otherwise undef is returned.
-#
-# The otherwise clause adds a sub which unconditionally returns the users
-# code reference, this is why it is forced to be last.
-#
-# The catch clause is defined in Error.pm, as the syntax causes it to
-# be called as a method
-
-sub with (&;$) {
-    @_
-}
-
-sub finally (&) {
-    my $code = shift;
-    my $clauses = { 'finally' => $code };
-    $clauses;
-}
-
-# The except clause is a block which returns a hashref or a list of
-# key-value pairs, where the keys are the classes and the values are subs.
-
-sub except (&;$) {
-    my $code = shift;
-    my $clauses = shift || {};
-    my $catch = $clauses->{'catch'} ||= [];
-
-    my $sub = sub {
-       my $ref;
-       my(@array) = $code->($_[0]);
-       if(@array == 1 && ref($array[0])) {
-           $ref = $array[0];
-           $ref = [ %$ref ]
-               if(UNIVERSAL::isa($ref,'HASH'));
-       }
-       else {
-           $ref = \@array;
-       }
-       @$ref
-    };
-
-    unshift @{$catch}, undef, $sub;
-
-    $clauses;
-}
-
-sub otherwise (&;$) {
-    my $code = shift;
-    my $clauses = shift || {};
-
-    if(exists $clauses->{'otherwise'}) {
-       require Carp;
-       Carp::croak("Multiple otherwise clauses");
-    }
-
-    $clauses->{'otherwise'} = $code;
-
-    $clauses;
-}
-
-1;
-__END__
-
-=head1 NAME
-
-Error - Error/exception handling in an OO-ish way
-
-=head1 SYNOPSIS
-
-    use Error qw(:try);
-
-    throw Error::Simple( "A simple error");
-
-    sub xyz {
-        ...
-       record Error::Simple("A simple error")
-           and return;
-    }
-
-    unlink($file) or throw Error::Simple("$file: $!",$!);
-
-    try {
-       do_some_stuff();
-       die "error!" if $condition;
-       throw Error::Simple -text => "Oops!" if $other_condition;
-    }
-    catch Error::IO with {
-       my $E = shift;
-       print STDERR "File ", $E->{'-file'}, " had a problem\n";
-    }
-    except {
-       my $E = shift;
-       my $general_handler=sub {send_message $E->{-description}};
-       return {
-           UserException1 => $general_handler,
-           UserException2 => $general_handler
-       };
-    }
-    otherwise {
-       print STDERR "Well I don't know what to say\n";
-    }
-    finally {
-       close_the_garage_door_already(); # Should be reliable
-    }; # Don't forget the trailing ; or you might be surprised
-
-=head1 DESCRIPTION
-
-The C<Error> package provides two interfaces. Firstly C<Error> provides
-a procedural interface to exception handling. Secondly C<Error> is a
-base class for errors/exceptions that can either be thrown, for
-subsequent catch, or can simply be recorded.
-
-Errors in the class C<Error> should not be thrown directly, but the
-user should throw errors from a sub-class of C<Error>.
-
-=head1 PROCEDURAL INTERFACE
-
-C<Error> exports subroutines to perform exception handling. These will
-be exported if the C<:try> tag is used in the C<use> line.
-
-=over 4
-
-=item try BLOCK CLAUSES
-
-C<try> is the main subroutine called by the user. All other subroutines
-exported are clauses to the try subroutine.
-
-The BLOCK will be evaluated and, if no error is throw, try will return
-the result of the block.
-
-C<CLAUSES> are the subroutines below, which describe what to do in the
-event of an error being thrown within BLOCK.
-
-=item catch CLASS with BLOCK
-
-This clauses will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
-to be caught and handled by evaluating C<BLOCK>.
-
-C<BLOCK> will be passed two arguments. The first will be the error
-being thrown. The second is a reference to a scalar variable. If this
-variable is set by the catch block then, on return from the catch
-block, try will continue processing as if the catch block was never
-found.
-
-To propagate the error the catch block may call C<$err-E<gt>throw>
-
-If the scalar reference by the second argument is not set, and the
-error is not thrown. Then the current try block will return with the
-result from the catch block.
-
-=item except BLOCK
-
-When C<try> is looking for a handler, if an except clause is found
-C<BLOCK> is evaluated. The return value from this block should be a
-HASHREF or a list of key-value pairs, where the keys are class names
-and the values are CODE references for the handler of errors of that
-type.
-
-=item otherwise BLOCK
-
-Catch any error by executing the code in C<BLOCK>
-
-When evaluated C<BLOCK> will be passed one argument, which will be the
-error being processed.
-
-Only one otherwise block may be specified per try block
-
-=item finally BLOCK
-
-Execute the code in C<BLOCK> either after the code in the try block has
-successfully completed, or if the try block throws an error then
-C<BLOCK> will be executed after the handler has completed.
-
-If the handler throws an error then the error will be caught, the
-finally block will be executed and the error will be re-thrown.
-
-Only one finally block may be specified per try block
-
-=back
-
-=head1 CLASS INTERFACE
-
-=head2 CONSTRUCTORS
-
-The C<Error> object is implemented as a HASH. This HASH is initialized
-with the arguments that are passed to its constructor. The elements
-that are used by, or are retrievable by the C<Error> class are listed
-below, other classes may add to these.
-
-       -file
-       -line
-       -text
-       -value
-       -object
-
-If C<-file> or C<-line> are not specified in the constructor arguments
-then these will be initialized with the file name and line number where
-the constructor was called from.
-
-If the error is associated with an object then the object should be
-passed as the C<-object> argument. This will allow the C<Error> package
-to associate the error with the object.
-
-The C<Error> package remembers the last error created, and also the
-last error associated with a package. This could either be the last
-error created by a sub in that package, or the last error which passed
-an object blessed into that package as the C<-object> argument.
-
-=over 4
-
-=item throw ( [ ARGS ] )
-
-Create a new C<Error> object and throw an error, which will be caught
-by a surrounding C<try> block, if there is one. Otherwise it will cause
-the program to exit.
-
-C<throw> may also be called on an existing error to re-throw it.
-
-=item with ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
-    die with Some::Error ( ... );
-
-=item record ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
-    record Some::Error ( ... )
-       and return;
-
-=back
-
-=head2 STATIC METHODS
-
-=over 4
-
-=item prior ( [ PACKAGE ] )
-
-Return the last error created, or the last error associated with
-C<PACKAGE>
-
-=item flush ( [ PACKAGE ] )
-
-Flush the last error created, or the last error associated with
-C<PACKAGE>.It is necessary to clear the error stack before exiting the
-package or uncaught errors generated using C<record> will be reported.
-
-     $Error->flush;
-
-=cut
-
-=back
-
-=head2 OBJECT METHODS
-
-=over 4
-
-=item stacktrace
-
-If the variable C<$Error::Debug> was non-zero when the error was
-created, then C<stacktrace> returns a string created by calling
-C<Carp::longmess>. If the variable was zero the C<stacktrace> returns
-the text of the error appended with the filename and line number of
-where the error was created, providing the text does not end with a
-newline.
-
-=item object
-
-The object this error was associated with
-
-=item file
-
-The file where the constructor of this error was called from
-
-=item line
-
-The line where the constructor of this error was called from
-
-=item text
-
-The text of the error
-
-=back
-
-=head2 OVERLOAD METHODS
-
-=over 4
-
-=item stringify
-
-A method that converts the object into a string. This method may simply
-return the same as the C<text> method, or it may append more
-information. For example the file name and line number.
-
-By default this method returns the C<-text> argument that was passed to
-the constructor, or the string C<"Died"> if none was given.
-
-=item value
-
-A method that will return a value that can be associated with the
-error. For example if an error was created due to the failure of a
-system call, then this may return the numeric value of C<$!> at the
-time.
-
-By default this method returns the C<-value> argument that was passed
-to the constructor.
-
-=back
-
-=head1 PRE-DEFINED ERROR CLASSES
-
-=over 4
-
-=item Error::Simple
-
-This class can be used to hold simple error strings and values. Its
-constructor takes two arguments. The first is a text value, the second
-is a numeric value. These values are what will be returned by the
-overload methods.
-
-If the text value ends with C<at file line 1> as $@ strings do, then
-this information will be used to set the C<-file> and C<-line> arguments
-of the error object.
-
-This class is used internally if an eval'd block die's with an error
-that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified)
-
-=back
-
-=head1 $Error::ObjectifyCallback
-
-This variable holds a reference to a subroutine that converts errors that
-are plain strings to objects. It is used by Error.pm to convert textual
-errors to objects, and can be overridden by the user.
-
-It accepts a single argument which is a hash reference to named parameters.
-Currently the only named parameter passed is C<'text'> which is the text
-of the error, but others may be available in the future.
-
-For example the following code will cause Error.pm to throw objects of the
-class MyError::Bar by default:
-
-    sub throw_MyError_Bar
-    {
-        my $args = shift;
-        my $err = MyError::Bar->new();
-        $err->{'MyBarText'} = $args->{'text'};
-        return $err;
-    }
-
-    {
-        local $Error::ObjectifyCallback = \&throw_MyError_Bar;
-
-        # Error handling here.
-    }
-
-=head1 KNOWN BUGS
-
-None, but that does not mean there are not any.
-
-=head1 AUTHORS
-
-Graham Barr <gbarr@pobox.com>
-
-The code that inspired me to write this was originally written by
-Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
-<jglick@sig.bsh.com>.
-
-=head1 MAINTAINER
-
-Shlomi Fish <shlomif@iglu.org.il>
-
-=head1 PAST MAINTAINERS
-
-Arun Kumar U <u_arunkumar@yahoo.com>
-
-=cut
index 2a83255e4eeaf8e8ce74516880b0333b23d6f583..4d08d4487460f839f39f667c714f1bb85fa1e144 100644 (file)
@@ -78,6 +78,7 @@ static void preload_index(struct index_state *index,
 {
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
+       uint64_t start = getnanotime();
 
        if (!core_preload_index)
                return;
@@ -108,6 +109,7 @@ static void preload_index(struct index_state *index,
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
+       trace_performance_since(start, "preload index");
 }
 #endif
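
Several hunks in this series add the same lightweight performance tracing: take a timestamp with getnanotime() when the work starts and report the elapsed time with trace_performance_since() when it finishes. A minimal sketch of that pattern, using only the two trace calls visible in the hunks (the function and label are illustrative):

    static void do_timed_work(void)
    {
            uint64_t start = getnanotime();

            /* ... the work being measured ... */

            /*
             * With GIT_TRACE_PERFORMANCE enabled this emits a line such as
             * "performance: 0.001234 s: timed work".
             */
            trace_performance_since(start, "timed work");
    }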
 
index f7ce4902301490d73bdd79bd396cf7bbe5f893ea..34fe891fc03672fa042257e4d32630882868eadf 100644 (file)
--- a/pretty.c
+++ b/pretty.c
@@ -549,7 +549,7 @@ static void add_merge_info(const struct pretty_print_context *pp,
                struct object_id *oidp = &parent->item->object.oid;
                strbuf_addch(sb, ' ');
                if (pp->abbrev)
-                       strbuf_add_unique_abbrev(sb, oidp->hash, pp->abbrev);
+                       strbuf_add_unique_abbrev(sb, oidp, pp->abbrev);
                else
                        strbuf_addstr(sb, oid_to_hex(oidp));
                parent = parent->next;
@@ -1156,7 +1156,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                return 1;
        case 'h':               /* abbreviated commit hash */
                strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_COMMIT));
-               strbuf_add_unique_abbrev(sb, commit->object.oid.hash,
+               strbuf_add_unique_abbrev(sb, &commit->object.oid,
                                         c->pretty_ctx->abbrev);
                strbuf_addstr(sb, diff_get_color(c->auto_color, DIFF_RESET));
                return 1;
@@ -1164,7 +1164,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                strbuf_addstr(sb, oid_to_hex(&commit->tree->object.oid));
                return 1;
        case 't':               /* abbreviated tree hash */
-               strbuf_add_unique_abbrev(sb, commit->tree->object.oid.hash,
+               strbuf_add_unique_abbrev(sb, &commit->tree->object.oid,
                                         c->pretty_ctx->abbrev);
                return 1;
        case 'P':               /* parent hashes */
@@ -1178,7 +1178,7 @@ static size_t format_commit_one(struct strbuf *sb, /* in UTF-8 */
                for (p = commit->parents; p; p = p->next) {
                        if (p != commit->parents)
                                strbuf_addch(sb, ' ');
-                       strbuf_add_unique_abbrev(sb, p->item->object.oid.hash,
+                       strbuf_add_unique_abbrev(sb, &p->item->object.oid,
                                                 c->pretty_ctx->abbrev);
                }
                return 1;
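
Every call site in pretty.c changes the same way: strbuf_add_unique_abbrev() now takes a pointer to a struct object_id rather than a raw hash buffer. A sketch of the converted idiom (the commit pointer and abbrev length stand in for whatever the caller already has):

    struct strbuf sb = STRBUF_INIT;

    /* before: strbuf_add_unique_abbrev(&sb, commit->object.oid.hash, abbrev); */
    strbuf_add_unique_abbrev(&sb, &commit->object.oid, abbrev);

    printf("%s\n", sb.buf);
    strbuf_release(&sb);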
diff --git a/quote.c b/quote.c
index de2922ddd63d6fc001822d116387c8ced7d7630d..c95dd2cafbaa85c9c443a229134842bf06ce3200 100644 (file)
--- a/quote.c
+++ b/quote.c
@@ -43,6 +43,22 @@ void sq_quote_buf(struct strbuf *dst, const char *src)
        free(to_free);
 }
 
+void sq_quote_buf_pretty(struct strbuf *dst, const char *src)
+{
+       static const char ok_punct[] = "+,-./:=@_^";
+       const char *p;
+
+       for (p = src; *p; p++) {
+               if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) {
+                       sq_quote_buf(dst, src);
+                       return;
+               }
+       }
+
+       /* if we get here, we did not need quoting */
+       strbuf_addstr(dst, src);
+}
+
 void sq_quotef(struct strbuf *dst, const char *fmt, ...)
 {
        struct strbuf src = STRBUF_INIT;
@@ -56,7 +72,7 @@ void sq_quotef(struct strbuf *dst, const char *fmt, ...)
        strbuf_release(&src);
 }
 
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+void sq_quote_argv(struct strbuf *dst, const char **argv)
 {
        int i;
 
@@ -65,8 +81,16 @@ void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
        for (i = 0; argv[i]; ++i) {
                strbuf_addch(dst, ' ');
                sq_quote_buf(dst, argv[i]);
-               if (maxlen && dst->len > maxlen)
-                       die("Too many or long arguments");
+       }
+}
+
+void sq_quote_argv_pretty(struct strbuf *dst, const char **argv)
+{
+       int i;
+
+       for (i = 0; argv[i]; i++) {
+               strbuf_addch(dst, ' ');
+               sq_quote_buf_pretty(dst, argv[i]);
        }
 }
 
@@ -94,9 +118,15 @@ static char *sq_dequote_step(char *arg, char **next)
                                *next = NULL;
                        return arg;
                case '\\':
-                       c = *++src;
-                       if (need_bs_quote(c) && *++src == '\'') {
-                               *dst++ = c;
+                       /*
+                        * Allow backslashed characters outside of
+                        * single-quotes only if they need escaping,
+                        * and only if we resume the single-quoted part
+                        * afterward.
+                        */
+                       if (need_bs_quote(src[1]) && src[2] == '\'') {
+                               *dst++ = src[1];
+                               src += 2;
                                continue;
                        }
                /* Fallthrough */
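
The new comment above spells out the only backslash form sq_dequote() accepts between single-quoted runs: a backslashed character that itself needs quoting, immediately followed by a resumed single-quoted section. A small sketch of the accepted case; per the header comment in quote.h, sq_dequote() works in place and returns NULL on malformed input:

    char buf[] = "'a'\\''b'";       /* the raw bytes: 'a'\''b' */
    char *out = sq_dequote(buf);

    if (out)
            puts(out);              /* prints: a'b */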
diff --git a/quote.h b/quote.h
index 66f5644aa29d0da4f95e693429ad6f8c0eb8cf09..ea992dcc91ef599b5d053b00ae3a036ce5e87c1a 100644 (file)
--- a/quote.h
+++ b/quote.h
@@ -30,9 +30,17 @@ struct strbuf;
  */
 
 extern void sq_quote_buf(struct strbuf *, const char *src);
-extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+extern void sq_quote_argv(struct strbuf *, const char **argv);
 extern void sq_quotef(struct strbuf *, const char *fmt, ...);
 
+/*
+ * These match their non-pretty variants, except that they avoid
+ * quoting when there are no exotic characters. These should only be used for
+ * human-readable output, as sq_dequote() is not smart enough to dequote it.
+ */
+void sq_quote_buf_pretty(struct strbuf *, const char *src);
+void sq_quote_argv_pretty(struct strbuf *, const char **argv);
+
 /* This unwraps what sq_quote() produces in place, but returns
  * NULL if the input does not look like what sq_quote would have
  * produced.
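
A short sketch of the difference between the plain helper and the new _pretty variants declared above: the pretty form leaves a string untouched when it contains only alphanumerics and the punctuation whitelisted in quote.c, and falls back to full quoting otherwise.

    struct strbuf out = STRBUF_INIT;

    sq_quote_buf(&out, "README");           /* out: 'README' */
    strbuf_reset(&out);

    sq_quote_buf_pretty(&out, "README");    /* out: README (left unquoted) */
    strbuf_reset(&out);

    sq_quote_buf_pretty(&out, "two words"); /* out: 'two words' */
    strbuf_release(&out);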
index 88d7d679da9c837463f74d9a9277025b008635af..404e1440e96b23abb3e4919fdda110e89fff7626 100644 (file)
@@ -77,7 +77,7 @@ static void add_recent_object(const struct object_id *oid,
         * later processing, and the revision machinery expects
         * commits and tags to have been parsed.
         */
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        if (type < 0)
                die("unable to get object info for %s", oid_to_hex(oid));
 
@@ -94,7 +94,7 @@ static void add_recent_object(const struct object_id *oid,
                break;
        default:
                die("unknown object type for %s: %s",
-                   oid_to_hex(oid), typename(type));
+                   oid_to_hex(oid), type_name(type));
        }
 
        if (!obj)
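
This hunk is part of the same mechanical conversion that runs through the series: sha1_object_info(oid->hash, ...) becomes oid_object_info(oid, ...) and typename() becomes type_name(). Sketched on its own, the converted idiom for probing an object (oid stands for whatever struct object_id the caller holds):

    unsigned long size;
    enum object_type type = oid_object_info(&oid, &size);

    if (type < 0)
            die("unable to get object info for %s", oid_to_hex(&oid));
    printf("%s %lu\n", type_name(type), size);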
index 198e72b6851a13bd5bf3f8b1dc417a732cd5b147..10f1c6bb8a316e85448445afc3478c832d61709c 100644 (file)
@@ -62,6 +62,7 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
        replace_index_entry_in_base(istate, old, ce);
        remove_name_hash(istate, old);
        free(old);
+       ce->ce_flags &= ~CE_HASHED;
        set_index_entry(istate, nr, ce);
        ce->ce_flags |= CE_UPDATE_IN_BASE;
        mark_fsmonitor_invalid(istate, ce);
@@ -70,20 +71,20 @@ static void replace_index_entry(struct index_state *istate, int nr, struct cache
 
 void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
 {
-       struct cache_entry *old = istate->cache[nr], *new;
+       struct cache_entry *old_entry = istate->cache[nr], *new_entry;
        int namelen = strlen(new_name);
 
-       new = xmalloc(cache_entry_size(namelen));
-       copy_cache_entry(new, old);
-       new->ce_flags &= ~CE_HASHED;
-       new->ce_namelen = namelen;
-       new->index = 0;
-       memcpy(new->name, new_name, namelen + 1);
+       new_entry = xmalloc(cache_entry_size(namelen));
+       copy_cache_entry(new_entry, old_entry);
+       new_entry->ce_flags &= ~CE_HASHED;
+       new_entry->ce_namelen = namelen;
+       new_entry->index = 0;
+       memcpy(new_entry->name, new_name, namelen + 1);
 
-       cache_tree_invalidate_path(istate, old->name);
-       untracked_cache_remove_from_index(istate, old->name);
+       cache_tree_invalidate_path(istate, old_entry->name);
+       untracked_cache_remove_from_index(istate, old_entry->name);
        remove_index_entry_at(istate, nr);
-       add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+       add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
 }
 
 void fill_stat_data(struct stat_data *sd, struct stat *st)
@@ -184,7 +185,7 @@ static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
        if (strbuf_readlink(&sb, ce->name, expected_size))
                return -1;
 
-       buffer = read_sha1_file(ce->oid.hash, &type, &size);
+       buffer = read_object_file(&ce->oid, &type, &size);
        if (buffer) {
                if (size == sb.len)
                        match = memcmp(buffer, sb.buf, size);
@@ -615,26 +616,26 @@ static struct cache_entry *create_alias_ce(struct index_state *istate,
                                           struct cache_entry *alias)
 {
        int len;
-       struct cache_entry *new;
+       struct cache_entry *new_entry;
 
        if (alias->ce_flags & CE_ADDED)
                die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
 
        /* Ok, create the new entry using the name of the existing alias */
        len = ce_namelen(alias);
-       new = xcalloc(1, cache_entry_size(len));
-       memcpy(new->name, alias->name, len);
-       copy_cache_entry(new, ce);
+       new_entry = xcalloc(1, cache_entry_size(len));
+       memcpy(new_entry->name, alias->name, len);
+       copy_cache_entry(new_entry, ce);
        save_or_free_index_entry(istate, ce);
-       return new;
+       return new_entry;
 }
 
 void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
 {
-       unsigned char sha1[20];
-       if (write_sha1_file("", 0, blob_type, sha1))
+       struct object_id oid;
+       if (write_object_file("", 0, blob_type, &oid))
                die("cannot create an empty blob in the object database");
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, &oid);
 }
 
 int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
@@ -1217,9 +1218,8 @@ int add_index_entry(struct index_state *istate, struct cache_entry *ce, int opti
        /* Add it in.. */
        istate->cache_nr++;
        if (istate->cache_nr > pos + 1)
-               memmove(istate->cache + pos + 1,
-                       istate->cache + pos,
-                       (istate->cache_nr - pos - 1) * sizeof(ce));
+               MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+                          istate->cache_nr - pos - 1);
        set_index_entry(istate, pos, ce);
        istate->cache_changed |= CE_ENTRY_ADDED;
        return 0;
@@ -1325,7 +1325,8 @@ static struct cache_entry *refresh_cache_ent(struct index_state *istate,
 
        size = ce_size(ce);
        updated = xmalloc(size);
-       memcpy(updated, ce, size);
+       copy_cache_entry(updated, ce);
+       memcpy(updated->name, ce->name, ce->ce_namelen + 1);
        fill_stat_cache_info(updated, &st);
        /*
         * If ignore_valid is not set, we should leave CE_VALID bit
@@ -1372,6 +1373,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        const char *typechange_fmt;
        const char *added_fmt;
        const char *unmerged_fmt;
+       uint64_t start = getnanotime();
 
        modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
        deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
@@ -1379,7 +1381,7 @@ int refresh_index(struct index_state *istate, unsigned int flags,
        added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
        unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
        for (i = 0; i < istate->cache_nr; i++) {
-               struct cache_entry *ce, *new;
+               struct cache_entry *ce, *new_entry;
                int cache_errno = 0;
                int changed = 0;
                int filtered = 0;
@@ -1408,10 +1410,10 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                if (filtered)
                        continue;
 
-               new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
-               if (new == ce)
+               new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
+               if (new_entry == ce)
                        continue;
-               if (!new) {
+               if (!new_entry) {
                        const char *fmt;
 
                        if (really && cache_errno == EINVAL) {
@@ -1440,8 +1442,9 @@ int refresh_index(struct index_state *istate, unsigned int flags,
                        continue;
                }
 
-               replace_index_entry(istate, i, new);
+               replace_index_entry(istate, i, new_entry);
        }
+       trace_performance_since(start, "refresh index");
        return has_errors;
 }
 
@@ -1545,8 +1548,8 @@ int verify_ce_order;
 
 static int verify_hdr(struct cache_header *hdr, unsigned long size)
 {
-       git_SHA_CTX c;
-       unsigned char sha1[20];
+       git_hash_ctx c;
+       unsigned char hash[GIT_MAX_RAWSZ];
        int hdr_version;
 
        if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
@@ -1558,10 +1561,10 @@ static int verify_hdr(struct cache_header *hdr, unsigned long size)
        if (!verify_index_checksum)
                return 0;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, size - 20);
-       git_SHA1_Final(sha1, &c);
-       if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+       the_hash_algo->final_fn(hash, &c);
+       if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
                return error("bad index file sha1 signature");
        return 0;
 }
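
The index-file hunks above and below replace hard-coded SHA-1 calls with the pluggable the_hash_algo interface; the check itself is unchanged: hash everything except the trailing checksum, then compare. The bare hashing idiom, using only the fields exercised in these hunks (data and len stand for whatever is being checksummed):

    git_hash_ctx ctx;
    unsigned char hash[GIT_MAX_RAWSZ];

    the_hash_algo->init_fn(&ctx);
    the_hash_algo->update_fn(&ctx, data, len);
    the_hash_algo->final_fn(hash, &ctx);
    /* the_hash_algo->rawsz gives the number of valid bytes in hash[] */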
@@ -1791,7 +1794,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
                die_errno("cannot stat the open index");
 
        mmap_size = xsize_t(st.st_size);
-       if (mmap_size < sizeof(struct cache_header) + 20)
+       if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
                die("index file smaller than expected");
 
        mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
@@ -1803,7 +1806,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
        if (verify_hdr(hdr, mmap_size) < 0)
                goto unmap;
 
-       hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+       hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
        istate->version = ntohl(hdr->hdr_version);
        istate->cache_nr = ntohl(hdr->hdr_entries);
        istate->cache_alloc = alloc_nr(istate->cache_nr);
@@ -1831,7 +1834,7 @@ int do_read_index(struct index_state *istate, const char *path, int must_exist)
        istate->timestamp.sec = st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
 
-       while (src_offset <= mmap_size - 20 - 8) {
+       while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
                /* After an array of active_nr index entries,
                 * there can be arbitrary number of extended
                 * sections, each of which is prefixed with
@@ -1872,6 +1875,7 @@ static void freshen_shared_index(const char *shared_index, int warn)
 int read_index_from(struct index_state *istate, const char *path,
                    const char *gitdir)
 {
+       uint64_t start = getnanotime();
        struct split_index *split_index;
        int ret;
        char *base_sha1_hex;
@@ -1882,6 +1886,7 @@ int read_index_from(struct index_state *istate, const char *path,
                return istate->cache_nr;
 
        ret = do_read_index(istate, path, 0);
+       trace_performance_since(start, "read cache %s", path);
 
        split_index = istate->split_index;
        if (!split_index || is_null_sha1(split_index->base_sha1)) {
@@ -1905,6 +1910,7 @@ int read_index_from(struct index_state *istate, const char *path,
        freshen_shared_index(base_path, 0);
        merge_base_index(istate);
        post_read_index_from(istate);
+       trace_performance_since(start, "read cache %s", base_path);
        free(base_path);
        return ret;
 }
@@ -1957,11 +1963,11 @@ int unmerged_index(const struct index_state *istate)
 static unsigned char write_buffer[WRITE_BUFFER_SIZE];
 static unsigned long write_buffer_len;
 
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
 {
        unsigned int buffered = write_buffer_len;
        if (buffered) {
-               git_SHA1_Update(context, write_buffer, buffered);
+               the_hash_algo->update_fn(context, write_buffer, buffered);
                if (write_in_full(fd, write_buffer, buffered) < 0)
                        return -1;
                write_buffer_len = 0;
@@ -1969,7 +1975,7 @@ static int ce_write_flush(git_SHA_CTX *context, int fd)
        return 0;
 }
 
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
 {
        while (len) {
                unsigned int buffered = write_buffer_len;
@@ -1991,7 +1997,7 @@ static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
        return 0;
 }
 
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
                                  unsigned int ext, unsigned int sz)
 {
        ext = htonl(ext);
@@ -2000,26 +2006,26 @@ static int write_index_ext_header(git_SHA_CTX *context, int fd,
                (ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
 }
 
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
 {
        unsigned int left = write_buffer_len;
 
        if (left) {
                write_buffer_len = 0;
-               git_SHA1_Update(context, write_buffer, left);
+               the_hash_algo->update_fn(context, write_buffer, left);
        }
 
-       /* Flush first if not enough space for SHA1 signature */
-       if (left + 20 > WRITE_BUFFER_SIZE) {
+       /* Flush first if not enough space for hash signature */
+       if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
                if (write_in_full(fd, write_buffer, left) < 0)
                        return -1;
                left = 0;
        }
 
-       /* Append the SHA1 signature at the end */
-       git_SHA1_Final(write_buffer + left, context);
-       hashcpy(sha1, write_buffer + left);
-       left += 20;
+       /* Append the hash signature at the end */
+       the_hash_algo->final_fn(write_buffer + left, context);
+       hashcpy(hash, write_buffer + left);
+       left += the_hash_algo->rawsz;
        return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
 }
 
@@ -2100,17 +2106,19 @@ static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
        }
 }
 
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
                          struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
 {
        int size;
-       int saved_namelen = saved_namelen; /* compiler workaround */
        int result;
+       unsigned int saved_namelen;
+       int stripped_name = 0;
        static unsigned char padding[8] = { 0x00 };
 
        if (ce->ce_flags & CE_STRIP_NAME) {
                saved_namelen = ce_namelen(ce);
                ce->ce_namelen = 0;
+               stripped_name = 1;
        }
 
        if (ce->ce_flags & CE_EXTENDED)
@@ -2150,7 +2158,7 @@ static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
                strbuf_splice(previous_name, common, to_remove,
                              ce->name + common, ce_namelen(ce) - common);
        }
-       if (ce->ce_flags & CE_STRIP_NAME) {
+       if (stripped_name) {
                ce->ce_namelen = saved_namelen;
                ce->ce_flags &= ~CE_STRIP_NAME;
        }
@@ -2167,7 +2175,7 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        int fd;
        ssize_t n;
        struct stat st;
-       unsigned char sha1[20];
+       unsigned char hash[GIT_MAX_RAWSZ];
 
        if (!istate->initialized)
                return 0;
@@ -2179,14 +2187,14 @@ static int verify_index_from(const struct index_state *istate, const char *path)
        if (fstat(fd, &st))
                goto out;
 
-       if (st.st_size < sizeof(struct cache_header) + 20)
+       if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
                goto out;
 
-       n = pread_in_full(fd, sha1, 20, st.st_size - 20);
-       if (n != 20)
+       n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+       if (n != the_hash_algo->rawsz)
                goto out;
 
-       if (hashcmp(istate->sha1, sha1))
+       if (hashcmp(istate->sha1, hash))
                goto out;
 
        close(fd);
@@ -2234,8 +2242,9 @@ void update_index_if_able(struct index_state *istate, struct lock_file *lockfile
 static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
                          int strip_extensions)
 {
+       uint64_t start = getnanotime();
        int newfd = tempfile->fd;
-       git_SHA_CTX c;
+       git_hash_ctx c;
        struct cache_header hdr;
        int i, err = 0, removed, extended, hdr_version;
        struct cache_entry **cache = istate->cache;
@@ -2273,7 +2282,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
        hdr.hdr_version = htonl(hdr_version);
        hdr.hdr_entries = htonl(entries - removed);
 
-       git_SHA1_Init(&c);
+       the_hash_algo->init_fn(&c);
        if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
                return -1;
 
@@ -2374,6 +2383,7 @@ static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
                return -1;
        istate->timestamp.sec = (unsigned int)st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
+       trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
        return 0;
 }
 
@@ -2532,6 +2542,12 @@ int write_locked_index(struct index_state *istate, struct lock_file *lock,
        int new_shared_index, ret;
        struct split_index *si = istate->split_index;
 
+       if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
+               if (flags & COMMIT_LOCK)
+                       rollback_lock_file(lock);
+               return 0;
+       }
+
        if (istate->fsmonitor_last_update)
                fill_fsmonitor_bitmap(istate);
 
@@ -2677,7 +2693,7 @@ void *read_blob_data_from_index(const struct index_state *istate,
        }
        if (pos < 0)
                return NULL;
-       data = read_sha1_file(istate->cache[pos]->oid.hash, &type, &sz);
+       data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
        if (!data || type != OBJ_BLOB) {
                free(data);
                return NULL;
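
The SKIP_IF_UNCHANGED early return added to write_locked_index() above lets a caller request an index write unconditionally and have it skipped, with the lock rolled back, when nothing changed. A hedged sketch of such a caller; it assumes the flag is defined alongside COMMIT_LOCK and uses the usual lock-file helpers (hold_locked_index(), LOCK_INIT, the_index), which are not part of this patch:

    struct lock_file lock = LOCK_INIT;

    hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
    /* ... possibly modify the in-core index ... */
    if (write_locked_index(&the_index, &lock,
                           COMMIT_LOCK | SKIP_IF_UNCHANGED))
            die("unable to write the index");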
index f9e25aea7a97e18b5723c8fa379da859d6dc9fcf..9a333e21b51a0583415e23f905876291dd2ef349 100644 (file)
@@ -529,12 +529,12 @@ static void end_align_handler(struct ref_formatting_stack **stack)
 
 static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
 {
-       struct ref_formatting_stack *new;
+       struct ref_formatting_stack *new_stack;
 
        push_stack_element(&state->stack);
-       new = state->stack;
-       new->at_end = end_align_handler;
-       new->at_end_data = &atomv->atom->u.align;
+       new_stack = state->stack;
+       new_stack->at_end = end_align_handler;
+       new_stack->at_end_data = &atomv->atom->u.align;
 }
 
 static void if_then_else_handler(struct ref_formatting_stack **stack)
@@ -574,16 +574,16 @@ static void if_then_else_handler(struct ref_formatting_stack **stack)
 
 static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
 {
-       struct ref_formatting_stack *new;
+       struct ref_formatting_stack *new_stack;
        struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1);
 
        if_then_else->str = atomv->atom->u.if_then_else.str;
        if_then_else->cmp_status = atomv->atom->u.if_then_else.cmp_status;
 
        push_stack_element(&state->stack);
-       new = state->stack;
-       new->at_end = if_then_else_handler;
-       new->at_end_data = if_then_else;
+       new_stack = state->stack;
+       new_stack->at_end = if_then_else_handler;
+       new_stack->at_end_data = if_then_else;
 }
 
 static int is_empty(const char *s)
@@ -728,7 +728,7 @@ int verify_ref_format(struct ref_format *format)
 static void *get_obj(const struct object_id *oid, struct object **obj, unsigned long *sz, int *eaten)
 {
        enum object_type type;
-       void *buf = read_sha1_file(oid->hash, &type, sz);
+       void *buf = read_object_file(oid, &type, sz);
 
        if (buf)
                *obj = parse_object_buffer(oid, type, *sz, buf, eaten);
@@ -737,18 +737,18 @@ static void *get_obj(const struct object_id *oid, struct object **obj, unsigned
        return buf;
 }
 
-static int grab_objectname(const char *name, const unsigned char *sha1,
+static int grab_objectname(const char *name, const struct object_id *oid,
                           struct atom_value *v, struct used_atom *atom)
 {
        if (starts_with(name, "objectname")) {
                if (atom->u.objectname.option == O_SHORT) {
-                       v->s = xstrdup(find_unique_abbrev(sha1, DEFAULT_ABBREV));
+                       v->s = xstrdup(find_unique_abbrev(oid, DEFAULT_ABBREV));
                        return 1;
                } else if (atom->u.objectname.option == O_FULL) {
-                       v->s = xstrdup(sha1_to_hex(sha1));
+                       v->s = xstrdup(oid_to_hex(oid));
                        return 1;
                } else if (atom->u.objectname.option == O_LENGTH) {
-                       v->s = xstrdup(find_unique_abbrev(sha1, atom->u.objectname.length));
+                       v->s = xstrdup(find_unique_abbrev(oid, atom->u.objectname.length));
                        return 1;
                } else
                        die("BUG: unknown %%(objectname) option");
@@ -769,13 +769,13 @@ static void grab_common_values(struct atom_value *val, int deref, struct object
                if (deref)
                        name++;
                if (!strcmp(name, "objecttype"))
-                       v->s = typename(obj->type);
+                       v->s = type_name(obj->type);
                else if (!strcmp(name, "objectsize")) {
                        v->value = sz;
                        v->s = xstrfmt("%lu", sz);
                }
                else if (deref)
-                       grab_objectname(name, obj->oid.hash, v, &used_atom[i]);
+                       grab_objectname(name, &obj->oid, v, &used_atom[i]);
        }
 }
 
@@ -795,7 +795,7 @@ static void grab_tag_values(struct atom_value *val, int deref, struct object *ob
                if (!strcmp(name, "tag"))
                        v->s = tag->tag;
                else if (!strcmp(name, "type") && tag->tagged)
-                       v->s = typename(tag->tagged->type);
+                       v->s = type_name(tag->tagged->type);
                else if (!strcmp(name, "object") && tag->tagged)
                        v->s = xstrdup(oid_to_hex(&tag->tagged->oid));
        }
@@ -1249,8 +1249,8 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname,
        if (atom->u.remote_ref.option == RR_REF)
                *s = show_ref(&atom->u.remote_ref.refname, refname);
        else if (atom->u.remote_ref.option == RR_TRACK) {
-               if (stat_tracking_info(branch, &num_ours,
-                                      &num_theirs, NULL)) {
+               if (stat_tracking_info(branch, &num_ours, &num_theirs,
+                                      NULL, AHEAD_BEHIND_FULL) < 0) {
                        *s = xstrdup(msgs.gone);
                } else if (!num_ours && !num_theirs)
                        *s = "";
@@ -1267,8 +1267,8 @@ static void fill_remote_ref_details(struct used_atom *atom, const char *refname,
                        free((void *)to_free);
                }
        } else if (atom->u.remote_ref.option == RR_TRACKSHORT) {
-               if (stat_tracking_info(branch, &num_ours,
-                                      &num_theirs, NULL))
+               if (stat_tracking_info(branch, &num_ours, &num_theirs,
+                                      NULL, AHEAD_BEHIND_FULL) < 0)
                        return;
 
                if (!num_ours && !num_theirs)
@@ -1354,15 +1354,31 @@ static const char *get_refname(struct used_atom *atom, struct ref_array_item *re
        return show_ref(&atom->u.refname, ref->refname);
 }
 
+static void get_object(struct ref_array_item *ref, const struct object_id *oid,
+                      int deref, struct object **obj)
+{
+       int eaten;
+       unsigned long size;
+       void *buf = get_obj(oid, obj, &size, &eaten);
+       if (!buf)
+               die(_("missing object %s for %s"),
+                   oid_to_hex(oid), ref->refname);
+       if (!*obj)
+               die(_("parse_object_buffer failed on %s for %s"),
+                   oid_to_hex(oid), ref->refname);
+
+       grab_values(ref->value, deref, *obj, buf, size);
+       if (!eaten)
+               free(buf);
+}
+
 /*
  * Parse the object referred by ref, and grab needed value.
  */
 static void populate_value(struct ref_array_item *ref)
 {
-       void *buf;
        struct object *obj;
-       int eaten, i;
-       unsigned long size;
+       int i;
        const struct object_id *tagged;
 
        ref->value = xcalloc(used_atom_cnt, sizeof(struct atom_value));
@@ -1439,7 +1455,7 @@ static void populate_value(struct ref_array_item *ref)
                                v->s = xstrdup(buf + 1);
                        }
                        continue;
-               } else if (!deref && grab_objectname(name, ref->objectname.hash, v, atom)) {
+               } else if (!deref && grab_objectname(name, &ref->objectname, v, atom)) {
                        continue;
                } else if (!strcmp(name, "HEAD")) {
                        if (atom->u.head && !strcmp(ref->refname, atom->u.head))
@@ -1478,22 +1494,12 @@ static void populate_value(struct ref_array_item *ref)
        for (i = 0; i < used_atom_cnt; i++) {
                struct atom_value *v = &ref->value[i];
                if (v->s == NULL)
-                       goto need_obj;
+                       break;
        }
-       return;
-
- need_obj:
-       buf = get_obj(&ref->objectname, &obj, &size, &eaten);
-       if (!buf)
-               die(_("missing object %s for %s"),
-                   oid_to_hex(&ref->objectname), ref->refname);
-       if (!obj)
-               die(_("parse_object_buffer failed on %s for %s"),
-                   oid_to_hex(&ref->objectname), ref->refname);
+       if (used_atom_cnt <= i)
+               return;
 
-       grab_values(ref->value, 0, obj, buf, size);
-       if (!eaten)
-               free(buf);
+       get_object(ref, &ref->objectname, 0, &obj);
 
        /*
         * If there is no atom that wants to know about tagged
@@ -1514,16 +1520,7 @@ static void populate_value(struct ref_array_item *ref)
         * is not consistent with what deref_tag() does
         * which peels the onion to the core.
         */
-       buf = get_obj(tagged, &obj, &size, &eaten);
-       if (!buf)
-               die(_("missing object %s for %s"),
-                   oid_to_hex(tagged), ref->refname);
-       if (!obj)
-               die(_("parse_object_buffer failed on %s for %s"),
-                   oid_to_hex(tagged), ref->refname);
-       grab_values(ref->value, 1, obj, buf, size);
-       if (!eaten)
-               free(buf);
+       get_object(ref, tagged, 1, &obj);
 }
 
 /*
diff --git a/refs.c b/refs.c
index 20ba82b4343ff2ef72cea32deec8a8d7fbd6def7..8b7a77fe5eedb08c0b034b1cf3bb4ef40efa9834 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -301,7 +301,7 @@ enum peel_status peel_object(const struct object_id *name, struct object_id *oid
        struct object *o = lookup_unknown_object(name->hash);
 
        if (o->type == OBJ_NONE) {
-               int type = sha1_object_info(name->hash, NULL);
+               int type = oid_object_info(name, NULL);
                if (type < 0 || !object_as_type(o, type, 0))
                        return PEEL_INVALID;
        }
index 023243fd5f1833f3c5f0b6fd3cd82b2e0c69644e..65288c647278aa27790b13c0360f756686dadf7a 100644 (file)
@@ -68,17 +68,21 @@ struct snapshot {
        int mmapped;
 
        /*
-        * The contents of the `packed-refs` file. If the file was
-        * already sorted, this points at the mmapped contents of the
-        * file. If not, this points at heap-allocated memory
-        * containing the contents, sorted. If there were no contents
-        * (e.g., because the file didn't exist), `buf` and `eof` are
-        * both NULL.
+        * The contents of the `packed-refs` file:
+        *
+        * - buf -- a pointer to the start of the memory
+        * - start -- a pointer to the first byte of actual references
+        *   (i.e., after the header line, if one is present)
+        * - eof -- a pointer just past the end of the reference
+        *   contents
+        *
+        * If the `packed-refs` file was already sorted, `buf` points
+        * at the mmapped contents of the file. If not, it points at
+        * heap-allocated memory containing the contents, sorted. If
+        * there were no contents (e.g., because the file didn't
+        * exist), `buf`, `start`, and `eof` are all NULL.
         */
-       char *buf, *eof;
-
-       /* The size of the header line, if any; otherwise, 0: */
-       size_t header_len;
+       char *buf, *start, *eof;
 
        /*
         * What is the peeled state of the `packed-refs` file that
@@ -169,8 +173,7 @@ static void clear_snapshot_buffer(struct snapshot *snapshot)
        } else {
                free(snapshot->buf);
        }
-       snapshot->buf = snapshot->eof = NULL;
-       snapshot->header_len = 0;
+       snapshot->buf = snapshot->start = snapshot->eof = NULL;
 }
 
 /*
@@ -319,13 +322,14 @@ static void sort_snapshot(struct snapshot *snapshot)
        size_t len, i;
        char *new_buffer, *dst;
 
-       pos = snapshot->buf + snapshot->header_len;
+       pos = snapshot->start;
        eof = snapshot->eof;
-       len = eof - pos;
 
-       if (!len)
+       if (pos == eof)
                return;
 
+       len = eof - pos;
+
        /*
         * Initialize records based on a crude estimate of the number
         * of references in the file (we'll grow it below if needed):
@@ -391,9 +395,8 @@ static void sort_snapshot(struct snapshot *snapshot)
         * place:
         */
        clear_snapshot_buffer(snapshot);
-       snapshot->buf = new_buffer;
+       snapshot->buf = snapshot->start = new_buffer;
        snapshot->eof = new_buffer + len;
-       snapshot->header_len = 0;
 
 cleanup:
        free(records);
@@ -442,23 +445,26 @@ static const char *find_end_of_record(const char *p, const char *end)
  */
 static void verify_buffer_safe(struct snapshot *snapshot)
 {
-       const char *buf = snapshot->buf + snapshot->header_len;
+       const char *start = snapshot->start;
        const char *eof = snapshot->eof;
        const char *last_line;
 
-       if (buf == eof)
+       if (start == eof)
                return;
 
-       last_line = find_start_of_record(buf, eof - 1);
+       last_line = find_start_of_record(start, eof - 1);
        if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
                die_invalid_line(snapshot->refs->path,
                                 last_line, eof - last_line);
 }
 
+#define SMALL_FILE_SIZE (32*1024)
+
 /*
  * Depending on `mmap_strategy`, either mmap or read the contents of
  * the `packed-refs` file into the snapshot. Return 1 if the file
- * existed and was read, or 0 if the file was absent. Die on errors.
+ * existed and was read, or 0 if the file was absent or empty. Die on
+ * errors.
  */
 static int load_contents(struct snapshot *snapshot)
 {
@@ -489,24 +495,23 @@ static int load_contents(struct snapshot *snapshot)
                die_errno("couldn't stat %s", snapshot->refs->path);
        size = xsize_t(st.st_size);
 
-       switch (mmap_strategy) {
-       case MMAP_NONE:
+       if (!size) {
+               return 0;
+       } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
                snapshot->buf = xmalloc(size);
                bytes_read = read_in_full(fd, snapshot->buf, size);
                if (bytes_read < 0 || bytes_read != size)
                        die_errno("couldn't read %s", snapshot->refs->path);
-               snapshot->eof = snapshot->buf + size;
                snapshot->mmapped = 0;
-               break;
-       case MMAP_TEMPORARY:
-       case MMAP_OK:
+       } else {
                snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
-               snapshot->eof = snapshot->buf + size;
                snapshot->mmapped = 1;
-               break;
        }
        close(fd);
 
+       snapshot->start = snapshot->buf;
+       snapshot->eof = snapshot->buf + size;
+
        return 1;
 }
 
@@ -515,9 +520,11 @@ static int load_contents(struct snapshot *snapshot)
  * `refname` starts. If `mustexist` is true and the reference doesn't
  * exist, then return NULL. If `mustexist` is false and the reference
  * doesn't exist, then return the point where that reference would be
- * inserted. In the latter mode, `refname` doesn't have to be a proper
- * reference name; for example, one could search for "refs/replace/"
- * to find the start of any replace references.
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
  *
  * The record is sought using a binary search, so `snapshot->buf` must
  * be sorted.
@@ -539,7 +546,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
         * preceding records all have reference names that come
         * *before* `refname`.
         */
-       const char *lo = snapshot->buf + snapshot->header_len;
+       const char *lo = snapshot->start;
 
        /*
         * A pointer to the first character of a record whose
@@ -547,7 +554,7 @@ static const char *find_reference_location(struct snapshot *snapshot,
         */
        const char *hi = snapshot->eof;
 
-       while (lo < hi) {
+       while (lo != hi) {
                const char *mid, *rec;
                int cmp;
 
@@ -616,9 +623,7 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
 
        /* If the file has a header line, process it: */
        if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
-               struct strbuf tmp = STRBUF_INIT;
-               char *p;
-               const char *eol;
+               char *tmp, *p, *eol;
                struct string_list traits = STRING_LIST_INIT_NODUP;
 
                eol = memchr(snapshot->buf, '\n',
@@ -628,9 +633,9 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
                                              snapshot->buf,
                                              snapshot->eof - snapshot->buf);
 
-               strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
+               tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
 
-               if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+               if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
                        die_invalid_line(refs->path,
                                         snapshot->buf,
                                         snapshot->eof - snapshot->buf);
@@ -647,10 +652,10 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
                /* perhaps other traits later as well */
 
                /* The "+ 1" is for the LF character. */
-               snapshot->header_len = eol + 1 - snapshot->buf;
+               snapshot->start = eol + 1;
 
                string_list_clear(&traits, 0);
-               strbuf_release(&tmp);
+               free(tmp);
        }
 
        verify_buffer_safe(snapshot);
@@ -671,13 +676,12 @@ static struct snapshot *create_snapshot(struct packed_ref_store *refs)
                 * We don't want to leave the file mmapped, so we are
                 * forced to make a copy now:
                 */
-               size_t size = snapshot->eof -
-                       (snapshot->buf + snapshot->header_len);
+               size_t size = snapshot->eof - snapshot->start;
                char *buf_copy = xmalloc(size);
 
-               memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+               memcpy(buf_copy, snapshot->start, size);
                clear_snapshot_buffer(snapshot);
-               snapshot->buf = buf_copy;
+               snapshot->buf = snapshot->start = buf_copy;
                snapshot->eof = buf_copy + size;
        }
 
@@ -924,7 +928,12 @@ static struct ref_iterator *packed_ref_iterator_begin(
         */
        snapshot = get_snapshot(refs);
 
-       if (!snapshot->buf)
+       if (prefix && *prefix)
+               start = find_reference_location(snapshot, prefix, 0);
+       else
+               start = snapshot->start;
+
+       if (start == snapshot->eof)
                return empty_ref_iterator_begin();
 
        iter = xcalloc(1, sizeof(*iter));
@@ -934,11 +943,6 @@ static struct ref_iterator *packed_ref_iterator_begin(
        iter->snapshot = snapshot;
        acquire_snapshot(snapshot);
 
-       if (prefix && *prefix)
-               start = find_reference_location(snapshot, prefix, 0);
-       else
-               start = snapshot->buf + snapshot->header_len;
-
        iter->pos = start;
        iter->eof = snapshot->eof;
        strbuf_init(&iter->refname_buf, 0);
index 82c1cf90a7ef61cc8174bff5a9800af2ec8d7053..e90bd3e727fd0fcd5f6a99dde4fbac65b479f411 100644 (file)
@@ -238,10 +238,8 @@ int remove_entry_from_dir(struct ref_dir *dir, const char *refname)
                return -1;
        entry = dir->entries[entry_index];
 
-       memmove(&dir->entries[entry_index],
-               &dir->entries[entry_index + 1],
-               (dir->nr - entry_index - 1) * sizeof(*dir->entries)
-               );
+       MOVE_ARRAY(&dir->entries[entry_index],
+                  &dir->entries[entry_index + 1], dir->nr - entry_index - 1);
        dir->nr--;
        if (dir->sorted > entry_index)
                dir->sorted--;
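
MOVE_ARRAY, used in this hunk and in two other hunks in this patch, wraps memmove() and derives the element size from the pointer type, so the third argument is a count of elements rather than bytes. A toy sketch:

    int a[5] = { 1, 2, 3, 4, 5 };

    /* shift elements 1..4 one slot down; equivalent to
     * memmove(a, a + 1, 4 * sizeof(*a)) */
    MOVE_ARRAY(a, a + 1, 4);
    /* a is now { 2, 3, 4, 5, 5 } */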
index 0053b09549ab419ab8f2da2c519d689f5e0d83b3..a7c4c9b5ff4822e36bfc43a59d113c624537297e 100644 (file)
@@ -13,6 +13,7 @@
 #include "credential.h"
 #include "sha1-array.h"
 #include "send-pack.h"
+#include "quote.h"
 
 static struct remote *remote;
 /* always ends with a trailing slash */
@@ -24,6 +25,7 @@ struct options {
        char *deepen_since;
        struct string_list deepen_not;
        struct string_list push_options;
+       char *filter;
        unsigned progress : 1,
                check_self_contained_and_connected : 1,
                cloning : 1,
@@ -33,7 +35,9 @@ struct options {
                thin : 1,
                /* One of the SEND_PACK_PUSH_CERT_* constants. */
                push_cert : 2,
-               deepen_relative : 1;
+               deepen_relative : 1,
+               from_promisor : 1,
+               no_dependents : 1;
 };
 static struct options options;
 static struct string_list cas_options = STRING_LIST_INIT_DUP;
@@ -142,7 +146,15 @@ static int set_option(const char *name, const char *value)
                        return -1;
                return 0;
        } else if (!strcmp(name, "push-option")) {
-               string_list_append(&options.push_options, value);
+               if (*value != '"')
+                       string_list_append(&options.push_options, value);
+               else {
+                       struct strbuf unquoted = STRBUF_INIT;
+                       if (unquote_c_style(&unquoted, value, NULL) < 0)
+                               die("invalid quoting in push-option value");
+                       string_list_append_nodup(&options.push_options,
+                                                strbuf_detach(&unquoted, NULL));
+               }
                return 0;
 
 #if LIBCURL_VERSION_NUM >= 0x070a08
@@ -157,6 +169,15 @@ static int set_option(const char *name, const char *value)
                        return -1;
                return 0;
 #endif /* LIBCURL_VERSION_NUM >= 0x070a08 */
+       } else if (!strcmp(name, "from-promisor")) {
+               options.from_promisor = 1;
+               return 0;
+       } else if (!strcmp(name, "no-dependents")) {
+               options.no_dependents = 1;
+               return 0;
+       } else if (!strcmp(name, "filter")) {
+               options.filter = xstrdup(value);
+               return 0;
        } else {
                return 1 /* unsupported */;
        }
@@ -339,6 +360,8 @@ static struct discovery *discover_refs(const char *service, int for_push)
                 * pkt-line matches our request.
                 */
                line = packet_read_line_buf(&last->buf, &last->len, NULL);
+               if (!line)
+                       die("invalid server response; expected service, got flush packet");
 
                strbuf_reset(&exp);
                strbuf_addf(&exp, "# service=%s", service);
@@ -822,6 +845,12 @@ static int fetch_git(struct discovery *heads,
                                 options.deepen_not.items[i].string);
        if (options.deepen_relative && options.depth)
                argv_array_push(&args, "--deepen-relative");
+       if (options.from_promisor)
+               argv_array_push(&args, "--from-promisor");
+       if (options.no_dependents)
+               argv_array_push(&args, "--no-dependents");
+       if (options.filter)
+               argv_array_pushf(&args, "--filter=%s", options.filter);
        argv_array_push(&args, url.buf);
 
        for (i = 0; i < nr_heads; i++) {
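
The push-option hunk above lets the helper accept C-quoted values: anything beginning with a double quote is run through unquote_c_style(), so payloads containing whitespace or newlines survive the option protocol. A sketch of what that call does with a hypothetical payload (the value shown is made up for illustration):

    struct strbuf unquoted = STRBUF_INIT;

    /* a value that had to be quoted because it contains a newline */
    if (unquote_c_style(&unquoted, "\"ci.skip\\ntitle=hello\"", NULL) < 0)
            die("invalid quoting in push-option value");
    /* unquoted.buf now holds: ci.skip<LF>title=hello */
    strbuf_release(&unquoted);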
index bcebb4c789567eb4017a3a0132ba55c59c427991..c4bb9a8ba920c1b344b910e7f6e59915044f4f9a 100644 (file)
@@ -61,7 +61,7 @@ static char *read_ref_note(const struct object_id *oid)
        init_notes(NULL, notes_ref, NULL, 0);
        if (!(note_oid = get_note(NULL, oid)))
                return NULL;    /* note tree not found */
-       if (!(msg = read_sha1_file(note_oid->hash, &type, &msglen)))
+       if (!(msg = read_object_file(note_oid, &type, &msglen)))
                error("Empty notes tree. %s", notes_ref);
        else if (!msglen || type != OBJ_BLOB) {
                error("Note contains unusable content. "
@@ -108,7 +108,7 @@ static int note2mark_cb(const struct object_id *object_oid,
        enum object_type type;
        struct rev_note note;
 
-       if (!(msg = read_sha1_file(note_oid->hash, &type, &msglen)) ||
+       if (!(msg = read_object_file(note_oid, &type, &msglen)) ||
                        !msglen || type != OBJ_BLOB) {
                free(msg);
                return 1;
index 4e93753e1988afd4a01559951f96142c6dc2e73d..91eb010ca983c5493bbc17c5652ef31060390226 100644 (file)
--- a/remote.c
+++ b/remote.c
@@ -22,6 +22,7 @@ static struct refspec s_tag_refspec = {
        "refs/tags/*"
 };
 
+/* See TAG_REFSPEC for the string version */
 const struct refspec *tag_refspec = &s_tag_refspec;
 
 struct counted_string {
@@ -103,6 +104,17 @@ static void add_fetch_refspec(struct remote *remote, const char *ref)
        remote->fetch_refspec[remote->fetch_refspec_nr++] = ref;
 }
 
+void add_prune_tags_to_fetch_refspec(struct remote *remote)
+{
+       int nr = remote->fetch_refspec_nr;
+       int bufsize = nr + 1;
+       int size = sizeof(struct refspec);
+
+       remote->fetch = xrealloc(remote->fetch, size * bufsize);
+       memcpy(&remote->fetch[nr], tag_refspec, size);
+       add_fetch_refspec(remote, xstrdup(TAG_REFSPEC));
+}
+
 static void add_url(struct remote *remote, const char *url)
 {
        ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc);
@@ -173,6 +185,7 @@ static struct remote *make_remote(const char *name, int len)
 
        ret = xcalloc(1, sizeof(struct remote));
        ret->prune = -1;  /* unspecified */
+       ret->prune_tags = -1;  /* unspecified */
        ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
        remotes[remotes_nr++] = ret;
        ret->name = xstrndup(name, len);
@@ -391,6 +404,8 @@ static int handle_config(const char *key, const char *value, void *cb)
                remote->skip_default_update = git_config_bool(key, value);
        else if (!strcmp(subkey, "prune"))
                remote->prune = git_config_bool(key, value);
+       else if (!strcmp(subkey, "prunetags"))
+               remote->prune_tags = git_config_bool(key, value);
        else if (!strcmp(subkey, "url")) {
                const char *v;
                if (git_config_string(&v, key, value))
@@ -1361,7 +1376,7 @@ static void add_missing_tags(struct ref *src, struct ref **dst, struct ref ***ds
                        continue; /* not a tag */
                if (string_list_has_string(&dst_tag, ref->name))
                        continue; /* they already have it */
-               if (sha1_object_info(ref->new_oid.hash, NULL) != OBJ_TAG)
+               if (oid_object_info(&ref->new_oid, NULL) != OBJ_TAG)
                        continue; /* be conservative */
                item = string_list_append(&src_tag, ref->name);
                item->util = ref;
@@ -1970,33 +1985,33 @@ static void unmark_and_free(struct commit_list *list, unsigned int mark)
 int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
 {
        struct object *o;
-       struct commit *old, *new;
+       struct commit *old_commit, *new_commit;
        struct commit_list *list, *used;
        int found = 0;
 
        /*
-        * Both new and old must be commit-ish and new is descendant of
-        * old.  Otherwise we require --force.
+        * Both new_commit and old_commit must be commit-ish and new_commit
+        * must be a descendant of old_commit.  Otherwise we require --force.
         */
        o = deref_tag(parse_object(old_oid), NULL, 0);
        if (!o || o->type != OBJ_COMMIT)
                return 0;
-       old = (struct commit *) o;
+       old_commit = (struct commit *) o;
 
        o = deref_tag(parse_object(new_oid), NULL, 0);
        if (!o || o->type != OBJ_COMMIT)
                return 0;
-       new = (struct commit *) o;
+       new_commit = (struct commit *) o;
 
-       if (parse_commit(new) < 0)
+       if (parse_commit(new_commit) < 0)
                return 0;
 
        used = list = NULL;
-       commit_list_insert(new, &list);
+       commit_list_insert(new_commit, &list);
        while (list) {
-               new = pop_most_recent_commit(&list, TMP_MARK);
-               commit_list_insert(new, &used);
-               if (new == old) {
+               new_commit = pop_most_recent_commit(&list, TMP_MARK);
+               commit_list_insert(new_commit, &used);
+               if (new_commit == old_commit) {
                        found = 1;
                        break;
                }
@@ -2007,16 +2022,23 @@ int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
 }
 
 /*
- * Compare a branch with its upstream, and save their differences (number
- * of commits) in *num_ours and *num_theirs. The name of the upstream branch
- * (or NULL if no upstream is defined) is returned via *upstream_name, if it
- * is not itself NULL.
+ * Lookup the upstream branch for the given branch and if present, optionally
+ * compute the commit ahead/behind values for the pair.
+ *
+ * If abf is AHEAD_BEHIND_FULL, compute the full ahead/behind and return the
+ * counts in *num_ours and *num_theirs.  If abf is AHEAD_BEHIND_QUICK, skip
+ * the (potentially expensive) a/b computation (*num_ours and *num_theirs are
+ * set to zero).
+ *
+ * The name of the upstream branch (or NULL if no upstream is defined) is
+ * returned via *upstream_name, if it is not itself NULL.
  *
  * Returns -1 if num_ours and num_theirs could not be filled in (e.g., no
- * upstream defined, or ref does not exist), 0 otherwise.
+ * upstream defined, or ref does not exist).  Returns 0 if the commits are
+ * identical.  Returns 1 if commits are different.
  */
 int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
-                      const char **upstream_name)
+                      const char **upstream_name, enum ahead_behind_flags abf)
 {
        struct object_id oid;
        struct commit *ours, *theirs;
@@ -2044,11 +2066,15 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
        if (!ours)
                return -1;
 
+       *num_theirs = *num_ours = 0;
+
        /* are we the same? */
-       if (theirs == ours) {
-               *num_theirs = *num_ours = 0;
+       if (theirs == ours)
                return 0;
-       }
+       if (abf == AHEAD_BEHIND_QUICK)
+               return 1;
+       if (abf != AHEAD_BEHIND_FULL)
+               BUG("stat_tracking_info: invalid abf '%d'", abf);
 
        /* Run "rev-list --left-right ours...theirs" internally... */
        argv_array_push(&argv, ""); /* ignored */
@@ -2064,8 +2090,6 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
                die("revision walk setup failed");
 
        /* ... and count the commits on each side. */
-       *num_ours = 0;
-       *num_theirs = 0;
        while (1) {
                struct commit *c = get_revision(&revs);
                if (!c)
@@ -2081,20 +2105,22 @@ int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
        clear_commit_marks(theirs, ALL_REV_FLAGS);
 
        argv_array_clear(&argv);
-       return 0;
+       return 1;
 }
 
 /*
  * Return true when there is anything to report, otherwise false.
  */
-int format_tracking_info(struct branch *branch, struct strbuf *sb)
+int format_tracking_info(struct branch *branch, struct strbuf *sb,
+                        enum ahead_behind_flags abf)
 {
-       int ours, theirs;
+       int ours, theirs, sti;
        const char *full_base;
        char *base;
        int upstream_is_gone = 0;
 
-       if (stat_tracking_info(branch, &ours, &theirs, &full_base) < 0) {
+       sti = stat_tracking_info(branch, &ours, &theirs, &full_base, abf);
+       if (sti < 0) {
                if (!full_base)
                        return 0;
                upstream_is_gone = 1;
@@ -2108,10 +2134,17 @@ int format_tracking_info(struct branch *branch, struct strbuf *sb)
                if (advice_status_hints)
                        strbuf_addstr(sb,
                                _("  (use \"git branch --unset-upstream\" to fixup)\n"));
-       } else if (!ours && !theirs) {
+       } else if (!sti) {
                strbuf_addf(sb,
                        _("Your branch is up to date with '%s'.\n"),
                        base);
+       } else if (abf == AHEAD_BEHIND_QUICK) {
+               strbuf_addf(sb,
+                           _("Your branch and '%s' refer to different commits.\n"),
+                           base);
+               if (advice_status_hints)
+                       strbuf_addf(sb, _("  (use \"%s\" for details)\n"),
+                                   "git status --ahead-behind");
        } else if (!theirs) {
                strbuf_addf(sb,
                        Q_("Your branch is ahead of '%s' by %d commit.\n",
index 1f6611be214363a4be363fad959135a9d123cee0..f09c01969d6b0d701140ceb9cb2e8f9e68533c96 100644 (file)
--- a/remote.h
+++ b/remote.h
@@ -47,6 +47,7 @@ struct remote {
        int skip_default_update;
        int mirror;
        int prune;
+       int prune_tags;
 
        const char *receivepack;
        const char *uploadpack;
@@ -257,10 +258,18 @@ enum match_refs_flags {
        MATCH_REFS_FOLLOW_TAGS  = (1 << 3)
 };
 
+/* Flags for --ahead-behind option. */
+enum ahead_behind_flags {
+       AHEAD_BEHIND_UNSPECIFIED = -1,
+       AHEAD_BEHIND_QUICK       =  0,  /* just eq/neq reporting */
+       AHEAD_BEHIND_FULL        =  1,  /* traditional a/b reporting */
+};
+
 /* Reporting of tracking info */
 int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
-                      const char **upstream_name);
-int format_tracking_info(struct branch *branch, struct strbuf *sb);
+                      const char **upstream_name, enum ahead_behind_flags abf);
+int format_tracking_info(struct branch *branch, struct strbuf *sb,
+                        enum ahead_behind_flags abf);
 
 struct ref *get_local_heads(void);
 /*
@@ -297,4 +306,8 @@ extern int parseopt_push_cas_option(const struct option *, const char *arg, int
 extern int is_empty_cas(const struct push_cas_option *);
 void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *);
 
+#define TAG_REFSPEC "refs/tags/*:refs/tags/*"
+
+void add_prune_tags_to_fetch_refspec(struct remote *remote);
+
 #endif
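[Editorial illustration, not part of the patch: the comment and prototypes above define the new calling convention for stat_tracking_info(). A caller might look roughly like the sketch below; report_tracking() is a hypothetical helper and the printf() calls stand in for real output code. With AHEAD_BEHIND_QUICK the counts stay zero and only equality/inequality is reported, which is why the advice string above points users at "git status --ahead-behind" for full counts.]

static void report_tracking(struct branch *branch, enum ahead_behind_flags abf)
{
	int ours, theirs;
	const char *upstream;
	int sti = stat_tracking_info(branch, &ours, &theirs, &upstream, abf);

	if (sti < 0)
		return; /* no upstream configured, or its ref is missing */
	if (!sti)
		printf("in sync with %s\n", upstream);
	else if (abf == AHEAD_BEHIND_QUICK)
		printf("differs from %s (counts not computed)\n", upstream);
	else
		printf("ahead %d, behind %d relative to %s\n",
		       ours, theirs, upstream);
}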
index f0b39f06d5dabc3c98c5c083458736a585b4cabb..336357394d8b1eac1414fe4577272b0002463a86 100644 (file)
@@ -8,8 +8,8 @@
  * sha1.
  */
 static struct replace_object {
-       unsigned char original[20];
-       unsigned char replacement[20];
+       struct object_id original;
+       struct object_id replacement;
 } **replace_object;
 
 static int replace_object_alloc, replace_object_nr;
@@ -17,7 +17,7 @@ static int replace_object_alloc, replace_object_nr;
 static const unsigned char *replace_sha1_access(size_t index, void *table)
 {
        struct replace_object **replace = table;
-       return replace[index]->original;
+       return replace[index]->original.hash;
 }
 
 static int replace_object_pos(const unsigned char *sha1)
@@ -29,7 +29,7 @@ static int replace_object_pos(const unsigned char *sha1)
 static int register_replace_object(struct replace_object *replace,
                                   int ignore_dups)
 {
-       int pos = replace_object_pos(replace->original);
+       int pos = replace_object_pos(replace->original.hash);
 
        if (0 <= pos) {
                if (ignore_dups)
@@ -44,10 +44,8 @@ static int register_replace_object(struct replace_object *replace,
        ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
        replace_object_nr++;
        if (pos < replace_object_nr)
-               memmove(replace_object + pos + 1,
-                       replace_object + pos,
-                       (replace_object_nr - pos - 1) *
-                       sizeof(*replace_object));
+               MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
+                          replace_object_nr - pos - 1);
        replace_object[pos] = replace;
        return 0;
 }
@@ -61,14 +59,14 @@ static int register_replace_ref(const char *refname,
        const char *hash = slash ? slash + 1 : refname;
        struct replace_object *repl_obj = xmalloc(sizeof(*repl_obj));
 
-       if (strlen(hash) != 40 || get_sha1_hex(hash, repl_obj->original)) {
+       if (get_oid_hex(hash, &repl_obj->original)) {
                free(repl_obj);
                warning("bad replace ref name: %s", refname);
                return 0;
        }
 
        /* Copy sha1 from the read ref */
-       hashcpy(repl_obj->replacement, oid->hash);
+       oidcpy(&repl_obj->replacement, oid);
 
        /* Register new object */
        if (register_replace_object(repl_obj, 1))
@@ -94,16 +92,16 @@ static void prepare_replace_object(void)
 #define MAXREPLACEDEPTH 5
 
 /*
- * If a replacement for object sha1 has been set up, return the
+ * If a replacement for object oid has been set up, return the
  * replacement object's name (replaced recursively, if necessary).
- * The return value is either sha1 or a pointer to a
+ * The return value is either oid or a pointer to a
  * permanently-allocated value.  This function always respects replace
  * references, regardless of the value of check_replace_refs.
  */
-const unsigned char *do_lookup_replace_object(const unsigned char *sha1)
+const struct object_id *do_lookup_replace_object(const struct object_id *oid)
 {
        int pos, depth = MAXREPLACEDEPTH;
-       const unsigned char *cur = sha1;
+       const struct object_id *cur = oid;
 
        prepare_replace_object();
 
@@ -111,11 +109,11 @@ const unsigned char *do_lookup_replace_object(const unsigned char *sha1)
        do {
                if (--depth < 0)
                        die("replace depth too high for object %s",
-                           sha1_to_hex(sha1));
+                           oid_to_hex(oid));
 
-               pos = replace_object_pos(cur);
+               pos = replace_object_pos(cur->hash);
                if (0 <= pos)
-                       cur = replace_object[pos]->replacement;
+                       cur = &replace_object[pos]->replacement;
        } while (0 <= pos);
 
        return cur;
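[Editorial illustration, not part of the patch: the conversion above leans on the struct object_id helpers — get_oid_hex() parses a full hex object name, oidcpy() copies one, oid_to_hex() formats one. A minimal sketch of how they combine; parse_replacement() is a hypothetical name.]

static int parse_replacement(const char *hex, const struct object_id *replacement,
			     struct object_id *original)
{
	if (get_oid_hex(hex, original))
		return error("not a full hex object name: %s", hex);
	if (!oidcmp(original, replacement))
		return error("%s would replace itself", oid_to_hex(original));
	oidcpy(original, original); /* no-op, shown only to name the helper */
	return 0;
}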
index 4ffbe9bc94edc18314cb49c945038e2f20a40922..62f52f47fcaed32c4f14a9f76eb1e66e13d4046a 100644 (file)
@@ -4,64 +4,68 @@
 #include "submodule-config.h"
 
 /* The main repository */
-static struct repository the_repo = {
-       NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &the_index, &hash_algos[GIT_HASH_SHA1], 0, 0
-};
-struct repository *the_repository = &the_repo;
+static struct repository the_repo;
+struct repository *the_repository;
 
-static char *git_path_from_env(const char *envvar, const char *git_dir,
-                              const char *path, int fromenv)
+void initialize_the_repository(void)
 {
-       if (fromenv) {
-               const char *value = getenv(envvar);
-               if (value)
-                       return xstrdup(value);
-       }
+       the_repository = &the_repo;
 
-       return xstrfmt("%s/%s", git_dir, path);
+       the_repo.index = &the_index;
+       repo_set_hash_algo(&the_repo, GIT_HASH_SHA1);
 }
 
-static int find_common_dir(struct strbuf *sb, const char *gitdir, int fromenv)
+static void expand_base_dir(char **out, const char *in,
+                           const char *base_dir, const char *def_in)
 {
-       if (fromenv) {
-               const char *value = getenv(GIT_COMMON_DIR_ENVIRONMENT);
-               if (value) {
-                       strbuf_addstr(sb, value);
-                       return 1;
-               }
-       }
-
-       return get_common_dir_noenv(sb, gitdir);
+       free(*out);
+       if (in)
+               *out = xstrdup(in);
+       else
+               *out = xstrfmt("%s/%s", base_dir, def_in);
 }
 
-static void repo_setup_env(struct repository *repo)
+static void repo_set_commondir(struct repository *repo,
+                              const char *commondir)
 {
        struct strbuf sb = STRBUF_INIT;
 
-       repo->different_commondir = find_common_dir(&sb, repo->gitdir,
-                                                   !repo->ignore_env);
        free(repo->commondir);
+
+       if (commondir) {
+               repo->different_commondir = 1;
+               repo->commondir = xstrdup(commondir);
+               return;
+       }
+
+       repo->different_commondir = get_common_dir_noenv(&sb, repo->gitdir);
        repo->commondir = strbuf_detach(&sb, NULL);
-       free(repo->objectdir);
-       repo->objectdir = git_path_from_env(DB_ENVIRONMENT, repo->commondir,
-                                           "objects", !repo->ignore_env);
-       free(repo->graft_file);
-       repo->graft_file = git_path_from_env(GRAFT_ENVIRONMENT, repo->commondir,
-                                            "info/grafts", !repo->ignore_env);
-       free(repo->index_file);
-       repo->index_file = git_path_from_env(INDEX_ENVIRONMENT, repo->gitdir,
-                                            "index", !repo->ignore_env);
 }
 
-void repo_set_gitdir(struct repository *repo, const char *path)
+void repo_set_gitdir(struct repository *repo,
+                    const char *root,
+                    const struct set_gitdir_args *o)
 {
-       const char *gitfile = read_gitfile(path);
+       const char *gitfile = read_gitfile(root);
+       /*
+        * repo->gitdir is saved because the caller could pass "root"
+        * that also points to repo->gitdir. We want to keep it alive
+        * until after xstrdup(root). Then we can free it.
+        */
        char *old_gitdir = repo->gitdir;
 
-       repo->gitdir = xstrdup(gitfile ? gitfile : path);
-       repo_setup_env(repo);
-
+       repo->gitdir = xstrdup(gitfile ? gitfile : root);
        free(old_gitdir);
+
+       repo_set_commondir(repo, o->commondir);
+       expand_base_dir(&repo->objectdir, o->object_dir,
+                       repo->commondir, "objects");
+       free(repo->alternate_db);
+       repo->alternate_db = xstrdup_or_null(o->alternate_db);
+       expand_base_dir(&repo->graft_file, o->graft_file,
+                       repo->commondir, "info/grafts");
+       expand_base_dir(&repo->index_file, o->index_file,
+                       repo->gitdir, "index");
 }
 
 void repo_set_hash_algo(struct repository *repo, int hash_algo)
@@ -79,6 +83,7 @@ static int repo_init_gitdir(struct repository *repo, const char *gitdir)
        int error = 0;
        char *abspath = NULL;
        const char *resolved_gitdir;
+       struct set_gitdir_args args = { NULL };
 
        abspath = real_pathdup(gitdir, 0);
        if (!abspath) {
@@ -93,7 +98,7 @@ static int repo_init_gitdir(struct repository *repo, const char *gitdir)
                goto out;
        }
 
-       repo_set_gitdir(repo, resolved_gitdir);
+       repo_set_gitdir(repo, resolved_gitdir, &args);
 
 out:
        free(abspath);
@@ -128,13 +133,13 @@ static int read_and_verify_repository_format(struct repository_format *format,
  * Initialize 'repo' based on the provided 'gitdir'.
  * Return 0 upon success and a non-zero value upon failure.
  */
-int repo_init(struct repository *repo, const char *gitdir, const char *worktree)
+static int repo_init(struct repository *repo,
+                    const char *gitdir,
+                    const char *worktree)
 {
        struct repository_format format;
        memset(repo, 0, sizeof(*repo));
 
-       repo->ignore_env = 1;
-
        if (repo_init_gitdir(repo, gitdir))
                goto error;
 
@@ -210,6 +215,7 @@ void repo_clear(struct repository *repo)
        FREE_AND_NULL(repo->gitdir);
        FREE_AND_NULL(repo->commondir);
        FREE_AND_NULL(repo->objectdir);
+       FREE_AND_NULL(repo->alternate_db);
        FREE_AND_NULL(repo->graft_file);
        FREE_AND_NULL(repo->index_file);
        FREE_AND_NULL(repo->worktree);
index 0329e40c7f5e72dad3ba46328a8e3d6c29ed8e58..f21fd93f722714617f0b6f2419c25d34a90311de 100644 (file)
@@ -26,6 +26,9 @@ struct repository {
         */
        char *objectdir;
 
+       /* Path to extra alternate object database if not NULL */
+       char *alternate_db;
+
        /*
         * Path to the repository's graft file.
         * Cannot be NULL after initialization.
@@ -72,15 +75,6 @@ struct repository {
        const struct git_hash_algo *hash_algo;
 
        /* Configurations */
-       /*
-        * Bit used during initialization to indicate if repository state (like
-        * the location of the 'objectdir') should be read from the
-        * environment.  By default this bit will be set at the begining of
-        * 'repo_init()' so that all repositories will ignore the environment.
-        * The exception to this is 'the_repository', which doesn't go through
-        * the normal 'repo_init()' process.
-        */
-       unsigned ignore_env:1;
 
        /* Indicate if a repository has a different 'commondir' from 'gitdir' */
        unsigned different_commondir:1;
@@ -88,10 +82,24 @@ struct repository {
 
 extern struct repository *the_repository;
 
-extern void repo_set_gitdir(struct repository *repo, const char *path);
+/*
+ * Define a custom repository layout. Any field can be NULL, which
+ * will default back to the path according to the default layout.
+ */
+struct set_gitdir_args {
+       const char *commondir;
+       const char *object_dir;
+       const char *graft_file;
+       const char *index_file;
+       const char *alternate_db;
+};
+
+extern void repo_set_gitdir(struct repository *repo,
+                           const char *root,
+                           const struct set_gitdir_args *extra_args);
 extern void repo_set_worktree(struct repository *repo, const char *path);
 extern void repo_set_hash_algo(struct repository *repo, int algo);
-extern int repo_init(struct repository *repo, const char *gitdir, const char *worktree);
+extern void initialize_the_repository(void);
 extern int repo_submodule_init(struct repository *submodule,
                               struct repository *superproject,
                               const char *path);
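[Editorial illustration, not part of the patch: with the new struct set_gitdir_args, any field left NULL falls back to the default path under gitdir/commondir, so a caller only fills in what it wants to override. A minimal sketch, assuming gitdir has already been validated; the helper name is hypothetical.]

static void open_repo_with_temp_index(struct repository *repo,
				      const char *gitdir, const char *tmp_index)
{
	struct set_gitdir_args args = { NULL };

	args.index_file = tmp_index; /* everything else keeps its default path */
	repo_set_gitdir(repo, gitdir, &args);
}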
index 1ce440f4bb84d001ff2b0ac1a67772f4bf5926c0..18cae2d11c9a86aae0ed352a8f7606b142c5c183 100644 (file)
--- a/rerere.c
+++ b/rerere.c
@@ -159,8 +159,8 @@ static struct rerere_dir *find_rerere_dir(const char *hex)
                ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc);
                /* ... and add it in. */
                rerere_dir_nr++;
-               memmove(rerere_dir + pos + 1, rerere_dir + pos,
-                       (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir));
+               MOVE_ARRAY(rerere_dir + pos + 1, rerere_dir + pos,
+                          rerere_dir_nr - pos - 1);
                rerere_dir[pos] = rr_dir;
                scan_rerere_dir(rr_dir);
        }
@@ -719,11 +719,9 @@ static void update_paths(struct string_list *update)
                        item->string);
        }
 
-       if (active_cache_changed) {
-               if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
-                       die("Unable to write new index file");
-       } else
-               rollback_lock_file(&index_lock);
+       if (write_locked_index(&the_index, &index_lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
+               die("Unable to write new index file");
 }
 
 static void remove_variant(struct rerere_id *id)
@@ -981,8 +979,8 @@ static int handle_cache(const char *path, unsigned char *sha1, const char *outpu
                        break;
                i = ce_stage(ce) - 1;
                if (!mmfile[i].ptr) {
-                       mmfile[i].ptr = read_sha1_file(ce->oid.hash, &type,
-                                                      &size);
+                       mmfile[i].ptr = read_object_file(&ce->oid, &type,
+                                                        &size);
                        mmfile[i].size = size;
                }
        }
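[Editorial illustration, not part of the patch: MOVE_ARRAY(dst, src, n) is the type-checked, element-count form of memmove(dst, src, n * sizeof(*dst)), which is why the explicit sizeof() bookkeeping disappears above; the same hunk also switches to COMMIT_LOCK | SKIP_IF_UNCHANGED so write_locked_index() handles the "index did not change" case itself. A minimal sketch of the array idiom, assuming the caller has already grown the array (e.g. with ALLOC_GROW) so arr[nr] is writable; insert_at() is a hypothetical name.]

static void insert_at(int *arr, int nr, int pos, int value)
{
	/* count is in elements, not bytes; shifts arr[pos..nr-1] up by one */
	MOVE_ARRAY(arr + pos + 1, arr + pos, nr - pos);
	arr[pos] = value;
}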
index b40f3173d3fe5ef5c06c00ff8994060a9078669d..aed95b4b35fbb187bb96242afcf6b4d8e3d2008b 100644 (file)
@@ -24,7 +24,7 @@ void record_resolve_undo(struct index_state *istate, struct cache_entry *ce)
        if (!lost->util)
                lost->util = xcalloc(1, sizeof(*ui));
        ui = lost->util;
-       hashcpy(ui->sha1[stage - 1], ce->oid.hash);
+       oidcpy(&ui->oid[stage - 1], &ce->oid);
        ui->mode[stage - 1] = ce->ce_mode;
 }
 
@@ -44,7 +44,7 @@ void resolve_undo_write(struct strbuf *sb, struct string_list *resolve_undo)
                for (i = 0; i < 3; i++) {
                        if (!ui->mode[i])
                                continue;
-                       strbuf_add(sb, ui->sha1[i], 20);
+                       strbuf_add(sb, ui->oid[i].hash, the_hash_algo->rawsz);
                }
        }
 }
@@ -55,6 +55,7 @@ struct string_list *resolve_undo_read(const char *data, unsigned long size)
        size_t len;
        char *endptr;
        int i;
+       const unsigned rawsz = the_hash_algo->rawsz;
 
        resolve_undo = xcalloc(1, sizeof(*resolve_undo));
        resolve_undo->strdup_strings = 1;
@@ -87,11 +88,11 @@ struct string_list *resolve_undo_read(const char *data, unsigned long size)
                for (i = 0; i < 3; i++) {
                        if (!ui->mode[i])
                                continue;
-                       if (size < 20)
+                       if (size < rawsz)
                                goto error;
-                       hashcpy(ui->sha1[i], (const unsigned char *)data);
-                       size -= 20;
-                       data += 20;
+                       memcpy(ui->oid[i].hash, (const unsigned char *)data, rawsz);
+                       size -= rawsz;
+                       data += rawsz;
                }
        }
        return resolve_undo;
@@ -145,7 +146,7 @@ int unmerge_index_entry_at(struct index_state *istate, int pos)
                struct cache_entry *nce;
                if (!ru->mode[i])
                        continue;
-               nce = make_cache_entry(ru->mode[i], ru->sha1[i],
+               nce = make_cache_entry(ru->mode[i], ru->oid[i].hash,
                                       name, i + 1, 0);
                if (matched)
                        nce->ce_flags |= CE_MATCHED;
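[Editorial illustration, not part of the patch: by going through the_hash_algo->rawsz the on-disk record length is no longer hard-coded to 20 bytes; a SHA-1 repository still reads and writes 20-byte hashes, while a future hash function would use its own size. A minimal sketch mirroring the read loop above; read_raw_oid() is a hypothetical name.]

static int read_raw_oid(struct object_id *oid, const char **data,
			unsigned long *size)
{
	const unsigned rawsz = the_hash_algo->rawsz; /* 20 for SHA-1 */

	if (*size < rawsz)
		return -1;
	memcpy(oid->hash, *data, rawsz);
	*data += rawsz;
	*size -= rawsz;
	return 0;
}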
index 46306455edddb94a554a7a2fcadf49a30861f599..87291904bd34e0e7f3a3601b6742f5345391824d 100644 (file)
@@ -3,7 +3,7 @@
 
 struct resolve_undo_info {
        unsigned int mode[3];
-       unsigned char sha1[3][20];
+       struct object_id oid[3];
 };
 
 extern void record_resolve_undo(struct index_state *, struct cache_entry *);
index e5e527bcf256e0d062ffaae2a3d6ac2a1e7373d6..b42c836d7a64a67779c587954bcab90d919aaffb 100644 (file)
@@ -113,7 +113,8 @@ void mark_parents_uninteresting(struct commit *commit)
                         * it is popped next time around, we won't be trying
                         * to parse it and get an error.
                         */
-                       if (!has_object_file(&commit->object.oid))
+                       if (!commit->object.parsed &&
+                           !has_object_file(&commit->object.oid))
                                commit->object.parsed = 1;
 
                        if (commit->object.flags & UNINTERESTING)
@@ -198,6 +199,8 @@ static struct object *get_reference(struct rev_info *revs, const char *name,
        if (!object) {
                if (revs->ignore_missing)
                        return object;
+               if (revs->exclude_promisor_objects && is_promisor_object(oid))
+                       return NULL;
                die("bad object %s", name);
        }
        object->flags |= flags;
@@ -799,9 +802,17 @@ static int add_parents_to_list(struct rev_info *revs, struct commit *commit,
 
        for (parent = commit->parents; parent; parent = parent->next) {
                struct commit *p = parent->item;
-
-               if (parse_commit_gently(p, revs->ignore_missing_links) < 0)
+               int gently = revs->ignore_missing_links ||
+                            revs->exclude_promisor_objects;
+               if (parse_commit_gently(p, gently) < 0) {
+                       if (revs->exclude_promisor_objects &&
+                           is_promisor_object(&p->object.oid)) {
+                               if (revs->first_parent_only)
+                                       break;
+                               continue;
+                       }
                        return -1;
+               }
                if (revs->show_source && !p->util)
                        p->util = commit->util;
                p->object.flags |= left_flag;
@@ -2072,7 +2083,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                revs->grep_filter.pattern_type_option = GREP_PATTERN_TYPE_ERE;
        } else if (!strcmp(arg, "--regexp-ignore-case") || !strcmp(arg, "-i")) {
                revs->grep_filter.ignore_case = 1;
-               revs->diffopt.flags.pickaxe_ignore_case = 1;
+               revs->diffopt.pickaxe_opts |= DIFF_PICKAXE_IGNORE_CASE;
        } else if (!strcmp(arg, "--fixed-strings") || !strcmp(arg, "-F")) {
                revs->grep_filter.pattern_type_option = GREP_PATTERN_TYPE_FIXED;
        } else if (!strcmp(arg, "--perl-regexp") || !strcmp(arg, "-P")) {
@@ -2094,6 +2105,10 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
                revs->limited = 1;
        } else if (!strcmp(arg, "--ignore-missing")) {
                revs->ignore_missing = 1;
+       } else if (!strcmp(arg, "--exclude-promisor-objects")) {
+               if (fetch_if_missing)
+                       die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+               revs->exclude_promisor_objects = 1;
        } else {
                int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
                if (!opts)
@@ -2403,11 +2418,14 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct s
                revs->diff = 1;
 
        /* Pickaxe, diff-filter and rename following need diffs */
-       if (revs->diffopt.pickaxe ||
+       if ((revs->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) ||
            revs->diffopt.filter ||
            revs->diffopt.flags.follow_renames)
                revs->diff = 1;
 
+       if (revs->diffopt.objfind)
+               revs->simplify_history = 0;
+
        if (revs->topo_order)
                revs->limited = 1;
 
@@ -2836,6 +2854,16 @@ void reset_revision_walk(void)
        clear_object_flags(SEEN | ADDED | SHOWN);
 }
 
+static int mark_uninteresting(const struct object_id *oid,
+                             struct packed_git *pack,
+                             uint32_t pos,
+                             void *unused)
+{
+       struct object *o = parse_object(oid);
+       o->flags |= UNINTERESTING | SEEN;
+       return 0;
+}
+
 int prepare_revision_walk(struct rev_info *revs)
 {
        int i;
@@ -2863,6 +2891,11 @@ int prepare_revision_walk(struct rev_info *revs)
            (revs->limited && limiting_can_increase_treesame(revs)))
                revs->treesame.name = "treesame";
 
+       if (revs->exclude_promisor_objects) {
+               for_each_packed_object(mark_uninteresting, NULL,
+                                      FOR_EACH_OBJECT_PROMISOR_ONLY);
+       }
+
        if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED)
                commit_list_sort_by_date(&revs->commits);
        if (revs->no_walk)
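[Editorial illustration, not part of the patch: --exclude-promisor-objects has to be armed before any object is looked up, with lazy fetching disabled, so that missing promisor objects are tolerated rather than fetched or treated as corruption. A rough sketch of a caller, assuming the usual internal headers (revision.h, argv-array.h, fetch-object.h for fetch_if_missing); walk_without_promisors() is a hypothetical name.]

static void walk_without_promisors(struct rev_info *revs, const char *tip)
{
	struct argv_array args = ARGV_ARRAY_INIT;

	fetch_if_missing = 0; /* must be cleared before any object is parsed */
	init_revisions(revs, NULL);
	argv_array_pushl(&args, "rev-list", "--exclude-promisor-objects",
			 tip, NULL);
	setup_revisions(args.argc, args.argv, revs, NULL);
	if (prepare_revision_walk(revs))
		die("revision walk setup failed");
	argv_array_clear(&args);
}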
index 187406b6ebfde26e11681cd4884b118ed10cd84a..b8c47b98e22562ef320197b4ddbc8f0c3ee40f98 100644 (file)
@@ -121,7 +121,10 @@ struct rev_info {
                        ancestry_path:1,
                        first_parent_only:1,
                        line_level_traverse:1,
-                       tree_blobs_in_commit_order:1;
+                       tree_blobs_in_commit_order:1,
+
+                       /* for internal use only */
+                       exclude_promisor_objects:1;
 
        /* Diff flags */
        unsigned int    diff:1,
index 31fc5ea86eb6a3fe3650b8ac50f19f8621d90b9d..a483d5904a3ec1acae8908dd2e699fa00bcaaa9d 100644 (file)
@@ -6,6 +6,7 @@
 #include "thread-utils.h"
 #include "strbuf.h"
 #include "string-list.h"
+#include "quote.h"
 
 void child_process_init(struct child_process *child)
 {
@@ -556,6 +557,90 @@ static int wait_or_whine(pid_t pid, const char *argv0, int in_signal)
        return code;
 }
 
+static void trace_add_env(struct strbuf *dst, const char *const *deltaenv)
+{
+       struct string_list envs = STRING_LIST_INIT_DUP;
+       const char *const *e;
+       int i;
+       int printed_unset = 0;
+
+       /* Last one wins, see run-command.c:prep_childenv() for context */
+       for (e = deltaenv; e && *e; e++) {
+               struct strbuf key = STRBUF_INIT;
+               char *equals = strchr(*e, '=');
+
+               if (equals) {
+                       strbuf_add(&key, *e, equals - *e);
+                       string_list_insert(&envs, key.buf)->util = equals + 1;
+               } else {
+                       string_list_insert(&envs, *e)->util = NULL;
+               }
+               strbuf_release(&key);
+       }
+
+       /* "unset X Y...;" */
+       for (i = 0; i < envs.nr; i++) {
+               const char *var = envs.items[i].string;
+               const char *val = envs.items[i].util;
+
+               if (val || !getenv(var))
+                       continue;
+
+               if (!printed_unset) {
+                       strbuf_addstr(dst, " unset");
+                       printed_unset = 1;
+               }
+               strbuf_addf(dst, " %s", var);
+       }
+       if (printed_unset)
+               strbuf_addch(dst, ';');
+
+       /* ... followed by "A=B C=D ..." */
+       for (i = 0; i < envs.nr; i++) {
+               const char *var = envs.items[i].string;
+               const char *val = envs.items[i].util;
+               const char *oldval;
+
+               if (!val)
+                       continue;
+
+               oldval = getenv(var);
+               if (oldval && !strcmp(val, oldval))
+                       continue;
+
+               strbuf_addf(dst, " %s=", var);
+               sq_quote_buf_pretty(dst, val);
+       }
+       string_list_clear(&envs, 0);
+}
+
+static void trace_run_command(const struct child_process *cp)
+{
+       struct strbuf buf = STRBUF_INIT;
+
+       if (!trace_want(&trace_default_key))
+               return;
+
+       strbuf_addf(&buf, "trace: run_command:");
+       if (cp->dir) {
+               strbuf_addstr(&buf, " cd ");
+               sq_quote_buf_pretty(&buf, cp->dir);
+               strbuf_addch(&buf, ';');
+       }
+       /*
+        * The caller is responsible for initializing cp->env from
+        * cp->env_array if needed. We only check one place.
+        */
+       if (cp->env)
+               trace_add_env(&buf, cp->env);
+       if (cp->git_cmd)
+               strbuf_addstr(&buf, " git");
+       sq_quote_argv_pretty(&buf, cp->argv);
+
+       trace_printf("%s", buf.buf);
+       strbuf_release(&buf);
+}
+
 int start_command(struct child_process *cmd)
 {
        int need_in, need_out, need_err;
@@ -624,7 +709,8 @@ int start_command(struct child_process *cmd)
                cmd->err = fderr[0];
        }
 
-       trace_argv_printf(cmd->argv, "trace: run_command:");
+       trace_run_command(cmd);
+
        fflush(NULL);
 
 #ifndef GIT_WINDOWS_NATIVE
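[Editorial illustration, not part of the patch: with trace_run_command() the GIT_TRACE line now carries the working directory and the environment delta of the child. A rough sketch of a caller and the kind of output it would produce; show_status_in() is a hypothetical helper and the exact trace text depends on the parent environment.]

static void show_status_in(const char *dir)
{
	struct child_process cp = CHILD_PROCESS_INIT;

	cp.git_cmd = 1;
	cp.dir = dir;
	argv_array_pushl(&cp.args, "status", "--short", NULL);
	argv_array_pushl(&cp.env_array, "GIT_PAGER=cat", "GIT_CONFIG", NULL);

	/*
	 * With GIT_TRACE=1 this is logged roughly as
	 *   trace: run_command: cd sub; unset GIT_CONFIG; GIT_PAGER=cat git status --short
	 * where the unset/assignment parts list only variables that actually
	 * differ from the parent environment.
	 */
	if (run_command(&cp))
		die("git status failed in %s", dir);
}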
index 2112d3b27ad21e1d457b08a3aa71a811b5e8ee79..19025a7aca82a7066b9a2d40d4d50406a9749a5f 100644 (file)
@@ -37,14 +37,14 @@ int option_parse_push_signed(const struct option *opt,
        die("bad %s argument: %s", opt->long_name, arg);
 }
 
-static void feed_object(const unsigned char *sha1, FILE *fh, int negative)
+static void feed_object(const struct object_id *oid, FILE *fh, int negative)
 {
-       if (negative && !has_sha1_file(sha1))
+       if (negative && !has_sha1_file(oid->hash))
                return;
 
        if (negative)
                putc('^', fh);
-       fputs(sha1_to_hex(sha1), fh);
+       fputs(oid_to_hex(oid), fh);
        putc('\n', fh);
 }
 
@@ -89,13 +89,13 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc
         */
        po_in = xfdopen(po.in, "w");
        for (i = 0; i < extra->nr; i++)
-               feed_object(extra->oid[i].hash, po_in, 1);
+               feed_object(&extra->oid[i], po_in, 1);
 
        while (refs) {
                if (!is_null_oid(&refs->old_oid))
-                       feed_object(refs->old_oid.hash, po_in, 1);
+                       feed_object(&refs->old_oid, po_in, 1);
                if (!is_null_oid(&refs->new_oid))
-                       feed_object(refs->new_oid.hash, po_in, 0);
+                       feed_object(&refs->new_oid, po_in, 0);
                refs = refs->next;
        }
 
@@ -137,6 +137,8 @@ static int pack_objects(int fd, struct ref *refs, struct oid_array *extra, struc
 static int receive_unpack_status(int in)
 {
        const char *line = packet_read_line(in, NULL);
+       if (!line)
+               return error(_("unexpected flush packet while reading remote unpack status"));
        if (!skip_prefix(line, "unpack ", &line))
                return error(_("unable to parse remote unpack status: %s"), line);
        if (strcmp(line, "ok"))
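[Editorial illustration, not part of the patch: packet_read_line() returns NULL when it reads a flush packet, so any caller that expects a data packet has to check for NULL before parsing, as receive_unpack_status() now does. A minimal sketch; read_status_packet() is a hypothetical name.]

static const char *read_status_packet(int fd)
{
	const char *line = packet_read_line(fd, NULL);

	if (!line) /* a flush packet comes back as NULL */
		die("protocol error: expected an unpack status, got a flush packet");
	return line;
}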
index 4d3f60594cbf0e9ddf2ea78e8c58eca312b4cac0..667f35ebdffbc1ef730e310cbea9b3dbce786e21 100644 (file)
@@ -1,10 +1,10 @@
 #include "cache.h"
 #include "config.h"
 #include "lockfile.h"
-#include "sequencer.h"
 #include "dir.h"
 #include "object.h"
 #include "commit.h"
+#include "sequencer.h"
 #include "tag.h"
 #include "run-command.h"
 #include "exec_cmd.h"
 #include "log-tree.h"
 #include "wt-status.h"
 #include "hashmap.h"
+#include "notes-utils.h"
+#include "sigchain.h"
 
 #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
 
 const char sign_off_header[] = "Signed-off-by: ";
 static const char cherry_picked_prefix[] = "(cherry picked from commit ";
 
+GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
+
 GIT_PATH_FUNC(git_path_seq_dir, "sequencer")
 
 static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo")
@@ -130,6 +134,51 @@ static GIT_PATH_FUNC(rebase_path_strategy, "rebase-merge/strategy")
 static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts")
 static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate")
 
+static int git_sequencer_config(const char *k, const char *v, void *cb)
+{
+       struct replay_opts *opts = cb;
+       int status;
+
+       if (!strcmp(k, "commit.cleanup")) {
+               const char *s;
+
+               status = git_config_string(&s, k, v);
+               if (status)
+                       return status;
+
+               if (!strcmp(s, "verbatim"))
+                       opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+               else if (!strcmp(s, "whitespace"))
+                       opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+               else if (!strcmp(s, "strip"))
+                       opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL;
+               else if (!strcmp(s, "scissors"))
+                       opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+               else
+                       warning(_("invalid commit message cleanup mode '%s'"),
+                                 s);
+
+               return status;
+       }
+
+       if (!strcmp(k, "commit.gpgsign")) {
+               opts->gpg_sign = git_config_bool(k, v) ? xstrdup("") : NULL;
+               return 0;
+       }
+
+       status = git_gpg_config(k, v, NULL);
+       if (status)
+               return status;
+
+       return git_diff_basic_config(k, v, NULL);
+}
+
+void sequencer_init_config(struct replay_opts *opts)
+{
+       opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+       git_config(git_sequencer_config, opts);
+}
+
 static inline int is_rebase_i(const struct replay_opts *opts)
 {
        return opts->action == REPLAY_INTERACTIVE_REBASE;
@@ -233,7 +282,7 @@ struct commit_message {
 
 static const char *short_commit_name(struct commit *commit)
 {
-       return find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
+       return find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV);
 }
 
 static int get_message(struct commit *commit, struct commit_message *out)
@@ -290,7 +339,7 @@ static void print_advice(int show_hint, struct replay_opts *opts)
 static int write_message(const void *buf, size_t len, const char *filename,
                         int append_eol)
 {
-       static struct lock_file msg_file;
+       struct lock_file msg_file = LOCK_INIT;
 
        int msg_fd = hold_lock_file_for_update(&msg_file, filename, 0);
        if (msg_fd < 0)
@@ -303,10 +352,8 @@ static int write_message(const void *buf, size_t len, const char *filename,
                rollback_lock_file(&msg_file);
                return error_errno(_("could not write eol to '%s'"), filename);
        }
-       if (commit_lock_file(&msg_file) < 0) {
-               rollback_lock_file(&msg_file);
-               return error(_("failed to finalize '%s'."), filename);
-       }
+       if (commit_lock_file(&msg_file) < 0)
+               return error(_("failed to finalize '%s'"), filename);
 
        return 0;
 }
@@ -436,7 +483,7 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
        struct tree *result, *next_tree, *base_tree, *head_tree;
        int clean;
        char **xopt;
-       static struct lock_file index_lock;
+       struct lock_file index_lock = LOCK_INIT;
 
        if (hold_locked_index(&index_lock, LOCK_REPORT_ON_ERROR) < 0)
                return -1;
@@ -465,21 +512,19 @@ static int do_recursive_merge(struct commit *base, struct commit *next,
                fputs(o.obuf.buf, stdout);
        strbuf_release(&o.obuf);
        diff_warn_rename_limit("merge.renamelimit", o.needed_rename_limit, 0);
-       if (clean < 0)
+       if (clean < 0) {
+               rollback_lock_file(&index_lock);
                return clean;
+       }
 
-       if (active_cache_changed &&
-           write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
+       if (write_locked_index(&the_index, &index_lock,
+                              COMMIT_LOCK | SKIP_IF_UNCHANGED))
                /*
                 * TRANSLATORS: %s will be "revert", "cherry-pick" or
                 * "rebase -i".
                 */
                return error(_("%s: Unable to write new index file"),
                        _(action_name(opts)));
-       rollback_lock_file(&index_lock);
-
-       if (opts->signoff)
-               append_signoff(msgbuf, 0, 0);
 
        if (!clean)
                append_conflicts_hint(msgbuf);
@@ -596,6 +641,18 @@ static int read_env_script(struct argv_array *env)
        return 0;
 }
 
+static char *get_author(const char *message)
+{
+       size_t len;
+       const char *a;
+
+       a = find_commit_header(message, "author", &len);
+       if (a)
+               return xmemdupz(a, len);
+
+       return NULL;
+}
+
 static const char staged_changes_advice[] =
 N_("you have staged changes in your working tree\n"
 "If these changes are meant to be squashed into the previous commit, run:\n"
@@ -658,8 +715,6 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
                argv_array_push(&cmd.args, "--amend");
        if (opts->gpg_sign)
                argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
-       if (opts->signoff)
-               argv_array_push(&cmd.args, "-s");
        if (defmsg)
                argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
        if ((flags & CLEANUP_MSG))
@@ -694,6 +749,461 @@ static int run_git_commit(const char *defmsg, struct replay_opts *opts,
        return run_command(&cmd);
 }
 
+static int rest_is_empty(const struct strbuf *sb, int start)
+{
+       int i, eol;
+       const char *nl;
+
+       /* Check if the rest is just whitespace and Signed-off-by's. */
+       for (i = start; i < sb->len; i++) {
+               nl = memchr(sb->buf + i, '\n', sb->len - i);
+               if (nl)
+                       eol = nl - sb->buf;
+               else
+                       eol = sb->len;
+
+               if (strlen(sign_off_header) <= eol - i &&
+                   starts_with(sb->buf + i, sign_off_header)) {
+                       i = eol;
+                       continue;
+               }
+               while (i < eol)
+                       if (!isspace(sb->buf[i++]))
+                               return 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Find out if the message in the strbuf contains only whitespace and
+ * Signed-off-by lines.
+ */
+int message_is_empty(const struct strbuf *sb,
+                    enum commit_msg_cleanup_mode cleanup_mode)
+{
+       if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+               return 0;
+       return rest_is_empty(sb, 0);
+}
+
+/*
+ * See if the user edited the message in the editor or left what
+ * was in the template intact
+ */
+int template_untouched(const struct strbuf *sb, const char *template_file,
+                      enum commit_msg_cleanup_mode cleanup_mode)
+{
+       struct strbuf tmpl = STRBUF_INIT;
+       const char *start;
+
+       if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+               return 0;
+
+       if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
+               return 0;
+
+       strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+       if (!skip_prefix(sb->buf, tmpl.buf, &start))
+               start = sb->buf;
+       strbuf_release(&tmpl);
+       return rest_is_empty(sb, start - sb->buf);
+}
+
+int update_head_with_reflog(const struct commit *old_head,
+                           const struct object_id *new_head,
+                           const char *action, const struct strbuf *msg,
+                           struct strbuf *err)
+{
+       struct ref_transaction *transaction;
+       struct strbuf sb = STRBUF_INIT;
+       const char *nl;
+       int ret = 0;
+
+       if (action) {
+               strbuf_addstr(&sb, action);
+               strbuf_addstr(&sb, ": ");
+       }
+
+       nl = strchr(msg->buf, '\n');
+       if (nl) {
+               strbuf_add(&sb, msg->buf, nl + 1 - msg->buf);
+       } else {
+               strbuf_addbuf(&sb, msg);
+               strbuf_addch(&sb, '\n');
+       }
+
+       transaction = ref_transaction_begin(err);
+       if (!transaction ||
+           ref_transaction_update(transaction, "HEAD", new_head,
+                                  old_head ? &old_head->object.oid : &null_oid,
+                                  0, sb.buf, err) ||
+           ref_transaction_commit(transaction, err)) {
+               ret = -1;
+       }
+       ref_transaction_free(transaction);
+       strbuf_release(&sb);
+
+       return ret;
+}
+
+static int run_rewrite_hook(const struct object_id *oldoid,
+                           const struct object_id *newoid)
+{
+       struct child_process proc = CHILD_PROCESS_INIT;
+       const char *argv[3];
+       int code;
+       struct strbuf sb = STRBUF_INIT;
+
+       argv[0] = find_hook("post-rewrite");
+       if (!argv[0])
+               return 0;
+
+       argv[1] = "amend";
+       argv[2] = NULL;
+
+       proc.argv = argv;
+       proc.in = -1;
+       proc.stdout_to_stderr = 1;
+
+       code = start_command(&proc);
+       if (code)
+               return code;
+       strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
+       sigchain_push(SIGPIPE, SIG_IGN);
+       write_in_full(proc.in, sb.buf, sb.len);
+       close(proc.in);
+       strbuf_release(&sb);
+       sigchain_pop(SIGPIPE);
+       return finish_command(&proc);
+}
+
+void commit_post_rewrite(const struct commit *old_head,
+                        const struct object_id *new_head)
+{
+       struct notes_rewrite_cfg *cfg;
+
+       cfg = init_copy_notes_for_rewrite("amend");
+       if (cfg) {
+               /* we are amending, so old_head is not NULL */
+               copy_note_for_rewrite(cfg, &old_head->object.oid, new_head);
+               finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
+       }
+       run_rewrite_hook(&old_head->object.oid, new_head);
+}
+
+static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit)
+{
+       struct argv_array hook_env = ARGV_ARRAY_INIT;
+       int ret;
+       const char *name;
+
+       name = git_path_commit_editmsg();
+       if (write_message(msg->buf, msg->len, name, 0))
+               return -1;
+
+       argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file());
+       argv_array_push(&hook_env, "GIT_EDITOR=:");
+       if (commit)
+               ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+                                 "commit", commit, NULL);
+       else
+               ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+                                 "message", NULL);
+       if (ret)
+               ret = error(_("'prepare-commit-msg' hook failed"));
+       argv_array_clear(&hook_env);
+
+       return ret;
+}
+
+static const char implicit_ident_advice_noconfig[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly. Run the\n"
+"following command and follow the instructions in your editor to edit\n"
+"your configuration file:\n"
+"\n"
+"    git config --global --edit\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+"    git commit --amend --reset-author\n");
+
+static const char implicit_ident_advice_config[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly:\n"
+"\n"
+"    git config --global user.name \"Your Name\"\n"
+"    git config --global user.email you@example.com\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+"    git commit --amend --reset-author\n");
+
+static const char *implicit_ident_advice(void)
+{
+       char *user_config = expand_user_path("~/.gitconfig", 0);
+       char *xdg_config = xdg_config_home("config");
+       int config_exists = file_exists(user_config) || file_exists(xdg_config);
+
+       free(user_config);
+       free(xdg_config);
+
+       if (config_exists)
+               return _(implicit_ident_advice_config);
+       else
+               return _(implicit_ident_advice_noconfig);
+
+}
+
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+                         unsigned int flags)
+{
+       struct rev_info rev;
+       struct commit *commit;
+       struct strbuf format = STRBUF_INIT;
+       const char *head;
+       struct pretty_print_context pctx = {0};
+       struct strbuf author_ident = STRBUF_INIT;
+       struct strbuf committer_ident = STRBUF_INIT;
+
+       commit = lookup_commit(oid);
+       if (!commit)
+               die(_("couldn't look up newly created commit"));
+       if (parse_commit(commit))
+               die(_("could not parse newly created commit"));
+
+       strbuf_addstr(&format, "format:%h] %s");
+
+       format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
+       format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
+       if (strbuf_cmp(&author_ident, &committer_ident)) {
+               strbuf_addstr(&format, "\n Author: ");
+               strbuf_addbuf_percentquote(&format, &author_ident);
+       }
+       if (flags & SUMMARY_SHOW_AUTHOR_DATE) {
+               struct strbuf date = STRBUF_INIT;
+
+               format_commit_message(commit, "%ad", &date, &pctx);
+               strbuf_addstr(&format, "\n Date: ");
+               strbuf_addbuf_percentquote(&format, &date);
+               strbuf_release(&date);
+       }
+       if (!committer_ident_sufficiently_given()) {
+               strbuf_addstr(&format, "\n Committer: ");
+               strbuf_addbuf_percentquote(&format, &committer_ident);
+               if (advice_implicit_identity) {
+                       strbuf_addch(&format, '\n');
+                       strbuf_addstr(&format, implicit_ident_advice());
+               }
+       }
+       strbuf_release(&author_ident);
+       strbuf_release(&committer_ident);
+
+       init_revisions(&rev, prefix);
+       setup_revisions(0, NULL, &rev, NULL);
+
+       rev.diff = 1;
+       rev.diffopt.output_format =
+               DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
+
+       rev.verbose_header = 1;
+       rev.show_root_diff = 1;
+       get_commit_format(format.buf, &rev);
+       rev.always_show_header = 0;
+       rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+       rev.diffopt.break_opt = 0;
+       diff_setup_done(&rev.diffopt);
+
+       head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+       if (!head)
+               die_errno(_("unable to resolve HEAD after creating commit"));
+       if (!strcmp(head, "HEAD"))
+               head = _("detached HEAD");
+       else
+               skip_prefix(head, "refs/heads/", &head);
+       printf("[%s%s ", head, (flags & SUMMARY_INITIAL_COMMIT) ?
+                                               _(" (root-commit)") : "");
+
+       if (!log_tree_commit(&rev, commit)) {
+               rev.always_show_header = 1;
+               rev.use_terminator = 1;
+               log_tree_commit(&rev, commit);
+       }
+
+       strbuf_release(&format);
+}
+
+static int parse_head(struct commit **head)
+{
+       struct commit *current_head;
+       struct object_id oid;
+
+       if (get_oid("HEAD", &oid)) {
+               current_head = NULL;
+       } else {
+               current_head = lookup_commit_reference(&oid);
+               if (!current_head)
+                       return error(_("could not parse HEAD"));
+               if (oidcmp(&oid, &current_head->object.oid)) {
+                       warning(_("HEAD %s is not a commit!"),
+                               oid_to_hex(&oid));
+               }
+               if (parse_commit(current_head))
+                       return error(_("could not parse HEAD commit"));
+       }
+       *head = current_head;
+
+       return 0;
+}
+
+/*
+ * Try to commit without forking 'git commit'. In some cases we need
+ * to run 'git commit' to display an error message
+ *
+ * Returns:
+ *  -1 - error unable to commit
+ *   0 - success
+ *   1 - run 'git commit'
+ */
+static int try_to_commit(struct strbuf *msg, const char *author,
+                        struct replay_opts *opts, unsigned int flags,
+                        struct object_id *oid)
+{
+       struct object_id tree;
+       struct commit *current_head;
+       struct commit_list *parents = NULL;
+       struct commit_extra_header *extra = NULL;
+       struct strbuf err = STRBUF_INIT;
+       struct strbuf commit_msg = STRBUF_INIT;
+       char *amend_author = NULL;
+       const char *hook_commit = NULL;
+       enum commit_msg_cleanup_mode cleanup;
+       int res = 0;
+
+       if (parse_head(&current_head))
+               return -1;
+
+       if (flags & AMEND_MSG) {
+               const char *exclude_gpgsig[] = { "gpgsig", NULL };
+               const char *out_enc = get_commit_output_encoding();
+               const char *message = logmsg_reencode(current_head, NULL,
+                                                     out_enc);
+
+               if (!msg) {
+                       const char *orig_message = NULL;
+
+                       find_commit_subject(message, &orig_message);
+                       msg = &commit_msg;
+                       strbuf_addstr(msg, orig_message);
+                       hook_commit = "HEAD";
+               }
+               author = amend_author = get_author(message);
+               unuse_commit_buffer(current_head, message);
+               if (!author) {
+                       res = error(_("unable to parse commit author"));
+                       goto out;
+               }
+               parents = copy_commit_list(current_head->parents);
+               extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+       } else if (current_head) {
+               commit_list_insert(current_head, &parents);
+       }
+
+       if (write_cache_as_tree(&tree, 0, NULL)) {
+               res = error(_("git write-tree failed to write a tree"));
+               goto out;
+       }
+
+       if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
+                                             &current_head->tree->object.oid :
+                                             &empty_tree_oid, &tree)) {
+               res = 1; /* run 'git commit' to display error message */
+               goto out;
+       }
+
+       if (find_hook("prepare-commit-msg")) {
+               res = run_prepare_commit_msg_hook(msg, hook_commit);
+               if (res)
+                       goto out;
+               if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(),
+                                    2048) < 0) {
+                       res = error_errno(_("unable to read commit message "
+                                             "from '%s'"),
+                                           git_path_commit_editmsg());
+                       goto out;
+               }
+               msg = &commit_msg;
+       }
+
+       cleanup = (flags & CLEANUP_MSG) ? COMMIT_MSG_CLEANUP_ALL :
+                                         opts->default_msg_cleanup;
+
+       if (cleanup != COMMIT_MSG_CLEANUP_NONE)
+               strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
+       if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+               res = 1; /* run 'git commit' to display error message */
+               goto out;
+       }
+
+       if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
+                                oid, author, opts->gpg_sign, extra)) {
+               res = error(_("failed to write commit object"));
+               goto out;
+       }
+
+       if (update_head_with_reflog(current_head, oid,
+                                   getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+               res = error("%s", err.buf);
+               goto out;
+       }
+
+       if (flags & AMEND_MSG)
+               commit_post_rewrite(current_head, oid);
+
+out:
+       free_commit_extra_headers(extra);
+       strbuf_release(&err);
+       strbuf_release(&commit_msg);
+       free(amend_author);
+
+       return res;
+}
+
+static int do_commit(const char *msg_file, const char *author,
+                    struct replay_opts *opts, unsigned int flags)
+{
+       int res = 1;
+
+       if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+               struct object_id oid;
+               struct strbuf sb = STRBUF_INIT;
+
+               if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0)
+                       return error_errno(_("unable to read commit message "
+                                            "from '%s'"),
+                                          msg_file);
+
+               res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags,
+                                   &oid);
+               strbuf_release(&sb);
+               if (!res) {
+                       unlink(git_path_cherry_pick_head());
+                       unlink(git_path_merge_msg());
+                       if (!is_rebase_i(opts))
+                               print_commit_summary(NULL, &oid,
+                                               SUMMARY_SHOW_AUTHOR_DATE);
+                       return res;
+               }
+       }
+       if (res == 1)
+               return run_git_commit(msg_file, opts, flags);
+
+       return res;
+}
+
 static int is_original_commit_empty(struct commit *commit)
 {
        const struct object_id *ptree_oid;
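[Editorial illustration, not part of the patch: the new helpers above compose roughly as follows when committing the index without forking 'git commit'. Hooks, message cleanup and most error handling are omitted, and the helper name is hypothetical; it only shows how parse_head(), write_cache_as_tree(), commit_tree_extended() and update_head_with_reflog() fit together.]

static int commit_index_in_process(const struct strbuf *msg,
				   struct replay_opts *opts)
{
	struct object_id tree, new_commit;
	struct commit *head = NULL;
	struct commit_list *parents = NULL;
	struct strbuf err = STRBUF_INIT;
	int res;

	if (parse_head(&head) || write_cache_as_tree(&tree, 0, NULL))
		return -1;
	if (head)
		commit_list_insert(head, &parents);
	if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
				 &new_commit, NULL, opts->gpg_sign, NULL))
		return error(_("failed to write commit object"));
	res = update_head_with_reflog(head, &new_commit,
				      getenv("GIT_REFLOG_ACTION"), msg, &err);
	if (res)
		error("%s", err.buf);
	strbuf_release(&err);
	return res;
}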
@@ -952,6 +1462,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
        struct object_id head;
        struct commit *base, *next, *parent;
        const char *base_label, *next_label;
+       char *author = NULL;
        struct commit_message msg = { NULL, NULL, NULL, NULL };
        struct strbuf msgbuf = STRBUF_INIT;
        int res, unborn = 0, allow;
@@ -963,7 +1474,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                 * that represents the "current" state for merge-recursive
                 * to work on.
                 */
-               if (write_cache_as_tree(head.hash, 0, NULL))
+               if (write_cache_as_tree(&head, 0, NULL))
                        return error(_("your index file is unmerged."));
        } else {
                unborn = get_oid("HEAD", &head);
@@ -1066,6 +1577,8 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                        strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid));
                        strbuf_addstr(&msgbuf, ")\n");
                }
+               if (!is_fixup(command))
+                       author = get_author(msg.message);
        }
 
        if (command == TODO_REWORD)
@@ -1091,6 +1604,9 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                }
        }
 
+       if (opts->signoff)
+               append_signoff(&msgbuf, 0, 0);
+
        if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
                res = -1;
        else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) {
@@ -1148,9 +1664,13 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
                goto leave;
        } else if (allow)
                flags |= ALLOW_EMPTY;
-       if (!opts->no_commit)
+       if (!opts->no_commit) {
 fast_forward_edit:
-               res = run_git_commit(msg_file, opts, flags);
+               if (author || command == TODO_REVERT || (flags & AMEND_MSG))
+                       res = do_commit(msg_file, author, opts, flags);
+               else
+                       res = error(_("unable to parse commit author"));
+       }
 
        if (!res && final_fixup) {
                unlink(rebase_path_fixup_msg());
@@ -1159,6 +1679,7 @@ static int do_pick_commit(enum todo_command command, struct commit *commit,
 
 leave:
        free_message(commit, &msg);
+       free(author);
        update_abort_safety_file();
 
        return res;
@@ -1183,7 +1704,7 @@ static int prepare_revs(struct replay_opts *opts)
 
 static int read_and_refresh_cache(struct replay_opts *opts)
 {
-       static struct lock_file index_lock;
+       struct lock_file index_lock = LOCK_INIT;
        int index_fd = hold_locked_index(&index_lock, 0);
        if (read_index_preload(&the_index, NULL) < 0) {
                rollback_lock_file(&index_lock);
@@ -1191,13 +1712,13 @@ static int read_and_refresh_cache(struct replay_opts *opts)
                        _(action_name(opts)));
        }
        refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
-       if (the_index.cache_changed && index_fd >= 0) {
-               if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) {
+       if (index_fd >= 0) {
+               if (write_locked_index(&the_index, &index_lock,
+                                      COMMIT_LOCK | SKIP_IF_UNCHANGED)) {
                        return error(_("git %s: failed to refresh the index"),
                                _(action_name(opts)));
                }
        }
-       rollback_lock_file(&index_lock);
        return 0;
 }
 
@@ -1347,22 +1868,31 @@ static int count_commands(struct todo_list *todo_list)
        return count;
 }
 
+static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
+{
+       int fd;
+       ssize_t len;
+
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return error_errno(_("could not open '%s'"), path);
+       len = strbuf_read(sb, fd, 0);
+       close(fd);
+       if (len < 0)
+               return error(_("could not read '%s'."), path);
+       return len;
+}
+
 static int read_populate_todo(struct todo_list *todo_list,
                        struct replay_opts *opts)
 {
        struct stat st;
        const char *todo_file = get_todo_path(opts);
-       int fd, res;
+       int res;
 
        strbuf_reset(&todo_list->buf);
-       fd = open(todo_file, O_RDONLY);
-       if (fd < 0)
-               return error_errno(_("could not open '%s'"), todo_file);
-       if (strbuf_read(&todo_list->buf, fd, 0) < 0) {
-               close(fd);
-               return error(_("could not read '%s'."), todo_file);
-       }
-       close(fd);
+       if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0)
+               return -1;
 
        res = stat(todo_file, &st);
        if (res)
@@ -1577,16 +2107,14 @@ static int create_seq_dir(void)
 
 static int save_head(const char *head)
 {
-       static struct lock_file head_lock;
+       struct lock_file head_lock = LOCK_INIT;
        struct strbuf buf = STRBUF_INIT;
        int fd;
        ssize_t written;
 
        fd = hold_lock_file_for_update(&head_lock, git_path_head_file(), 0);
-       if (fd < 0) {
-               rollback_lock_file(&head_lock);
+       if (fd < 0)
                return error_errno(_("could not lock HEAD"));
-       }
        strbuf_addf(&buf, "%s\n", head);
        written = write_in_full(fd, buf.buf, buf.len);
        strbuf_release(&buf);
@@ -1595,10 +2123,8 @@ static int save_head(const char *head)
                return error_errno(_("could not write to '%s'"),
                                   git_path_head_file());
        }
-       if (commit_lock_file(&head_lock) < 0) {
-               rollback_lock_file(&head_lock);
-               return error(_("failed to finalize '%s'."), git_path_head_file());
-       }
+       if (commit_lock_file(&head_lock) < 0)
+               return error(_("failed to finalize '%s'"), git_path_head_file());
        return 0;
 }
 
@@ -1702,7 +2228,7 @@ int sequencer_rollback(struct replay_opts *opts)
 
 static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
 {
-       static struct lock_file todo_lock;
+       struct lock_file todo_lock = LOCK_INIT;
        const char *todo_path = get_todo_path(opts);
        int next = todo_list->current, offset, fd;
 
@@ -1722,7 +2248,7 @@ static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
                        todo_list->buf.len - offset) < 0)
                return error_errno(_("could not write to '%s'"), todo_path);
        if (commit_lock_file(&todo_lock) < 0)
-               return error(_("failed to finalize '%s'."), todo_path);
+               return error(_("failed to finalize '%s'"), todo_path);
 
        if (is_rebase_i(opts)) {
                const char *done_path = rebase_path_done();
@@ -1792,6 +2318,9 @@ static int make_patch(struct commit *commit, struct replay_opts *opts)
        p = short_commit_name(commit);
        if (write_message(p, strlen(p), rebase_path_stopped_sha(), 1) < 0)
                return -1;
+       if (update_ref("rebase", "REBASE_HEAD", &commit->object.oid,
+                      NULL, REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
+               res |= error(_("could not update %s"), "REBASE_HEAD");
 
        strbuf_addf(&buf, "%s/patch", get_dir(opts));
        memset(&log_tree_opt, 0, sizeof(log_tree_opt));
@@ -2043,6 +2572,7 @@ static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
                        unlink(rebase_path_author_script());
                        unlink(rebase_path_stopped_sha());
                        unlink(rebase_path_amend());
+                       delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
                }
                if (item->command <= TODO_SQUASH) {
                        if (is_rebase_i(opts))
@@ -2346,9 +2876,10 @@ int sequencer_pick_revisions(struct replay_opts *opts)
 
                if (!get_oid(name, &oid)) {
                        if (!lookup_commit_reference_gently(&oid, 1)) {
-                               enum object_type type = sha1_object_info(oid.hash, NULL);
+                               enum object_type type = oid_object_info(&oid,
+                                                                       NULL);
                                return error(_("%s: can't cherry-pick a %s"),
-                                       name, typename(type));
+                                       name, type_name(type));
                        }
                } else
                        return error(_("%s: bad revision"), name);
@@ -2629,20 +3160,13 @@ int check_todo_list(void)
        struct strbuf todo_file = STRBUF_INIT;
        struct todo_list todo_list = TODO_LIST_INIT;
        struct strbuf missing = STRBUF_INIT;
-       int advise_to_edit_todo = 0, res = 0, fd, i;
+       int advise_to_edit_todo = 0, res = 0, i;
 
        strbuf_addstr(&todo_file, rebase_path_todo());
-       fd = open(todo_file.buf, O_RDONLY);
-       if (fd < 0) {
-               res = error_errno(_("could not open '%s'"), todo_file.buf);
-               goto leave_check;
-       }
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               res = error(_("could not read '%s'."), todo_file.buf);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+               res = -1;
                goto leave_check;
        }
-       close(fd);
        advise_to_edit_todo = res =
                parse_insn_buffer(todo_list.buf.buf, &todo_list);
 
@@ -2658,17 +3182,10 @@ int check_todo_list(void)
 
        todo_list_release(&todo_list);
        strbuf_addstr(&todo_file, ".backup");
-       fd = open(todo_file.buf, O_RDONLY);
-       if (fd < 0) {
-               res = error_errno(_("could not open '%s'"), todo_file.buf);
-               goto leave_check;
-       }
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               res = error(_("could not read '%s'."), todo_file.buf);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+               res = -1;
                goto leave_check;
        }
-       close(fd);
        strbuf_release(&todo_file);
        res = !!parse_insn_buffer(todo_list.buf.buf, &todo_list);
 
@@ -2749,15 +3266,8 @@ int skip_unnecessary_picks(void)
        }
        strbuf_release(&buf);
 
-       fd = open(todo_file, O_RDONLY);
-       if (fd < 0) {
-               return error_errno(_("could not open '%s'"), todo_file);
-       }
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               return error(_("could not read '%s'."), todo_file);
-       }
-       close(fd);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+               return -1;
        if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
                todo_list_release(&todo_list);
                return -1;
@@ -2848,17 +3358,11 @@ int rearrange_squash(void)
        const char *todo_file = rebase_path_todo();
        struct todo_list todo_list = TODO_LIST_INIT;
        struct hashmap subject2item;
-       int res = 0, rearranged = 0, *next, *tail, fd, i;
+       int res = 0, rearranged = 0, *next, *tail, i;
        char **subjects;
 
-       fd = open(todo_file, O_RDONLY);
-       if (fd < 0)
-               return error_errno(_("could not open '%s'"), todo_file);
-       if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
-               close(fd);
-               return error(_("could not read '%s'."), todo_file);
-       }
-       close(fd);
+       if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+               return -1;
        if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
                todo_list_release(&todo_list);
                return -1;
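
The sequencer.c hunks above repeatedly turn a function-local static struct lock_file into an on-stack lock initialized with LOCK_INIT and drop the explicit rollback calls around it. A minimal sketch of the resulting pattern, assuming the lockfile.h API used elsewhere in this patch; the helper name and the file it writes are invented for illustration:

#include "cache.h"
#include "lockfile.h"

/* Sketch only: "path" and "contents" are placeholders. */
static int write_state_file_sketch(const char *path, const char *contents)
{
        struct lock_file lk = LOCK_INIT;
        int fd = hold_lock_file_for_update(&lk, path, 0);

        if (fd < 0)
                return error_errno(_("could not lock '%s'"), path);
        if (write_in_full(fd, contents, strlen(contents)) < 0) {
                rollback_lock_file(&lk);
                return error_errno(_("could not write to '%s'"), path);
        }
        if (commit_lock_file(&lk) < 0)
                return error(_("failed to finalize '%s'"), path);
        return 0;
}

Because the lock starts from LOCK_INIT and both a failed hold_lock_file_for_update() and a failed commit_lock_file() leave nothing held, the extra rollback_lock_file() calls on those paths become unnecessary, which is what the hunks above delete.
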
index 81f6d7d393fd1a502e3763cd1717b87ffa243717..e45b178dfc41d723bf186f20674c4515d7c7fa00 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef SEQUENCER_H
 #define SEQUENCER_H
 
+const char *git_path_commit_editmsg(void);
 const char *git_path_seq_dir(void);
 
 #define APPEND_SIGNOFF_DEDUP (1u << 0)
@@ -11,6 +12,13 @@ enum replay_action {
        REPLAY_INTERACTIVE_REBASE
 };
 
+enum commit_msg_cleanup_mode {
+       COMMIT_MSG_CLEANUP_SPACE,
+       COMMIT_MSG_CLEANUP_NONE,
+       COMMIT_MSG_CLEANUP_SCISSORS,
+       COMMIT_MSG_CLEANUP_ALL
+};
+
 struct replay_opts {
        enum replay_action action;
 
@@ -29,6 +37,7 @@ struct replay_opts {
        int mainline;
 
        char *gpg_sign;
+       enum commit_msg_cleanup_mode default_msg_cleanup;
 
        /* Merge strategy */
        char *strategy;
@@ -40,6 +49,8 @@ struct replay_opts {
 };
 #define REPLAY_OPTS_INIT { -1 }
 
+/* Call this to set up defaults before parsing command line options */
+void sequencer_init_config(struct replay_opts *opts);
 int sequencer_pick_revisions(struct replay_opts *opts);
 int sequencer_continue(struct replay_opts *opts);
 int sequencer_rollback(struct replay_opts *opts);
@@ -61,5 +72,19 @@ extern const char sign_off_header[];
 
 void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
 void append_conflicts_hint(struct strbuf *msgbuf);
+int message_is_empty(const struct strbuf *sb,
+                    enum commit_msg_cleanup_mode cleanup_mode);
+int template_untouched(const struct strbuf *sb, const char *template_file,
+                      enum commit_msg_cleanup_mode cleanup_mode);
+int update_head_with_reflog(const struct commit *old_head,
+                           const struct object_id *new_head,
+                           const char *action, const struct strbuf *msg,
+                           struct strbuf *err);
+void commit_post_rewrite(const struct commit *current_head,
+                        const struct object_id *new_head);
 
+#define SUMMARY_INITIAL_COMMIT   (1 << 0)
+#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1)
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+                         unsigned int flags);
 #endif
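
sequencer.h now exposes sequencer_init_config() next to the new default_msg_cleanup field and the commit_msg_cleanup_mode enum. A hypothetical caller sketch (the helper name is invented, and the config key feeding default_msg_cleanup is assumed to be commit.cleanup) showing the intended ordering: initialize, pick the action, load config-driven defaults, then let option parsing override them:

#include "cache.h"
#include "sequencer.h"

static int run_pick_sketch(void)
{
        struct replay_opts opts = REPLAY_OPTS_INIT;   /* action == -1, rest zeroed */

        opts.action = REPLAY_PICK;
        sequencer_init_config(&opts);   /* e.g. commit.cleanup -> default_msg_cleanup */
        /* parse_options() would adjust opts here before the call below */
        return sequencer_pick_revisions(&opts);
}
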
diff --git a/setup.c b/setup.c
index 8cc34186ce1f918ce5a9c8fc22ea81b7c645ff17..664453fcef7f3b75f56d000dc52f42a0aff42fb6 100644 (file)
--- a/setup.c
+++ b/setup.c
@@ -119,7 +119,7 @@ char *prefix_path(const char *prefix, int len, const char *path)
 {
        char *r = prefix_path_gently(prefix, len, NULL, path);
        if (!r)
-               die("'%s' is outside repository", path);
+               die(_("'%s' is outside repository"), path);
        return r;
 }
 
@@ -160,7 +160,7 @@ int check_filename(const char *prefix, const char *arg)
                free(to_free);
                return 0; /* file does not exist */
        }
-       die_errno("failed to stat '%s'", arg);
+       die_errno(_("failed to stat '%s'"), arg);
 }
 
 static void NORETURN die_verify_filename(const char *prefix,
@@ -230,7 +230,7 @@ void verify_filename(const char *prefix,
                     int diagnose_misspelt_rev)
 {
        if (*arg == '-')
-               die("option '%s' must come before non-option arguments", arg);
+               die(_("option '%s' must come before non-option arguments"), arg);
        if (looks_like_pathspec(arg) || check_filename(prefix, arg))
                return;
        die_verify_filename(prefix, arg, diagnose_misspelt_rev);
@@ -385,14 +385,14 @@ void setup_work_tree(void)
                return;
 
        if (work_tree_config_is_bogus)
-               die("unable to set up work tree using invalid config");
+               die(_("unable to set up work tree using invalid config"));
 
        work_tree = get_git_work_tree();
        git_dir = get_git_dir();
        if (!is_absolute_path(git_dir))
                git_dir = real_path(get_git_dir());
        if (!work_tree || chdir(work_tree))
-               die("This operation must be run in a work tree");
+               die(_("this operation must be run in a work tree"));
 
        /*
         * Make sure subsequent git processes find correct worktree
@@ -422,7 +422,11 @@ static int check_repo_format(const char *var, const char *value, void *vdata)
                        ;
                else if (!strcmp(ext, "preciousobjects"))
                        data->precious_objects = git_config_bool(var, value);
-               else
+               else if (!strcmp(ext, "partialclone")) {
+                       if (!value)
+                               return config_error_nonbool(var);
+                       data->partial_clone = xstrdup(value);
+               } else
                        string_list_append(&data->unknown_extensions, ext);
        } else if (strcmp(var, "core.bare") == 0) {
                data->is_bare = git_config_bool(var, value);
@@ -464,6 +468,7 @@ static int check_repository_format_gently(const char *gitdir, struct repository_
        }
 
        repository_format_precious_objects = candidate->precious_objects;
+       repository_format_partial_clone = candidate->partial_clone;
        string_list_clear(&candidate->unknown_extensions, 0);
        if (!has_common) {
                if (candidate->is_bare != -1) {
@@ -525,17 +530,17 @@ void read_gitfile_error_die(int error_code, const char *path, const char *dir)
                /* non-fatal; follow return path */
                break;
        case READ_GITFILE_ERR_OPEN_FAILED:
-               die_errno("Error opening '%s'", path);
+               die_errno(_("error opening '%s'"), path);
        case READ_GITFILE_ERR_TOO_LARGE:
-               die("Too large to be a .git file: '%s'", path);
+               die(_("too large to be a .git file: '%s'"), path);
        case READ_GITFILE_ERR_READ_FAILED:
-               die("Error reading %s", path);
+               die(_("error reading %s"), path);
        case READ_GITFILE_ERR_INVALID_FORMAT:
-               die("Invalid gitfile format: %s", path);
+               die(_("invalid gitfile format: %s"), path);
        case READ_GITFILE_ERR_NO_PATH:
-               die("No path in gitfile: %s", path);
+               die(_("no path in gitfile: %s"), path);
        case READ_GITFILE_ERR_NOT_A_REPO:
-               die("Not a git repository: %s", dir);
+               die(_("not a git repository: %s"), dir);
        default:
                die("BUG: unknown error code");
        }
@@ -634,7 +639,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
        int offset;
 
        if (PATH_MAX - 40 < strlen(gitdirenv))
-               die("'$%s' too big", GIT_DIR_ENVIRONMENT);
+               die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT);
 
        gitfile = (char*)read_gitfile(gitdirenv);
        if (gitfile) {
@@ -648,7 +653,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
                        free(gitfile);
                        return NULL;
                }
-               die("Not a git repository: '%s'", gitdirenv);
+               die(_("not a git repository: '%s'"), gitdirenv);
        }
 
        if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) {
@@ -677,12 +682,12 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
                else {
                        char *core_worktree;
                        if (chdir(gitdirenv))
-                               die_errno("Could not chdir to '%s'", gitdirenv);
+                               die_errno(_("cannot chdir to '%s'"), gitdirenv);
                        if (chdir(git_work_tree_cfg))
-                               die_errno("Could not chdir to '%s'", git_work_tree_cfg);
+                               die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg);
                        core_worktree = xgetcwd();
                        if (chdir(cwd->buf))
-                               die_errno("Could not come back to cwd");
+                               die_errno(_("cannot come back to cwd"));
                        set_git_work_tree(core_worktree);
                        free(core_worktree);
                }
@@ -710,7 +715,7 @@ static const char *setup_explicit_git_dir(const char *gitdirenv,
        if (offset >= 0) {      /* cwd inside worktree? */
                set_git_dir(real_path(gitdirenv));
                if (chdir(worktree))
-                       die_errno("Could not chdir to '%s'", worktree);
+                       die_errno(_("cannot chdir to '%s'"), worktree);
                strbuf_addch(cwd, '/');
                free(gitfile);
                return cwd->buf + offset;
@@ -738,7 +743,7 @@ static const char *setup_discovered_git_dir(const char *gitdir,
                if (offset != cwd->len && !is_absolute_path(gitdir))
                        gitdir = to_free = real_pathdup(gitdir, 1);
                if (chdir(cwd->buf))
-                       die_errno("Could not come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
                free(to_free);
                return ret;
@@ -748,7 +753,7 @@ static const char *setup_discovered_git_dir(const char *gitdir,
        if (is_bare_repository_cfg > 0) {
                set_git_dir(offset == cwd->len ? gitdir : real_path(gitdir));
                if (chdir(cwd->buf))
-                       die_errno("Could not come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                return NULL;
        }
 
@@ -787,7 +792,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset,
 
                gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset);
                if (chdir(cwd->buf))
-                       die_errno("Could not come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
        }
 
@@ -795,7 +800,7 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset,
        inside_work_tree = 0;
        if (offset != cwd->len) {
                if (chdir(cwd->buf))
-                       die_errno("Cannot come back to cwd");
+                       die_errno(_("cannot come back to cwd"));
                root_len = offset_1st_component(cwd->buf);
                strbuf_setlen(cwd, offset > root_len ? offset : root_len);
                set_git_dir(cwd->buf);
@@ -808,9 +813,9 @@ static const char *setup_bare_git_dir(struct strbuf *cwd, int offset,
 static const char *setup_nongit(const char *cwd, int *nongit_ok)
 {
        if (!nongit_ok)
-               die(_("Not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
+               die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
        if (chdir(cwd))
-               die_errno(_("Cannot come back to cwd"));
+               die_errno(_("cannot come back to cwd"));
        *nongit_ok = 1;
        return NULL;
 }
@@ -819,7 +824,7 @@ static dev_t get_device_or_die(const char *path, const char *prefix, int prefix_
 {
        struct stat buf;
        if (stat(path, &buf)) {
-               die_errno("failed to stat '%*s%s%s'",
+               die_errno(_("failed to stat '%*s%s%s'"),
                                prefix_len,
                                prefix ? prefix : "",
                                prefix ? "/" : "", path);
@@ -1061,13 +1066,13 @@ const char *setup_git_directory_gently(int *nongit_ok)
                break;
        case GIT_DIR_DISCOVERED:
                if (dir.len < cwd.len && chdir(dir.buf))
-                       die(_("Cannot change to '%s'"), dir.buf);
+                       die(_("cannot change to '%s'"), dir.buf);
                prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len,
                                                  &repo_fmt, nongit_ok);
                break;
        case GIT_DIR_BARE:
                if (dir.len < cwd.len && chdir(dir.buf))
-                       die(_("Cannot change to '%s'"), dir.buf);
+                       die(_("cannot change to '%s'"), dir.buf);
                prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok);
                break;
        case GIT_DIR_HIT_CEILING:
@@ -1080,7 +1085,7 @@ const char *setup_git_directory_gently(int *nongit_ok)
                        strbuf_release(&dir);
                        return NULL;
                }
-               die(_("Not a git repository (or any parent up to mount point %s)\n"
+               die(_("not a git repository (or any parent up to mount point %s)\n"
                      "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
                    dir.buf);
        default:
@@ -1111,8 +1116,7 @@ const char *setup_git_directory_gently(int *nongit_ok)
                        const char *gitdir = getenv(GIT_DIR_ENVIRONMENT);
                        if (!gitdir)
                                gitdir = DEFAULT_GIT_DIR_ENVIRONMENT;
-                       repo_set_gitdir(the_repository, gitdir);
-                       setup_git_env();
+                       setup_git_env(gitdir);
                }
                if (startup_info->have_repository)
                        repo_set_hash_algo(the_repository, repo_fmt.hash_algo);
@@ -1164,7 +1168,7 @@ int git_config_perm(const char *var, const char *value)
        /* A filemode value was given: 0xxx */
 
        if ((i & 0600) != 0600)
-               die(_("Problem with core.sharedRepository filemode value "
+               die(_("problem with core.sharedRepository filemode value "
                    "(0%.3o).\nThe owner of files must always have "
                    "read and write permissions."), i);
 
@@ -1207,7 +1211,7 @@ void sanitize_stdfds(void)
        while (fd != -1 && fd < 2)
                fd = dup(fd);
        if (fd == -1)
-               die_errno("open /dev/null or dup failed");
+               die_errno(_("open /dev/null or dup failed"));
        if (fd > 2)
                close(fd);
 }
@@ -1222,12 +1226,12 @@ int daemonize(void)
                case 0:
                        break;
                case -1:
-                       die_errno("fork failed");
+                       die_errno(_("fork failed"));
                default:
                        exit(0);
        }
        if (setsid() == -1)
-               die_errno("setsid failed");
+               die_errno(_("setsid failed"));
        close(0);
        close(1);
        close(2);
index 4cf3ebd9212f6d5c9b9829373e58c34b83f0a548..8d0b1db3e27c99b80faa2867565de237581802f4 100644 (file)
@@ -99,3 +99,31 @@ int sha1_pos(const unsigned char *sha1, void *table, size_t nr,
        } while (lo < hi);
        return -lo-1;
 }
+
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+                const unsigned char *table, size_t stride, uint32_t *result)
+{
+       uint32_t hi, lo;
+
+       hi = ntohl(fanout_nbo[*sha1]);
+       lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1]));
+
+       while (lo < hi) {
+               unsigned mi = lo + (hi - lo) / 2;
+               int cmp = hashcmp(table + mi * stride, sha1);
+
+               if (!cmp) {
+                       if (result)
+                               *result = mi;
+                       return 1;
+               }
+               if (cmp > 0)
+                       hi = mi;
+               else
+                       lo = mi + 1;
+       }
+
+       if (result)
+               *result = lo;
+       return 0;
+}
index cf5314f402ce78f0d5ab2bd72ee7f334b6394e04..7678b23b36c291c7b1b6656e0c42fc14ea12da15 100644 (file)
@@ -7,4 +7,26 @@ extern int sha1_pos(const unsigned char *sha1,
                    void *table,
                    size_t nr,
                    sha1_access_fn fn);
+
+/*
+ * Searches for sha1 in table, using the given fanout table to determine the
+ * interval to search, then using binary search. Returns 1 if found, 0 if not.
+ *
+ * Takes the following parameters:
+ *
+ *  - sha1: the hash to search for
+ *  - fanout_nbo: a 256-element array of NETWORK-order 32-bit integers; the
+ *    integer at position i represents the number of elements in table whose
+ *    first byte is less than or equal to i
+ *  - table: a sorted list of hashes with optional extra information in between
+ *  - stride: distance between two consecutive elements in table (should be
+ *    GIT_MAX_RAWSZ or greater)
+ *  - result: if not NULL, this function stores the element index of the
+ *    position found (if the search is successful) or the index of the least
+ *    element that is greater than sha1 (if the search is not successful)
+ *
+ * This function does not verify the validity of the fanout table.
+ */
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+                const unsigned char *table, size_t stride, uint32_t *result);
 #endif
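
To make the parameter description above concrete, here is a small, hypothetical caller of bsearch_hash(); it assumes a table of sorted hashes laid out GIT_MAX_RAWSZ bytes apart and a 256-entry fanout whose cumulative counts have already been converted with htonl(), exactly as the comment requires:

#include "cache.h"
/* bsearch_hash() is declared in the header patched above. */

static void lookup_sketch(const unsigned char *sha1,
                          const uint32_t *fanout,
                          const unsigned char *table)
{
        uint32_t pos;

        if (bsearch_hash(sha1, fanout, table, GIT_MAX_RAWSZ, &pos))
                printf("found at index %u\n", (unsigned)pos);
        else
                printf("not found; would sort at index %u\n", (unsigned)pos);
}
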
index 3da70ac650a8cdeca6ef8a6a424a7740d38267d5..aea9124a78fce21eb4cb756af29823493274e581 100644 (file)
 #include "bulk-checkin.h"
 #include "streaming.h"
 #include "dir.h"
-#include "mru.h"
 #include "list.h"
 #include "mergesort.h"
 #include "quote.h"
 #include "packfile.h"
+#include "fetch-object.h"
+
+/* The maximum size for an object header. */
+#define MAX_HEADER_LEN 32
 
 const unsigned char null_sha1[GIT_MAX_RAWSZ];
 const struct object_id null_oid;
@@ -39,32 +42,32 @@ const struct object_id empty_blob_oid = {
        EMPTY_BLOB_SHA1_BIN_LITERAL
 };
 
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
 {
-       git_SHA1_Init((git_SHA_CTX *)ctx);
+       git_SHA1_Init(&ctx->sha1);
 }
 
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
 {
-       git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+       git_SHA1_Update(&ctx->sha1, data, len);
 }
 
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
 {
-       git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+       git_SHA1_Final(hash, &ctx->sha1);
 }
 
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
 {
        die("trying to init unknown hash");
 }
 
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
 {
        die("trying to update unknown hash");
 }
 
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
 {
        die("trying to finalize unknown hash");
 }
@@ -75,7 +78,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
                0x00000000,
                0,
                0,
-               0,
                git_hash_unknown_init,
                git_hash_unknown_update,
                git_hash_unknown_final,
@@ -86,7 +88,6 @@ const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
                "sha-1",
                /* "sha1", big-endian */
                0x73686131,
-               sizeof(git_SHA_CTX),
                GIT_SHA1_RAWSZ,
                GIT_SHA1_HEXSZ,
                git_hash_sha1_init,
@@ -133,14 +134,14 @@ static struct cached_object *find_cached_object(const unsigned char *sha1)
 }
 
 
-static enum safe_crlf get_safe_crlf(unsigned flags)
+static int get_conv_flags(unsigned flags)
 {
        if (flags & HASH_RENORMALIZE)
-               return SAFE_CRLF_RENORMALIZE;
+               return CONV_EOL_RENORMALIZE;
        else if (flags & HASH_WRITE_OBJECT)
-               return safe_crlf;
+               return global_conv_flags_eol;
        else
-               return SAFE_CRLF_FALSE;
+               return 0;
 }
 
 
@@ -321,15 +322,11 @@ static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
        }
 }
 
-const char *sha1_file_name(const unsigned char *sha1)
+void sha1_file_name(struct strbuf *buf, const unsigned char *sha1)
 {
-       static struct strbuf buf = STRBUF_INIT;
-
-       strbuf_reset(&buf);
-       strbuf_addf(&buf, "%s/", get_object_directory());
-
-       fill_sha1_path(&buf, sha1);
-       return buf.buf;
+       strbuf_addstr(buf, get_object_directory());
+       strbuf_addch(buf, '/');
+       fill_sha1_path(buf, sha1);
 }
 
 struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
@@ -671,15 +668,11 @@ int foreach_alt_odb(alt_odb_fn fn, void *cb)
 
 void prepare_alt_odb(void)
 {
-       const char *alt;
-
        if (alt_odb_tail)
                return;
 
-       alt = getenv(ALTERNATE_DB_ENVIRONMENT);
-
        alt_odb_tail = &alt_odb_list;
-       link_alt_odb_entries(alt, PATH_SEP, NULL, 0);
+       link_alt_odb_entries(the_repository->alternate_db, PATH_SEP, NULL, 0);
 
        read_info_alternates(get_object_directory(), 0);
 }
@@ -710,7 +703,12 @@ int check_and_freshen_file(const char *fn, int freshen)
 
 static int check_and_freshen_local(const unsigned char *sha1, int freshen)
 {
-       return check_and_freshen_file(sha1_file_name(sha1), freshen);
+       static struct strbuf buf = STRBUF_INIT;
+
+       strbuf_reset(&buf);
+       sha1_file_name(&buf, sha1);
+
+       return check_and_freshen_file(buf.buf, freshen);
 }
 
 static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
@@ -785,31 +783,31 @@ void *xmmap(void *start, size_t length,
  * With "map" == NULL, try reading the object named with "sha1" using
  * the streaming interface and rehash it to do the same.
  */
-int check_sha1_signature(const unsigned char *sha1, void *map,
-                        unsigned long size, const char *type)
+int check_object_signature(const struct object_id *oid, void *map,
+                          unsigned long size, const char *type)
 {
-       unsigned char real_sha1[20];
+       struct object_id real_oid;
        enum object_type obj_type;
        struct git_istream *st;
-       git_SHA_CTX c;
-       char hdr[32];
+       git_hash_ctx c;
+       char hdr[MAX_HEADER_LEN];
        int hdrlen;
 
        if (map) {
-               hash_sha1_file(map, size, type, real_sha1);
-               return hashcmp(sha1, real_sha1) ? -1 : 0;
+               hash_object_file(map, size, type, &real_oid);
+               return oidcmp(oid, &real_oid) ? -1 : 0;
        }
 
-       st = open_istream(sha1, &obj_type, &size, NULL);
+       st = open_istream(oid, &obj_type, &size, NULL);
        if (!st)
                return -1;
 
        /* Generate the header */
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1;
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
 
        /* Sha1.. */
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, hdrlen);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
        for (;;) {
                char buf[1024 * 16];
                ssize_t readlen = read_istream(st, buf, sizeof(buf));
@@ -820,11 +818,11 @@ int check_sha1_signature(const unsigned char *sha1, void *map,
                }
                if (!readlen)
                        break;
-               git_SHA1_Update(&c, buf, readlen);
+               the_hash_algo->update_fn(&c, buf, readlen);
        }
-       git_SHA1_Final(real_sha1, &c);
+       the_hash_algo->final_fn(real_oid.hash, &c);
        close_istream(st);
-       return hashcmp(sha1, real_sha1) ? -1 : 0;
+       return oidcmp(oid, &real_oid) ? -1 : 0;
 }
 
 int git_open_cloexec(const char *name, int flags)
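
check_object_signature() above now drives all hashing through the function pointers on the_hash_algo instead of calling the SHA-1 routines directly. A minimal sketch of that calling convention, written as if it sat in this file (the helper and its arguments are placeholders; out is assumed to have room for the_hash_algo->rawsz bytes):

static void hash_demo(const void *payload, size_t len, unsigned char *out)
{
        git_hash_ctx c;

        the_hash_algo->init_fn(&c);
        the_hash_algo->update_fn(&c, payload, len);
        the_hash_algo->final_fn(out, &c);
}
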
@@ -866,8 +864,12 @@ static int stat_sha1_file(const unsigned char *sha1, struct stat *st,
                          const char **path)
 {
        struct alternate_object_database *alt;
+       static struct strbuf buf = STRBUF_INIT;
+
+       strbuf_reset(&buf);
+       sha1_file_name(&buf, sha1);
+       *path = buf.buf;
 
-       *path = sha1_file_name(sha1);
        if (!lstat(*path, st))
                return 0;
 
@@ -891,8 +893,12 @@ static int open_sha1_file(const unsigned char *sha1, const char **path)
        int fd;
        struct alternate_object_database *alt;
        int most_interesting_errno;
+       static struct strbuf buf = STRBUF_INIT;
+
+       strbuf_reset(&buf);
+       sha1_file_name(&buf, sha1);
+       *path = buf.buf;
 
-       *path = sha1_file_name(sha1);
        fd = git_open(*path);
        if (fd >= 0)
                return fd;
@@ -1087,8 +1093,8 @@ static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
        }
 
        type = type_from_string_gently(type_buf, type_len, 1);
-       if (oi->typename)
-               strbuf_add(oi->typename, type_buf, type_len);
+       if (oi->type_name)
+               strbuf_add(oi->type_name, type_buf, type_len);
        /*
         * Set type to 0 if its an unknown object and
         * we're obtaining the type using '--allow-unknown-type'
@@ -1143,7 +1149,7 @@ static int sha1_loose_object_info(const unsigned char *sha1,
        unsigned long mapsize;
        void *map;
        git_zstream stream;
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        struct strbuf hdrbuf = STRBUF_INIT;
        unsigned long size_scratch;
 
@@ -1158,7 +1164,7 @@ static int sha1_loose_object_info(const unsigned char *sha1,
         * return value implicitly indicates whether the
         * object even exists.
         */
-       if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) {
+       if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
                const char *path;
                struct stat st;
                if (stat_sha1_file(sha1, &st, &path) < 0)
@@ -1213,23 +1219,27 @@ static int sha1_loose_object_info(const unsigned char *sha1,
        return (status < 0) ? status : 0;
 }
 
-int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
+int fetch_if_missing = 1;
+
+int oid_object_info_extended(const struct object_id *oid, struct object_info *oi, unsigned flags)
 {
        static struct object_info blank_oi = OBJECT_INFO_INIT;
        struct pack_entry e;
        int rtype;
-       const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
-                                   lookup_replace_object(sha1) :
-                                   sha1;
+       const struct object_id *real = oid;
+       int already_retried = 0;
+
+       if (flags & OBJECT_INFO_LOOKUP_REPLACE)
+               real = lookup_replace_object(oid);
 
-       if (is_null_sha1(real))
+       if (is_null_oid(real))
                return -1;
 
        if (!oi)
                oi = &blank_oi;
 
        if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
-               struct cached_object *co = find_cached_object(real);
+               struct cached_object *co = find_cached_object(real->hash);
                if (co) {
                        if (oi->typep)
                                *(oi->typep) = co->type;
@@ -1239,8 +1249,8 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                                *(oi->disk_sizep) = 0;
                        if (oi->delta_base_sha1)
                                hashclr(oi->delta_base_sha1);
-                       if (oi->typename)
-                               strbuf_addstr(oi->typename, typename(co->type));
+                       if (oi->type_name)
+                               strbuf_addstr(oi->type_name, type_name(co->type));
                        if (oi->contentp)
                                *oi->contentp = xmemdupz(co->buf, co->size);
                        oi->whence = OI_CACHED;
@@ -1248,19 +1258,37 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                }
        }
 
-       if (!find_pack_entry(real, &e)) {
+       while (1) {
+               if (find_pack_entry(real->hash, &e))
+                       break;
+
+               if (flags & OBJECT_INFO_IGNORE_LOOSE)
+                       return -1;
+
                /* Most likely it's a loose object. */
-               if (!sha1_loose_object_info(real, oi, flags))
+               if (!sha1_loose_object_info(real->hash, oi, flags))
                        return 0;
 
                /* Not a loose object; someone else may have just packed it. */
-               if (flags & OBJECT_INFO_QUICK) {
-                       return -1;
-               } else {
+               if (!(flags & OBJECT_INFO_QUICK)) {
                        reprepare_packed_git();
-                       if (!find_pack_entry(real, &e))
-                               return -1;
+                       if (find_pack_entry(real->hash, &e))
+                               break;
+               }
+
+               /* Check if it is a missing object */
+               if (fetch_if_missing && repository_format_partial_clone &&
+                   !already_retried) {
+                       /*
+                        * TODO Investigate having fetch_object() return
+                        * TODO error/success and stopping the music here.
+                        */
+                       fetch_object(repository_format_partial_clone, real->hash);
+                       already_retried = 1;
+                       continue;
                }
+
+               return -1;
        }
 
        if (oi == &blank_oi)
@@ -1269,11 +1297,10 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
                 * information below, so return early.
                 */
                return 0;
-
        rtype = packed_object_info(e.p, e.offset, oi);
        if (rtype < 0) {
-               mark_bad_packed_object(e.p, real);
-               return sha1_object_info_extended(real, oi, 0);
+               mark_bad_packed_object(e.p, real->hash);
+               return oid_object_info_extended(real, oi, 0);
        } else if (oi->whence == OI_PACKED) {
                oi->u.packed.offset = e.offset;
                oi->u.packed.pack = e.p;
@@ -1285,15 +1312,15 @@ int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi,
 }
 
 /* returns enum object_type or negative */
-int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
+int oid_object_info(const struct object_id *oid, unsigned long *sizep)
 {
        enum object_type type;
        struct object_info oi = OBJECT_INFO_INIT;
 
        oi.typep = &type;
        oi.sizep = sizep;
-       if (sha1_object_info_extended(sha1, &oi,
-                                     OBJECT_INFO_LOOKUP_REPLACE) < 0)
+       if (oid_object_info_extended(oid, &oi,
+                                    OBJECT_INFO_LOOKUP_REPLACE) < 0)
                return -1;
        return type;
 }
@@ -1301,24 +1328,27 @@ int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
 static void *read_object(const unsigned char *sha1, enum object_type *type,
                         unsigned long *size)
 {
+       struct object_id oid;
        struct object_info oi = OBJECT_INFO_INIT;
        void *content;
        oi.typep = type;
        oi.sizep = size;
        oi.contentp = &content;
 
-       if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+       hashcpy(oid.hash, sha1);
+
+       if (oid_object_info_extended(&oid, &oi, 0) < 0)
                return NULL;
        return content;
 }
 
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
-                     unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+                       struct object_id *oid)
 {
        struct cached_object *co;
 
-       hash_sha1_file(buf, len, typename(type), sha1);
-       if (has_sha1_file(sha1) || find_cached_object(sha1))
+       hash_object_file(buf, len, type_name(type), oid);
+       if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
                return 0;
        ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
        co = &cached_objects[cached_object_nr++];
@@ -1326,7 +1356,7 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
        co->type = type;
        co->buf = xmalloc(len);
        memcpy(co->buf, buf, len);
-       hashcpy(co->sha1, sha1);
+       hashcpy(co->sha1, oid->hash);
        return 0;
 }
 
@@ -1335,65 +1365,65 @@ int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
  * deal with them should arrange to call read_object() and give error
  * messages themselves.
  */
-void *read_sha1_file_extended(const unsigned char *sha1,
-                             enum object_type *type,
-                             unsigned long *size,
-                             int lookup_replace)
+void *read_object_file_extended(const struct object_id *oid,
+                               enum object_type *type,
+                               unsigned long *size,
+                               int lookup_replace)
 {
        void *data;
        const struct packed_git *p;
        const char *path;
        struct stat st;
-       const unsigned char *repl = lookup_replace ? lookup_replace_object(sha1)
-                                                  : sha1;
+       const struct object_id *repl = lookup_replace ? lookup_replace_object(oid)
+                                                     : oid;
 
        errno = 0;
-       data = read_object(repl, type, size);
+       data = read_object(repl->hash, type, size);
        if (data)
                return data;
 
        if (errno && errno != ENOENT)
-               die_errno("failed to read object %s", sha1_to_hex(sha1));
+               die_errno("failed to read object %s", oid_to_hex(oid));
 
        /* die if we replaced an object with one that does not exist */
-       if (repl != sha1)
+       if (repl != oid)
                die("replacement %s not found for %s",
-                   sha1_to_hex(repl), sha1_to_hex(sha1));
+                   oid_to_hex(repl), oid_to_hex(oid));
 
-       if (!stat_sha1_file(repl, &st, &path))
+       if (!stat_sha1_file(repl->hash, &st, &path))
                die("loose object %s (stored in %s) is corrupt",
-                   sha1_to_hex(repl), path);
+                   oid_to_hex(repl), path);
 
-       if ((p = has_packed_and_bad(repl)) != NULL)
+       if ((p = has_packed_and_bad(repl->hash)) != NULL)
                die("packed object %s (stored in %s) is corrupt",
-                   sha1_to_hex(repl), p->pack_name);
+                   oid_to_hex(repl), p->pack_name);
 
        return NULL;
 }
 
-void *read_object_with_reference(const unsigned char *sha1,
+void *read_object_with_reference(const struct object_id *oid,
                                 const char *required_type_name,
                                 unsigned long *size,
-                                unsigned char *actual_sha1_return)
+                                struct object_id *actual_oid_return)
 {
        enum object_type type, required_type;
        void *buffer;
        unsigned long isize;
-       unsigned char actual_sha1[20];
+       struct object_id actual_oid;
 
        required_type = type_from_string(required_type_name);
-       hashcpy(actual_sha1, sha1);
+       oidcpy(&actual_oid, oid);
        while (1) {
                int ref_length = -1;
                const char *ref_type = NULL;
 
-               buffer = read_sha1_file(actual_sha1, &type, &isize);
+               buffer = read_object_file(&actual_oid, &type, &isize);
                if (!buffer)
                        return NULL;
                if (type == required_type) {
                        *size = isize;
-                       if (actual_sha1_return)
-                               hashcpy(actual_sha1_return, actual_sha1);
+                       if (actual_oid_return)
+                               oidcpy(actual_oid_return, &actual_oid);
                        return buffer;
                }
                /* Handle references */
@@ -1407,32 +1437,32 @@ void *read_object_with_reference(const unsigned char *sha1,
                }
                ref_length = strlen(ref_type);
 
-               if (ref_length + 40 > isize ||
+               if (ref_length + GIT_SHA1_HEXSZ > isize ||
                    memcmp(buffer, ref_type, ref_length) ||
-                   get_sha1_hex((char *) buffer + ref_length, actual_sha1)) {
+                   get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
                        free(buffer);
                        return NULL;
                }
                free(buffer);
                /* Now we have the ID of the referred-to object in
-                * actual_sha1.  Check again. */
+                * actual_oid.  Check again. */
        }
 }
 
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
-                                    const char *type, unsigned char *sha1,
-                                    char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+                                     const char *type, struct object_id *oid,
+                                     char *hdr, int *hdrlen)
 {
-       git_SHA_CTX c;
+       git_hash_ctx c;
 
        /* Generate the header */
        *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
 
        /* Sha1.. */
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, *hdrlen);
-       git_SHA1_Update(&c, buf, len);
-       git_SHA1_Final(sha1, &c);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, *hdrlen);
+       the_hash_algo->update_fn(&c, buf, len);
+       the_hash_algo->final_fn(oid->hash, &c);
 }
 
 /*
@@ -1485,12 +1515,12 @@ static int write_buffer(int fd, const void *buf, size_t len)
        return 0;
 }
 
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
-                   unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+                    struct object_id *oid)
 {
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        int hdrlen = sizeof(hdr);
-       write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
        return 0;
 }
 
@@ -1548,18 +1578,22 @@ static int create_tmpfile(struct strbuf *tmp, const char *filename)
        return fd;
 }
 
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
-                             const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+                             int hdrlen, const void *buf, unsigned long len,
+                             time_t mtime)
 {
        int fd, ret;
        unsigned char compressed[4096];
        git_zstream stream;
-       git_SHA_CTX c;
-       unsigned char parano_sha1[20];
+       git_hash_ctx c;
+       struct object_id parano_oid;
        static struct strbuf tmp_file = STRBUF_INIT;
-       const char *filename = sha1_file_name(sha1);
+       static struct strbuf filename = STRBUF_INIT;
+
+       strbuf_reset(&filename);
+       sha1_file_name(&filename, oid->hash);
 
-       fd = create_tmpfile(&tmp_file, filename);
+       fd = create_tmpfile(&tmp_file, filename.buf);
        if (fd < 0) {
                if (errno == EACCES)
                        return error("insufficient permission for adding an object to repository database %s", get_object_directory());
@@ -1571,14 +1605,14 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
        git_deflate_init(&stream, zlib_compression_level);
        stream.next_out = compressed;
        stream.avail_out = sizeof(compressed);
-       git_SHA1_Init(&c);
+       the_hash_algo->init_fn(&c);
 
        /* First header.. */
        stream.next_in = (unsigned char *)hdr;
        stream.avail_in = hdrlen;
        while (git_deflate(&stream, 0) == Z_OK)
                ; /* nothing */
-       git_SHA1_Update(&c, hdr, hdrlen);
+       the_hash_algo->update_fn(&c, hdr, hdrlen);
 
        /* Then the data itself.. */
        stream.next_in = (void *)buf;
@@ -1586,7 +1620,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
        do {
                unsigned char *in0 = stream.next_in;
                ret = git_deflate(&stream, Z_FINISH);
-               git_SHA1_Update(&c, in0, stream.next_in - in0);
+               the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
                if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
                        die("unable to write sha1 file");
                stream.next_out = compressed;
@@ -1594,13 +1628,16 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
        } while (ret == Z_OK);
 
        if (ret != Z_STREAM_END)
-               die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+               die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+                   ret);
        ret = git_deflate_end_gently(&stream);
        if (ret != Z_OK)
-               die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
-       git_SHA1_Final(parano_sha1, &c);
-       if (hashcmp(sha1, parano_sha1) != 0)
-               die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+               die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+                   ret);
+       the_hash_algo->final_fn(parano_oid.hash, &c);
+       if (oidcmp(oid, &parano_oid) != 0)
+               die("confused by unstable object source data for %s",
+                   oid_to_hex(oid));
 
        close_sha1_file(fd);
 
@@ -1612,7 +1649,7 @@ static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
                        warning_errno("failed utime() on %s", tmp_file.buf);
        }
 
-       return finalize_object_file(tmp_file.buf, filename);
+       return finalize_object_file(tmp_file.buf, filename.buf);
 }
 
 static int freshen_loose_object(const unsigned char *sha1)
@@ -1633,58 +1670,60 @@ static int freshen_packed_object(const unsigned char *sha1)
        return 1;
 }
 
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+                     struct object_id *oid)
 {
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        int hdrlen = sizeof(hdr);
 
        /* Normally if we have it in the pack then we do not bother writing
         * it out into .git/objects/??/?{38} file.
         */
-       write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
-       if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+       write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+       if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
                return 0;
-       return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+       return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
 }
 
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
-                            struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+                              const char *type, struct object_id *oid,
+                              unsigned flags)
 {
        char *header;
        int hdrlen, status = 0;
 
        /* type string, SP, %lu of the length plus NUL must fit this */
-       hdrlen = strlen(type) + 32;
+       hdrlen = strlen(type) + MAX_HEADER_LEN;
        header = xmalloc(hdrlen);
-       write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+       write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
 
        if (!(flags & HASH_WRITE_OBJECT))
                goto cleanup;
        if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
                goto cleanup;
-       status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+       status = write_loose_object(oid, header, hdrlen, buf, len, 0);
 
 cleanup:
        free(header);
        return status;
 }
 
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
 {
        void *buf;
        unsigned long len;
        enum object_type type;
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
        int hdrlen;
        int ret;
 
-       if (has_loose_object(sha1))
+       if (has_loose_object(oid->hash))
                return 0;
-       buf = read_object(sha1, &type, &len);
+       buf = read_object(oid->hash, &type, &len);
        if (!buf)
-               return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
-       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
-       ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+               return error("cannot read sha1_file for %s", oid_to_hex(oid));
+       hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+       ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
        free(buf);
 
        return ret;
@@ -1692,10 +1731,12 @@ int force_object_loose(const unsigned char *sha1, time_t mtime)
 
 int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
 {
+       struct object_id oid;
        if (!startup_info->have_repository)
                return 0;
-       return sha1_object_info_extended(sha1, NULL,
-                                        flags | OBJECT_INFO_SKIP_CACHED) >= 0;
+       hashcpy(oid.hash, sha1);
+       return oid_object_info_extended(&oid, NULL,
+                                       flags | OBJECT_INFO_SKIP_CACHED) >= 0;
 }
 
 int has_object_file(const struct object_id *oid)
@@ -1752,7 +1793,7 @@ static int index_mem(struct object_id *oid, void *buf, size_t size,
        if ((type == OBJ_BLOB) && path) {
                struct strbuf nbuf = STRBUF_INIT;
                if (convert_to_git(&the_index, path, buf, size, &nbuf,
-                                  get_safe_crlf(flags))) {
+                                  get_conv_flags(flags))) {
                        buf = strbuf_detach(&nbuf, &size);
                        re_allocated = 1;
                }
@@ -1767,9 +1808,9 @@ static int index_mem(struct object_id *oid, void *buf, size_t size,
        }
 
        if (write_object)
-               ret = write_sha1_file(buf, size, typename(type), oid->hash);
+               ret = write_object_file(buf, size, type_name(type), oid);
        else
-               ret = hash_sha1_file(buf, size, typename(type), oid->hash);
+               ret = hash_object_file(buf, size, type_name(type), oid);
        if (re_allocated)
                free(buf);
        return ret;
@@ -1786,14 +1827,14 @@ static int index_stream_convert_blob(struct object_id *oid, int fd,
        assert(would_convert_to_git_filter_fd(path));
 
        convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
-                                get_safe_crlf(flags));
+                                get_conv_flags(flags));
 
        if (write_object)
-               ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
-                                     oid->hash);
+               ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+                                       oid);
        else
-               ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
-                                    oid->hash);
+               ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+                                      oid);
        strbuf_release(&sbuf);
        return ret;
 }
@@ -1861,7 +1902,7 @@ static int index_stream(struct object_id *oid, int fd, size_t size,
                        enum object_type type, const char *path,
                        unsigned flags)
 {
-       return index_bulk_checkin(oid->hash, fd, size, type, path, flags);
+       return index_bulk_checkin(oid, fd, size, type, path, flags);
 }
 
 int index_fd(struct object_id *oid, int fd, struct stat *st,
@@ -1907,8 +1948,8 @@ int index_path(struct object_id *oid, const char *path, struct stat *st, unsigne
                if (strbuf_readlink(&sb, path, st->st_size))
                        return error_errno("readlink(\"%s\")", path);
                if (!(flags & HASH_WRITE_OBJECT))
-                       hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
-               else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+                       hash_object_file(sb.buf, sb.len, blob_type, oid);
+               else if (write_object_file(sb.buf, sb.len, blob_type, oid))
                        rc = error("%s: failed to insert into database", path);
                strbuf_release(&sb);
                break;
@@ -1935,14 +1976,14 @@ int read_pack_header(int fd, struct pack_header *header)
        return 0;
 }
 
-void assert_sha1_type(const unsigned char *sha1, enum object_type expect)
+void assert_oid_type(const struct object_id *oid, enum object_type expect)
 {
-       enum object_type type = sha1_object_info(sha1, NULL);
+       enum object_type type = oid_object_info(oid, NULL);
        if (type < 0)
-               die("%s is not a valid object", sha1_to_hex(sha1));
+               die("%s is not a valid object", oid_to_hex(oid));
        if (type != expect)
-               die("%s is not a valid '%s' object", sha1_to_hex(sha1),
-                   typename(expect));
+               die("%s is not a valid '%s' object", oid_to_hex(oid),
+                   type_name(expect));
 }
 
 int for_each_file_in_obj_subdir(unsigned int subdir_nr,
@@ -2093,14 +2134,14 @@ static int check_stream_sha1(git_zstream *stream,
                             const char *path,
                             const unsigned char *expected_sha1)
 {
-       git_SHA_CTX c;
+       git_hash_ctx c;
        unsigned char real_sha1[GIT_MAX_RAWSZ];
        unsigned char buf[4096];
        unsigned long total_read;
        int status = Z_OK;
 
-       git_SHA1_Init(&c);
-       git_SHA1_Update(&c, hdr, stream->total_out);
+       the_hash_algo->init_fn(&c);
+       the_hash_algo->update_fn(&c, hdr, stream->total_out);
 
        /*
         * We already read some bytes into hdr, but the ones up to the NUL
@@ -2119,7 +2160,7 @@ static int check_stream_sha1(git_zstream *stream,
                if (size - total_read < stream->avail_out)
                        stream->avail_out = size - total_read;
                status = git_inflate(stream, Z_FINISH);
-               git_SHA1_Update(&c, buf, stream->next_out - buf);
+               the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
                total_read += stream->next_out - buf;
        }
        git_inflate_end(stream);
@@ -2134,7 +2175,7 @@ static int check_stream_sha1(git_zstream *stream,
                return -1;
        }
 
-       git_SHA1_Final(real_sha1, &c);
+       the_hash_algo->final_fn(real_sha1, &c);
        if (hashcmp(expected_sha1, real_sha1)) {
                error("sha1 mismatch for %s (expected %s)", path,
                      sha1_to_hex(expected_sha1));
@@ -2145,7 +2186,7 @@ static int check_stream_sha1(git_zstream *stream,
 }
 
 int read_loose_object(const char *path,
-                     const unsigned char *expected_sha1,
+                     const struct object_id *expected_oid,
                      enum object_type *type,
                      unsigned long *size,
                      void **contents)
@@ -2154,7 +2195,7 @@ int read_loose_object(const char *path,
        void *map = NULL;
        unsigned long mapsize;
        git_zstream stream;
-       char hdr[32];
+       char hdr[MAX_HEADER_LEN];
 
        *contents = NULL;
 
@@ -2177,19 +2218,19 @@ int read_loose_object(const char *path,
        }
 
        if (*type == OBJ_BLOB) {
-               if (check_stream_sha1(&stream, hdr, *size, path, expected_sha1) < 0)
+               if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
                        goto out;
        } else {
-               *contents = unpack_sha1_rest(&stream, hdr, *size, expected_sha1);
+               *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
                if (!*contents) {
                        error("unable to unpack contents of %s", path);
                        git_inflate_end(&stream);
                        goto out;
                }
-               if (check_sha1_signature(expected_sha1, *contents,
-                                        *size, typename(*type))) {
+               if (check_object_signature(expected_oid, *contents,
+                                        *size, type_name(*type))) {
                        error("sha1 mismatch for %s (expected %s)", path,
-                             sha1_to_hex(expected_sha1));
+                             oid_to_hex(expected_oid));
                        free(*contents);
                        goto out;
                }
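
The hunks above replace hard-coded git_SHA1_* calls with git's pluggable hash-function vtable. As a rough illustration of that pattern (a sketch, not part of this patch; it assumes cache.h is included and the caller supplies the buffer), hashing an in-memory buffer through the abstraction looks like this:

    #include "cache.h"

    /*
     * Sketch only: hash a buffer via the the_hash_algo vtable instead of
     * calling git_SHA1_Init/Update/Final directly, mirroring what the
     * patch does for check_stream_sha1().
     */
    static void hash_buffer(const void *buf, size_t len,
                            unsigned char out[GIT_MAX_RAWSZ])
    {
            git_hash_ctx c;

            the_hash_algo->init_fn(&c);              /* was git_SHA1_Init()   */
            the_hash_algo->update_fn(&c, buf, len);  /* was git_SHA1_Update() */
            the_hash_algo->final_fn(out, &c);        /* was git_SHA1_Final()  */
    }
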
index 611c7d24ddee678470ba74cb3c2ca669a778b44a..39e911c8bab21535ea6aef88c612cd282788e495 100644 (file)
@@ -238,7 +238,7 @@ static int finish_object_disambiguation(struct disambiguate_state *ds,
 
 static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
 {
-       int kind = sha1_object_info(oid->hash, NULL);
+       int kind = oid_object_info(oid, NULL);
        return kind == OBJ_COMMIT;
 }
 
@@ -247,7 +247,7 @@ static int disambiguate_committish_only(const struct object_id *oid, void *cb_da
        struct object *obj;
        int kind;
 
-       kind = sha1_object_info(oid->hash, NULL);
+       kind = oid_object_info(oid, NULL);
        if (kind == OBJ_COMMIT)
                return 1;
        if (kind != OBJ_TAG)
@@ -262,7 +262,7 @@ static int disambiguate_committish_only(const struct object_id *oid, void *cb_da
 
 static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
 {
-       int kind = sha1_object_info(oid->hash, NULL);
+       int kind = oid_object_info(oid, NULL);
        return kind == OBJ_TREE;
 }
 
@@ -271,7 +271,7 @@ static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_
        struct object *obj;
        int kind;
 
-       kind = sha1_object_info(oid->hash, NULL);
+       kind = oid_object_info(oid, NULL);
        if (kind == OBJ_TREE || kind == OBJ_COMMIT)
                return 1;
        if (kind != OBJ_TAG)
@@ -286,7 +286,7 @@ static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_
 
 static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
 {
-       int kind = sha1_object_info(oid->hash, NULL);
+       int kind = oid_object_info(oid, NULL);
        return kind == OBJ_BLOB;
 }
 
@@ -365,7 +365,7 @@ static int show_ambiguous_object(const struct object_id *oid, void *data)
        if (ds->fn && !ds->fn(oid, ds->cb_data))
                return 0;
 
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        if (type == OBJ_COMMIT) {
                struct commit *commit = lookup_commit(oid);
                if (commit) {
@@ -380,8 +380,8 @@ static int show_ambiguous_object(const struct object_id *oid, void *data)
        }
 
        advise("  %s %s%s",
-              find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
-              typename(type) ? typename(type) : "unknown type",
+              find_unique_abbrev(oid, DEFAULT_ABBREV),
+              type_name(type) ? type_name(type) : "unknown type",
               desc.buf);
 
        strbuf_release(&desc);
@@ -542,20 +542,20 @@ static void find_abbrev_len_for_pack(struct packed_git *p,
        /*
         * first is now the position in the packfile where we would insert
         * mad->hash if it does not exist (or the position of mad->hash if
-        * it does exist). Hence, we consider a maximum of three objects
+        * it does exist). Hence, we consider a maximum of two objects
         * nearby for the abbreviation length.
         */
        mad->init_len = 0;
        if (!match) {
-               nth_packed_object_oid(&oid, p, first);
-               extend_abbrev_len(&oid, mad);
+               if (nth_packed_object_oid(&oid, p, first))
+                       extend_abbrev_len(&oid, mad);
        } else if (first < num - 1) {
-               nth_packed_object_oid(&oid, p, first + 1);
-               extend_abbrev_len(&oid, mad);
+               if (nth_packed_object_oid(&oid, p, first + 1))
+                       extend_abbrev_len(&oid, mad);
        }
        if (first > 0) {
-               nth_packed_object_oid(&oid, p, first - 1);
-               extend_abbrev_len(&oid, mad);
+               if (nth_packed_object_oid(&oid, p, first - 1))
+                       extend_abbrev_len(&oid, mad);
        }
        mad->init_len = mad->cur_len;
 }
@@ -569,7 +569,7 @@ static void find_abbrev_len_packed(struct min_abbrev_data *mad)
                find_abbrev_len_for_pack(p, mad);
 }
 
-int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
+int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
 {
        struct disambiguate_state ds;
        struct min_abbrev_data mad;
@@ -596,14 +596,14 @@ int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
                        len = FALLBACK_DEFAULT_ABBREV;
        }
 
-       sha1_to_hex_r(hex, sha1);
+       oid_to_hex_r(hex, oid);
        if (len == GIT_SHA1_HEXSZ || !len)
                return GIT_SHA1_HEXSZ;
 
        mad.init_len = len;
        mad.cur_len = len;
        mad.hex = hex;
-       mad.hash = sha1;
+       mad.hash = oid->hash;
 
        find_abbrev_len_packed(&mad);
 
@@ -621,13 +621,13 @@ int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
        return mad.cur_len;
 }
 
-const char *find_unique_abbrev(const unsigned char *sha1, int len)
+const char *find_unique_abbrev(const struct object_id *oid, int len)
 {
        static int bufno;
        static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
        char *hex = hexbuffer[bufno];
        bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
-       find_unique_abbrev_r(hex, sha1, len);
+       find_unique_abbrev_r(hex, oid, len);
        return hex;
 }
 
@@ -901,8 +901,8 @@ struct object *peel_to_type(const char *name, int namelen,
                        if (name)
                                error("%.*s: expected %s type, but the object "
                                      "dereferences to %s type",
-                                     namelen, name, typename(expected_type),
-                                     typename(o->type));
+                                     namelen, name, type_name(expected_type),
+                                     type_name(o->type));
                        return NULL;
                }
        }
@@ -1529,8 +1529,7 @@ static void diagnose_invalid_oid_path(const char *prefix,
        if (is_missing_file_error(errno)) {
                char *fullname = xstrfmt("%s%s", prefix, filename);
 
-               if (!get_tree_entry(tree_oid->hash, fullname,
-                                   oid.hash, &mode)) {
+               if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
                        die("Path '%s' exists, but not '%s'.\n"
                            "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
                            fullname,
@@ -1722,8 +1721,8 @@ static int get_oid_with_context_1(const char *name,
                                        filename, oid->hash, &oc->symlink_path,
                                        &oc->mode);
                        } else {
-                               ret = get_tree_entry(tree_oid.hash, filename,
-                                                    oid->hash, &oc->mode);
+                               ret = get_tree_entry(&tree_oid, filename, oid,
+                                                    &oc->mode);
                                if (ret && only_to_die) {
                                        diagnose_invalid_oid_path(prefix,
                                                                   filename,
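
The sha1_name.c hunks above move the disambiguation and abbreviation helpers from raw 20-byte hash buffers to struct object_id. A minimal caller-side sketch of the converted interface (not from the patch; it assumes a valid, already-parsed oid in an initialized repository):

    #include "cache.h"

    /* Sketch: look up an object's type and print its abbreviated id. */
    static void describe_object(const struct object_id *oid)
    {
            enum object_type type = oid_object_info(oid, NULL); /* was sha1_object_info(oid->hash, NULL) */

            printf("%s %s\n",
                   find_unique_abbrev(oid, DEFAULT_ABBREV),     /* abbreviation now takes the oid directly */
                   type_name(type) ? type_name(type) : "unknown type");
    }
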
index a8c272927842901190b07098911ef785a458bcf3..41e1c3fd3f787e04d6e4fa9eb7c56b617f1c5fa5 100644 (file)
@@ -1,9 +1,9 @@
 /* Plumbing with collision-detecting SHA1 code */
 
-#ifdef DC_SHA1_SUBMODULE
-#include "sha1collisiondetection/lib/sha1.h"
-#elif defined(DC_SHA1_EXTERNAL)
+#ifdef DC_SHA1_EXTERNAL
 #include <sha1dc/sha1.h>
+#elif defined(DC_SHA1_SUBMODULE)
+#include "sha1collisiondetection/lib/sha1.h"
 #else
 #include "sha1dc/sha1.h"
 #endif
index 284d04d67f885d8905130e138c45cc4ff4fbdc30..3eb8ff1b43db284bddac643c270e2148114c4bbf 100644 (file)
@@ -305,17 +305,17 @@ void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce
 }
 
 void replace_index_entry_in_base(struct index_state *istate,
-                                struct cache_entry *old,
-                                struct cache_entry *new)
+                                struct cache_entry *old_entry,
+                                struct cache_entry *new_entry)
 {
-       if (old->index &&
+       if (old_entry->index &&
            istate->split_index &&
            istate->split_index->base &&
-           old->index <= istate->split_index->base->cache_nr) {
-               new->index = old->index;
-               if (old != istate->split_index->base->cache[new->index - 1])
-                       free(istate->split_index->base->cache[new->index - 1]);
-               istate->split_index->base->cache[new->index - 1] = new;
+           old_entry->index <= istate->split_index->base->cache_nr) {
+               new_entry->index = old_entry->index;
+               if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
+                       free(istate->split_index->base->cache[new_entry->index - 1]);
+               istate->split_index->base->cache[new_entry->index - 1] = new_entry;
        }
 }
 
index df91c1bda8117fe7d0f25a0aaedce79370801ce0..43d66826eb712b9a9b6872458527266bd9818146 100644 (file)
@@ -21,7 +21,7 @@ struct split_index *init_split_index(struct index_state *istate);
 void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce);
 void replace_index_entry_in_base(struct index_state *istate,
                                 struct cache_entry *old,
-                                struct cache_entry *new);
+                                struct cache_entry *new_entry);
 int read_link_extension(struct index_state *istate,
                        const void *data, unsigned long sz);
 int write_link_extension(struct strbuf *sb,
index 1df674e9194ee6d5cd5386f477745ff6639b7b65..83d05024e6718f4b9481e1782ed84117d5e7598a 100644 (file)
--- a/strbuf.c
+++ b/strbuf.c
@@ -1,5 +1,6 @@
 #include "cache.h"
 #include "refs.h"
+#include "string-list.h"
 #include "utf8.h"
 
 int starts_with(const char *str, const char *prefix)
@@ -95,6 +96,7 @@ void strbuf_trim(struct strbuf *sb)
        strbuf_rtrim(sb);
        strbuf_ltrim(sb);
 }
+
 void strbuf_rtrim(struct strbuf *sb)
 {
        while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1]))
@@ -102,6 +104,13 @@ void strbuf_rtrim(struct strbuf *sb)
        sb->buf[sb->len] = '\0';
 }
 
+void strbuf_trim_trailing_dir_sep(struct strbuf *sb)
+{
+       while (sb->len > 0 && is_dir_sep((unsigned char)sb->buf[sb->len - 1]))
+               sb->len--;
+       sb->buf[sb->len] = '\0';
+}
+
 void strbuf_ltrim(struct strbuf *sb)
 {
        char *b = sb->buf;
@@ -163,6 +172,21 @@ struct strbuf **strbuf_split_buf(const char *str, size_t slen,
        return ret;
 }
 
+void strbuf_add_separated_string_list(struct strbuf *str,
+                                     const char *sep,
+                                     struct string_list *slist)
+{
+       struct string_list_item *item;
+       int sep_needed = 0;
+
+       for_each_string_list_item(item, slist) {
+               if (sep_needed)
+                       strbuf_addstr(str, sep);
+               strbuf_addstr(str, item->string);
+               sep_needed = 1;
+       }
+}
+
 void strbuf_list_free(struct strbuf **sbs)
 {
        struct strbuf **s = sbs;
@@ -612,14 +636,18 @@ ssize_t strbuf_read_file(struct strbuf *sb, const char *path, size_t hint)
 {
        int fd;
        ssize_t len;
+       int saved_errno;
 
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return -1;
        len = strbuf_read(sb, fd, hint);
+       saved_errno = errno;
        close(fd);
-       if (len < 0)
+       if (len < 0) {
+               errno = saved_errno;
                return -1;
+       }
 
        return len;
 }
@@ -869,12 +897,12 @@ void strbuf_addftime(struct strbuf *sb, const char *fmt, const struct tm *tm,
        strbuf_setlen(sb, sb->len + len);
 }
 
-void strbuf_add_unique_abbrev(struct strbuf *sb, const unsigned char *sha1,
+void strbuf_add_unique_abbrev(struct strbuf *sb, const struct object_id *oid,
                              int abbrev_len)
 {
        int r;
        strbuf_grow(sb, GIT_SHA1_HEXSZ + 1);
-       r = find_unique_abbrev_r(sb->buf + sb->len, sha1, abbrev_len);
+       r = find_unique_abbrev_r(sb->buf + sb->len, oid, abbrev_len);
        strbuf_setlen(sb, sb->len + r);
 }
 
index 14c8c10d66b9aaa2d8f0c109cf0dd668701cb2eb..c4de5e4588bd4326d363b9387599cd94e29d2f8f 100644 (file)
--- a/strbuf.h
+++ b/strbuf.h
@@ -1,6 +1,8 @@
 #ifndef STRBUF_H
 #define STRBUF_H
 
+struct string_list;
+
 /**
  * strbuf's are meant to be used with all the usual C string and memory
  * APIs. Given that the length of the buffer is known, it's often better to
@@ -70,6 +72,12 @@ struct strbuf {
 extern char strbuf_slopbuf[];
 #define STRBUF_INIT  { .alloc = 0, .len = 0, .buf = strbuf_slopbuf }
 
+/*
+ * Predeclare this here, since cache.h includes this file before it defines the
+ * struct.
+ */
+struct object_id;
+
 /**
  * Life Cycle Functions
  * --------------------
@@ -179,6 +187,9 @@ extern void strbuf_trim(struct strbuf *);
 extern void strbuf_rtrim(struct strbuf *);
 extern void strbuf_ltrim(struct strbuf *);
 
+/* Strip trailing directory separators */
+extern void strbuf_trim_trailing_dir_sep(struct strbuf *);
+
 /**
  * Replace the contents of the strbuf with a reencoded form.  Returns -1
  * on error, 0 on success.
@@ -528,6 +539,20 @@ static inline struct strbuf **strbuf_split(const struct strbuf *sb,
        return strbuf_split_max(sb, terminator, 0);
 }
 
+/*
+ * Adds all strings of a string list to the strbuf, separated by the given
+ * separator.  For example, if sep is
+ *   ', '
+ * and slist contains
+ *   ['element1', 'element2', ..., 'elementN'],
+ * then write:
+ *   'element1, element2, ..., elementN'
+ * to str.  If only one element, just write "element1" to str.
+ */
+extern void strbuf_add_separated_string_list(struct strbuf *str,
+                                            const char *sep,
+                                            struct string_list *slist);
+
 /**
  * Free a NULL-terminated list of strbufs (for example, the return
  * values of the strbuf_split*() functions).
@@ -539,7 +564,7 @@ extern void strbuf_list_free(struct strbuf **);
  * the strbuf `sb`.
  */
 extern void strbuf_add_unique_abbrev(struct strbuf *sb,
-                                    const unsigned char *sha1,
+                                    const struct object_id *oid,
                                     int abbrev_len);
 
 /**
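
The strbuf changes above add two small helpers: strbuf_add_separated_string_list() joins a string_list with a separator, and strbuf_trim_trailing_dir_sep() strips trailing directory separators. A usage sketch (not part of the patch; it assumes string-list.h alongside strbuf.h):

    #include "cache.h"
    #include "string-list.h"

    /* Sketch: join two strings with ", " and trim a path's trailing slashes. */
    static void demo(void)
    {
            struct string_list list = STRING_LIST_INIT_DUP;
            struct strbuf joined = STRBUF_INIT;
            struct strbuf path = STRBUF_INIT;

            string_list_append(&list, "element1");
            string_list_append(&list, "element2");
            strbuf_add_separated_string_list(&joined, ", ", &list);
            /* joined.buf is now "element1, element2" */

            strbuf_addstr(&path, "some/dir///");
            strbuf_trim_trailing_dir_sep(&path);
            /* path.buf is now "some/dir" */

            string_list_clear(&list, 0);
            strbuf_release(&joined);
            strbuf_release(&path);
    }
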
index 5892b50bd89c3c66bdb541ca0100f0671834a542..46fabee3aa43840fbe93dfd0e12eff85d5e81cb2 100644 (file)
@@ -14,7 +14,7 @@ enum input_source {
 
 typedef int (*open_istream_fn)(struct git_istream *,
                               struct object_info *,
-                              const unsigned char *,
+                              const struct object_id *,
                               enum object_type *);
 typedef int (*close_istream_fn)(struct git_istream *);
 typedef ssize_t (*read_istream_fn)(struct git_istream *, char *, size_t);
@@ -27,7 +27,7 @@ struct stream_vtbl {
 #define open_method_decl(name) \
        int open_istream_ ##name \
        (struct git_istream *st, struct object_info *oi, \
-        const unsigned char *sha1, \
+        const struct object_id *oid, \
         enum object_type *type)
 
 #define close_method_decl(name) \
@@ -105,7 +105,7 @@ ssize_t read_istream(struct git_istream *st, void *buf, size_t sz)
        return st->vtbl->read(st, buf, sz);
 }
 
-static enum input_source istream_source(const unsigned char *sha1,
+static enum input_source istream_source(const struct object_id *oid,
                                        enum object_type *type,
                                        struct object_info *oi)
 {
@@ -114,7 +114,7 @@ static enum input_source istream_source(const unsigned char *sha1,
 
        oi->typep = type;
        oi->sizep = &size;
-       status = sha1_object_info_extended(sha1, oi, 0);
+       status = oid_object_info_extended(oid, oi, 0);
        if (status < 0)
                return stream_error;
 
@@ -130,14 +130,14 @@ static enum input_source istream_source(const unsigned char *sha1,
        }
 }
 
-struct git_istream *open_istream(const unsigned char *sha1,
+struct git_istream *open_istream(const struct object_id *oid,
                                 enum object_type *type,
                                 unsigned long *size,
                                 struct stream_filter *filter)
 {
        struct git_istream *st;
        struct object_info oi = OBJECT_INFO_INIT;
-       const unsigned char *real = lookup_replace_object(sha1);
+       const struct object_id *real = lookup_replace_object(oid);
        enum input_source src = istream_source(real, type, &oi);
 
        if (src < 0)
@@ -335,7 +335,7 @@ static struct stream_vtbl loose_vtbl = {
 
 static open_method_decl(loose)
 {
-       st->u.loose.mapped = map_sha1_file(sha1, &st->u.loose.mapsize);
+       st->u.loose.mapped = map_sha1_file(oid->hash, &st->u.loose.mapsize);
        if (!st->u.loose.mapped)
                return -1;
        if ((unpack_sha1_header(&st->z,
@@ -486,7 +486,7 @@ static struct stream_vtbl incore_vtbl = {
 
 static open_method_decl(incore)
 {
-       st->u.incore.buf = read_sha1_file_extended(sha1, type, &st->size, 0);
+       st->u.incore.buf = read_object_file_extended(oid, type, &st->size, 0);
        st->u.incore.read_ptr = 0;
        st->vtbl = &incore_vtbl;
 
@@ -507,7 +507,7 @@ int stream_blob_to_fd(int fd, const struct object_id *oid, struct stream_filter
        ssize_t kept = 0;
        int result = -1;
 
-       st = open_istream(oid->hash, &type, &sz, filter);
+       st = open_istream(oid, &type, &sz, filter);
        if (!st) {
                if (filter)
                        free_stream_filter(filter);
index 73c1d156b352898c9b5661a3e480f579b80c5a00..32f46267710b4e88cd0fc90e1a5a6f6388361c61 100644 (file)
@@ -8,7 +8,7 @@
 /* opaque */
 struct git_istream;
 
-extern struct git_istream *open_istream(const unsigned char *, enum object_type *, unsigned long *, struct stream_filter *);
+extern struct git_istream *open_istream(const struct object_id *, enum object_type *, unsigned long *, struct stream_filter *);
 extern int close_istream(struct git_istream *);
 extern ssize_t read_istream(struct git_istream *, void *, size_t);
 
index 49701998c9bd663073244e64c4edd7e80991a3ef..71b18ad5af24b92421719c3e1e4d5b7154da5926 100644 (file)
@@ -73,8 +73,8 @@ static inline struct child_process *subprocess_get_child_process(
 }
 
 /*
- * Perform the version and capability negotiation as described in the "Long
- * Running Filter Process" section of the gitattributes documentation using the
+ * Perform the version and capability negotiation as described in the
+ * "Handshake" section of long-running-process-protocol.txt using the
  * given requested versions and capabilities. The "versions" and "capabilities"
  * parameters are arrays terminated by a 0 or blank struct.
  *
index 602ba8ca8b8455df9b34e2990397c838d542569f..3f2075764feb53fc8c981aab39d16c6b191cb6d8 100644 (file)
@@ -520,7 +520,7 @@ static const struct submodule *config_from(struct submodule_cache *cache,
        if (submodule)
                goto out;
 
-       config = read_sha1_file(oid.hash, &type, &config_size);
+       config = read_object_file(&oid, &type, &config_size);
        if (!config || type != OBJ_BLOB)
                goto out;
 
index 47ddc9b2739bf52d9263ea9cd90edde6e9d1c738..a05c544e8dad2600ba196e43fd58b953c6ba3d1b 100644 (file)
@@ -540,9 +540,9 @@ static void show_submodule_header(struct diff_options *o, const char *path,
 
 output_header:
        strbuf_addf(&sb, "Submodule %s ", path);
-       strbuf_add_unique_abbrev(&sb, one->hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(&sb, one, DEFAULT_ABBREV);
        strbuf_addstr(&sb, (fast_backward || fast_forward) ? ".." : "...");
-       strbuf_add_unique_abbrev(&sb, two->hash, DEFAULT_ABBREV);
+       strbuf_add_unique_abbrev(&sb, two, DEFAULT_ABBREV);
        if (message)
                strbuf_addf(&sb, " %s\n", message);
        else
@@ -590,7 +590,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
                struct object_id *one, struct object_id *two,
                unsigned dirty_submodule)
 {
-       const struct object_id *old = the_hash_algo->empty_tree, *new = the_hash_algo->empty_tree;
+       const struct object_id *old_oid = the_hash_algo->empty_tree, *new_oid = the_hash_algo->empty_tree;
        struct commit *left = NULL, *right = NULL;
        struct commit_list *merge_bases = NULL;
        struct child_process cp = CHILD_PROCESS_INIT;
@@ -605,9 +605,9 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
                goto done;
 
        if (left)
-               old = one;
+               old_oid = one;
        if (right)
-               new = two;
+               new_oid = two;
 
        cp.git_cmd = 1;
        cp.dir = path;
@@ -630,7 +630,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
                argv_array_pushf(&cp.args, "--dst-prefix=%s%s/",
                                 o->b_prefix, path);
        }
-       argv_array_push(&cp.args, oid_to_hex(old));
+       argv_array_push(&cp.args, oid_to_hex(old_oid));
        /*
         * If the submodule has modified content, we will diff against the
         * work tree, under the assumption that the user has asked for the
@@ -638,7 +638,7 @@ void show_submodule_inline_diff(struct diff_options *o, const char *path,
         * haven't yet been committed to the submodule yet.
         */
        if (!(dirty_submodule & DIRTY_SUBMODULE_MODIFIED))
-               argv_array_push(&cp.args, oid_to_hex(new));
+               argv_array_push(&cp.args, oid_to_hex(new_oid));
 
        prepare_submodule_repo_env(&cp.env_array);
        if (start_command(&cp))
@@ -817,7 +817,7 @@ static int check_has_commit(const struct object_id *oid, void *data)
 {
        struct has_commit_data *cb = data;
 
-       enum object_type type = sha1_object_info(oid->hash, NULL);
+       enum object_type type = oid_object_info(oid, NULL);
 
        switch (type) {
        case OBJ_COMMIT:
@@ -831,7 +831,7 @@ static int check_has_commit(const struct object_id *oid, void *data)
                return 0;
        default:
                die(_("submodule entry '%s' (%s) is a %s, not a commit"),
-                   cb->path, oid_to_hex(oid), typename(type));
+                   cb->path, oid_to_hex(oid), type_name(type));
        }
 }
 
@@ -1578,8 +1578,8 @@ static void submodule_reset_index(const char *path)
  * pass NULL for old or new respectively.
  */
 int submodule_move_head(const char *path,
-                        const char *old,
-                        const char *new,
+                        const char *old_head,
+                        const char *new_head,
                         unsigned flags)
 {
        int ret = 0;
@@ -1600,7 +1600,7 @@ int submodule_move_head(const char *path,
        else
                error_code_ptr = NULL;
 
-       if (old && !is_submodule_populated_gently(path, error_code_ptr))
+       if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
                return 0;
 
        sub = submodule_from_path(&null_oid, path);
@@ -1608,14 +1608,14 @@ int submodule_move_head(const char *path,
        if (!sub)
                die("BUG: could not get submodule information for '%s'", path);
 
-       if (old && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
+       if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
                /* Check if the submodule has a dirty index. */
                if (submodule_has_dirty_index(sub))
                        return error(_("submodule '%s' has dirty index"), path);
        }
 
        if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
-               if (old) {
+               if (old_head) {
                        if (!submodule_uses_gitfile(path))
                                absorb_git_dir_into_superproject("", path,
                                        ABSORB_GITDIR_RECURSE_SUBMODULES);
@@ -1629,7 +1629,7 @@ int submodule_move_head(const char *path,
                        submodule_reset_index(path);
                }
 
-               if (old && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
+               if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
                        char *gitdir = xstrfmt("%s/modules/%s",
                                    get_git_common_dir(), sub->name);
                        connect_work_tree_and_git_dir(path, gitdir);
@@ -1658,9 +1658,9 @@ int submodule_move_head(const char *path,
                argv_array_push(&cp.args, "-m");
 
        if (!(flags & SUBMODULE_MOVE_HEAD_FORCE))
-               argv_array_push(&cp.args, old ? old : EMPTY_TREE_SHA1_HEX);
+               argv_array_push(&cp.args, old_head ? old_head : EMPTY_TREE_SHA1_HEX);
 
-       argv_array_push(&cp.args, new ? new : EMPTY_TREE_SHA1_HEX);
+       argv_array_push(&cp.args, new_head ? new_head : EMPTY_TREE_SHA1_HEX);
 
        if (run_command(&cp)) {
                ret = -1;
@@ -1668,7 +1668,7 @@ int submodule_move_head(const char *path,
        }
 
        if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
-               if (new) {
+               if (new_head) {
                        child_process_init(&cp);
                        /* also set the HEAD accordingly */
                        cp.git_cmd = 1;
@@ -1677,7 +1677,7 @@ int submodule_move_head(const char *path,
 
                        prepare_submodule_repo_env(&cp.env_array);
                        argv_array_pushl(&cp.args, "update-ref", "HEAD",
-                                        "--no-deref", new, NULL);
+                                        "--no-deref", new_head, NULL);
 
                        if (run_command(&cp)) {
                                ret = -1;
index b9b7ef0030a732bb81745641413f9f013588c91d..9589f131273d4f04605c8dbf7dcce05aaea606ad 100644 (file)
@@ -117,7 +117,7 @@ int submodule_to_gitdir(struct strbuf *buf, const char *submodule);
 #define SUBMODULE_MOVE_HEAD_FORCE   (1<<1)
 extern int submodule_move_head(const char *path,
                               const char *old,
-                              const char *new,
+                              const char *new_head,
                               unsigned flags);
 
 /*
index 1a1361a8063b8e247b6a92b83b6e8876953f95bd..24ddebfabf97be1251452fe5b6bba847597431fd 100644 (file)
--- a/t/README
+++ b/t/README
@@ -84,9 +84,10 @@ appropriately before running "make".
 
 -x::
        Turn on shell tracing (i.e., `set -x`) during the tests
-       themselves. Implies `--verbose`. Note that in non-bash shells,
-       this can cause failures in some tests which redirect and test
-       the output of shell functions. Use with caution.
+       themselves. Implies `--verbose`.
+       Ignored in test scripts that set the variable 'test_untraceable'
+       to a non-empty value, unless it's run with a Bash version
+       supporting BASH_XTRACEFD, i.e. v4.1 or later.
 
 -d::
 --debug::
@@ -452,6 +453,22 @@ Don't:
    causing the next test to start in an unexpected directory.  Do so
    inside a subshell if necessary.
 
+ - save and verify the standard error of compound commands, i.e. group
+   commands, subshells, and shell functions (except test helper
+   functions like 'test_must_fail') like this:
+
+     ( cd dir && git cmd ) 2>error &&
+     test_cmp expect error
+
+   When running the test with '-x' tracing, then the trace of commands
+   executed in the compound command will be included in standard error
+   as well, quite possibly throwing off the subsequent checks examining
+   the output.  Instead, save only the relevant git command's standard
+   error:
+
+     ( cd dir && git cmd 2>../error ) &&
+     test_cmp expect error
+
  - Break the TAP output
 
    The raw output from your test may be interpreted by a TAP harness. TAP
index f752532ffbcd130c3c3cb25ae20da41d4f74ae68..d7c55c2355ec7067e15655e99df8077344ed78fa 100644 (file)
@@ -54,8 +54,8 @@ int cmd_main(int ac, const char **av)
                printf("no untracked cache\n");
                return 0;
        }
-       printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
-       printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
+       printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid));
+       printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid));
        printf("exclude_per_dir %s\n", uc->exclude_per_dir);
        printf("flags %08x\n", uc->dir_flags);
        if (uc->root)
index 1145d5167115a761d319b1e205500e765cd090cd..9ae9281c071254019ccca3486b3a6762d9c0085f 100644 (file)
@@ -1,5 +1,6 @@
 #include "git-compat-util.h"
 #include "hashmap.h"
+#include "strbuf.h"
 
 struct test_entry
 {
@@ -29,11 +30,12 @@ static int test_entry_cmp(const void *cmp_data,
                return strcmp(e1->key, key ? key : e2->key);
 }
 
-static struct test_entry *alloc_test_entry(int hash, char *key, int klen,
-               char *value, int vlen)
+static struct test_entry *alloc_test_entry(unsigned int hash,
+                                          char *key, char *value)
 {
-       struct test_entry *entry = malloc(sizeof(struct test_entry) + klen
-                       + vlen + 2);
+       size_t klen = strlen(key);
+       size_t vlen = strlen(value);
+       struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2));
        hashmap_entry_init(entry, hash);
        memcpy(entry->key, key, klen + 1);
        memcpy(entry->key + klen + 1, value, vlen + 1);
@@ -85,11 +87,11 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
        unsigned int *hashes;
        unsigned int i, j;
 
-       entries = malloc(TEST_SIZE * sizeof(struct test_entry *));
-       hashes = malloc(TEST_SIZE * sizeof(int));
+       ALLOC_ARRAY(entries, TEST_SIZE);
+       ALLOC_ARRAY(hashes, TEST_SIZE);
        for (i = 0; i < TEST_SIZE; i++) {
-               snprintf(buf, sizeof(buf), "%i", i);
-               entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0);
+               xsnprintf(buf, sizeof(buf), "%i", i);
+               entries[i] = alloc_test_entry(0, buf, "");
                hashes[i] = hash(method, i, entries[i]->key);
        }
 
@@ -144,7 +146,7 @@ static void perf_hashmap(unsigned int method, unsigned int rounds)
  */
 int cmd_main(int argc, const char **argv)
 {
-       char line[1024];
+       struct strbuf line = STRBUF_INIT;
        struct hashmap map;
        int icase;
 
@@ -153,44 +155,42 @@ int cmd_main(int argc, const char **argv)
        hashmap_init(&map, test_entry_cmp, &icase, 0);
 
        /* process commands from stdin */
-       while (fgets(line, sizeof(line), stdin)) {
+       while (strbuf_getline(&line, stdin) != EOF) {
                char *cmd, *p1 = NULL, *p2 = NULL;
-               int l1 = 0, l2 = 0, hash = 0;
+               unsigned int hash = 0;
                struct test_entry *entry;
 
                /* break line into command and up to two parameters */
-               cmd = strtok(line, DELIM);
+               cmd = strtok(line.buf, DELIM);
                /* ignore empty lines */
                if (!cmd || *cmd == '#')
                        continue;
 
                p1 = strtok(NULL, DELIM);
                if (p1) {
-                       l1 = strlen(p1);
                        hash = icase ? strihash(p1) : strhash(p1);
                        p2 = strtok(NULL, DELIM);
-                       if (p2)
-                               l2 = strlen(p2);
                }
 
-               if (!strcmp("hash", cmd) && l1) {
+               if (!strcmp("hash", cmd) && p1) {
 
                        /* print results of different hash functions */
-                       printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
-                                       strihash(p1), memihash(p1, l1));
+                       printf("%u %u %u %u\n",
+                              strhash(p1), memhash(p1, strlen(p1)),
+                              strihash(p1), memihash(p1, strlen(p1)));
 
-               } else if (!strcmp("add", cmd) && l1 && l2) {
+               } else if (!strcmp("add", cmd) && p1 && p2) {
 
                        /* create entry with key = p1, value = p2 */
-                       entry = alloc_test_entry(hash, p1, l1, p2, l2);
+                       entry = alloc_test_entry(hash, p1, p2);
 
                        /* add to hashmap */
                        hashmap_add(&map, entry);
 
-               } else if (!strcmp("put", cmd) && l1 && l2) {
+               } else if (!strcmp("put", cmd) && p1 && p2) {
 
                        /* create entry with key = p1, value = p2 */
-                       entry = alloc_test_entry(hash, p1, l1, p2, l2);
+                       entry = alloc_test_entry(hash, p1, p2);
 
                        /* add / replace entry */
                        entry = hashmap_put(&map, entry);
@@ -199,7 +199,7 @@ int cmd_main(int argc, const char **argv)
                        puts(entry ? get_value(entry) : "NULL");
                        free(entry);
 
-               } else if (!strcmp("get", cmd) && l1) {
+               } else if (!strcmp("get", cmd) && p1) {
 
                        /* lookup entry in hashmap */
                        entry = hashmap_get_from_hash(&map, hash, p1);
@@ -212,7 +212,7 @@ int cmd_main(int argc, const char **argv)
                                entry = hashmap_get_next(&map, entry);
                        }
 
-               } else if (!strcmp("remove", cmd) && l1) {
+               } else if (!strcmp("remove", cmd) && p1) {
 
                        /* setup static key */
                        struct hashmap_entry key;
@@ -238,7 +238,7 @@ int cmd_main(int argc, const char **argv)
                        printf("%u %u\n", map.tablesize,
                               hashmap_get_size(&map));
 
-               } else if (!strcmp("intern", cmd) && l1) {
+               } else if (!strcmp("intern", cmd) && p1) {
 
                        /* test that strintern works */
                        const char *i1 = strintern(p1);
@@ -252,7 +252,7 @@ int cmd_main(int argc, const char **argv)
                        else
                                printf("%s\n", i1);
 
-               } else if (!strcmp("perfhashmap", cmd) && l1 && l2) {
+               } else if (!strcmp("perfhashmap", cmd) && p1 && p2) {
 
                        perf_hashmap(atoi(p1), atoi(p2));
 
@@ -263,6 +263,7 @@ int cmd_main(int argc, const char **argv)
                }
        }
 
+       strbuf_release(&line);
        hashmap_free(&map, 1);
        return 0;
 }
index d24d157379f30cafdeeb772e82bf5ee4c862272c..153342e44dd11ae357cc299a9214f4c365614a5e 100644 (file)
@@ -54,6 +54,15 @@ int cmd_main(int argc, const char **argv)
        struct child_process proc = CHILD_PROCESS_INIT;
        int jobs;
 
+       if (argc < 3)
+               return 1;
+       while (!strcmp(argv[1], "env")) {
+               if (!argv[2])
+                       die("env specifier without a value");
+               argv_array_push(&proc.env_array, argv[2]);
+               argv += 2;
+               argc -= 2;
+       }
        if (argc < 3)
                return 1;
        proc.argv = (const char **)argv + 2;
index 921d7b3e7ea20efe85f15beb7c75b8a5263267fb..66d33dfcfd1a0b3fa6b94a029aab9117a2511224 100644 (file)
@@ -16,6 +16,8 @@ int cmd_main(int argc, const char **argv)
                return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD);
        else if (!strcmp(argv[1], "pathmatch"))
                return !!wildmatch(argv[3], argv[2], 0);
+       else if (!strcmp(argv[1], "ipathmatch"))
+               return !!wildmatch(argv[3], argv[2], WM_CASEFOLD);
        else
                return 1;
 }
index cd220e378e201fc5bfbaf9c78b0a3183ebf12d11..e3809dcead1818400829b27721cf8197de71432b 100644 (file)
@@ -9,8 +9,8 @@ test_terminal () {
                echo >&4 "test_terminal: need to declare TTY prerequisite"
                return 127
        fi
-       perl "$TEST_DIRECTORY"/test-terminal.perl "$@"
-}
+       perl "$TEST_DIRECTORY"/test-terminal.perl "$@" 2>&7
+} 7>&2 2>&4
 
 test_lazy_prereq TTY '
        test_have_prereq PERL &&
index e40120848837c1ef78cf75d1724c69226823db15..821cf1498b78bbb5b43b2bda32bbcc88d580c31b 100755 (executable)
@@ -1,8 +1,9 @@
 #!/usr/bin/perl
 
-use lib '../../perl/blib/lib';
+use lib '../../perl/build/lib';
 use strict;
 use warnings;
+use JSON;
 use Git;
 
 sub get_times {
@@ -35,10 +36,34 @@ sub format_times {
        return $out;
 }
 
-my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests);
+my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
+    $codespeed, $subsection, $reponame);
 while (scalar @ARGV) {
        my $arg = $ARGV[0];
        my $dir;
+       if ($arg eq "--codespeed") {
+               $codespeed = 1;
+               shift @ARGV;
+               next;
+       }
+       if ($arg eq "--subsection") {
+               shift @ARGV;
+               $subsection = $ARGV[0];
+               shift @ARGV;
+               if (! $subsection) {
+                       die "empty subsection";
+               }
+               next;
+       }
+       if ($arg eq "--reponame") {
+               shift @ARGV;
+               $reponame = $ARGV[0];
+               shift @ARGV;
+               if (! $reponame) {
+                       die "empty reponame";
+               }
+               next;
+       }
        last if -f $arg or $arg eq "--";
        if (! -d $arg) {
                my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
@@ -70,8 +95,15 @@ sub format_times {
 }
 
 my $resultsdir = "test-results";
-if ($ENV{GIT_PERF_SUBSECTION} ne "") {
-       $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION};
+
+if (! $subsection and
+    exists $ENV{GIT_PERF_SUBSECTION} and
+    $ENV{GIT_PERF_SUBSECTION} ne "") {
+       $subsection = $ENV{GIT_PERF_SUBSECTION};
+}
+
+if ($subsection) {
+       $resultsdir .= "/" . $subsection;
 }
 
 my @subtests;
@@ -100,13 +132,6 @@ sub read_descr {
        return $line;
 }
 
-my %descrs;
-my $descrlen = 4; # "Test"
-for my $t (@subtests) {
-       $descrs{$t} = $shorttests{$t}.": ".read_descr("$resultsdir/$t.descr");
-       $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen;
-}
-
 sub have_duplicate {
        my %seen;
        for (@_) {
@@ -122,54 +147,119 @@ sub have_slash {
        return 0;
 }
 
-my %newdirabbrevs = %dirabbrevs;
-while (!have_duplicate(values %newdirabbrevs)) {
-       %dirabbrevs = %newdirabbrevs;
-       last if !have_slash(values %dirabbrevs);
-       %newdirabbrevs = %dirabbrevs;
-       for (values %newdirabbrevs) {
-               s{^[^/]*/}{};
+sub print_default_results {
+       my %descrs;
+       my $descrlen = 4; # "Test"
+       for my $t (@subtests) {
+               $descrs{$t} = $shorttests{$t}.": ".read_descr("$resultsdir/$t.descr");
+               $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen;
        }
-}
 
-my %times;
-my @colwidth = ((0)x@dirs);
-for my $i (0..$#dirs) {
-       my $d = $dirs[$i];
-       my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
-       $colwidth[$i] = $w if $w > $colwidth[$i];
-}
-for my $t (@subtests) {
-       my $firstr;
+       my %newdirabbrevs = %dirabbrevs;
+       while (!have_duplicate(values %newdirabbrevs)) {
+               %dirabbrevs = %newdirabbrevs;
+               last if !have_slash(values %dirabbrevs);
+               %newdirabbrevs = %dirabbrevs;
+               for (values %newdirabbrevs) {
+                       s{^[^/]*/}{};
+               }
+       }
+
+       my %times;
+       my @colwidth = ((0)x@dirs);
        for my $i (0..$#dirs) {
                my $d = $dirs[$i];
-               $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
-               my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
-               my $w = length format_times($r,$u,$s,$firstr);
+               my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
                $colwidth[$i] = $w if $w > $colwidth[$i];
-               $firstr = $r unless defined $firstr;
        }
-}
-my $totalwidth = 3*@dirs+$descrlen;
-$totalwidth += $_ for (@colwidth);
-
-binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
+       for my $t (@subtests) {
+               my $firstr;
+               for my $i (0..$#dirs) {
+                       my $d = $dirs[$i];
+                       $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
+                       my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
+                       my $w = length format_times($r,$u,$s,$firstr);
+                       $colwidth[$i] = $w if $w > $colwidth[$i];
+                       $firstr = $r unless defined $firstr;
+               }
+       }
+       my $totalwidth = 3*@dirs+$descrlen;
+       $totalwidth += $_ for (@colwidth);
 
-printf "%-${descrlen}s", "Test";
-for my $i (0..$#dirs) {
-       my $d = $dirs[$i];
-       printf "   %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
-}
-print "\n";
-print "-"x$totalwidth, "\n";
-for my $t (@subtests) {
-       printf "%-${descrlen}s", $descrs{$t};
-       my $firstr;
+       printf "%-${descrlen}s", "Test";
        for my $i (0..$#dirs) {
                my $d = $dirs[$i];
-               my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
-               printf "   %-$colwidth[$i]s", format_times($r,$u,$s,$firstr);
-               $firstr = $r unless defined $firstr;
+               printf "   %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
        }
        print "\n";
+       print "-"x$totalwidth, "\n";
+       for my $t (@subtests) {
+               printf "%-${descrlen}s", $descrs{$t};
+               my $firstr;
+               for my $i (0..$#dirs) {
+                       my $d = $dirs[$i];
+                       my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
+                       printf "   %-$colwidth[$i]s", format_times($r,$u,$s,$firstr);
+                       $firstr = $r unless defined $firstr;
+               }
+               print "\n";
+       }
+}
+
+sub print_codespeed_results {
+       my ($subsection) = @_;
+
+       my $project = "Git";
+
+       my $executable = `uname -s -m`;
+       chomp $executable;
+
+       if ($subsection) {
+               $executable .= ", " . $subsection;
+       }
+
+       my $environment;
+       if ($reponame) {
+               $environment = $reponame;
+       } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
+               $environment = $ENV{GIT_PERF_REPO_NAME};
+       } elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") {
+               $environment = $ENV{GIT_TEST_INSTALLED};
+               $environment =~ s|/bin-wrappers$||;
+       } else {
+               $environment = `uname -r`;
+               chomp $environment;
+       }
+
+       my @data;
+
+       for my $t (@subtests) {
+               for my $d (@dirs) {
+                       my $commitid = $prefixes{$d};
+                       $commitid =~ s/^build_//;
+                       $commitid =~ s/\.$//;
+                       my ($result_value, $u, $s) = get_times("$resultsdir/$prefixes{$d}$t.times");
+
+                       my %vals = (
+                               "commitid" => $commitid,
+                               "project" => $project,
+                               "branch" => $dirnames{$d},
+                               "executable" => $executable,
+                               "benchmark" => $shorttests{$t} . " " . read_descr("$resultsdir/$t.descr"),
+                               "environment" => $environment,
+                               "result_value" => $result_value,
+                           );
+                       push @data, \%vals;
+               }
+       }
+
+       print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
+}
+
+binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
+
+if ($codespeed) {
+       print_codespeed_results($subsection);
+} else {
+       print_default_results();
 }
index 43e4de49ef2bea9ae09b502f28fbc0913e294571..213da5d6b9437b7db7f3e5b824afea1df465d934 100755 (executable)
@@ -105,7 +105,7 @@ get_var_from_env_or_config () {
        env_var="$1"
        conf_sec="$2"
        conf_var="$3"
-       # $4 can be set to a default value
+       conf_opts="$4" # optional
 
        # Do nothing if the env variable is already set
        eval "test -z \"\${$env_var+x}\"" || return
@@ -116,18 +116,17 @@ get_var_from_env_or_config () {
        if test -n "$GIT_PERF_SUBSECTION"
        then
                var="$conf_sec.$GIT_PERF_SUBSECTION.$conf_var"
-               conf_value=$(git config -f "$GIT_PERF_CONFIG_FILE" "$var") &&
+               conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") &&
                eval "$env_var=\"$conf_value\"" && return
        fi
        var="$conf_sec.$conf_var"
-       conf_value=$(git config -f "$GIT_PERF_CONFIG_FILE" "$var") &&
-       eval "$env_var=\"$conf_value\"" && return
-
-       test -n "${4+x}" && eval "$env_var=\"$4\""
+       conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") &&
+       eval "$env_var=\"$conf_value\""
 }
 
 run_subsection () {
-       get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" 3
+       get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" "--int"
+       : ${GIT_PERF_REPEAT_COUNT:=3}
        export GIT_PERF_REPEAT_COUNT
 
        get_var_from_env_or_config "GIT_PERF_DIRS_OR_REVS" "perf" "dirsOrRevs"
@@ -136,6 +135,9 @@ run_subsection () {
        get_var_from_env_or_config "GIT_PERF_MAKE_COMMAND" "perf" "makeCommand"
        get_var_from_env_or_config "GIT_PERF_MAKE_OPTS" "perf" "makeOpts"
 
+       get_var_from_env_or_config "GIT_PERF_REPO_NAME" "perf" "repoName"
+       export GIT_PERF_REPO_NAME
+
        GIT_PERF_AGGREGATING_LATER=t
        export GIT_PERF_AGGREGATING_LATER
 
@@ -143,10 +145,25 @@ run_subsection () {
                set -- . "$@"
        fi
 
+       codespeed_opt=
+       test "$GIT_PERF_CODESPEED_OUTPUT" = "true" && codespeed_opt="--codespeed"
+
        run_dirs "$@"
-       ./aggregate.perl "$@"
+
+       if test -z "$GIT_PERF_SEND_TO_CODESPEED"
+       then
+               ./aggregate.perl $codespeed_opt "$@"
+       else
+               json_res_file="test-results/$GIT_PERF_SUBSECTION/aggregate.json"
+               ./aggregate.perl --codespeed "$@" | tee "$json_res_file"
+               send_data_url="$GIT_PERF_SEND_TO_CODESPEED/result/add/json/"
+               curl -v --request POST --data-urlencode "json=$(cat "$json_res_file")" "$send_data_url"
+       fi
 }
 
+get_var_from_env_or_config "GIT_PERF_CODESPEED_OUTPUT" "perf" "codespeedOutput" "--bool"
+get_var_from_env_or_config "GIT_PERF_SEND_TO_CODESPEED" "perf" "sendToCodespeed"
+
 cd "$(dirname $0)"
 . ../../GIT-BUILD-OPTIONS
 
index 9670e8cbe6cb9a9faa3519b0f11dc16713496188..3691023d510a0d97bf1390b781afe1ac9fa270f4 100755 (executable)
@@ -10,15 +10,6 @@ objpath() {
        echo "$1" | sed -e 's|\(..\)|\1/|'
 }
 
-objck() {
-       p=$(objpath "$1")
-       if test ! -f "$REAL/objects/$p"
-       then
-               echo "Object not found: $REAL/objects/$p"
-               false
-       fi
-}
-
 test_expect_success 'initial setup' '
        REAL="$(pwd)/.real" &&
        mv .git "$REAL"
@@ -26,30 +17,14 @@ test_expect_success 'initial setup' '
 
 test_expect_success 'bad setup: invalid .git file format' '
        echo "gitdir $REAL" >.git &&
-       if git rev-parse 2>.err
-       then
-               echo "git rev-parse accepted an invalid .git file"
-               false
-       fi &&
-       if ! grep "Invalid gitfile format" .err
-       then
-               echo "git rev-parse returned wrong error"
-               false
-       fi
+       test_must_fail git rev-parse 2>.err &&
+       test_i18ngrep "invalid gitfile format" .err
 '
 
 test_expect_success 'bad setup: invalid .git file path' '
        echo "gitdir: $REAL.not" >.git &&
-       if git rev-parse 2>.err
-       then
-               echo "git rev-parse accepted an invalid .git file path"
-               false
-       fi &&
-       if ! grep "Not a git repository" .err
-       then
-               echo "git rev-parse returned wrong error"
-               false
-       fi
+       test_must_fail git rev-parse 2>.err &&
+       test_i18ngrep "not a git repository" .err
 '
 
 test_expect_success 'final setup + check rev-parse --git-dir' '
@@ -60,7 +35,7 @@ test_expect_success 'final setup + check rev-parse --git-dir' '
 test_expect_success 'check hash-object' '
        echo "foo" >bar &&
        SHA=$(cat bar | git hash-object -w --stdin) &&
-       objck $SHA
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check cat-file' '
@@ -69,29 +44,21 @@ test_expect_success 'check cat-file' '
 '
 
 test_expect_success 'check update-index' '
-       if test -f "$REAL/index"
-       then
-               echo "Hmm, $REAL/index exists?"
-               false
-       fi &&
+       test_path_is_missing "$REAL/index" &&
        rm -f "$REAL/objects/$(objpath $SHA)" &&
        git update-index --add bar &&
-       if ! test -f "$REAL/index"
-       then
-               echo "$REAL/index not found"
-               false
-       fi &&
-       objck $SHA
+       test_path_is_file "$REAL/index" &&
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check write-tree' '
        SHA=$(git write-tree) &&
-       objck $SHA
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check commit-tree' '
        SHA=$(echo "commit bar" | git commit-tree $SHA) &&
-       objck $SHA
+       test_path_is_file "$REAL/objects/$(objpath $SHA)"
 '
 
 test_expect_success 'check rev-list' '
index d27f438bf410d06f37ec76a6ce040316218a8138..c03f155a357446338fa831e061dc58d8e5f4c657 100755 (executable)
@@ -307,7 +307,7 @@ test_expect_success_multi 'needs work tree' '' '
                cd .git &&
                test_check_ignore "foo" 128
        ) &&
-       stderr_contains "fatal: This operation must be run in a work tree"
+       stderr_contains "fatal: this operation must be run in a work tree"
 '
 
 ############################################################################
@@ -775,6 +775,26 @@ test_expect_success PIPE 'streaming support for --stdin' '
        echo "$response" | grep "^::    two"
 '
 
+test_expect_success 'existing file and directory' '
+       test_when_finished "rm one" &&
+       test_when_finished "rmdir top-level-dir" &&
+       >one &&
+       mkdir top-level-dir &&
+       git check-ignore one top-level-dir >actual &&
+       grep one actual &&
+       grep top-level-dir actual
+'
+
+test_expect_success 'existing directory and file' '
+       test_when_finished "rm one" &&
+       test_when_finished "rmdir top-level-dir" &&
+       >one &&
+       mkdir top-level-dir &&
+       git check-ignore top-level-dir one >actual &&
+       grep one actual &&
+       grep top-level-dir actual
+'
+
 ############################################################################
 #
 # test whitespace handling
index 0c2fc81d7b0fa401db58c41cd2fed4469e80b058..04d474c84fd69121c686f5ca5adc40ce081f0e9d 100755 (executable)
@@ -291,7 +291,7 @@ test_expect_success 'OPT_CALLBACK() and OPT_BIT() work' '
 test_expect_success 'OPT_CALLBACK() and callback errors work' '
        test_must_fail test-parse-options --no-length >output 2>output.err &&
        test_i18ncmp expect output &&
-       test_i18ncmp expect.err output.err
+       test_must_be_empty output.err
 '
 
 cat >expect <<\EOF
diff --git a/t/t0041-usage.sh b/t/t0041-usage.sh
new file mode 100755 (executable)
index 0000000..5b927b7
--- /dev/null
@@ -0,0 +1,107 @@
+#!/bin/sh
+
+test_description='Test commands behavior when given invalid argument value'
+
+. ./test-lib.sh
+
+test_expect_success 'setup ' '
+       test_commit "v1.0"
+'
+
+test_expect_success 'tag --contains <existent_tag>' '
+       git tag --contains "v1.0" >actual 2>actual.err &&
+       grep "v1.0" actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'tag --contains <inexistent_tag>' '
+       test_must_fail git tag --contains "notag" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'tag --no-contains <existent_tag>' '
+       git tag --no-contains "v1.0" >actual 2>actual.err  &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'tag --no-contains <inexistent_tag>' '
+       test_must_fail git tag --no-contains "notag" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'tag usage error' '
+       test_must_fail git tag --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_expect_success 'branch --contains <existent_commit>' '
+       git branch --contains "master" >actual 2>actual.err &&
+       test_i18ngrep "master" actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'branch --contains <inexistent_commit>' '
+       test_must_fail git branch --contains "nocommit" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'branch --no-contains <existent_commit>' '
+       git branch --no-contains "master" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'branch --no-contains <inexistent_commit>' '
+       test_must_fail git branch --no-contains "nocommit" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'branch usage error' '
+       test_must_fail git branch --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_expect_success 'for-each-ref --contains <existent_object>' '
+       git for-each-ref --contains "master" >actual 2>actual.err &&
+       test_line_count = 2 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'for-each-ref --contains <inexistent_object>' '
+       test_must_fail git for-each-ref --contains "noobject" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'for-each-ref --no-contains <existent_object>' '
+       git for-each-ref --no-contains "master" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_line_count = 0 actual.err
+'
+
+test_expect_success 'for-each-ref --no-contains <inexistent_object>' '
+       test_must_fail git for-each-ref --no-contains "noobject" >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "error" actual.err &&
+       test_i18ngrep ! "usage" actual.err
+'
+
+test_expect_success 'for-each-ref usage error' '
+       test_must_fail git for-each-ref --noopt >actual 2>actual.err &&
+       test_line_count = 0 actual &&
+       test_i18ngrep "usage" actual.err
+'
+
+test_done
index b29d749bb7b33406b2d433d96c35d252e305eed0..192c94eccd13c3b251cfc6910ad6ef175312ea13 100755 (executable)
@@ -80,7 +80,21 @@ test_expect_success 'merge (case change)' '
        git merge topic
 '
 
-
+test_expect_success CASE_INSENSITIVE_FS 'add directory (with different case)' '
+       git reset --hard initial &&
+       mkdir -p dir1/dir2 &&
+       echo >dir1/dir2/a &&
+       echo >dir1/dir2/b &&
+       git add dir1/dir2/a &&
+       git add dir1/DIR2/b &&
+       git ls-files >actual &&
+       cat >expected <<-\EOF &&
+               camelcase
+               dir1/dir2/a
+               dir1/dir2/b
+       EOF
+       test_cmp expected actual
+'
 
 test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' '
        git reset --hard initial &&
index e4739170aa2b7c833cd51a06a7ac6764c8bf0494..24c92b6cd7b1c54eb6541a81abd7e5812b3b99b0 100755 (executable)
@@ -141,4 +141,41 @@ test_expect_success 'run_command outputs ' '
        test_cmp expect actual
 '
 
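+# test_trace <expected> <args>...: run test-run-command with GIT_TRACE
+# enabled and check that the traced "run_command:" line shows the
+# expected environment adjustments in front of the "true" command.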
+test_trace () {
+       expect="$1"
+       shift
+       GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \
+               sed 's/.* run_command: //' >actual &&
+       echo "$expect true" >expect &&
+       test_cmp expect actual
+}
+
+test_expect_success 'GIT_TRACE with environment variables' '
+       test_trace "abc=1 def=2" env abc=1 env def=2 &&
+       test_trace "abc=2" env abc env abc=1 env abc=2 &&
+       test_trace "abc=2" env abc env abc=2 &&
+       (
+               abc=1 && export abc &&
+               test_trace "def=1" env abc=1 env def=1
+       ) &&
+       (
+               abc=1 && export abc &&
+               test_trace "def=1" env abc env abc=1 env def=1
+       ) &&
+       test_trace "def=1" env non-exist env def=1 &&
+       test_trace "abc=2" env abc=1 env abc env abc=2 &&
+       (
+               abc=1 def=2 && export abc def &&
+               test_trace "unset abc def;" env abc env def
+       ) &&
+       (
+               abc=1 def=2 && export abc def &&
+               test_trace "unset def; abc=3" env abc env def env abc=3
+       ) &&
+       (
+               abc=1 && export abc &&
+               test_trace "unset abc;" env abc=2 env abc
+       )
+'
+
 test_done
diff --git a/t/t0410-partial-clone.sh b/t/t0410-partial-clone.sh
new file mode 100755 (executable)
index 0000000..cc18b75
--- /dev/null
@@ -0,0 +1,343 @@
+#!/bin/sh
+
+test_description='partial clone'
+
+. ./test-lib.sh
+
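+# delete_object <repo> <hash>: remove the loose object file for <hash>
+# from <repo>'s object database.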
+delete_object () {
+       rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|')
+}
+
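+# pack_as_from_promisor: pack the objects listed on stdin into "repo",
+# mark the resulting packfile as coming from a promisor remote by
+# creating an empty .promisor file next to it, and print the pack hash.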
+pack_as_from_promisor () {
+       HASH=$(git -C repo pack-objects .git/objects/pack/pack) &&
+       >repo/.git/objects/pack/pack-$HASH.promisor &&
+       echo $HASH
+}
+
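+# promise_and_delete <rev>: make the object named by <rev> "promised"
+# (reachable from a promisor packfile via a temporary annotated tag)
+# and then delete the object itself from the object database.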
+promise_and_delete () {
+       HASH=$(git -C repo rev-parse "$1") &&
+       git -C repo tag -a -m message my_annotated_tag "$HASH" &&
+       git -C repo rev-parse my_annotated_tag | pack_as_from_promisor &&
+       # tag -d prints a message to stdout, so redirect it
+       git -C repo tag -d my_annotated_tag >/dev/null &&
+       delete_object repo "$HASH"
+}
+
+test_expect_success 'missing reflog object, but promised by a commit, passes fsck' '
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+       C=$(git -C repo commit-tree -m c -p $A HEAD^{tree}) &&
+
+       # Reference $A only from reflog, and delete it
+       git -C repo branch my_branch "$A" &&
+       git -C repo branch -f my_branch my_commit &&
+       delete_object repo "$A" &&
+
+       # State that we got $C, which refers to $A, from promisor
+       printf "$C\n" | pack_as_from_promisor &&
+
+       # Normally, it fails
+       test_must_fail git -C repo fsck &&
+
+       # But with the extension, it succeeds
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo fsck
+'
+
+test_expect_success 'missing reflog object, but promised by a tag, passes fsck' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+       git -C repo tag -a -m d my_tag_name $A &&
+       T=$(git -C repo rev-parse my_tag_name) &&
+       git -C repo tag -d my_tag_name &&
+
+       # Reference $A only from reflog, and delete it
+       git -C repo branch my_branch "$A" &&
+       git -C repo branch -f my_branch my_commit &&
+       delete_object repo "$A" &&
+
+       # State that we got $T, which refers to $A, from promisor
+       printf "$T\n" | pack_as_from_promisor &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo fsck
+'
+
+test_expect_success 'missing reflog object alone fails fsck, even with extension set' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+       B=$(git -C repo commit-tree -m b HEAD^{tree}) &&
+
+       # Reference $A only from reflog, and delete it
+       git -C repo branch my_branch "$A" &&
+       git -C repo branch -f my_branch my_commit &&
+       delete_object repo "$A" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       test_must_fail git -C repo fsck
+'
+
+test_expect_success 'missing ref object, but promised, passes fsck' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+
+       # Reference $A only from ref
+       git -C repo branch my_branch "$A" &&
+       promise_and_delete "$A" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo fsck
+'
+
+test_expect_success 'missing object, but promised, passes fsck' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo 1 &&
+       test_commit -C repo 2 &&
+       test_commit -C repo 3 &&
+       git -C repo tag -a annotated_tag -m "annotated tag" &&
+
+       C=$(git -C repo rev-parse 1) &&
+       T=$(git -C repo rev-parse 2^{tree}) &&
+       B=$(git hash-object repo/3.t) &&
+       AT=$(git -C repo rev-parse annotated_tag) &&
+
+       promise_and_delete "$C" &&
+       promise_and_delete "$T" &&
+       promise_and_delete "$B" &&
+       promise_and_delete "$AT" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo fsck
+'
+
+test_expect_success 'missing CLI object, but promised, passes fsck' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+       promise_and_delete "$A" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo fsck "$A"
+'
+
+test_expect_success 'fetching of missing objects' '
+       rm -rf repo &&
+       test_create_repo server &&
+       test_commit -C server foo &&
+       git -C server repack -a -d --write-bitmap-index &&
+
+       git clone "file://$(pwd)/server" repo &&
+       HASH=$(git -C repo rev-parse foo) &&
+       rm -rf repo/.git/objects/* &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "origin" &&
+       git -C repo cat-file -p "$HASH" &&
+
+       # Ensure that the .promisor file is written, and check that its
+       # associated packfile contains the object
+       ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+       test_line_count = 1 promisorlist &&
+       IDX=$(sed "s/promisor$/idx/" promisorlist) &&
+       git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised commit' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo foo &&
+       test_commit -C repo bar &&
+
+       FOO=$(git -C repo rev-parse foo) &&
+       promise_and_delete "$FOO" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
+       grep $(git -C repo rev-parse bar) out &&
+       ! grep $FOO out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised tree' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo foo &&
+       mkdir repo/a_dir &&
+       echo something >repo/a_dir/something &&
+       git -C repo add a_dir/something &&
+       git -C repo commit -m bar &&
+
+       # foo^{tree} (tree referenced from commit)
+       TREE=$(git -C repo rev-parse foo^{tree}) &&
+
+       # a tree referenced by HEAD^{tree} (tree referenced from tree)
+       TREE2=$(git -C repo ls-tree HEAD^{tree} | grep " tree " | head -1 | cut -b13-52) &&
+
+       promise_and_delete "$TREE" &&
+       promise_and_delete "$TREE2" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+       grep $(git -C repo rev-parse foo) out &&
+       ! grep $TREE out &&
+       grep $(git -C repo rev-parse HEAD) out &&
+       ! grep $TREE2 out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised blob' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       echo something >repo/something &&
+       git -C repo add something &&
+       git -C repo commit -m foo &&
+
+       BLOB=$(git -C repo hash-object -w something) &&
+       promise_and_delete "$BLOB" &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+       grep $(git -C repo rev-parse HEAD) out &&
+       ! grep $BLOB out
+'
+
+test_expect_success 'rev-list stops traversal at promisor commit, tree, and blob' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo foo &&
+       test_commit -C repo bar &&
+       test_commit -C repo baz &&
+
+       COMMIT=$(git -C repo rev-parse foo) &&
+       TREE=$(git -C repo rev-parse bar^{tree}) &&
+       BLOB=$(git hash-object repo/baz.t) &&
+       printf "%s\n%s\n%s\n" $COMMIT $TREE $BLOB | pack_as_from_promisor &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+       ! grep $COMMIT out &&
+       ! grep $TREE out &&
+       ! grep $BLOB out &&
+       grep $(git -C repo rev-parse bar) out  # sanity check that some walking was done
+'
+
+test_expect_success 'rev-list accepts missing and promised objects on command line' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo foo &&
+       test_commit -C repo bar &&
+       test_commit -C repo baz &&
+
+       COMMIT=$(git -C repo rev-parse foo) &&
+       TREE=$(git -C repo rev-parse bar^{tree}) &&
+       BLOB=$(git hash-object repo/baz.t) &&
+
+       promise_and_delete $COMMIT &&
+       promise_and_delete $TREE &&
+       promise_and_delete $BLOB &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB"
+'
+
+test_expect_success 'gc does not repack promisor objects' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+       HASH=$(printf "%s\n" "$TREE_HASH" | pack_as_from_promisor) &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo gc &&
+
+       # Ensure that the promisor packfile still exists, and remove it
+       test -e repo/.git/objects/pack/pack-$HASH.pack &&
+       rm repo/.git/objects/pack/pack-$HASH.* &&
+
+       # Ensure that the single other pack contains the commit, but not the tree
+       ls repo/.git/objects/pack/pack-*.pack >packlist &&
+       test_line_count = 1 packlist &&
+       git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+       grep "$(git -C repo rev-parse HEAD)" out &&
+       ! grep "$TREE_HASH" out
+'
+
+test_expect_success 'gc stops traversal when a missing but promised object is reached' '
+       rm -rf repo &&
+       test_create_repo repo &&
+       test_commit -C repo my_commit &&
+
+       TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+       HASH=$(promise_and_delete $TREE_HASH) &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "arbitrary string" &&
+       git -C repo gc &&
+
+       # Ensure that the promisor packfile still exists, and remove it
+       test -e repo/.git/objects/pack/pack-$HASH.pack &&
+       rm repo/.git/objects/pack/pack-$HASH.* &&
+
+       # Ensure that the single other pack contains the commit, but not the tree
+       ls repo/.git/objects/pack/pack-*.pack >packlist &&
+       test_line_count = 1 packlist &&
+       git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+       grep "$(git -C repo rev-parse HEAD)" out &&
+       ! grep "$TREE_HASH" out
+'
+
+LIB_HTTPD_PORT=12345  # the default port, 410, is privileged and cannot be used as non-root
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetching of missing objects from an HTTP server' '
+       rm -rf repo &&
+       SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+       test_create_repo "$SERVER" &&
+       test_commit -C "$SERVER" foo &&
+       git -C "$SERVER" repack -a -d --write-bitmap-index &&
+
+       git clone $HTTPD_URL/smart/server repo &&
+       HASH=$(git -C repo rev-parse foo) &&
+       rm -rf repo/.git/objects/* &&
+
+       git -C repo config core.repositoryformatversion 1 &&
+       git -C repo config extensions.partialclone "origin" &&
+       git -C repo cat-file -p "$HASH" &&
+
+       # Ensure that the .promisor file is written, and check that its
+       # associated packfile contains the object
+       ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+       test_line_count = 1 promisorlist &&
+       IDX=$(sed "s/promisor$/idx/" promisorlist) &&
+       git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+stop_httpd
+
+test_done
index cbeb9bebeea67c3654c279b2c12fed4825840d8f..4f8e6f5fde3295cc91d2feb1fd85fa007e56016c 100755 (executable)
@@ -1206,6 +1206,29 @@ test_expect_success 'git -c is not confused by empty environment' '
        GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list
 '
 
+sq="'"
+test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' '
+       cat >expect <<-\EOF &&
+       env.one one
+       env.two two
+       EOF
+       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*" >actual &&
+       test_cmp expect actual &&
+
+       cat >expect <<-EOF &&
+       env.one one${sq}
+       env.two two
+       EOF
+       GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*" >actual &&
+       test_cmp expect actual &&
+
+       test_must_fail env \
+               GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \
+               git config --get-regexp "env.*"
+'
+
 test_expect_success 'git config --edit works' '
        git config -f tmp test.value no &&
        echo test.value=yes >expect &&
index 79a0251efa6e049be4fffa41004e73b9950d2bd7..4ee009da666f22f6ff1ddce93dc1f30924c6cc26 100755 (executable)
@@ -157,7 +157,7 @@ test_expect_success 'relative path not found' '
 test_expect_success 'relative path outside worktree' '
        test_must_fail git rev-parse HEAD:../file.txt >output 2>error &&
        test -z "$(cat output)" &&
-       grep "outside repository" error
+       test_i18ngrep "outside repository" error
 '
 
 test_expect_success 'relative path when cwd is outside worktree' '
index b23c4e3fab604f957ec6359eae75400dd1e53174..2ce68cc277a1ed742ce7b644de31a4a847e3529b 100755 (executable)
@@ -42,7 +42,7 @@ commit_subject () {
 
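+# error_message <rev>: run "git rev-parse --verify <rev>" in the clone,
+# capturing its standard error in the top-level file "error".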
 error_message () {
        (cd clone &&
-        test_must_fail git rev-parse --verify "$@")
+        test_must_fail git rev-parse --verify "$@" 2>../error)
 }
 
 test_expect_success '@{upstream} resolves to correct full name' '
@@ -159,8 +159,8 @@ test_expect_success 'branch@{u} error message when no upstream' '
        cat >expect <<-EOF &&
        fatal: no upstream configured for branch ${sq}non-tracking${sq}
        EOF
-       error_message non-tracking@{u} 2>actual &&
-       test_i18ncmp expect actual
+       error_message non-tracking@{u} &&
+       test_i18ncmp expect error
 '
 
 test_expect_success '@{u} error message when no upstream' '
@@ -175,8 +175,8 @@ test_expect_success 'branch@{u} error message with misspelt branch' '
        cat >expect <<-EOF &&
        fatal: no such branch: ${sq}no-such-branch${sq}
        EOF
-       error_message no-such-branch@{u} 2>actual &&
-       test_i18ncmp expect actual
+       error_message no-such-branch@{u} &&
+       test_i18ncmp expect error
 '
 
 test_expect_success '@{u} error message when not on a branch' '
@@ -192,8 +192,8 @@ test_expect_success 'branch@{u} error message if upstream branch not fetched' '
        cat >expect <<-EOF &&
        fatal: upstream branch ${sq}refs/heads/side${sq} not stored as a remote-tracking branch
        EOF
-       error_message bad-upstream@{u} 2>actual &&
-       test_i18ncmp expect actual
+       error_message bad-upstream@{u} &&
+       test_i18ncmp expect error
 '
 
 test_expect_success 'pull works when tracking a local branch' '
index 13ae12dfa7d494f50fc2e728d3e7ee36ff5534bc..e6854b828e2e68ad217721eb6139970b7c5958c0 100755 (executable)
@@ -39,6 +39,10 @@ A few rules for repo setup:
 11. When user's cwd is outside worktree, cwd remains unchanged,
     prefix is NULL.
 "
+
+# This test heavily relies on the standard error of nested function calls.
+test_untraceable=UnfortunatelyYes
+
 . ./test-lib.sh
 
 here=$(pwd)
index 2b959449730e14dd4e650ce9115a463d71eedca4..d0d2e4f7ec3310ec51da7144fa87151129f393c0 100755 (executable)
@@ -451,32 +451,68 @@ test_expect_success 'git worktree --no-guess-remote option overrides config' '
 '
 
 post_checkout_hook () {
-       test_when_finished "rm -f .git/hooks/post-checkout" &&
-       mkdir -p .git/hooks &&
-       write_script .git/hooks/post-checkout <<-\EOF
-       echo $* >hook.actual
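+       # Install the hook in $1 (default ".git") so callers can point it
+       # at a different repository, e.g. a bare one.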
+       gitdir=${1:-.git}
+       test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
+       mkdir -p $gitdir/hooks &&
+       write_script $gitdir/hooks/post-checkout <<-\EOF
+       {
+               echo $*
+               git rev-parse --git-dir --show-toplevel
+       } >hook.actual
        EOF
 }
 
 test_expect_success '"add" invokes post-checkout hook (branch)' '
        post_checkout_hook &&
-       printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+       {
+               echo $_z40 $(git rev-parse HEAD) 1 &&
+               echo $(pwd)/.git/worktrees/gumby &&
+               echo $(pwd)/gumby
+       } >hook.expect &&
        git worktree add gumby &&
-       test_cmp hook.expect hook.actual
+       test_cmp hook.expect gumby/hook.actual
 '
 
 test_expect_success '"add" invokes post-checkout hook (detached)' '
        post_checkout_hook &&
-       printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+       {
+               echo $_z40 $(git rev-parse HEAD) 1 &&
+               echo $(pwd)/.git/worktrees/grumpy &&
+               echo $(pwd)/grumpy
+       } >hook.expect &&
        git worktree add --detach grumpy &&
-       test_cmp hook.expect hook.actual
+       test_cmp hook.expect grumpy/hook.actual
 '
 
 test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
        post_checkout_hook &&
        rm -f hook.actual &&
        git worktree add --no-checkout gloopy &&
-       test_path_is_missing hook.actual
+       test_path_is_missing gloopy/hook.actual
+'
+
+test_expect_success '"add" in other worktree invokes post-checkout hook' '
+       post_checkout_hook &&
+       {
+               echo $_z40 $(git rev-parse HEAD) 1 &&
+               echo $(pwd)/.git/worktrees/guppy &&
+               echo $(pwd)/guppy
+       } >hook.expect &&
+       git -C gloopy worktree add --detach ../guppy &&
+       test_cmp hook.expect guppy/hook.actual
+'
+
+test_expect_success '"add" in bare repo invokes post-checkout hook' '
+       rm -rf bare &&
+       git clone --bare . bare &&
+       {
+               echo $_z40 $(git --git-dir=bare rev-parse HEAD) 1 &&
+               echo $(pwd)/bare/worktrees/goozy &&
+               echo $(pwd)/goozy
+       } >hook.expect &&
+       post_checkout_hook bare &&
+       git -C bare worktree add --detach ../goozy &&
+       test_cmp hook.expect goozy/hook.actual
 '
 
 test_done
index a0f1e3bb800ec6943648eeffb9e7ca84e328fa41..b7d6d5d45adf6067ab2f39801f658f778f9b2855 100755 (executable)
@@ -78,10 +78,9 @@ test_expect_success 'not prune locked checkout' '
 
 test_expect_success 'not prune recent checkouts' '
        test_when_finished rm -r .git/worktrees &&
-       mkdir zz &&
-       mkdir -p .git/worktrees/jlm &&
-       echo "$(pwd)"/zz >.git/worktrees/jlm/gitdir &&
-       rmdir zz &&
+       git worktree add jlm HEAD &&
+       test -d .git/worktrees/jlm &&
+       rm -rf jlm &&
        git worktree prune --verbose --expire=2.days.ago &&
        test -d .git/worktrees/jlm
 '
index 8298aaf97f706ea796a12614acafd19636aeaa70..5d5b3632ba0a7cf5364ab4db1df8ca7ea285b4d5 100755 (executable)
@@ -7,7 +7,8 @@ test_description='test git worktree move, remove, lock and unlock'
 test_expect_success 'setup' '
        test_commit init &&
        git worktree add source &&
-       git worktree list --porcelain | grep "^worktree" >actual &&
+       git worktree list --porcelain >out &&
+       grep "^worktree" out >actual &&
        cat <<-EOF >expected &&
        worktree $(pwd)
        worktree $(pwd)/source
@@ -59,4 +60,86 @@ test_expect_success 'unlock worktree twice' '
        test_path_is_missing .git/worktrees/source/locked
 '
 
+test_expect_success 'move non-worktree' '
+       mkdir abc &&
+       test_must_fail git worktree move abc def
+'
+
+test_expect_success 'move locked worktree' '
+       git worktree lock source &&
+       test_when_finished "git worktree unlock source" &&
+       test_must_fail git worktree move source destination
+'
+
+test_expect_success 'move worktree' '
+       toplevel="$(pwd)" &&
+       git worktree move source destination &&
+       test_path_is_missing source &&
+       git worktree list --porcelain >out &&
+       grep "^worktree.*/destination" out &&
+       ! grep "^worktree.*/source" out &&
+       git -C destination log --format=%s >actual2 &&
+       echo init >expected2 &&
+       test_cmp expected2 actual2
+'
+
+test_expect_success 'move main worktree' '
+       test_must_fail git worktree move . def
+'
+
+test_expect_success 'move worktree to another dir' '
+       mkdir some-dir &&
+       git worktree move destination some-dir &&
+       test_when_finished "git worktree move some-dir/destination destination" &&
+       test_path_is_missing destination &&
+       git worktree list --porcelain >out &&
+       grep "^worktree.*/some-dir/destination" out &&
+       git -C some-dir/destination log --format=%s >actual2 &&
+       echo init >expected2 &&
+       test_cmp expected2 actual2
+'
+
+test_expect_success 'remove main worktree' '
+       test_must_fail git worktree remove .
+'
+
+test_expect_success 'remove locked worktree' '
+       git worktree lock destination &&
+       test_when_finished "git worktree unlock destination" &&
+       test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with dirty tracked file' '
+       echo dirty >>destination/init.t &&
+       test_when_finished "git -C destination checkout init.t" &&
+       test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with untracked file' '
+       : >destination/untracked &&
+       test_must_fail git worktree remove destination
+'
+
+test_expect_success 'force remove worktree with untracked file' '
+       git worktree remove --force destination &&
+       test_path_is_missing destination
+'
+
+test_expect_success 'remove missing worktree' '
+       git worktree add to-be-gone &&
+       test -d .git/worktrees/to-be-gone &&
+       mv to-be-gone gone &&
+       git worktree remove to-be-gone &&
+       test_path_is_missing .git/worktrees/to-be-gone
+'
+
+test_expect_success 'NOT remove missing-but-locked worktree' '
+       git worktree add gone-but-locked &&
+       git worktree lock gone-but-locked &&
+       test -d .git/worktrees/gone-but-locked &&
+       mv gone-but-locked really-gone-now &&
+       test_must_fail git worktree remove gone-but-locked &&
+       test_path_is_dir .git/worktrees/gone-but-locked
+'
+
 test_done
index cdc38fe5d1a0d171aa3cdf9ac8ee73dd2cd3b18d..3563e77b374c69a138bdfa3b274b75b6094c55a7 100755 (executable)
@@ -525,20 +525,22 @@ test_expect_success 'merge-recursive w/ empty work tree - ours has rename' '
                GIT_INDEX_FILE="$PWD/ours-has-rename-index" &&
                export GIT_INDEX_FILE &&
                mkdir "$GIT_WORK_TREE" &&
-               git read-tree -i -m $c7 &&
-               git update-index --ignore-missing --refresh &&
-               git merge-recursive $c0 -- $c7 $c3 &&
-               git ls-files -s >actual-files
-       ) 2>actual-err &&
-       >expected-err &&
+               git read-tree -i -m $c7 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git update-index --ignore-missing --refresh 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git merge-recursive $c0 -- $c7 $c3 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git ls-files -s >actual-files 2>actual-err &&
+               test_must_be_empty actual-err
+       ) &&
        cat >expected-files <<-EOF &&
        100644 $o3 0    b/c
        100644 $o0 0    c
        100644 $o0 0    d/e
        100644 $o0 0    e
        EOF
-       test_cmp expected-files actual-files &&
-       test_cmp expected-err actual-err
+       test_cmp expected-files actual-files
 '
 
 test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' '
@@ -548,20 +550,22 @@ test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' '
                GIT_INDEX_FILE="$PWD/theirs-has-rename-index" &&
                export GIT_INDEX_FILE &&
                mkdir "$GIT_WORK_TREE" &&
-               git read-tree -i -m $c3 &&
-               git update-index --ignore-missing --refresh &&
-               git merge-recursive $c0 -- $c3 $c7 &&
-               git ls-files -s >actual-files
-       ) 2>actual-err &&
-       >expected-err &&
+               git read-tree -i -m $c3 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git update-index --ignore-missing --refresh 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git merge-recursive $c0 -- $c3 $c7 2>actual-err &&
+               test_must_be_empty actual-err &&
+               git ls-files -s >actual-files 2>actual-err &&
+               test_must_be_empty actual-err
+       ) &&
        cat >expected-files <<-EOF &&
        100644 $o3 0    b/c
        100644 $o0 0    c
        100644 $o0 0    d/e
        100644 $o0 0    e
        EOF
-       test_cmp expected-files actual-files &&
-       test_cmp expected-err actual-err
+       test_cmp expected-files actual-files
 '
 
 test_expect_success 'merge removes empty directories' '
index 163a14a1c2cb77b2a644a7342fd554940f7dd2e3..c1fc6ca7301eaa9b15ef091ce592989956efc156 100755 (executable)
@@ -4,266 +4,431 @@ test_description='wildmatch tests'
 
 . ./test-lib.sh
 
-match() {
-    if [ $1 = 1 ]; then
-       test_expect_success "wildmatch:     match '$3' '$4'" "
-           test-wildmatch wildmatch '$3' '$4'
-       "
-    else
-       test_expect_success "wildmatch:  no match '$3' '$4'" "
-           ! test-wildmatch wildmatch '$3' '$4'
-       "
-    fi
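+# should_create_test_file <filename>: decide whether a file with this name
+# can and should be created for the ls-files-based matching tests; return
+# non-zero for names the filesystem (or these tests) cannot handle.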
+should_create_test_file() {
+       file=$1
+
+       case $file in
+       # `touch .` will succeed but obviously not do what we intend
+       # here.
+       ".")
+               return 1
+               ;;
+       # We cannot create a file with an empty filename.
+       "")
+               return 1
+               ;;
+       # Tests that check that e.g. foo//bar is matched by foo/*/bar
+       # cannot be run against the filesystem, since there is no way
+       # to create a path containing a double slash.
+       *//*)
+               return 1
+               ;;
+       # When testing the difference between foo/bar and foo/bar/ we
+       # can't test the latter.
+       */)
+               return 1
+               ;;
+       # On Windows, \ in paths is silently converted to /, which
+       # would result in the "touch" below working, but the test
+       # itself failing. See 6fd1106aa4 ("t3700: Skip a test with
+       # backslashes in pathspec", 2009-03-13) for prior art and
+       # details.
+       *\\*)
+               if ! test_have_prereq BSLASHPSPEC
+               then
+                       return 1
+               fi
+               # NOTE: The ;;& bash extension is not portable, so
+               # this test needs to be at the end of the pattern
+               # list.
+               #
+               # If we want to add more conditional returns we either
+               # need a new case statement, or turn this whole thing
+               # into a series of "if" tests.
+               ;;
+       esac
+
+       # On Windows proper (i.e. not Cygwin), many file names that
+       # Cygwin would emulate cannot be created at all.
+       if test_have_prereq MINGW
+       then
+               case $file in
+               " ")
+                       # Files called " " are forbidden on Windows
+                       return 1
+                       ;;
+               *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**)
+                       # Files with various special characters aren't
+                       # allowed on Windows. Sourced from
+                       # https://stackoverflow.com/a/31976060
+                       return 1
+                       ;;
+               esac
+       fi
+
+       return 0
 }
 
-imatch() {
-    if [ $1 = 1 ]; then
-       test_expect_success "iwildmatch:    match '$2' '$3'" "
-           test-wildmatch iwildmatch '$2' '$3'
-       "
-    else
-       test_expect_success "iwildmatch: no match '$2' '$3'" "
-           ! test-wildmatch iwildmatch '$2' '$3'
-       "
-    fi
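+# match_with_function <text> <pattern> <match_expect> <match_function>:
+# check that test-wildmatch's <match_function> matches (1) or does not
+# match (0) <text> against <pattern>.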
+match_with_function() {
+       text=$1
+       pattern=$2
+       match_expect=$3
+       match_function=$4
+
+       if test "$match_expect" = 1
+       then
+               test_expect_success "$match_function: match '$text' '$pattern'" "
+                       test-wildmatch $match_function '$text' '$pattern'
+               "
+       elif test "$match_expect" = 0
+       then
+               test_expect_success "$match_function: no match '$text' '$pattern'" "
+                       test_must_fail test-wildmatch $match_function '$text' '$pattern'
+               "
+       else
+               test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+       fi
+
+}
+
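+# match_with_ls_files <text> <pattern> <match_expect> <match_function> <ls_files_args>:
+# check the same expectation through "git ls-files -z -- <pattern>" run
+# against a file created from <text>; "E" means ls-files should error out.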
+match_with_ls_files() {
+       text=$1
+       pattern=$2
+       match_expect=$3
+       match_function=$4
+       ls_files_args=$5
+
+       match_stdout_stderr_cmp="
+               tr -d '\0' <actual.raw >actual &&
+               >expect.err &&
+               test_cmp expect.err actual.err &&
+               test_cmp expect actual"
+
+       if test "$match_expect" = 'E'
+       then
+               if test -e .git/created_test_file
+               then
+                       test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" "
+                               printf '%s' '$text' >expect &&
+                               test_must_fail git$ls_files_args ls-files -z -- '$pattern'
+                       "
+               else
+                       test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+               fi
+       elif test "$match_expect" = 1
+       then
+               if test -e .git/created_test_file
+               then
+                       test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" "
+                               printf '%s' '$text' >expect &&
+                               git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+                               $match_stdout_stderr_cmp
+                       "
+               else
+                       test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+               fi
+       elif test "$match_expect" = 0
+       then
+               if test -e .git/created_test_file
+               then
+                       test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" "
+                               >expect &&
+                               git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+                               $match_stdout_stderr_cmp
+                       "
+               else
+                       test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false'
+               fi
+       else
+               test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+       fi
 }
 
-pathmatch() {
-    if [ $1 = 1 ]; then
-       test_expect_success "pathmatch:     match '$2' '$3'" "
-           test-wildmatch pathmatch '$2' '$3'
-       "
-    else
-       test_expect_success "pathmatch:  no match '$2' '$3'" "
-           ! test-wildmatch pathmatch '$2' '$3'
-       "
-    fi
+match() {
+       if test "$#" = 6
+       then
+               # When test-wildmatch and git ls-files produce the same
+               # result.
+               match_glob=$1
+               match_file_glob=$match_glob
+               match_iglob=$2
+               match_file_iglob=$match_iglob
+               match_pathmatch=$3
+               match_file_pathmatch=$match_pathmatch
+               match_pathmatchi=$4
+               match_file_pathmatchi=$match_pathmatchi
+               text=$5
+               pattern=$6
+       elif test "$#" = 10
+       then
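+               # When test-wildmatch and git ls-files are expected to
+               # disagree, the ls-files expectations are passed separately
+               # as arguments 5 to 8.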
+               match_glob=$1
+               match_iglob=$2
+               match_pathmatch=$3
+               match_pathmatchi=$4
+               match_file_glob=$5
+               match_file_iglob=$6
+               match_file_pathmatch=$7
+               match_file_pathmatchi=$8
+               text=$9
+               pattern=${10}
+       fi
+
+       test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' '
+               if test -e .git/created_test_file
+               then
+                       git reset &&
+                       git clean -df
+               fi
+       '
+
+       printf '%s' "$text" >.git/expected_test_file
+
+       test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" '
+               file=$(cat .git/expected_test_file) &&
+               if should_create_test_file "$file"
+               then
+                       dirs=${file%/*}
+                       if test "$file" != "$dirs"
+                       then
+                               mkdir -p -- "$dirs" &&
+                               touch -- "./$text"
+                       else
+                               touch -- "./$file"
+                       fi &&
+                       git add -A &&
+                       printf "%s" "$file" >.git/created_test_file
+               elif test -e .git/created_test_file
+               then
+                       rm .git/created_test_file
+               fi
+       '
+
+       # $1: Case sensitive glob match: test-wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_glob "wildmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs"
+
+       # $2: Case insensitive glob match: test-wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_iglob "iwildmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs"
+
+       # $3: Case sensitive path match: test-wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_pathmatch "pathmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" ""
+
+       # $4: Case insensitive path match: test-wildmatch & ls-files
+       match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch"
+       match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs"
 }
 
-# Basic wildmat features
-match 1 1 foo foo
-match 0 0 foo bar
-match 1 1 '' ""
-match 1 1 foo '???'
-match 0 0 foo '??'
-match 1 1 foo '*'
-match 1 1 foo 'f*'
-match 0 0 foo '*f'
-match 1 1 foo '*foo*'
-match 1 1 foobar '*ob*a*r*'
-match 1 1 aaaaaaabababab '*ab'
-match 1 1 'foo*' 'foo\*'
-match 0 0 foobar 'foo\*bar'
-match 1 1 'f\oo' 'f\\oo'
-match 1 1 ball '*[al]?'
-match 0 0 ten '[ten]'
-match 0 1 ten '**[!te]'
-match 0 0 ten '**[!ten]'
-match 1 1 ten 't[a-g]n'
-match 0 0 ten 't[!a-g]n'
-match 1 1 ton 't[!a-g]n'
-match 1 1 ton 't[^a-g]n'
-match 1 x 'a]b' 'a[]]b'
-match 1 x a-b 'a[]-]b'
-match 1 x 'a]b' 'a[]-]b'
-match 0 x aab 'a[]-]b'
-match 1 x aab 'a[]a-]b'
-match 1 1 ']' ']'
+# Basic wildmatch features
+match 1 1 1 1 foo foo
+match 0 0 0 0 foo bar
+match 1 1 1 1 '' ""
+match 1 1 1 1 foo '???'
+match 0 0 0 0 foo '??'
+match 1 1 1 1 foo '*'
+match 1 1 1 1 foo 'f*'
+match 0 0 0 0 foo '*f'
+match 1 1 1 1 foo '*foo*'
+match 1 1 1 1 foobar '*ob*a*r*'
+match 1 1 1 1 aaaaaaabababab '*ab'
+match 1 1 1 1 'foo*' 'foo\*'
+match 0 0 0 0 foobar 'foo\*bar'
+match 1 1 1 1 'f\oo' 'f\\oo'
+match 1 1 1 1 ball '*[al]?'
+match 0 0 0 0 ten '[ten]'
+match 0 0 1 1 ten '**[!te]'
+match 0 0 0 0 ten '**[!ten]'
+match 1 1 1 1 ten 't[a-g]n'
+match 0 0 0 0 ten 't[!a-g]n'
+match 1 1 1 1 ton 't[!a-g]n'
+match 1 1 1 1 ton 't[^a-g]n'
+match 1 1 1 1 'a]b' 'a[]]b'
+match 1 1 1 1 a-b 'a[]-]b'
+match 1 1 1 1 'a]b' 'a[]-]b'
+match 0 0 0 0 aab 'a[]-]b'
+match 1 1 1 1 aab 'a[]a-]b'
+match 1 1 1 1 ']' ']'
 
 # Extended slash-matching features
-match 0 0 'foo/baz/bar' 'foo*bar'
-match 0 0 'foo/baz/bar' 'foo**bar'
-match 0 1 'foobazbar' 'foo**bar'
-match 1 1 'foo/baz/bar' 'foo/**/bar'
-match 1 0 'foo/baz/bar' 'foo/**/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar'
-match 1 0 'foo/bar' 'foo/**/bar'
-match 1 0 'foo/bar' 'foo/**/**/bar'
-match 0 0 'foo/bar' 'foo?bar'
-match 0 0 'foo/bar' 'foo[/]bar'
-match 0 0 'foo/bar' 'foo[^a-z]bar'
-match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 0 'foo' '**/foo'
-match 1 x 'XXX/foo' '**/foo'
-match 1 0 'bar/baz/foo' '**/foo'
-match 0 0 'bar/baz/foo' '*/foo'
-match 0 0 'foo/bar/baz' '**/bar*'
-match 1 0 'deep/foo/bar/baz' '**/bar/*'
-match 0 0 'deep/foo/bar/baz/' '**/bar/*'
-match 1 0 'deep/foo/bar/baz/' '**/bar/**'
-match 0 0 'deep/foo/bar' '**/bar/*'
-match 1 0 'deep/foo/bar/' '**/bar/**'
-match 0 0 'foo/bar/baz' '**/bar**'
-match 1 0 'foo/bar/baz/x' '*/bar/**'
-match 0 0 'deep/foo/bar/baz/x' '*/bar/**'
-match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*'
+match 0 0 1 1 'foo/baz/bar' 'foo*bar'
+match 0 0 1 1 'foo/baz/bar' 'foo**bar'
+match 0 0 1 1 'foobazbar' 'foo**bar'
+match 1 1 1 1 'foo/baz/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/**/bar'
+match 0 0 1 1 'foo/bar' 'foo?bar'
+match 0 0 1 1 'foo/bar' 'foo[/]bar'
+match 0 0 1 1 'foo/bar' 'foo[^a-z]bar'
+match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 0 0 'foo' '**/foo'
+match 1 1 1 1 'XXX/foo' '**/foo'
+match 1 1 1 1 'bar/baz/foo' '**/foo'
+match 0 0 1 1 'bar/baz/foo' '*/foo'
+match 0 0 1 1 'foo/bar/baz' '**/bar*'
+match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*'
+match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**'
+match 0 0 0 0 'deep/foo/bar' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/' '**/bar/**'
+match 0 0 1 1 'foo/bar/baz' '**/bar**'
+match 1 1 1 1 'foo/bar/baz/x' '*/bar/**'
+match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**'
+match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*'
 
 # Various additional tests
-match 0 0 'acrt' 'a[c-c]st'
-match 1 1 'acrt' 'a[c-c]rt'
-match 0 0 ']' '[!]-]'
-match 1 x 'a' '[!]-]'
-match 0 0 '' '\'
-match 0 x '\' '\'
-match 0 x 'XXX/\' '*/\'
-match 1 x 'XXX/\' '*/\\'
-match 1 1 'foo' 'foo'
-match 1 1 '@foo' '@foo'
-match 0 0 'foo' '@foo'
-match 1 1 '[ab]' '\[ab]'
-match 1 1 '[ab]' '[[]ab]'
-match 1 x '[ab]' '[[:]ab]'
-match 0 x '[ab]' '[[::]ab]'
-match 1 x '[ab]' '[[:digit]ab]'
-match 1 x '[ab]' '[\[:]ab]'
-match 1 1 '?a?b' '\??\?b'
-match 1 1 'abc' '\a\b\c'
-match 0 0 'foo' ''
-match 1 0 'foo/bar/baz/to' '**/t[o]'
+match 0 0 0 0 'acrt' 'a[c-c]st'
+match 1 1 1 1 'acrt' 'a[c-c]rt'
+match 0 0 0 0 ']' '[!]-]'
+match 1 1 1 1 'a' '[!]-]'
+match 0 0 0 0 '' '\'
+match 0 0 0 0 \
+      1 1 1 1 '\' '\'
+match 0 0 0 0 'XXX/\' '*/\'
+match 1 1 1 1 'XXX/\' '*/\\'
+match 1 1 1 1 'foo' 'foo'
+match 1 1 1 1 '@foo' '@foo'
+match 0 0 0 0 'foo' '@foo'
+match 1 1 1 1 '[ab]' '\[ab]'
+match 1 1 1 1 '[ab]' '[[]ab]'
+match 1 1 1 1 '[ab]' '[[:]ab]'
+match 0 0 0 0 '[ab]' '[[::]ab]'
+match 1 1 1 1 '[ab]' '[[:digit]ab]'
+match 1 1 1 1 '[ab]' '[\[:]ab]'
+match 1 1 1 1 '?a?b' '\??\?b'
+match 1 1 1 1 'abc' '\a\b\c'
+match 0 0 0 0 \
+      E E E E 'foo' ''
+match 1 1 1 1 'foo/bar/baz/to' '**/t[o]'
 
 # Character class tests
-match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
-match 0 x 'a' '[[:digit:][:upper:][:space:]]'
-match 1 x 'A' '[[:digit:][:upper:][:space:]]'
-match 1 x '1' '[[:digit:][:upper:][:space:]]'
-match 0 x '1' '[[:digit:][:upper:][:spaci:]]'
-match 1 x ' ' '[[:digit:][:upper:][:space:]]'
-match 0 x '.' '[[:digit:][:upper:][:space:]]'
-match 1 x '.' '[[:digit:][:punct:][:space:]]'
-match 1 x '5' '[[:xdigit:]]'
-match 1 x 'f' '[[:xdigit:]]'
-match 1 x 'D' '[[:xdigit:]]'
-match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
-match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
-match 1 x '5' '[a-c[:digit:]x-z]'
-match 1 x 'b' '[a-c[:digit:]x-z]'
-match 1 x 'y' '[a-c[:digit:]x-z]'
-match 0 x 'q' '[a-c[:digit:]x-z]'
-
-# Additional tests, including some malformed wildmats
-match 1 x ']' '[\\-^]'
-match 0 0 '[' '[\\-^]'
-match 1 x '-' '[\-_]'
-match 1 x ']' '[\]]'
-match 0 0 '\]' '[\]]'
-match 0 0 '\' '[\]]'
-match 0 0 'ab' 'a[]b'
-match 0 x 'a[]b' 'a[]b'
-match 0 x 'ab[' 'ab['
-match 0 0 'ab' '[!'
-match 0 0 'ab' '[-'
-match 1 1 '-' '[-]'
-match 0 0 '-' '[a-'
-match 0 0 '-' '[!a-'
-match 1 x '-' '[--A]'
-match 1 x '5' '[--A]'
-match 1 1 ' ' '[ --]'
-match 1 1 '$' '[ --]'
-match 1 1 '-' '[ --]'
-match 0 0 '0' '[ --]'
-match 1 x '-' '[---]'
-match 1 x '-' '[------]'
-match 0 0 'j' '[a-e-n]'
-match 1 x '-' '[a-e-n]'
-match 1 x 'a' '[!------]'
-match 0 0 '[' '[]-a]'
-match 1 x '^' '[]-a]'
-match 0 0 '^' '[!]-a]'
-match 1 x '[' '[!]-a]'
-match 1 1 '^' '[a^bc]'
-match 1 x '-b]' '[a-]b]'
-match 0 0 '\' '[\]'
-match 1 1 '\' '[\\]'
-match 0 0 '\' '[!\\]'
-match 1 1 'G' '[A-\\]'
-match 0 0 'aaabbb' 'b*a'
-match 0 0 'aabcaa' '*ba*'
-match 1 1 ',' '[,]'
-match 1 1 ',' '[\\,]'
-match 1 1 '\' '[\\,]'
-match 1 1 '-' '[,-.]'
-match 0 0 '+' '[,-.]'
-match 0 0 '-.]' '[,-.]'
-match 1 1 '2' '[\1-\3]'
-match 1 1 '3' '[\1-\3]'
-match 0 0 '4' '[\1-\3]'
-match 1 1 '\' '[[-\]]'
-match 1 1 '[' '[[-\]]'
-match 1 1 ']' '[[-\]]'
-match 0 0 '-' '[[-\]]'
+match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
+match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]'
+match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]'
+match 1 1 1 1 '5' '[[:xdigit:]]'
+match 1 1 1 1 'f' '[[:xdigit:]]'
+match 1 1 1 1 'D' '[[:xdigit:]]'
+match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '5' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'b' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'y' '[a-c[:digit:]x-z]'
+match 0 0 0 0 'q' '[a-c[:digit:]x-z]'
 
-# Test recursion and the abort code (use "wildtest -i" to see iteration counts)
-match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
-match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
-match 0 x foo '*/*/*'
-match 0 x foo/bar '*/*/*'
-match 1 x foo/bba/arr '*/*/*'
-match 0 x foo/bb/aa/rr '*/*/*'
-match 1 x foo/bb/aa/rr '**/**/**'
-match 1 x abcXdefXghi '*X*i'
-match 0 x ab/cXd/efXg/hi '*X*i'
-match 1 x ab/cXd/efXg/hi '*/*X*/*/*i'
-match 1 x ab/cXd/efXg/hi '**/*X*/**/*i'
+# Additional tests, including some malformed wildmatch patterns
+match 1 1 1 1 ']' '[\\-^]'
+match 0 0 0 0 '[' '[\\-^]'
+match 1 1 1 1 '-' '[\-_]'
+match 1 1 1 1 ']' '[\]]'
+match 0 0 0 0 '\]' '[\]]'
+match 0 0 0 0 '\' '[\]]'
+match 0 0 0 0 'ab' 'a[]b'
+match 0 0 0 0 \
+      1 1 1 1 'a[]b' 'a[]b'
+match 0 0 0 0 \
+      1 1 1 1 'ab[' 'ab['
+match 0 0 0 0 'ab' '[!'
+match 0 0 0 0 'ab' '[-'
+match 1 1 1 1 '-' '[-]'
+match 0 0 0 0 '-' '[a-'
+match 0 0 0 0 '-' '[!a-'
+match 1 1 1 1 '-' '[--A]'
+match 1 1 1 1 '5' '[--A]'
+match 1 1 1 1 ' ' '[ --]'
+match 1 1 1 1 '$' '[ --]'
+match 1 1 1 1 '-' '[ --]'
+match 0 0 0 0 '0' '[ --]'
+match 1 1 1 1 '-' '[---]'
+match 1 1 1 1 '-' '[------]'
+match 0 0 0 0 'j' '[a-e-n]'
+match 1 1 1 1 '-' '[a-e-n]'
+match 1 1 1 1 'a' '[!------]'
+match 0 0 0 0 '[' '[]-a]'
+match 1 1 1 1 '^' '[]-a]'
+match 0 0 0 0 '^' '[!]-a]'
+match 1 1 1 1 '[' '[!]-a]'
+match 1 1 1 1 '^' '[a^bc]'
+match 1 1 1 1 '-b]' '[a-]b]'
+match 0 0 0 0 '\' '[\]'
+match 1 1 1 1 '\' '[\\]'
+match 0 0 0 0 '\' '[!\\]'
+match 1 1 1 1 'G' '[A-\\]'
+match 0 0 0 0 'aaabbb' 'b*a'
+match 0 0 0 0 'aabcaa' '*ba*'
+match 1 1 1 1 ',' '[,]'
+match 1 1 1 1 ',' '[\\,]'
+match 1 1 1 1 '\' '[\\,]'
+match 1 1 1 1 '-' '[,-.]'
+match 0 0 0 0 '+' '[,-.]'
+match 0 0 0 0 '-.]' '[,-.]'
+match 1 1 1 1 '2' '[\1-\3]'
+match 1 1 1 1 '3' '[\1-\3]'
+match 0 0 0 0 '4' '[\1-\3]'
+match 1 1 1 1 '\' '[[-\]]'
+match 1 1 1 1 '[' '[[-\]]'
+match 1 1 1 1 ']' '[[-\]]'
+match 0 0 0 0 '-' '[[-\]]'
 
-pathmatch 1 foo foo
-pathmatch 0 foo fo
-pathmatch 1 foo/bar foo/bar
-pathmatch 1 foo/bar 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/**'
-pathmatch 1 foo/bba/arr 'foo*'
-pathmatch 1 foo/bba/arr 'foo**'
-pathmatch 1 foo/bba/arr 'foo/*arr'
-pathmatch 1 foo/bba/arr 'foo/**arr'
-pathmatch 0 foo/bba/arr 'foo/*z'
-pathmatch 0 foo/bba/arr 'foo/**z'
-pathmatch 1 foo/bar 'foo?bar'
-pathmatch 1 foo/bar 'foo[/]bar'
-pathmatch 1 foo/bar 'foo[^a-z]bar'
-pathmatch 0 foo '*/*/*'
-pathmatch 0 foo/bar '*/*/*'
-pathmatch 1 foo/bba/arr '*/*/*'
-pathmatch 1 foo/bb/aa/rr '*/*/*'
-pathmatch 1 abcXdefXghi '*X*i'
-pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i'
-pathmatch 1 ab/cXd/efXg/hi '*Xg*i'
+# Test recursion
+match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
+match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
+match 0 0 0 0 foo '*/*/*'
+match 0 0 0 0 foo/bar '*/*/*'
+match 1 1 1 1 foo/bba/arr '*/*/*'
+match 0 0 1 1 foo/bb/aa/rr '*/*/*'
+match 1 1 1 1 foo/bb/aa/rr '**/**/**'
+match 1 1 1 1 abcXdefXghi '*X*i'
+match 0 0 1 1 ab/cXd/efXg/hi '*X*i'
+match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i'
+match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i'
 
-# Case-sensitivity features
-match 0 x 'a' '[A-Z]'
-match 1 x 'A' '[A-Z]'
-match 0 x 'A' '[a-z]'
-match 1 x 'a' '[a-z]'
-match 0 x 'a' '[[:upper:]]'
-match 1 x 'A' '[[:upper:]]'
-match 0 x 'A' '[[:lower:]]'
-match 1 x 'a' '[[:lower:]]'
-match 0 x 'A' '[B-Za]'
-match 1 x 'a' '[B-Za]'
-match 0 x 'A' '[B-a]'
-match 1 x 'a' '[B-a]'
-match 0 x 'z' '[Z-y]'
-match 1 x 'Z' '[Z-y]'
+# Extra pathmatch tests
+match 0 0 0 0 foo fo
+match 1 1 1 1 foo/bar foo/bar
+match 1 1 1 1 foo/bar 'foo/*'
+match 0 0 1 1 foo/bba/arr 'foo/*'
+match 1 1 1 1 foo/bba/arr 'foo/**'
+match 0 0 1 1 foo/bba/arr 'foo*'
+match 0 0 1 1 \
+      1 1 1 1 foo/bba/arr 'foo**'
+match 0 0 1 1 foo/bba/arr 'foo/*arr'
+match 0 0 1 1 foo/bba/arr 'foo/**arr'
+match 0 0 0 0 foo/bba/arr 'foo/*z'
+match 0 0 0 0 foo/bba/arr 'foo/**z'
+match 0 0 1 1 foo/bar 'foo?bar'
+match 0 0 1 1 foo/bar 'foo[/]bar'
+match 0 0 1 1 foo/bar 'foo[^a-z]bar'
+match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i'
 
-imatch 1 'a' '[A-Z]'
-imatch 1 'A' '[A-Z]'
-imatch 1 'A' '[a-z]'
-imatch 1 'a' '[a-z]'
-imatch 1 'a' '[[:upper:]]'
-imatch 1 'A' '[[:upper:]]'
-imatch 1 'A' '[[:lower:]]'
-imatch 1 'a' '[[:lower:]]'
-imatch 1 'A' '[B-Za]'
-imatch 1 'a' '[B-Za]'
-imatch 1 'A' '[B-a]'
-imatch 1 'a' '[B-a]'
-imatch 1 'z' '[Z-y]'
-imatch 1 'Z' '[Z-y]'
+# Extra case-sensitivity tests
+match 0 1 0 1 'a' '[A-Z]'
+match 1 1 1 1 'A' '[A-Z]'
+match 0 1 0 1 'A' '[a-z]'
+match 1 1 1 1 'a' '[a-z]'
+match 0 1 0 1 'a' '[[:upper:]]'
+match 1 1 1 1 'A' '[[:upper:]]'
+match 0 1 0 1 'A' '[[:lower:]]'
+match 1 1 1 1 'a' '[[:lower:]]'
+match 0 1 0 1 'A' '[B-Za]'
+match 1 1 1 1 'a' '[B-Za]'
+match 0 1 0 1 'A' '[B-a]'
+match 1 1 1 1 'a' '[B-a]'
+match 0 1 0 1 'z' '[Z-y]'
+match 1 1 1 1 'Z' '[Z-y]'
 
 test_done
index 503a88d0296a2620ed0ba6e70f784fcd58dc4a9b..6c0b7ea4addc8f1569b1b85f58dd3072fb863f33 100755 (executable)
@@ -528,7 +528,7 @@ test_expect_success 'git branch -c -f o/q o/p should work when o/p exists' '
        git branch -c -f o/q o/p
 '
 
-test_expect_success 'git branch -c qq rr/qq should fail when r exists' '
+test_expect_success 'git branch -c qq rr/qq should fail when rr exists' '
        git branch qq &&
        git branch rr &&
        test_must_fail git branch -c qq rr/qq
index 8ac58d5ea5e4b8b75deaa74f3d6bca29f37dbcb6..72d9564747adf2d37ea2e61a6d2e479096fe6508 100755 (executable)
@@ -277,4 +277,38 @@ EOF
        test_cmp From_.msg out
 '
 
+test_expect_success 'rebase--am.sh and --show-current-patch' '
+       test_create_repo conflict-apply &&
+       (
+               cd conflict-apply &&
+               test_commit init &&
+               echo one >>init.t &&
+               git commit -a -m one &&
+               echo two >>init.t &&
+               git commit -a -m two &&
+               git tag two &&
+               test_must_fail git rebase --onto init HEAD^ &&
+               GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+               grep "show.*$(git rev-parse two)" stderr
+       )
+'
+
+test_expect_success 'rebase--merge.sh and --show-current-patch' '
+       test_create_repo conflict-merge &&
+       (
+               cd conflict-merge &&
+               test_commit init &&
+               echo one >>init.t &&
+               git commit -a -m one &&
+               echo two >>init.t &&
+               git commit -a -m two &&
+               git tag two &&
+               test_must_fail git rebase --merge --onto init HEAD^ &&
+               git rebase --show-current-patch >actual.patch &&
+               GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+               grep "show.*REBASE_HEAD" stderr &&
+               test "$(git rev-parse REBASE_HEAD)" = "$(git rev-parse two)"
+       )
+'
+
 test_done
index 481a3500900d0fccb28762ba24b2ca422956a44f..c59d0384fd6f797ae30df9c157dbdf4b0307fa02 100755 (executable)
@@ -225,6 +225,14 @@ test_expect_success 'stop on conflicting pick' '
        test 0 = $(grep -c "^[^#]" < .git/rebase-merge/git-rebase-todo)
 '
 
+test_expect_success 'show conflicted patch' '
+       GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+       grep "show.*REBASE_HEAD" stderr &&
+       # the original stopped-sha1 is abbreviated
+       stopped_sha1="$(git rev-parse $(cat ".git/rebase-merge/stopped-sha"))" &&
+       test "$(git rev-parse REBASE_HEAD)" = "$stopped_sha1"
+'
+
 test_expect_success 'abort' '
        git rebase --abort &&
        test $(git rev-parse new-branch1) = $(git rev-parse HEAD) &&
@@ -453,6 +461,10 @@ test_expect_success C_LOCALE_OUTPUT 'squash and fixup generate correct log messa
                git rebase -i $base &&
        git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup &&
        test_cmp expect-squash-fixup actual-squash-fixup &&
+       git cat-file commit HEAD@{2} |
+               grep "^# This is a combination of 3 commits\."  &&
+       git cat-file commit HEAD@{3} |
+               grep "^# This is a combination of 2 commits\."  &&
        git checkout to-be-rebased &&
        git branch -D squash-fixup
 '
@@ -915,10 +927,8 @@ test_expect_success 'rebase --exec works without -i ' '
 test_expect_success 'rebase -i --exec without <CMD>' '
        git reset --hard execute &&
        set_fake_editor &&
-       test_must_fail git rebase -i --exec 2>tmp &&
-       sed -e "1d" tmp >actual &&
-       test_must_fail git rebase -h >expected &&
-       test_cmp expected actual &&
+       test_must_fail git rebase -i --exec 2>actual &&
+       test_i18ngrep "requires a value" actual &&
        git checkout master
 '
 
@@ -1336,6 +1346,16 @@ test_expect_success 'editor saves as CR/LF' '
 
 SQ="'"
 test_expect_success 'rebase -i --gpg-sign=<key-id>' '
+       test_when_finished "test_might_fail git rebase --abort" &&
+       set_fake_editor &&
+       FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
+               >out 2>err &&
+       test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err
+'
+
+test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' '
+       test_when_finished "test_might_fail git rebase --abort" &&
+       test_config commit.gpgsign true &&
        set_fake_editor &&
        FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
                >out 2>err &&
index ff8c360cd58bc11366ac2cf9e7c0b141e4ac3a39..cb7c6de84abf88bf90ac9716bb24ac91b8f64bf9 100755 (executable)
@@ -3,6 +3,7 @@
 test_description='rebase should handle arbitrary git message'
 
 . ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
 
 cat >F <<\EOF
 This is an example of a commit log message
@@ -25,6 +26,7 @@ test_expect_success setup '
        test_tick &&
        git commit -m "Initial commit" &&
        git branch diff-in-message &&
+       git branch empty-message-merge &&
 
        git checkout -b multi-line-subject &&
        cat F >file2 &&
@@ -45,6 +47,11 @@ test_expect_success setup '
 
        git cat-file commit HEAD | sed -e "1,/^\$/d" >G0 &&
 
+       git checkout empty-message-merge &&
+       echo file3 >file3 &&
+       git add file3 &&
+       git commit --allow-empty-message -m "" &&
+
        git checkout master &&
 
        echo One >file1 &&
@@ -69,4 +76,20 @@ test_expect_success 'rebase commit with diff in message' '
        test_cmp G G0
 '
 
+test_expect_success 'rebase -m commit with empty message' '
+       test_must_fail git rebase -m master empty-message-merge &&
+       git rebase --abort &&
+       git rebase -m --allow-empty-message master empty-message-merge
+'
+
+test_expect_success 'rebase -i commit with empty message' '
+       git checkout diff-in-message &&
+       set_fake_editor &&
+       test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+               git rebase -i HEAD^ &&
+       git rebase --abort &&
+       FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+               git rebase -i --allow-empty-message HEAD^
+'
+
 test_done
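These additions exercise "git rebase --allow-empty-message" for both the merge and interactive backends. For illustration only (the branch name "topic" is made up), replaying a history that contains a commit with an empty message looks like:

	# rejected by default when a replayed commit has an empty message
	git rebase -m master topic || git rebase --abort
	# accepted once empty messages are explicitly allowed
	git rebase -m --allow-empty-message master topic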
index 6b84e6042a6fcc9cf850a53ad2a885597fb178fc..e7292f5b9b938018f515f858f2dadc29162261ad 100755 (executable)
@@ -24,8 +24,23 @@ But otherwise with a sane description." &&
        >elif &&
        git add elif &&
        test_tick &&
-       git commit -m second
+       git commit -m second &&
 
+       git checkout -b side2 &&
+       >afile &&
+       git add afile &&
+       test_tick &&
+       git commit -m third &&
+       echo hello >afile &&
+       test_tick &&
+       git commit -a -m fourth &&
+       git checkout -b side-merge &&
+       git reset --hard HEAD^^ &&
+       git merge --no-ff -m "A merge commit log message that has a long
+summary that spills over multiple lines.
+
+But otherwise with a sane description." side2 &&
+       git branch side-merge-original
 '
 
 test_expect_success rebase '
@@ -36,6 +51,15 @@ test_expect_success rebase '
        git cat-file commit side@{1} | sed -e "1,/^\$/d" >expect &&
        test_cmp expect actual
 
+'
+test_expect_success rebasep '
+
+       git checkout side-merge &&
+       git rebase -p side &&
+       git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+       git cat-file commit side-merge-original | sed -e "1,/^\$/d" >expect &&
+       test_cmp expect actual
+
 '
 
 test_done
index 4f2a263b63e14348032959059c15b160fecba39c..0d89f6d0f651e46251c9c3c610f711353c701893 100755 (executable)
@@ -150,7 +150,10 @@ test_expect_success 'cherry-pick works with dirty renamed file' '
        test_tick &&
        git commit -m renamed &&
        echo modified >renamed &&
-       git cherry-pick refs/heads/unrelated
+       test_must_fail git cherry-pick refs/heads/unrelated >out &&
+       test_i18ngrep "Refusing to lose dirty file at renamed" out &&
+       test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) &&
+       grep -q "^modified$" renamed
 '
 
 test_done
index ce48c4fcca80b183927292cc1e5902cfe286f994..bd78287841ee053fd56a44a268f8077a222cc266 100755 (executable)
@@ -5,7 +5,6 @@ test_description='cherry-pick can handle submodules'
 . ./test-lib.sh
 . "$TEST_DIRECTORY"/lib-submodule-update.sh
 
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
 KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
 KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
 test_submodule_switch "git cherry-pick"
index db9378142a93338d2988f40e2748bc476490bcd5..5e39fcdb66c0c7c4b112c1bbe941d886db237693 100755 (executable)
@@ -25,7 +25,6 @@ git_revert () {
        git revert HEAD
 }
 
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
 KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
 test_submodule_switch "git_revert"
 
index 058698df6a4a9811b9db84fb5900472c47c61798..b170fb02b80356455d03dcf379636778149665a6 100755 (executable)
@@ -10,6 +10,19 @@ then
        test_done
 fi
 
+diff_cmp () {
+       for x
+       do
+               sed  -e '/^index/s/[0-9a-f]*[1-9a-f][0-9a-f]*\.\./1234567../' \
+                    -e '/^index/s/\.\.[0-9a-f]*[1-9a-f][0-9a-f]*/..9abcdef/' \
+                    -e '/^index/s/ 00*\.\./ 0000000../' \
+                    -e '/^index/s/\.\.00*$/..0000000/' \
+                    -e '/^index/s/\.\.00* /..0000000 /' \
+                    "$x" >"$x.filtered"
+       done
+       test_cmp "$1.filtered" "$2.filtered"
+}
+
 test_expect_success 'setup (initial)' '
        echo content >file &&
        git add file &&
@@ -22,20 +35,20 @@ test_expect_success 'status works (initial)' '
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-new file mode 100644
-index 0000000..d95f3ad
---- /dev/null
-+++ b/file
-@@ -0,0 +1 @@
-+content
-EOF
+       cat >expected <<-\EOF
+       new file mode 100644
+       index 0000000..d95f3ad
+       --- /dev/null
+       +++ b/file
+       @@ -0,0 +1 @@
+       +content
+       EOF
 '
 
 test_expect_success 'diff works (initial)' '
        (echo d; echo 1) | git add -i >output &&
        sed -ne "/new file/,/content/p" <output >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 test_expect_success 'revert works (initial)' '
        git add file &&
@@ -59,20 +72,20 @@ test_expect_success 'status works (commit)' '
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-index 180b47c..b6f2c08 100644
---- a/file
-+++ b/file
-@@ -1 +1,2 @@
- baseline
-+content
-EOF
+       cat >expected <<-\EOF
+       index 180b47c..b6f2c08 100644
+       --- a/file
+       +++ b/file
+       @@ -1 +1,2 @@
       baseline
+       +content
+       EOF
 '
 
 test_expect_success 'diff works (commit)' '
        (echo d; echo 1) | git add -i >output &&
        sed -ne "/^index/,/content/p" <output >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 test_expect_success 'revert works (commit)' '
        git add file &&
@@ -83,39 +96,32 @@ test_expect_success 'revert works (commit)' '
 
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-EOF
-'
-
-test_expect_success 'setup fake editor' '
-       >fake_editor.sh &&
-       chmod a+x fake_editor.sh &&
-       test_set_editor "$(pwd)/fake_editor.sh"
+       cat >expected <<-\EOF
+       EOF
 '
 
 test_expect_success 'dummy edit works' '
+       test_set_editor : &&
        (echo e; echo a) | git add -p &&
        git diff > diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-@@ -1,1 +1,4 @@
- this
-+patch
--does not
- apply
-EOF
+       cat >patch <<-\EOF
+       @@ -1,1 +1,4 @@
       this
+       +patch
+       -does not
       apply
+       EOF
 '
 
 test_expect_success 'setup fake editor' '
-       echo "#!$SHELL_PATH" >fake_editor.sh &&
-       cat >>fake_editor.sh <<\EOF &&
-mv -f "$1" oldpatch &&
-mv -f patch "$1"
-EOF
-       chmod a+x fake_editor.sh &&
+       write_script "fake_editor.sh" <<-\EOF &&
+       mv -f "$1" oldpatch &&
+       mv -f patch "$1"
+       EOF
        test_set_editor "$(pwd)/fake_editor.sh"
 '
 
@@ -126,10 +132,10 @@ test_expect_success 'bad edit rejected' '
 '
 
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-this patch
-is garbage
-EOF
+       cat >patch <<-\EOF
+       this patch
+       is garbage
+       EOF
 '
 
 test_expect_success 'garbage edit rejected' '
@@ -139,34 +145,34 @@ test_expect_success 'garbage edit rejected' '
 '
 
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-@@ -1,0 +1,0 @@
- baseline
-+content
-+newcontent
-+lines
-EOF
+       cat >patch <<-\EOF
+       @@ -1,0 +1,0 @@
       baseline
+       +content
+       +newcontent
+       +lines
+       EOF
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/file b/file
-index b5dd6c9..f910ae9 100644
---- a/file
-+++ b/file
-@@ -1,4 +1,4 @@
- baseline
- content
--newcontent
-+more
- lines
-EOF
+       cat >expected <<-\EOF
+       diff --git a/file b/file
+       index b5dd6c9..f910ae9 100644
+       --- a/file
+       +++ b/file
+       @@ -1,4 +1,4 @@
       baseline
       content
+       -newcontent
+       +more
       lines
+       EOF
 '
 
 test_expect_success 'real edit works' '
        (echo e; echo n; echo d) | git add -p &&
        git diff >output &&
-       test_cmp expected output
+       diff_cmp expected output
 '
 
 test_expect_success 'skip files similarly as commit -a' '
@@ -178,7 +184,7 @@ test_expect_success 'skip files similarly as commit -a' '
        git reset &&
        git commit -am commit &&
        git diff >expected &&
-       test_cmp expected output &&
+       diff_cmp expected output &&
        git reset --hard HEAD^
 '
 rm -f .gitignore
@@ -222,52 +228,67 @@ test_expect_success 'setup again' '
 
 # Write the patch file with a new line at the top and bottom
 test_expect_success 'setup patch' '
-cat >patch <<EOF
-index 180b47c..b6f2c08 100644
---- a/file
-+++ b/file
-@@ -1,2 +1,4 @@
-+firstline
- baseline
- content
-+lastline
-EOF
-'
-
-# Expected output, similar to the patch but w/ diff at the top
+       cat >patch <<-\EOF
+       index 180b47c..b6f2c08 100644
+       --- a/file
+       +++ b/file
+       @@ -1,2 +1,4 @@
+       +firstline
+        baseline
+        content
+       +lastline
+       \ No newline at end of file
+       EOF
+'
+
+# Expected output, diff is similar to the patch but w/ diff at the top
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/file b/file
-index b6f2c08..61b9053 100755
---- a/file
-+++ b/file
-@@ -1,2 +1,4 @@
-+firstline
- baseline
- content
-+lastline
-EOF
+       echo diff --git a/file b/file >expected &&
+       sed "/^index/s/ 100644/ 100755/" patch >>expected &&
+       cat >expected-output <<-\EOF
+       --- a/file
+       +++ b/file
+       @@ -1,2 +1,4 @@
+       +firstline
+        baseline
+        content
+       +lastline
+       \ No newline at end of file
+       @@ -1,2 +1,3 @@
+       +firstline
+        baseline
+        content
+       @@ -1,2 +2,3 @@
+        baseline
+        content
+       +lastline
+       \ No newline at end of file
+       EOF
 '
 
 # Test splitting the first patch, then adding both
-test_expect_success 'add first line works' '
+test_expect_success C_LOCALE_OUTPUT 'add first line works' '
        git commit -am "clear local changes" &&
        git apply patch &&
-       (echo s; echo y; echo y) | git add -p file &&
-       git diff --cached > diff &&
-       test_cmp expected diff
+       printf "%s\n" s y y | git add -p file 2>error |
+               sed -n -e "s/^Stage this hunk[^@]*\(@@ .*\)/\1/" \
+                      -e "/^[-+@ \\\\]"/p  >output &&
+       test_must_be_empty error &&
+       git diff --cached >diff &&
+       diff_cmp expected diff &&
+       test_cmp expected-output output
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/non-empty b/non-empty
-deleted file mode 100644
-index d95f3ad..0000000
---- a/non-empty
-+++ /dev/null
-@@ -1 +0,0 @@
--content
-EOF
+       cat >expected <<-\EOF
+       diff --git a/non-empty b/non-empty
+       deleted file mode 100644
+       index d95f3ad..0000000
+       --- a/non-empty
+       +++ /dev/null
+       @@ -1 +0,0 @@
+       -content
+       EOF
 '
 
 test_expect_success 'deleting a non-empty file' '
@@ -278,15 +299,15 @@ test_expect_success 'deleting a non-empty file' '
        rm non-empty &&
        echo y | git add -p non-empty &&
        git diff --cached >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/empty b/empty
-deleted file mode 100644
-index e69de29..0000000
-EOF
+       cat >expected <<-\EOF
+       diff --git a/empty b/empty
+       deleted file mode 100644
+       index e69de29..0000000
+       EOF
 '
 
 test_expect_success 'deleting an empty file' '
@@ -297,23 +318,17 @@ test_expect_success 'deleting an empty file' '
        rm empty &&
        echo y | git add -p empty &&
        git diff --cached >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success 'split hunk setup' '
        git reset --hard &&
-       for i in 10 20 30 40 50 60
-       do
-               echo $i
-       done >test &&
+       test_write_lines 10 20 30 40 50 60 >test &&
        git add test &&
        test_tick &&
        git commit -m test &&
 
-       for i in 10 15 20 21 22 23 24 30 40 50 60
-       do
-               echo $i
-       done >test
+       test_write_lines 10 15 20 21 22 23 24 30 40 50 60 >test
 '
 
 test_expect_success 'split hunk "add -p (edit)"' '
@@ -334,17 +349,7 @@ test_expect_success 'split hunk "add -p (edit)"' '
 '
 
 test_expect_failure 'split hunk "add -p (no, yes, edit)"' '
-       cat >test <<-\EOF &&
-       5
-       10
-       20
-       21
-       30
-       31
-       40
-       50
-       60
-       EOF
+       test_write_lines 5 10 20 21 30 31 40 50 60 >test &&
        git reset &&
        # test sequence is s(plit), n(o), y(es), e(dit)
        # q n q q is there to make sure we exit at the end.
@@ -378,7 +383,7 @@ test_expect_success 'patch mode ignores unmerged entries' '
        +changed
        EOF
        git diff --cached >diff &&
-       test_cmp expected diff
+       diff_cmp expected diff
 '
 
 test_expect_success TTY 'diffs can be colorized' '
@@ -392,6 +397,26 @@ test_expect_success TTY 'diffs can be colorized' '
        grep "$(printf "\\033")" output
 '
 
+test_expect_success TTY 'diffFilter filters diff' '
+       git reset --hard &&
+
+       echo content >test &&
+       test_config interactive.diffFilter "sed s/^/foo:/" &&
+       printf y | test_terminal git add -p >output 2>&1 &&
+
+       # avoid depending on the exact coloring or content of the prompts,
+       # and just make sure we saw our diff prefixed
+       grep foo:.*content output
+'
+
+test_expect_success TTY 'detect bogus diffFilter output' '
+       git reset --hard &&
+
+       echo content >test &&
+       test_config interactive.diffFilter "echo too-short" &&
+       printf y | test_must_fail test_terminal git add -p
+'
+
 test_expect_success 'patch-mode via -i prompts for files' '
        git reset --hard &&
 
@@ -407,7 +432,7 @@ test_expect_success 'patch-mode via -i prompts for files' '
 
        echo test >expect &&
        git diff --cached --name-only >actual &&
-       test_cmp expect actual
+       diff_cmp expect actual
 '
 
 test_expect_success 'add -p handles globs' '
@@ -541,4 +566,34 @@ test_expect_success 'status ignores dirty submodules (except HEAD)' '
        ! grep dirty-otherwise output
 '
 
+test_expect_success 'set up pathological context' '
+       git reset --hard &&
+       test_write_lines a a a a a a a a a a a >a &&
+       git add a &&
+       git commit -m a &&
+       test_write_lines c b a a a a a a a b a a a a >a &&
+       test_write_lines     a a a a a a a b a a a a >expected-1 &&
+       test_write_lines   b a a a a a a a b a a a a >expected-2 &&
+       # check editing can cope with missing header and deleted context lines
+       # as well as changes to other lines
+       test_write_lines +b " a" >patch
+'
+
+test_expect_success 'add -p works with pathological context lines' '
+       git reset &&
+       printf "%s\n" n y |
+       git add -p &&
+       git cat-file blob :a >actual &&
+       test_cmp expected-1 actual
+'
+
+test_expect_success 'add -p patch editing works with pathological context lines' '
+       git reset &&
+       # n q q below is in case edit fails
+       printf "%s\n" e y    n q q |
+       git add -p &&
+       git cat-file blob :a >actual &&
+       test_cmp expected-2 actual
+'
+
 test_done
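The diffFilter tests above depend on the interactive.diffFilter configuration, which pipes the colored diff shown by "git add -p" through a user-supplied command. A minimal sketch of how it might be enabled (diff-highlight from contrib/ is only an example filter, not something this patch installs):

	git config interactive.diffFilter diff-highlight
	# the filter must keep a one-to-one correspondence between input
	# and output lines, otherwise "git add -p" rejects its output
	git add -p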
index bfde4057ad2afcdd3bd38cbb43ffa2ce241aaa67..3ea5b9bb3ff0a4e439b1fc6cd8f9df86386f4126 100755 (executable)
@@ -228,4 +228,56 @@ test_expect_success 'stash previously ignored file' '
        test_path_is_file ignored.d/foo
 '
 
+test_expect_success 'stash -u -- <untracked> does not print error' '
+       >untracked &&
+       git stash push -u -- untracked 2>actual &&
+       test_path_is_missing untracked &&
+       test_line_count = 0 actual
+'
+
+test_expect_success 'stash -u -- <untracked> leaves rest of working tree in place' '
+       >tracked &&
+       git add tracked &&
+       >untracked &&
+       git stash push -u -- untracked &&
+       test_path_is_missing untracked &&
+       test_path_is_file tracked
+'
+
+test_expect_success 'stash -u -- <tracked> <untracked> clears changes in both' '
+       >tracked &&
+       git add tracked &&
+       >untracked &&
+       git stash push -u -- tracked untracked &&
+       test_path_is_missing tracked &&
+       test_path_is_missing untracked
+'
+
+test_expect_success 'stash --all -- <ignored> stashes ignored file' '
+       >ignored.d/bar &&
+       git stash push --all -- ignored.d/bar &&
+       test_path_is_missing ignored.d/bar
+'
+
+test_expect_success 'stash --all -- <tracked> <ignored> clears changes in both' '
+       >tracked &&
+       git add tracked &&
+       >ignored.d/bar &&
+       git stash push --all -- tracked ignored.d/bar &&
+       test_path_is_missing tracked &&
+       test_path_is_missing ignored.d/bar
+'
+
+test_expect_success 'stash -u -- <ignored> leaves ignored file alone' '
+       >ignored.d/bar &&
+       git stash push -u -- ignored.d/bar &&
+       test_path_is_file ignored.d/bar
+'
+
+test_expect_success 'stash -u -- <non-existent> shows no changes when there are none' '
+       git stash push -u -- non-existent >actual &&
+       echo "No local changes to save" >expect &&
+       test_i18ncmp expect actual
+'
+
 test_done
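The stash tests added above cover pathspec-limited "git stash push" together with -u/--all. A short sketch, with hypothetical file names, of stashing a single untracked file while leaving the rest of the worktree alone:

	>scratch.txt                      # untracked
	echo tweak >>Makefile             # tracked, modified
	git stash push -u -- scratch.txt
	# scratch.txt is stashed; the Makefile change stays in the worktree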
index f10798b2dff35df131fabb351240ebf75952b446..3f9a24fd56c801d1a75abb6cc4f4e8928c2dc427 100755 (executable)
@@ -361,6 +361,11 @@ diff --no-index --raw dir2 dir
 diff --no-index --raw --abbrev=4 dir2 dir
 :noellipses diff --no-index --raw --abbrev=4 dir2 dir
 diff --no-index --raw --no-abbrev dir2 dir
+
+diff-tree --pretty --root --stat --compact-summary initial
+diff-tree --pretty -R --root --stat --compact-summary initial
+diff-tree --stat --compact-summary initial mode
+diff-tree -R --stat --compact-summary initial mode
 EOF
 
 test_expect_success 'log -S requires an argument' '
diff --git a/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial b/t/t4013/diff.diff-tree_--pretty_--root_--stat_--compact-summary_initial
new file mode 100644 (file)
index 0000000..d6451ff
--- /dev/null
@@ -0,0 +1,12 @@
+$ git diff-tree --pretty --root --stat --compact-summary initial
+commit 444ac553ac7612cc88969031b02b3767fb8a353a
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+
+ dir/sub (new) | 2 ++
+ file0 (new)   | 3 +++
+ file2 (new)   | 3 +++
+ 3 files changed, 8 insertions(+)
+$
diff --git a/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial b/t/t4013/diff.diff-tree_--pretty_-R_--root_--stat_--compact-summary_initial
new file mode 100644 (file)
index 0000000..1989e55
--- /dev/null
@@ -0,0 +1,12 @@
+$ git diff-tree --pretty -R --root --stat --compact-summary initial
+commit 444ac553ac7612cc88969031b02b3767fb8a353a
+Author: A U Thor <author@example.com>
+Date:   Mon Jun 26 00:00:00 2006 +0000
+
+    Initial
+
+ dir/sub (gone) | 2 --
+ file0 (gone)   | 3 ---
+ file2 (gone)   | 3 ---
+ 3 files changed, 8 deletions(-)
+$
diff --git a/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode b/t/t4013/diff.diff-tree_--stat_--compact-summary_initial_mode
new file mode 100644 (file)
index 0000000..9c7c8f6
--- /dev/null
@@ -0,0 +1,4 @@
+$ git diff-tree --stat --compact-summary initial mode
+ file0 (mode +x) | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
diff --git a/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode b/t/t4013/diff.diff-tree_-R_--stat_--compact-summary_initial_mode
new file mode 100644 (file)
index 0000000..e38f3d3
--- /dev/null
@@ -0,0 +1,4 @@
+$ git diff-tree -R --stat --compact-summary initial mode
+ file0 (mode -x) | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
index 1795ffc3aaf3008f3ef3adc566803a66603975dd..22f9f88f0afc54f1dfeebbea623a4c41fde709f6 100755 (executable)
@@ -33,6 +33,7 @@ diffpatterns="
        css
        fortran
        fountain
+       golang
        html
        java
        matlab
diff --git a/t/t4018/golang-complex-function b/t/t4018/golang-complex-function
new file mode 100644 (file)
index 0000000..e057dce
--- /dev/null
@@ -0,0 +1,8 @@
+type Test struct {
+       a Type
+}
+
+func (t *Test) RIGHT(a Type) (Type, error) {
+       t.a = a
+       return ChangeMe, nil
+}
diff --git a/t/t4018/golang-func b/t/t4018/golang-func
new file mode 100644 (file)
index 0000000..8e9c9ac
--- /dev/null
@@ -0,0 +1,4 @@
+func RIGHT() {
+       a := 5
+       b := ChangeMe
+}
diff --git a/t/t4018/golang-interface b/t/t4018/golang-interface
new file mode 100644 (file)
index 0000000..553bede
--- /dev/null
@@ -0,0 +1,4 @@
+type RIGHT interface {
+       a() Type
+       b() ChangeMe
+}
diff --git a/t/t4018/golang-long-func b/t/t4018/golang-long-func
new file mode 100644 (file)
index 0000000..ac3a77b
--- /dev/null
@@ -0,0 +1,5 @@
+func RIGHT(aVeryVeryVeryLongVariableName AVeryVeryVeryLongType,
+       anotherLongVariableName AnotherLongType) {
+       a := 5
+       b := ChangeMe
+}
diff --git a/t/t4018/golang-struct b/t/t4018/golang-struct
new file mode 100644 (file)
index 0000000..5deda77
--- /dev/null
@@ -0,0 +1,4 @@
+type RIGHT struct {
+       a Type
+       b ChangeMe
+}
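These fixtures check hunk-header detection for the new golang userdiff driver. A brief sketch of how a repository would opt into it, assuming the usual attributes mechanism:

	echo "*.go diff=golang" >>.gitattributes
	git diff          # hunk headers now show the enclosing func/type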
index 9f563db20a867156a825dfc0ce810c4f59109ac8..6e2cf933f761683781525b36330e38c758aaabd6 100755 (executable)
@@ -19,17 +19,33 @@ test_expect_success 'preparation' '
        git commit -m message "$name"
 '
 
+cat >expect72 <<-'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
+EOF
+test_expect_success "format-patch: small change with long name gives more space to the name" '
+       git format-patch -1 --stdout >output &&
+       grep " | " output >actual &&
+       test_cmp expect72 actual
+'
+
 while read cmd args
 do
-       cat >expect <<-'EOF'
+       cat >expect80 <<-'EOF'
         ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
        EOF
        test_expect_success "$cmd: small change with long name gives more space to the name" '
                git $cmd $args >output &&
                grep " | " output >actual &&
-               test_cmp expect actual
+               test_cmp expect80 actual
        '
+done <<\EOF
+diff HEAD^ HEAD --stat
+show --stat
+log -1 --stat
+EOF
 
+while read cmd args
+do
        cat >expect <<-'EOF'
         ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
        EOF
@@ -79,11 +95,11 @@ test_expect_success 'preparation for big change tests' '
        git commit -m message abcd
 '
 
-cat >expect80 <<'EOF'
- abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72 <<'EOF'
+ abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 EOF
-cat >expect80-graph <<'EOF'
-|  abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+|  abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 EOF
 cat >expect200 <<'EOF'
  abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -107,7 +123,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect200 diff HEAD^ HEAD --stat
 respects expect200 show --stat
 respects expect200 log -1 --stat
@@ -135,7 +151,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect40 diff HEAD^ HEAD --stat
 respects expect40 show --stat
 respects expect40 log -1 --stat
@@ -163,7 +179,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect40 diff HEAD^ HEAD --stat
 respects expect40 show --stat
 respects expect40 log -1 --stat
@@ -250,11 +266,11 @@ show --stat
 log -1 --stat
 EOF
 
-cat >expect80 <<'EOF'
- ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72 <<'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
 EOF
-cat >expect80-graph <<'EOF'
-|  ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+|  ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
 EOF
 cat >expect200 <<'EOF'
  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@@ -278,7 +294,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect200 diff HEAD^ HEAD --stat
 respects expect200 show --stat
 respects expect200 log -1 --stat
@@ -308,7 +324,7 @@ do
                test_cmp "$expect-graph" actual
        '
 done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
 respects expect1 diff HEAD^ HEAD --stat
 respects expect1 show --stat
 respects expect1 log -1 --stat
diff --git a/t/t4064-diff-oidfind.sh b/t/t4064-diff-oidfind.sh
new file mode 100755 (executable)
index 0000000..3bdf317
--- /dev/null
@@ -0,0 +1,68 @@
+#!/bin/sh
+
+test_description='test finding specific blobs in the revision walking'
+. ./test-lib.sh
+
+test_expect_success 'setup ' '
+       git commit --allow-empty -m "empty initial commit" &&
+
+       echo "Hello, world!" >greeting &&
+       git add greeting &&
+       git commit -m "add the greeting blob" && # borrowed from Git from the Bottom Up
+       git tag -m "the blob" greeting $(git rev-parse HEAD:greeting) &&
+
+       echo asdf >unrelated &&
+       git add unrelated &&
+       git commit -m "unrelated history" &&
+
+       git revert HEAD^ &&
+
+       git commit --allow-empty -m "another unrelated commit"
+'
+
+test_expect_success 'find the greeting blob' '
+       cat >expect <<-EOF &&
+       Revert "add the greeting blob"
+       add the greeting blob
+       EOF
+
+       git log --format=%s --find-object=greeting^{blob} >actual &&
+
+       test_cmp expect actual
+'
+
+test_expect_success 'setup a tree' '
+       mkdir a &&
+       echo asdf >a/file &&
+       git add a/file &&
+       git commit -m "add a file in a subdirectory"
+'
+
+test_expect_success 'find a tree' '
+       cat >expect <<-EOF &&
+       add a file in a subdirectory
+       EOF
+
+       git log --format=%s -t --find-object=HEAD:a >actual &&
+
+       test_cmp expect actual
+'
+
+test_expect_success 'setup a submodule' '
+       test_create_repo sub &&
+       test_commit -C sub sub &&
+       git submodule add ./sub sub &&
+       git commit -a -m "add sub"
+'
+
+test_expect_success 'find a submodule' '
+       cat >expect <<-EOF &&
+       add sub
+       EOF
+
+       git log --format=%s --find-object=HEAD:sub >actual &&
+
+       test_cmp expect actual
+'
+
+test_done
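The new t4064 script exercises the --find-object option of the revision machinery. A condensed sketch of the same flow (the path "greeting" mirrors the test setup):

	oid=$(git rev-parse HEAD:greeting)
	# list the commits that introduce or remove exactly that blob
	git log --format=%s --find-object=$oid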
index 27cb0009fb1ed52b749785c31081b3097fef0022..c7c688fcc4bbdfe97a5c595fddc96e2590e021a2 100755 (executable)
@@ -89,4 +89,21 @@ test_expect_success 'traditional, whitespace-damaged, colon in timezone' '
        test_cmp expected "post image.txt"
 '
 
+cat >diff-from-svn <<\EOF
+Index: Makefile
+===================================================================
+diff --git a/branches/Makefile
+deleted file mode 100644
+--- a/branches/Makefile        (revision 13)
++++ /dev/null  (nonexistent)
+@@ +1 0,0 @@
+-
+EOF
+
+test_expect_success 'apply handles a diff generated by Subversion' '
+       >Makefile &&
+       git apply -p2 diff-from-svn &&
+       test_path_is_missing Makefile
+'
+
 test_done
index 73b67b4280b99e0328e201e6b69c3d88b766ea84..1eccfb71d0c8e26f88708459f065dea622baf9d3 100755 (executable)
@@ -662,6 +662,11 @@ test_expect_success 'am pauses on conflict' '
        test -d .git/rebase-apply
 '
 
+test_expect_success 'am --show-current-patch' '
+       git am --show-current-patch >actual.patch &&
+       test_cmp .git/rebase-apply/0001 actual.patch
+'
+
 test_expect_success 'am --skip works' '
        echo goodbye >expected &&
        git am --skip &&
@@ -1045,4 +1050,16 @@ test_expect_success 'am works with multi-line in-body headers' '
        git cat-file commit HEAD | grep "^$LONG$"
 '
 
+test_expect_success 'am --quit keeps HEAD where it is' '
+       mkdir .git/rebase-apply &&
+       >.git/rebase-apply/last &&
+       >.git/rebase-apply/next &&
+       git rev-parse HEAD^ >.git/ORIG_HEAD &&
+       git rev-parse HEAD >expected &&
+       git am --quit &&
+       test_path_is_missing .git/rebase-apply &&
+       git rev-parse HEAD >actual &&
+       test_cmp expected actual
+'
+
 test_done
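The two additions above test "git am --show-current-patch" and "git am --quit". For illustration only (patches.mbox is a placeholder), a mailbox application that stops on a conflict might be handled like this:

	git am patches.mbox || echo "stopped on a conflict"
	# inspect the patch that failed to apply
	git am --show-current-patch >failed.patch
	# give up but keep HEAD where it is (unlike --abort, which rewinds)
	git am --quit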
index 9473c2779ef0df109f6943a949583676ea5ceabf..16432781d2e0d52c33dfb1444fcce4459b417e8b 100755 (executable)
@@ -46,9 +46,8 @@ do
 
        test_expect_success "am$with3 --skip continue after failed am$with3" '
                test_must_fail git am$with3 --skip >output &&
-               test_i18ngrep "^Applying" output >output.applying &&
-               test_i18ngrep "^Applying: 6$" output.applying &&
-               test_i18ncmp file-2-expect file-2 &&
+               test_i18ngrep "^Applying: 6$" output &&
+               test_cmp file-2-expect file-2 &&
                test ! -f .git/MERGE_RR
        '
 
index da10478f59da1a301edf7def229d37fbc964dce9..ff6649ed9a70721523da3c55142a9622b152243a 100755 (executable)
@@ -127,6 +127,11 @@ test_expect_success !MINGW 'shortlog can read --format=raw output' '
        test_cmp expect out
 '
 
+test_expect_success 'shortlog from non-git directory refuses extra arguments' '
+       test_must_fail env GIT_DIR=non-existing git shortlog foo 2>out &&
+       test_i18ngrep "too many arguments" out
+'
+
 test_expect_success 'shortlog should add newline when input line matches wraplen' '
        cat >expect <<\EOF &&
 A U Thor (2):
index c2fc584dac3d7e96748866dd0a4ae31f7cae3fc2..d695a6082edf69c6ab377ea825519097f84162f3 100755 (executable)
@@ -262,4 +262,9 @@ EOF
     grep "^warning:.* expected .tagger. line" err
 '
 
+test_expect_success 'index-pack --fsck-objects also warns upon missing tagger in tag' '
+    git index-pack --fsck-objects tag-test-${pack1}.pack 2>err &&
+    grep "^warning:.* expected .tagger. line" err
+'
+
 test_done
index 80a1a3239a64a4f551bc394ab88be500c9b5c44f..0680dec808574fb334612163ebd43d31418a37d5 100755 (executable)
@@ -482,24 +482,24 @@ test_expect_success 'set up tests of missing reference' '
 test_expect_success 'test lonely missing ref' '
        (
                cd client &&
-               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy
-       ) >/dev/null 2>error-m &&
+               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy 2>../error-m
+       ) &&
        test_i18ncmp expect-error error-m
 '
 
 test_expect_success 'test missing ref after existing' '
        (
                cd client &&
-               test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy
-       ) >/dev/null 2>error-em &&
+               test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy 2>../error-em
+       ) &&
        test_i18ncmp expect-error error-em
 '
 
 test_expect_success 'test missing ref before existing' '
        (
                cd client &&
-               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A
-       ) >/dev/null 2>error-me &&
+               test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A 2>../error-me
+       ) &&
        test_i18ncmp expect-error error-me
 '
 
@@ -755,4 +755,67 @@ test_expect_success 'fetching deepen' '
        )
 '
 
+test_expect_success 'filtering by size' '
+       rm -rf server client &&
+       test_create_repo server &&
+       test_commit -C server one &&
+       test_config -C server uploadpack.allowfilter 1 &&
+
+       test_create_repo client &&
+       git -C client fetch-pack --filter=blob:limit=0 ../server HEAD &&
+
+       # Ensure that object is not inadvertently fetched
+       test_must_fail git -C client cat-file -e $(git hash-object server/one.t)
+'
+
+test_expect_success 'filtering by size has no effect if support for it is not advertised' '
+       rm -rf server client &&
+       test_create_repo server &&
+       test_commit -C server one &&
+
+       test_create_repo client &&
+       git -C client fetch-pack --filter=blob:limit=0 ../server HEAD 2> err &&
+
+       # Ensure that object is fetched
+       git -C client cat-file -e $(git hash-object server/one.t) &&
+
+       test_i18ngrep "filtering not recognized by server" err
+'
+
+fetch_filter_blob_limit_zero () {
+       SERVER="$1"
+       URL="$2"
+
+       rm -rf "$SERVER" client &&
+       test_create_repo "$SERVER" &&
+       test_commit -C "$SERVER" one &&
+       test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+
+       git clone "$URL" client &&
+       test_config -C client extensions.partialclone origin &&
+
+       test_commit -C "$SERVER" two &&
+
+       git -C client fetch --filter=blob:limit=0 origin HEAD:somewhere &&
+
+       # Ensure that commit is fetched, but blob is not
+       test_config -C client extensions.partialclone "arbitrary string" &&
+       git -C client cat-file -e $(git -C "$SERVER" rev-parse two) &&
+       test_must_fail git -C client cat-file -e $(git hash-object "$SERVER/two.t")
+}
+
+test_expect_success 'fetch with --filter=blob:limit=0' '
+       fetch_filter_blob_limit_zero server server
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetch with --filter=blob:limit=0 and HTTP' '
+       fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
 test_done
index 3debc87d4aefb2d0d5c77881a9485c06a2908858..da9ac0055721237f177d3d475e56ddb38b25eff1 100755 (executable)
@@ -540,82 +540,232 @@ test_expect_success "should be able to fetch with duplicate refspecs" '
 set_config_tristate () {
        # var=$1 val=$2
        case "$2" in
-       unset)  test_unconfig "$1" ;;
-       *)      git config "$1" "$2" ;;
+       unset)
+               test_unconfig "$1"
+               ;;
+       *)
+               git config "$1" "$2"
+               key=$(echo $1 | sed -e 's/^remote\.origin/fetch/')
+               git_fetch_c="$git_fetch_c -c $key=$2"
+               ;;
        esac
 }
 
 test_configured_prune () {
-       fetch_prune=$1 remote_origin_prune=$2 cmdline=$3 expected=$4
+       test_configured_prune_type "$@" "name"
+       test_configured_prune_type "$@" "link"
+}
 
-       test_expect_success "prune fetch.prune=$1 remote.origin.prune=$2${3:+ $3}; $4" '
+test_configured_prune_type () {
+       fetch_prune=$1
+       remote_origin_prune=$2
+       fetch_prune_tags=$3
+       remote_origin_prune_tags=$4
+       expected_branch=$5
+       expected_tag=$6
+       cmdline=$7
+       mode=$8
+
+       if test -z "$cmdline_setup"
+       then
+               test_expect_success 'setup cmdline_setup variable for subsequent test' '
+                       remote_url="file://$(git -C one config remote.origin.url)" &&
+                       remote_fetch="$(git -C one config remote.origin.fetch)" &&
+                       cmdline_setup="\"$remote_url\" \"$remote_fetch\""
+               '
+       fi
+
+       if test "$mode" = 'link'
+       then
+               new_cmdline=""
+
+               if test "$cmdline" = ""
+               then
+                       new_cmdline=$cmdline_setup
+               else
+                       new_cmdline=$(printf "%s" "$cmdline" | perl -pe 's[origin(?!/)]["'"$remote_url"'"]g')
+               fi
+
+               if test "$fetch_prune_tags" = 'true' ||
+                  test "$remote_origin_prune_tags" = 'true'
+               then
+                       if ! printf '%s' "$cmdline\n" | grep -q refs/remotes/origin/
+                       then
+                               new_cmdline="$new_cmdline refs/tags/*:refs/tags/*"
+                       fi
+               fi
+
+               cmdline="$new_cmdline"
+       fi
+
+       test_expect_success "$mode prune fetch.prune=$1 remote.origin.prune=$2 fetch.pruneTags=$3 remote.origin.pruneTags=$4${7:+ $7}; branch:$5 tag:$6" '
                # make sure a newbranch is there in . and also in one
                git branch -f newbranch &&
+               git tag -f newtag &&
                (
                        cd one &&
                        test_unconfig fetch.prune &&
+                       test_unconfig fetch.pruneTags &&
                        test_unconfig remote.origin.prune &&
-                       git fetch &&
-                       git rev-parse --verify refs/remotes/origin/newbranch
+                       test_unconfig remote.origin.pruneTags &&
+                       git fetch '"$cmdline_setup"' &&
+                       git rev-parse --verify refs/remotes/origin/newbranch &&
+                       git rev-parse --verify refs/tags/newtag
                ) &&
 
                # now remove it
                git branch -d newbranch &&
+               git tag -d newtag &&
 
                # then test
                (
                        cd one &&
+                       git_fetch_c="" &&
                        set_config_tristate fetch.prune $fetch_prune &&
+                       set_config_tristate fetch.pruneTags $fetch_prune_tags &&
                        set_config_tristate remote.origin.prune $remote_origin_prune &&
-
-                       git fetch $cmdline &&
-                       case "$expected" in
+                       set_config_tristate remote.origin.pruneTags $remote_origin_prune_tags &&
+
+                       if test "$mode" != "link"
+                       then
+                               git_fetch_c=""
+                       fi &&
+                       git$git_fetch_c fetch '"$cmdline"' &&
+                       case "$expected_branch" in
                        pruned)
                                test_must_fail git rev-parse --verify refs/remotes/origin/newbranch
                                ;;
                        kept)
                                git rev-parse --verify refs/remotes/origin/newbranch
                                ;;
+                       esac &&
+                       case "$expected_tag" in
+                       pruned)
+                               test_must_fail git rev-parse --verify refs/tags/newtag
+                               ;;
+                       kept)
+                               git rev-parse --verify refs/tags/newtag
+                               ;;
                        esac
                )
        '
 }
 
-test_configured_prune unset unset ""           kept
-test_configured_prune unset unset "--no-prune" kept
-test_configured_prune unset unset "--prune"    pruned
-
-test_configured_prune false unset ""           kept
-test_configured_prune false unset "--no-prune" kept
-test_configured_prune false unset "--prune"    pruned
-
-test_configured_prune true  unset ""           pruned
-test_configured_prune true  unset "--prune"    pruned
-test_configured_prune true  unset "--no-prune" kept
-
-test_configured_prune unset false ""           kept
-test_configured_prune unset false "--no-prune" kept
-test_configured_prune unset false "--prune"    pruned
-
-test_configured_prune false false ""           kept
-test_configured_prune false false "--no-prune" kept
-test_configured_prune false false "--prune"    pruned
-
-test_configured_prune true  false ""           kept
-test_configured_prune true  false "--prune"    pruned
-test_configured_prune true  false "--no-prune" kept
-
-test_configured_prune unset true  ""           pruned
-test_configured_prune unset true  "--no-prune" kept
-test_configured_prune unset true  "--prune"    pruned
-
-test_configured_prune false true  ""           pruned
-test_configured_prune false true  "--no-prune" kept
-test_configured_prune false true  "--prune"    pruned
-
-test_configured_prune true  true  ""           pruned
-test_configured_prune true  true  "--prune"    pruned
-test_configured_prune true  true  "--no-prune" kept
+# $1 config: fetch.prune
+# $2 config: remote.<name>.prune
+# $3 config: fetch.pruneTags
+# $4 config: remote.<name>.pruneTags
+# $5 expect: branch to be pruned?
+# $6 expect: tag to be pruned?
+# $7 git-fetch $cmdline:
+#
+#                     $1    $2    $3    $4    $5     $6     $7
+test_configured_prune unset unset unset unset kept   kept   ""
+test_configured_prune unset unset unset unset kept   kept   "--no-prune"
+test_configured_prune unset unset unset unset pruned kept   "--prune"
+test_configured_prune unset unset unset unset kept   pruned \
+       "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune unset unset unset unset pruned pruned \
+       "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune false unset unset unset kept   kept   ""
+test_configured_prune false unset unset unset kept   kept   "--no-prune"
+test_configured_prune false unset unset unset pruned kept   "--prune"
+
+test_configured_prune true  unset unset unset pruned kept   ""
+test_configured_prune true  unset unset unset pruned kept   "--prune"
+test_configured_prune true  unset unset unset kept   kept   "--no-prune"
+
+test_configured_prune unset false unset unset kept   kept   ""
+test_configured_prune unset false unset unset kept   kept   "--no-prune"
+test_configured_prune unset false unset unset pruned kept   "--prune"
+
+test_configured_prune false false unset unset kept   kept   ""
+test_configured_prune false false unset unset kept   kept   "--no-prune"
+test_configured_prune false false unset unset pruned kept   "--prune"
+test_configured_prune false false unset unset kept   pruned \
+       "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune false false unset unset pruned pruned \
+       "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune true  false unset unset kept   kept   ""
+test_configured_prune true  false unset unset pruned kept   "--prune"
+test_configured_prune true  false unset unset kept   kept   "--no-prune"
+
+test_configured_prune unset true  unset unset pruned kept   ""
+test_configured_prune unset true  unset unset kept   kept   "--no-prune"
+test_configured_prune unset true  unset unset pruned kept   "--prune"
+
+test_configured_prune false true  unset unset pruned kept   ""
+test_configured_prune false true  unset unset kept   kept   "--no-prune"
+test_configured_prune false true  unset unset pruned kept   "--prune"
+
+test_configured_prune true  true  unset unset pruned kept   ""
+test_configured_prune true  true  unset unset pruned kept   "--prune"
+test_configured_prune true  true  unset unset kept   kept   "--no-prune"
+test_configured_prune true  true  unset unset kept   pruned \
+       "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune true  true  unset unset pruned pruned \
+       "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+# --prune-tags on its own does nothing; it needs --prune as well. The
+# same goes for fetch.pruneTags without fetch.prune
+test_configured_prune unset unset unset unset kept kept     "--prune-tags"
+test_configured_prune unset unset true unset  kept kept     ""
+test_configured_prune unset unset unset true  kept kept     ""
+
+# These will prune the tags
+test_configured_prune unset unset unset unset pruned pruned "--prune --prune-tags"
+test_configured_prune true  unset true  unset pruned pruned ""
+test_configured_prune unset true  unset true  pruned pruned ""
+
+# remote.<name>.pruneTags overrides fetch.pruneTags, just like
+# remote.<name>.prune overrides fetch.prune if set.
+test_configured_prune true  unset true unset pruned pruned  ""
+test_configured_prune false true  false true  pruned pruned ""
+test_configured_prune true  false true  false kept   kept   ""
+
+# When --prune-tags is supplied it's ignored if an explicit refspec is
+# given, same for the configuration options.
+test_configured_prune unset unset unset unset pruned kept \
+       "--prune --prune-tags origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset true  unset pruned kept \
+       "--prune origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset unset true pruned  kept \
+       "--prune origin +refs/heads/*:refs/remotes/origin/*"
+
+# Pruning also takes place if a file:// url replaces a named
+# remote. However, because there is no implicit
+# +refs/heads/*:refs/remotes/origin/* refspec and supplying it on the
+# command line negates --prune-tags, the branches will not be pruned.
+test_configured_prune_type unset unset unset unset kept   kept   "origin --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept   kept   "origin --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept   pruned "origin --prune --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "--prune --prune-tags origin" "name"
+test_configured_prune_type unset unset unset unset kept   pruned "--prune --prune-tags origin" "link"
+test_configured_prune_type unset unset true  unset pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset true  unset kept   pruned "--prune origin" "link"
+test_configured_prune_type unset unset unset true  pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset unset true  kept   pruned "--prune origin" "link"
+test_configured_prune_type true  unset true  unset pruned pruned "origin" "name"
+test_configured_prune_type true  unset true  unset kept   pruned "origin" "link"
+test_configured_prune_type unset  true true  unset pruned pruned "origin" "name"
+test_configured_prune_type unset  true true  unset kept   pruned "origin" "link"
+test_configured_prune_type unset  true unset true  pruned pruned "origin" "name"
+test_configured_prune_type unset  true unset true  kept   pruned "origin" "link"
+
+# When all remote.origin.fetch settings are deleted, --prune
+# --prune-tags still implicitly supplies refs/tags/*:refs/tags/*, so
+# tags, but not tracking branches, will be deleted.
+test_expect_success 'remove remote.origin.fetch "one"' '
+       (
+               cd one &&
+               git config --unset-all remote.origin.fetch
+       )
+'
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
 
 test_expect_success 'all boundary commits are excluded' '
        test_commit base &&
index 74486c73b0bcc20ee69e49d5c1cc072994e4d7b0..9cc4b569c0566da2f5c2778f404e67e20b29ed06 100755 (executable)
@@ -85,7 +85,7 @@ test_expect_success "fetch --recurse-submodules -j2 has the same output behaviou
        add_upstream_commit &&
        (
                cd downstream &&
-               GIT_TRACE=$(pwd)/../trace.out git fetch --recurse-submodules -j2 2>../actual.err
+               GIT_TRACE="$TRASH_DIRECTORY/trace.out" git fetch --recurse-submodules -j2 2>../actual.err
        ) &&
        test_must_be_empty actual.out &&
        test_i18ncmp expect.err actual.err &&
index 644736b8a3b95c501bd8bb99850dd8de474b10d6..91f28c2f783df7391d22bd893e2488e56658515b 100755 (executable)
@@ -18,14 +18,6 @@ setup_repository () {
        )
 }
 
-verify_stderr () {
-       cat >expected &&
-       # We're not interested in the error
-       # "fatal: The remote end hung up unexpectedly":
-       test_i18ngrep -E '^(fatal|warning):' error | grep -v 'hung up' >actual | sort &&
-       test_i18ncmp expected actual
-}
-
 test_expect_success 'setup' '
        git commit --allow-empty -m "Initial" &&
        git branch branch1 &&
@@ -48,9 +40,7 @@ test_expect_success 'fetch conflict: config vs. config' '
                "+refs/heads/branch2:refs/remotes/origin/branch1" && (
                cd ccc &&
                test_must_fail git fetch origin 2>error &&
-               verify_stderr <<-\EOF
-               fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1
-               EOF
+               test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error
        )
 '
 
@@ -77,9 +67,7 @@ test_expect_success 'fetch conflict: arg vs. arg' '
                test_must_fail git fetch origin \
                        refs/heads/*:refs/remotes/origin/* \
                        refs/heads/branch2:refs/remotes/origin/branch1 2>error &&
-               verify_stderr <<-\EOF
-               fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1
-               EOF
+               test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error
        )
 '
 
@@ -90,10 +78,8 @@ test_expect_success 'fetch conflict: criss-cross args' '
                git fetch origin \
                        refs/heads/branch1:refs/remotes/origin/branch2 \
                        refs/heads/branch2:refs/remotes/origin/branch1 2>error &&
-               verify_stderr <<-\EOF
-               warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2
-               warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1
-               EOF
+               test_i18ngrep "warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2" error &&
+               test_i18ngrep "warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1" error
        )
 '
 
index 463783789c8ccda6a483197f6626afbfca7d8ccf..b47a95871cac3dd8593b3b9262d238be3914ac62 100755 (executable)
@@ -217,17 +217,32 @@ test_expect_success 'invalid push option in config' '
        test_refs master HEAD@{1}
 '
 
+test_expect_success 'push options keep quoted characters intact (direct)' '
+       mk_repo_pair &&
+       git -C upstream config receive.advertisePushOptions true &&
+       test_commit -C workbench one &&
+       git -C workbench push --push-option="\"embedded quotes\"" up master &&
+       echo "\"embedded quotes\"" >expect &&
+       test_cmp expect upstream/.git/hooks/pre-receive.push_options
+'
+
 . "$TEST_DIRECTORY"/lib-httpd.sh
 start_httpd
 
-test_expect_success 'push option denied properly by http server' '
+# set up http repository for fetching/pushing, with push options config
+# bool set to $1
+mk_http_pair () {
        test_when_finished "rm -rf test_http_clone" &&
-       test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
+       test_when_finished 'rm -rf "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git' &&
        mk_repo_pair &&
-       git -C upstream config receive.advertisePushOptions false &&
+       git -C upstream config receive.advertisePushOptions "$1" &&
        git -C upstream config http.receivepack true &&
        cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
-       git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+       git clone "$HTTPD_URL"/smart/upstream test_http_clone
+}
+
+test_expect_success 'push option denied properly by http server' '
+       mk_http_pair false &&
        test_commit -C test_http_clone one &&
        test_must_fail git -C test_http_clone push --push-option=asdf origin master 2>actual &&
        test_i18ngrep "the receiving end does not support push options" actual &&
@@ -235,13 +250,7 @@ test_expect_success 'push option denied properly by http server' '
 '
 
 test_expect_success 'push options work properly across http' '
-       test_when_finished "rm -rf test_http_clone" &&
-       test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
-       mk_repo_pair &&
-       git -C upstream config receive.advertisePushOptions true &&
-       git -C upstream config http.receivepack true &&
-       cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
-       git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+       mk_http_pair true &&
 
        test_commit -C test_http_clone one &&
        git -C test_http_clone push origin master &&
@@ -260,6 +269,15 @@ test_expect_success 'push options work properly across http' '
        test_cmp expect actual
 '
 
+test_expect_success 'push options keep quoted characters intact (http)' '
+       mk_http_pair true &&
+
+       test_commit -C test_http_clone one &&
+       git -C test_http_clone push --push-option="\"embedded quotes\"" origin master &&
+       echo "\"embedded quotes\"" >expect &&
+       test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options
+'
+
 stop_httpd
 
 test_done
index 755b05a8ae9de940be8d9ad4c89473d836dcdf20..0d4c52016b2b3651fa638fcc416f22d236c87fc2 100755 (executable)
@@ -50,7 +50,7 @@ test_expect_success 'no-op fetch -v stderr is as expected' '
 '
 
 test_expect_success 'no-op fetch without "-v" is quiet' '
-       (cd clone && git fetch) 2>stderr &&
+       (cd clone && git fetch 2>../stderr) &&
        ! test -s stderr
 '
 
index 8c437bf8721f929155fa2ddab1677c050ae5caca..0b620377448bc3b97d39d2b6beceb4d185edb7ba 100755 (executable)
@@ -628,4 +628,105 @@ test_expect_success 'clone on case-insensitive fs' '
        )
 '
 
+partial_clone () {
+       SERVER="$1" &&
+       URL="$2" &&
+
+       rm -rf "$SERVER" client &&
+       test_create_repo "$SERVER" &&
+       test_commit -C "$SERVER" one &&
+       HASH1=$(git hash-object "$SERVER/one.t") &&
+       git -C "$SERVER" revert HEAD &&
+       test_commit -C "$SERVER" two &&
+       HASH2=$(git hash-object "$SERVER/two.t") &&
+       test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+       test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+       git clone --filter=blob:limit=0 "$URL" client &&
+
+       git -C client fsck &&
+
+       # Ensure that unneeded blobs are not inadvertently fetched.
+       test_config -C client extensions.partialclone "not a remote" &&
+       test_must_fail git -C client cat-file -e "$HASH1" &&
+
+       # But this blob was fetched, because clone performs an initial checkout
+       git -C client cat-file -e "$HASH2"
+}
+
+test_expect_success 'partial clone' '
+       partial_clone server "file://$(pwd)/server"
+'
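A note on the filter spec used by partial_clone: --filter=blob:limit=0 asks the server to omit every blob larger than 0 bytes, i.e. all non-empty blobs, so nothing is fetched up front; HASH2 ends up present only because the initial checkout lazily fetched the blobs at HEAD, while HASH1 (reachable only from history) stays missing. A minimal sketch of probing for a blob without triggering a lazy fetch, using the same trick as the helper above (the "client" repository name comes from that helper):

# Pointing extensions.partialclone at a non-existent remote makes any
# attempted lazy fetch fail instead of silently downloading the object.
git -C client config extensions.partialclone "not a remote" &&
if git -C client cat-file -e "$HASH1"
then
	echo "blob present locally"
else
	echo "blob missing locally"
fi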
+
+test_expect_success 'partial clone: warn if server does not support object filtering' '
+       rm -rf server client &&
+       test_create_repo server &&
+       test_commit -C server one &&
+
+       git clone --filter=blob:limit=0 "file://$(pwd)/server" client 2> err &&
+
+       test_i18ngrep "filtering not recognized by server" err
+'
+
+test_expect_success 'batch missing blob request during checkout' '
+       rm -rf server client &&
+
+       test_create_repo server &&
+       echo a >server/a &&
+       echo b >server/b &&
+       git -C server add a b &&
+
+       git -C server commit -m x &&
+       echo aa >server/a &&
+       echo bb >server/b &&
+       git -C server add a b &&
+       git -C server commit -m x &&
+
+       test_config -C server uploadpack.allowfilter 1 &&
+       test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+       git clone --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+       # Ensure that there is only one negotiation by checking that there is
+       # only one "done" line sent. ("done" marks the end of negotiation.)
+       GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
+       grep "git> done" trace >done_lines &&
+       test_line_count = 1 done_lines
+'
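GIT_TRACE_PACKET, when set to an absolute path, logs every pkt-line exchanged with the server; each fetch negotiation ends with the client sending a "done" packet, so counting "git> done" lines counts negotiations. A hedged example of reusing the same trick, with grep -c instead of test_line_count (the trace path is illustrative):

# Count how many separate fetch negotiations a command triggered.
GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
grep -c "git> done" trace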
+
+test_expect_success 'batch missing blob request does not inadvertently try to fetch gitlinks' '
+       rm -rf server client &&
+
+       test_create_repo repo_for_submodule &&
+       test_commit -C repo_for_submodule x &&
+
+       test_create_repo server &&
+       echo a >server/a &&
+       echo b >server/b &&
+       git -C server add a b &&
+       git -C server commit -m x &&
+
+       echo aa >server/a &&
+       echo bb >server/b &&
+       # Also add a gitlink pointing to an arbitrary repository
+       git -C server submodule add "$(pwd)/repo_for_submodule" c &&
+       git -C server add a b c &&
+       git -C server commit -m x &&
+
+       test_config -C server uploadpack.allowfilter 1 &&
+       test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+       # Make sure that it succeeds
+       git clone --filter=blob:limit=0 "file://$(pwd)/server" client
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'partial clone using HTTP' '
+       partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
 test_done
diff --git a/t/t5616-partial-clone.sh b/t/t5616-partial-clone.sh
new file mode 100755 (executable)
index 0000000..cee5565
--- /dev/null
@@ -0,0 +1,157 @@
+#!/bin/sh
+
+test_description='git partial clone'
+
+. ./test-lib.sh
+
+# create a normal "src" repo where we can later create new commits.
+# expect_1.oids will contain a list of the OIDs of all blobs.
+test_expect_success 'setup normal src repo' '
+       echo "{print \$1}" >print_1.awk &&
+       echo "{print \$2}" >print_2.awk &&
+
+       git init src &&
+       for n in 1 2 3 4
+       do
+               echo "This is file: $n" > src/file.$n.txt
+               git -C src add file.$n.txt
+               git -C src commit -m "file $n"
+               git -C src ls-files -s file.$n.txt >>temp
+       done &&
+       awk -f print_2.awk <temp | sort >expect_1.oids &&
+       test_line_count = 4 expect_1.oids
+'
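print_1.awk and print_2.awk simply select the first or second whitespace-separated field. They are needed because "git ls-files -s" prints lines of the form "<mode> <oid> <stage><TAB><path>" (so field 2 is the blob OID collected into expect_1.oids), while "git rev-list --objects --missing=print" prints each missing object as "?<oid>" (so field 1, plus the later sed "s/?//", yields the OID). A self-contained illustration with a made-up OID:

# Illustration only; the OID below is a placeholder, not a real object.
printf "100644 0123456789abcdef0123456789abcdef01234567 0\tfile.1.txt\n" >sample &&
awk -f print_2.awk <sample
# prints: 0123456789abcdef0123456789abcdef01234567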
+
+# bare clone "src" giving "srv.bare" for use as our server.
+test_expect_success 'setup bare clone for server' '
+       git clone --bare "file://$(pwd)/src" srv.bare &&
+       git -C srv.bare config --local uploadpack.allowfilter 1 &&
+       git -C srv.bare config --local uploadpack.allowanysha1inwant 1
+'
+
+# do basic partial clone from "srv.bare"
+# confirm we are missing all of the known blobs.
+# confirm partial clone was registered in the local config.
+test_expect_success 'do partial clone 1' '
+       git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 &&
+       git -C pc1 rev-list HEAD --quiet --objects --missing=print \
+               | awk -f print_1.awk \
+               | sed "s/?//" \
+               | sort >observed.oids &&
+       test_cmp expect_1.oids observed.oids &&
+       test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" &&
+       test "$(git -C pc1 config --local extensions.partialclone)" = "origin" &&
+       test "$(git -C pc1 config --local core.partialclonefilter)" = "blob:none"
+'
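The three config assertions above correspond to what a partial clone records in the new repository: the repository format version is bumped so that older versions of git refuse to touch it, the promisor remote is named, and the filter-spec is remembered for later fetches. The relevant fragment of pc1/.git/config would look roughly like this (other keys written by clone, such as the remote.origin section, are omitted here):

[core]
	repositoryformatversion = 1
	partialclonefilter = blob:none
[extensions]
	partialclone = origin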
+
+# checkout master to force dynamic object fetch of blobs at HEAD.
+test_expect_success 'verify checkout with dynamic object fetch' '
+       git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+       test_line_count = 4 observed &&
+       git -C pc1 checkout master &&
+       git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+       test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a blame history on file.1.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server' '
+       git -C src remote add srv "file://$(pwd)/srv.bare" &&
+       for x in a b c d e
+       do
+               echo "Mod file.1.txt $x" >>src/file.1.txt
+               git -C src add file.1.txt
+               git -C src commit -m "mod $x"
+       done &&
+       git -C src blame master -- file.1.txt >expect.blame &&
+       git -C src push -u srv master
+'
+
+# (partial) fetch in the partial clone repo from the promisor remote.
+# verify that fetch inherited the filter-spec from the config and DOES NOT
+# have the new blobs.
+test_expect_success 'partial fetch inherits filter settings' '
+       git -C pc1 fetch origin &&
+       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       test_line_count = 5 observed
+'
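Because the filter-spec was recorded during the clone, a plain "git fetch origin" in pc1 reapplies it; the five objects still missing afterwards are presumably the five new blobs of file.1.txt created by the loop above. Under that assumption, the fetch is roughly equivalent to spelling the filter out explicitly:

# Roughly equivalent to the inherited-filter fetch above,
# assuming the recorded filter is blob:none (as configured by the clone).
git -C pc1 fetch --filter=blob:none origin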
+
+# force dynamic object fetch using diff.
+# we should only get 1 new blob (for the file in origin/master).
+test_expect_success 'verify diff causes dynamic object fetch' '
+       git -C pc1 diff master..origin/master -- file.1.txt &&
+       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       test_line_count = 4 observed
+'
+
+# force full dynamic object fetch of the file's history using blame.
+# we should get the intermediate blobs for the file.
+test_expect_success 'verify blame causes dynamic object fetch' '
+       git -C pc1 blame origin/master -- file.1.txt >observed.blame &&
+       test_cmp expect.blame observed.blame &&
+       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.2.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.2.txt' '
+       for x in a b c d e f
+       do
+               echo "Mod file.2.txt $x" >>src/file.2.txt
+               git -C src add file.2.txt
+               git -C src commit -m "mod $x"
+       done &&
+       git -C src push -u srv master
+'
+
+# Do FULL fetch by disabling inherited filter-spec using --no-filter.
+# Verify we have all the new blobs.
+test_expect_success 'override inherited filter-spec using --no-filter' '
+       git -C pc1 fetch --no-filter origin &&
+       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+       test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.3.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.3.txt' '
+       for x in a b c d e f
+       do
+               echo "Mod file.3.txt $x" >>src/file.3.txt
+               git -C src add file.3.txt
+               git -C src commit -m "mod $x"
+       done &&
+       git -C src push -u srv master
+'
+
+# Do a partial fetch and then try to manually fetch the missing objects.
+# This can be used as the basis of a pre-command hook to bulk fetch objects,
+# perhaps combined with a command in dry-run mode.
+test_expect_success 'manual prefetch of missing objects' '
+       git -C pc1 fetch --filter=blob:none origin &&
+       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+               | awk -f print_1.awk \
+               | sed "s/?//" \
+               | sort >observed.oids &&
+       test_line_count = 6 observed.oids &&
+       git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids &&
+       git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+               | awk -f print_1.awk \
+               | sed "s/?//" \
+               | sort >observed.oids &&
+       test_line_count = 0 observed.oids
+'
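The two steps used above, listing what is missing and feeding the list to fetch-pack, are the building blocks of such a bulk prefetch. A minimal sketch as a standalone helper (the function name and the --all scope are illustrative, not part of the test):

# Fetch every object still missing from a partial clone in a single pack.
prefetch_missing () {
	git rev-list --all --quiet --objects --missing=print |
	sed -n 's/^?//p' |
	git fetch-pack --stdin "$1"
}
# e.g.: prefetch_missing "file://$(pwd)/srv.bare"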
+
+test_expect_success 'partial clone with transfer.fsckobjects=1 uses index-pack --fsck-objects' '
+       git init src &&
+       test_commit -C src x &&
+       test_config -C src uploadpack.allowfilter 1 &&
+       test_config -C src uploadpack.allowanysha1inwant 1 &&
+
+       GIT_TRACE="$(pwd)/trace" git -c transfer.fsckobjects=1 \
+               clone --filter="blob:none" "file://$(pwd)/src" dst &&
+       grep "git index-pack.*--fsck-objects" trace
+'
+
+test_done
index 8f17fd9da8ef6b54ae0a97500f2154b1c9d968b3..716283b274677dc6c954044c2f11b902ace79527 100755 (executable)
@@ -146,6 +146,48 @@ test_expect_success 'status -s -b (diverged from upstream)' '
        test_i18ncmp expect actual
 '
 
+cat >expect <<\EOF
+## b1...origin/master [different]
+EOF
+
+test_expect_success 'status -s -b --no-ahead-behind (diverged from upstream)' '
+       (
+               cd test &&
+               git checkout b1 >/dev/null &&
+               git status -s -b --no-ahead-behind | head -1
+       ) >actual &&
+       test_i18ncmp expect actual
+'
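With the default ahead/behind computation enabled, the porcelain header for this diverged branch would instead carry the counts, e.g. (hedged, matching the 1-and-1 divergence set up earlier in this script):

## b1...origin/master [ahead 1, behind 1]

--no-ahead-behind skips that potentially expensive walk and degrades the decoration to "[different]".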
+
+cat >expect <<\EOF
+On branch b1
+Your branch and 'origin/master' have diverged,
+and have 1 and 1 different commits each, respectively.
+EOF
+
+test_expect_success 'status --long --branch' '
+       (
+               cd test &&
+               git checkout b1 >/dev/null &&
+               git status --long -b | head -3
+       ) >actual &&
+       test_i18ncmp expect actual
+'
+
+cat >expect <<\EOF
+On branch b1
+Your branch and 'origin/master' refer to different commits.
+EOF
+
+test_expect_success 'status --long --branch --no-ahead-behind' '
+       (
+               cd test &&
+               git checkout b1 >/dev/null &&
+               git status --long -b --no-ahead-behind | head -2
+       ) >actual &&
+       test_i18ncmp expect actual
+'
+
 cat >expect <<\EOF
 ## b5...brokenbase [gone]
 EOF
diff --git a/t/t6043-merge-rename-directories.sh b/t/t6043-merge-rename-directories.sh
new file mode 100755 (executable)
index 0000000..2e28f29
--- /dev/null
@@ -0,0 +1,3998 @@
+#!/bin/sh
+
+test_description="recursive merge with directory renames"
+# includes checking of many corner cases, with a similar methodology to:
+#   t6042: corner cases with renames but not criss-cross merges
+#   t6036: corner cases with both renames and criss-cross merges
+#
+# The setup for all of them, pictorially, is:
+#
+#      A
+#      o
+#     / \
+#  O o   ?
+#     \ /
+#      o
+#      B
+#
+# To help make it easier to follow the flow of tests, they have been
+# divided into sections and each test will start with a quick explanation
+# of what commits O, A, and B contain.
+#
+# Notation:
+#    z/{b,c}   means  files z/b and z/c both exist
+#    x/d_1     means  file x/d exists with content d1.  (Purpose of the
+#                     underscore notation is to differentiate different
+#                     files that might be renamed into each other's paths.)
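As a worked example of this notation: "Commit B: z/{b,c,d_1,e}, y/d_3" (testcase 5b below) describes a tree containing z/b, z/c, z/d, z/e and y/d, where z/d has content d1 and y/d has content d3; the subscripts only distinguish contents, and the files on disk are simply z/d and y/d.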
+
+. ./test-lib.sh
+
+
+###########################################################################
+# SECTION 1: Basic cases we should be able to handle
+###########################################################################
+
+# Testcase 1a, Basic directory rename.
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d,e/f}
+#   Expected: y/{b,c,d,e/f}
+
+test_expect_success '1a-setup: Simple directory rename detection' '
+       test_create_repo 1a &&
+       (
+               cd 1a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               mkdir z/e &&
+               echo f >z/e/f &&
+               git add z/d z/e/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1a-check: Simple directory rename detection' '
+       (
+               cd 1a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e/f &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d    B:z/e/f &&
+               test_cmp expect actual &&
+
+               git hash-object y/d >actual &&
+               git rev-parse B:z/d >expect &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_must_fail git rev-parse HEAD:z/e/f &&
+               test_path_is_missing z/d &&
+               test_path_is_missing z/e/f
+       )
+'
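The check above uses the verification idiom repeated throughout this file: "git rev-parse" with several <rev>:<path> arguments prints one object ID per line, so comparing the IDs at the merged paths against the IDs at the original paths proves both that the contents survived intact and that they ended up where the directory rename says they should. In isolation:

# The comparison idiom used by the *-check tests in this file.
git rev-parse >actual HEAD:y/b HEAD:y/c &&   # IDs at the post-merge paths
git rev-parse >expect O:z/b O:z/c &&         # IDs at the pre-merge paths
test_cmp expect actual                       # equal IDs => same blobs, new location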
+
+# Testcase 1b, Merge a directory with another
+#   Commit O: z/{b,c},   y/d
+#   Commit A: z/{b,c,e}, y/d
+#   Commit B: y/{b,c,d}
+#   Expected: y/{b,c,d,e}
+
+test_expect_success '1b-setup: Merge a directory with another' '
+       test_create_repo 1b &&
+       (
+               cd 1b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/b y &&
+               git mv z/c y &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1b-check: Merge a directory with another' '
+       (
+               cd 1b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:y/d    A:z/e &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:z/e
+       )
+'
+
+# Testcase 1c, Transitive renaming
+#   (Related to testcases 3a and 6d -- when should a transitive rename apply?)
+#   (Related to testcases 9c and 9d -- can transitivity repeat?)
+#   (Related to testcase 12b -- joint-transitivity?)
+#   Commit O: z/{b,c},   x/d
+#   Commit A: y/{b,c},   x/d
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c,d}  (because x/d -> z/d -> y/d)
+
+test_expect_success '1c-setup: Transitive renaming' '
+       test_create_repo 1c &&
+       (
+               cd 1c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1c-check: Transitive renaming' '
+       (
+               cd 1c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:x/d &&
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_path_is_missing z/d
+       )
+'
+
+# Testcase 1d, Directory renames (merging two directories into one new one)
+#              cause a rename/rename(2to1) conflict
+#   (Related to testcases 1c and 7b)
+#   Commit O: z/{b,c},        y/{d,e}
+#   Commit A: x/{b,c},        y/{d,e,m,wham_1}
+#   Commit B: z/{b,c,n,wham_2}, x/{d,e}
+#   Expected: x/{b,c,d,e,m,n}, CONFLICT:(y/wham_1 & z/wham_2 -> x/wham)
+#   Note: y/m & z/n should definitely move into x.  By the same token, both
+#         y/wham_1 & z/wham_2 should too...giving us a conflict.
+
+test_expect_success '1d-setup: Directory renames cause a rename/rename(2to1) conflict' '
+       test_create_repo 1d &&
+       (
+               cd 1d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               echo e >y/e &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z x &&
+               echo m >y/m &&
+               echo wham1 >y/wham &&
+               git add y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y x &&
+               echo n >z/n &&
+               echo wham2 >z/wham &&
+               git add z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1d-check: Directory renames cause a rename/rename(2to1) conflict' '
+       (
+               cd 1d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 8 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:x/c :0:x/d :0:x/e :0:x/m :0:x/n &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:y/d  O:y/e  A:y/m  B:z/n &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :0:x/wham &&
+               git rev-parse >actual \
+                       :2:x/wham :3:x/wham &&
+               git rev-parse >expect \
+                        A:y/wham  B:z/wham &&
+               test_cmp expect actual &&
+
+               test_path_is_missing x/wham &&
+               test_path_is_file x/wham~HEAD &&
+               test_path_is_file x/wham~B^0 &&
+
+               git hash-object >actual \
+                       x/wham~HEAD x/wham~B^0 &&
+               git rev-parse >expect \
+                       A:y/wham    B:z/wham &&
+               test_cmp expect actual
+       )
+'
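The :2:x/wham and :3:x/wham lookups above rely on how git records an unresolved merge: a conflicted path has no stage-0 entry; instead stage 1 holds the merge base's version (when one exists), stage 2 "ours" (HEAD), and stage 3 "theirs". A hedged illustration of inspecting one such path (OIDs elided):

# List the unmerged stages of a single conflicted path.
git ls-files -u -- x/wham
# 100644 <oid of A:y/wham> 2	x/wham
# 100644 <oid of B:z/wham> 3	x/wham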
+
+# Testcase 1e, Renamed directory, with all filenames being renamed too
+#   (Related to testcases 9f & 9g)
+#   Commit O: z/{oldb,oldc}
+#   Commit A: y/{newb,newc}
+#   Commit B: z/{oldb,oldc,d}
+#   Expected: y/{newb,newc,d}
+
+test_expect_success '1e-setup: Renamed directory, with all files being renamed too' '
+       test_create_repo 1e &&
+       (
+               cd 1e &&
+
+               mkdir z &&
+               echo b >z/oldb &&
+               echo c >z/oldc &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               git mv z/oldb y/newb &&
+               git mv z/oldc y/newc &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1e-check: Renamed directory, with all files being renamed too' '
+       (
+               cd 1e &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/newb HEAD:y/newc HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/oldb    O:z/oldc    B:z/d &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:z/d
+       )
+'
+
+# Testcase 1f, Split a directory into two other directories
+#   (Related to testcases 3a, all of section 2, and all of section 4)
+#   Commit O: z/{b,c,d,e,f}
+#   Commit A: z/{b,c,d,e,f,g}
+#   Commit B: y/{b,c}, x/{d,e,f}
+#   Expected: y/{b,c}, x/{d,e,f,g}
+
+test_expect_success '1f-setup: Split a directory into two other directories' '
+       test_create_repo 1f &&
+       (
+               cd 1f &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               echo e >z/e &&
+               echo f >z/f &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo g >z/g &&
+               git add z/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               git mv z/e x/ &&
+               git mv z/f x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '1f-check: Split a directory into two other directories' '
+       (
+               cd 1f &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d HEAD:x/e HEAD:x/f HEAD:x/g &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d    O:z/e    O:z/f    A:z/g &&
+               test_cmp expect actual &&
+               test_path_is_missing z/g &&
+               test_must_fail git rev-parse HEAD:z/g
+       )
+'
+
+###########################################################################
+# Rules suggested by testcases in section 1:
+#
+#   We should still detect the directory rename even if it wasn't just
+#   the directory renamed, but the files within it. (see 1b)
+#
+#   If renames split a directory into two or more others, the directory
+#   with the most renames "wins" (see 1c).  However, see the testcases
+#   in section 2, plus testcases 3a and 4a.
+###########################################################################
+
+
+###########################################################################
+# SECTION 2: Split into multiple directories, with equal number of paths
+#
+# Explore the splitting-a-directory rules a bit; what happens in the
+# edge cases?
+#
+# Note that there is a closely related case of a directory not being
+# split on either side of history, but being renamed differently on
+# each side.  See testcase 8e for that.
+###########################################################################
+
+# Testcase 2a, Directory split into two on one side, with equal numbers of paths
+#   Commit O: z/{b,c}
+#   Commit A: y/b, w/c
+#   Commit B: z/{b,c,d}
+#   Expected: y/b, w/c, z/d, with warning about z/ -> (y/ vs. w/) conflict
+test_expect_success '2a-setup: Directory split into two on one side, with equal numbers of paths' '
+       test_create_repo 2a &&
+       (
+               cd 2a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir w &&
+               git mv z/b y/ &&
+               git mv z/c w/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2a-check: Directory split into two on one side, with equal numbers of paths' '
+       (
+               cd 2a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT.*directory rename split" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:w/c :0:z/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 2b, Directory split into two on one side, with equal numbers of paths
+#   Commit O: z/{b,c}
+#   Commit A: y/b, w/c
+#   Commit B: z/{b,c}, x/d
+#   Expected: y/b, w/c, x/d; No warning about z/ -> (y/ vs. w/) conflict
+test_expect_success '2b-setup: Directory split into two on one side, with equal numbers of paths' '
+       test_create_repo 2b &&
+       (
+               cd 2b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir w &&
+               git mv z/b y/ &&
+               git mv z/c w/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir x &&
+               echo d >x/d &&
+               git add x/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '2b-check: Directory split into two on one side, with equal numbers of paths' '
+       (
+               cd 2b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:w/c :0:x/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:x/d &&
+               test_cmp expect actual &&
+               test_i18ngrep ! "CONFLICT.*directory rename split" out
+       )
+'
+
+###########################################################################
+# Rules suggested by section 2:
+#
+#   None; the rule was already covered in section 1.  These testcases are
+#   here just to make sure the conflict resolution and necessary warning
+#   messages are handled correctly.
+###########################################################################
+
+
+###########################################################################
+# SECTION 3: Path in question is the source path for some rename already
+#
+# Combining cases from Section 1 and trying to handle them could lead to
+# directory renaming detection being over-applied.  So, this section
+# provides some good testcases to check that the implementation doesn't go
+# too far.
+###########################################################################
+
+# Testcase 3a, Avoid implicit rename if involved as source on other side
+#   (Related to testcases 1c, 1f, and 9h)
+#   Commit O: z/{b,c,d}
+#   Commit A: z/{b,c,d} (no change)
+#   Commit B: y/{b,c}, x/d
+#   Expected: y/{b,c}, x/d
+test_expect_success '3a-setup: Avoid implicit rename if involved as source on other side' '
+       test_create_repo 3a &&
+       (
+               cd 3a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3a-check: Avoid implicit rename if involved as source on other side' '
+       (
+               cd 3a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 3b, Avoid implicit rename if involved as source on other side
+#   (Related to testcases 5c and 7c, also kind of 1e and 1f)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}, x/d
+#   Commit B: z/{b,c}, w/d
+#   Expected: y/{b,c}, CONFLICT:(z/d -> x/d vs. w/d)
+#   NOTE: We're particularly checking that since z/d is already involved as
+#         a source in a file rename on the same side of history, that we don't
+#         get it involved in directory rename detection.  If it were, we might
+#         end up with CONFLICT:(z/d -> y/d vs. x/d vs. w/d), i.e. a
+#         rename/rename/rename(1to3) conflict, which is just weird.
+test_expect_success '3b-setup: Avoid implicit rename if involved as source on current side' '
+       test_create_repo 3b &&
+       (
+               cd 3b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir w &&
+               git mv z/d w/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '3b-check: Avoid implicit rename if involved as source on current side' '
+       (
+               cd 3b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/d.*x/d.*w/d out &&
+               test_i18ngrep ! CONFLICT.*rename/rename.*y/d out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :1:z/d :2:x/d :3:w/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:z/d  O:z/d  O:z/d &&
+               test_cmp expect actual &&
+
+               test_path_is_missing z/d &&
+               git hash-object >actual \
+                       x/d   w/d &&
+               git rev-parse >expect \
+                       O:z/d O:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 3:
+#
+#   Avoid directory-rename-detection for a path, if that path is the source
+#   of a rename on either side of a merge.
+###########################################################################
+
+
+###########################################################################
+# SECTION 4: Partially renamed directory; still exists on both sides of merge
+#
+# What if we were to attempt to do directory rename detection when someone
+# "mostly" moved a directory but still left some files around, or,
+# equivalently, fully renamed a directory in one commit and then recreated
+# that directory in a later commit adding some new files and then tried to
+# merge?
+#
+# It's hard to divine user intent in these cases, because you can make an
+# argument that, depending on the intermediate history of the side being
+# merged, some users will want files in that directory to
+# automatically be detected and renamed, while users with a different
+# intermediate history wouldn't want that rename to happen.
+#
+# I think that it is best to simply not have directory rename detection
+# apply to such cases.  My reasoning for this is four-fold: (1) it's
+# easiest for users in general to figure out what happened if we don't
+# apply directory rename detection in any such case, (2) it's an easy rule
+# to explain ["We don't do directory rename detection if the directory
+# still exists on both sides of the merge"], (3) we can get some hairy
+# edge/corner cases that would be really confusing and possibly not even
+# representable in the index if we were to even try, and [related to 3] (4)
+# attempting to resolve this issue of divining user intent by examining
+# intermediate history goes against the spirit of three-way merges and is a
+# path towards crazy corner cases that are far more complex than what we're
+# already dealing with.
+#
+# Note that the wording of the rule ("We don't do directory rename
+# detection if the directory still exists on both sides of the merge.")
+# also excludes "renaming" of a directory into a subdirectory of itself
+# (e.g. /some/dir/* -> /some/dir/subdir/*).  It may be possible to carve
+# out an exception for "renaming"-beneath-itself cases without opening
+# weird edge/corner cases for other partial directory renames, but for now
+# we are keeping the rule simple.
+#
+# This section contains a test for a partially-renamed-directory case.
+###########################################################################
+
+# Testcase 4a, Directory split, with original directory still present
+#   (Related to testcase 1f)
+#   Commit O: z/{b,c,d,e}
+#   Commit A: y/{b,c,d}, z/e
+#   Commit B: z/{b,c,d,e,f}
+#   Expected: y/{b,c,d}, z/{e,f}
+#   NOTE: Even though most files from z moved to y, we don't want f to follow.
+
+test_expect_success '4a-setup: Directory split, with original directory still present' '
+       test_create_repo 4a &&
+       (
+               cd 4a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               echo e >z/e &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir y &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo f >z/f &&
+               git add z/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '4a-check: Directory split, with original directory still present' '
+       (
+               cd 4a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/e HEAD:z/f &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:z/d    O:z/e    B:z/f &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 4:
+#
+#   Directory-rename-detection should be turned off for any directories (as
+#   a source for renames) that exist on both sides of the merge.  (The "as
+#   a source for renames" clarification is due to cases like 1c where
+#   the target directory exists on both sides and we do want the rename
+#   detection.)  But, sadly, see testcase 8b.
+###########################################################################
+
+
+###########################################################################
+# SECTION 5: Files/directories in the way of subset of to-be-renamed paths
+#
+# Implicitly renaming files due to a detected directory rename could run
+# into problems if there are files or directories in the way of the paths
+# we want to rename.  Explore such cases in this section.
+###########################################################################
+
+# Testcase 5a, Merge directories, other side adds files to original and target
+#   Commit O: z/{b,c},       y/d
+#   Commit A: z/{b,c,e_1,f}, y/{d,e_2}
+#   Commit B: y/{b,c,d}
+#   Expected: z/e_1, y/{b,c,d,e_2,f} + CONFLICT warning
+#   NOTE: While directory rename detection is active here causing z/f to
+#         become y/f, we did not apply this for z/e_1 because that would
+#         give us an add/add conflict for y/e_1 vs y/e_2.  The problem with
+#         this add/add is that both versions of y/e are from the same side
+#         of history, giving us no way to represent this conflict in the
+#         index.
+
+test_expect_success '5a-setup: Merge directories, other side adds files to original and target' '
+       test_create_repo 5a &&
+       (
+               cd 5a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir y &&
+               echo d >y/d &&
+               git add z y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e1 >z/e &&
+               echo f >z/f &&
+               echo e2 >y/e &&
+               git add z/e z/f y/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5a-check: Merge directories, other side adds files to original and target' '
+       (
+               cd 5a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT.*implicit dir rename" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/d :0:y/e :0:z/e :0:y/f &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:y/d  A:y/e  A:z/e  A:z/f &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 5b, Rename/delete in order to get add/add/add conflict
+#   (Related to testcase 8d; these may appear slightly inconsistent to users;
+#    Also related to testcases 7d and 7e)
+#   Commit O: z/{b,c,d_1}
+#   Commit A: y/{b,c,d_2}
+#   Commit B: z/{b,c,d_1,e}, y/d_3
+#   Expected: y/{b,c,e}, CONFLICT(add/add: y/d_2 vs. y/d_3)
+#   NOTE: If z/d_1 in commit B were to be involved in dir rename detection, as
+#         we normaly would since z/ is being renamed to y/, then this would be
+#         we normally would since z/ is being renamed to y/, then this would be
+#         conflict of y/d_1 vs. y/d_2 vs. y/d_3.  Add/add/add is not
+#         representable in the index, so the existence of y/d_3 needs to
+#         cause us to bail on directory rename detection for that path, falling
+#         back to git behavior without the directory rename detection.
+
+test_expect_success '5b-setup: Rename/delete in order to get add/add/add conflict' '
+       test_create_repo 5b &&
+       (
+               cd 5b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d1 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               echo d2 >y/d &&
+               git add y/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               echo d3 >y/d &&
+               echo e >z/e &&
+               git add y/d z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5b-check: Rename/delete in order to get add/add/add conflict' '
+       (
+               cd 5b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (add/add).* y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e :2:y/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e  A:y/d  B:y/d &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/d &&
+               test_path_is_file y/d
+       )
+'
+
+# Testcase 5c, Transitive rename would cause rename/rename/rename/add/add/add
+#   (Directory rename detection would result in transitive rename vs.
+#    rename/rename(1to2) and turn it into a rename/rename(1to3).  Further,
+#    rename paths conflict with separate adds on the other side)
+#   (Related to testcases 3b and 7c)
+#   Commit O: z/{b,c}, x/d_1
+#   Commit A: y/{b,c,d_2}, w/d_1
+#   Commit B: z/{b,c,d_1,e}, w/d_3, y/d_4
+#   Expected: A mess, but only a rename/rename(1to2)/add/add mess.  Use the
+#             presence of y/d_4 in B to avoid doing transitive rename of
+#             x/d_1 -> z/d_1 -> y/d_1, so that the only paths we have at
+#             y/d are y/d_2 and y/d_4.  We still do the move from z/e to y/e,
+#             though, because it doesn't have anything in the way.
+
+test_expect_success '5c-setup: Transitive rename would cause rename/rename/rename/add/add/add' '
+       test_create_repo 5c &&
+       (
+               cd 5c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d1 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               echo d2 >y/d &&
+               git add y/d &&
+               git mv x w &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               mkdir w &&
+               mkdir y &&
+               echo d3 >w/d &&
+               echo d4 >y/d &&
+               echo e >z/e &&
+               git add w/ y/ z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5c-check: Transitive rename would cause rename/rename/rename/add/add/add' '
+       (
+               cd 5c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*z/d" out &&
+               test_i18ngrep "CONFLICT (add/add).* y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 9 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/d &&
+               git rev-parse >actual \
+                       :2:w/d :3:w/d :1:x/d :2:y/d :3:y/d :3:z/d &&
+               git rev-parse >expect \
+                        O:x/d  B:w/d  O:x/d  A:y/d  B:y/d  O:x/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       w/d~HEAD w/d~B^0 z/d &&
+               git rev-parse >expect \
+                       O:x/d    B:w/d   O:x/d &&
+               test_cmp expect actual &&
+               test_path_is_missing x/d &&
+               test_path_is_file y/d &&
+               grep -q "<<<<" y/d  # conflict markers should be present
+       )
+'
+
+# Testcase 5d, Directory/file/file conflict due to directory rename
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c,d_1}
+#   Commit B: z/{b,c,d_2,f}, y/d/e
+#   Expected: y/{b,c,d/e,f}, z/d_2, CONFLICT(file/directory), y/d_1~HEAD
+#   Note: The fact that y/d/ exists in B makes us bail on directory rename
+#         detection for z/d_2, but that doesn't prevent us from applying the
+#         directory rename detection for z/f -> y/f.
+
+test_expect_success '5d-setup: Directory/file/file conflict due to directory rename' '
+       test_create_repo 5d &&
+       (
+               cd 5d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               echo d1 >y/d &&
+               git add y/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir -p y/d &&
+               echo e >y/d/e &&
+               echo d2 >z/d &&
+               echo f >z/f &&
+               git add y/d/e z/d z/f &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '5d-check: Directory/file/file conflict due to directory rename' '
+       (
+               cd 5d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (file/directory).*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:z/d :0:y/f :2:y/d :0:y/d/e &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/d  B:z/f  A:y/d  B:y/d/e &&
+               test_cmp expect actual &&
+
+               git hash-object y/d~HEAD >actual &&
+               git rev-parse A:y/d >expect &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 5:
+#
+#   If a subset of to-be-renamed files have a file or directory in the way,
+#   "turn off" the directory rename for those specific sub-paths, falling
+#   back to old handling.  But, sadly, see testcases 8a and 8b.
+###########################################################################
+
+
+###########################################################################
+# SECTION 6: Same side of the merge was the one that did the rename
+#
+# It may sound obvious that you only want to apply implicit directory
+# renames to directories if the _other_ side of history did the renaming.
+# If you did make an implementation that didn't explicitly enforce this
+# rule, the majority of cases that would fall under this section would
+# also be solved by following the rules from the above sections.  But
+# there are still a few that stick out, so this section covers them just
+# to make sure we also get them right.
+###########################################################################
+
+# Testcase 6a, Tricky rename/delete
+#   Commit O: z/{b,c,d}
+#   Commit A: z/b
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/b, CONFLICT(rename/delete, z/c -> y/c vs. NULL)
+#   Note: We're just checking here that the rename of z/b and z/c to put
+#         them under y/ doesn't accidentally catch z/d and make it look like
+#         it is also involved in a rename/delete conflict.
+
+test_expect_success '6a-setup: Tricky rename/delete' '
+       test_create_repo 6a &&
+       (
+               cd 6a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               git rm z/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6a-check: Tricky rename/delete' '
+       (
+               cd 6a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*z/c.*y/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 2 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :3:y/c &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6b, Same rename done on both sides
+#   (Related to testcases 6c and 8e)
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   Note: If we did directory rename detection here, we'd move z/d into y/,
+#         but B did that rename and still decided to put the file into z/,
+#         so we probably shouldn't apply directory rename detection for it.
+
+test_expect_success '6b-setup: Same rename done on both sides' '
+       test_create_repo 6b &&
+       (
+               cd 6b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6b-check: Same rename done on both sides' '
+       (
+               cd 6b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6c, Rename only done on same side
+#   (Related to testcases 6b and 8e)
+#   Commit O: z/{b,c}
+#   Commit A: z/{b,c} (no change)
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   NOTE: Seems obvious, but just checking that the implementation doesn't
+#         "accidentally detect a rename" and give us y/{b,c,d}.
+
+test_expect_success '6c-setup: Rename only done on same side' '
+       test_create_repo 6c &&
+       (
+               cd 6c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6c-check: Rename only done on same side' '
+       (
+               cd 6c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6d, We don't always want transitive renaming
+#   (Related to testcase 1c)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: z/{b,c}, x/d (no change)
+#   Commit B: y/{b,c}, z/d
+#   Expected: y/{b,c}, z/d
+#   NOTE: Again, this seems obvious but just checking that the implementation
+#         doesn't "accidentally detect a rename" and give us y/{b,c,d}.
+
+test_expect_success '6d-setup: We do not always want transitive renaming' '
+       test_create_repo 6d &&
+       (
+               cd 6d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               git mv x z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6d-check: We do not always want transitive renaming' '
+       (
+               cd 6d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 6e, Add/add from one-side
+#   Commit O: z/{b,c}
+#   Commit A: z/{b,c} (no change)
+#   Commit B: y/{b,c,d_1}, z/d_2
+#   Expected: y/{b,c,d_1}, z/d_2
+#   NOTE: Again, this seems obvious but just checking that the implementation
+#         doesn't "accidentally detect a rename" and give us y/{b,c} +
+#         add/add conflict on y/d_1 vs y/d_2.
+
+test_expect_success '6e-setup: Add/add from one side' '
+       test_create_repo 6e &&
+       (
+               cd 6e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               git commit --allow-empty -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               echo d1 >y/d &&
+               mkdir z &&
+               echo d2 >z/d &&
+               git add y/d z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '6e-check: Add/add from one side' '
+       (
+               cd 6e &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:z/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:y/d    B:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 6:
+#
+#   Only apply implicit directory renames to directories if the other
+#   side of history is the one doing the renaming.
+###########################################################################
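+
+# (Illustrative, hypothetical sketch of that rule: one way to see which side
+# of history performed a directory's renames is to diff each branch against
+# the merge base with rename detection enabled, e.g.
+#
+#    base=$(git merge-base A B) &&
+#    git diff -M --name-status "$base" A   # renames done on side A
+#    git diff -M --name-status "$base" B   # renames done on side B
+#
+# and only let renames reported for the *other* side of history pull newly
+# added paths out of the original directory.)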
+
+
+###########################################################################
+# SECTION 7: More involved Edge/Corner cases
+#
+# The ruleset we have generated in the above sections seems to provide
+# well-defined merges.  But can we find edge/corner cases that either (a)
+# are harder for users to understand, or (b) have a resolution that is
+# non-intuitive or suboptimal?
+#
+# The testcases in this section are crafted to look for cases that might be
+# surprising to users or difficult for them to understand (the next section
+# will look at non-intuitive or suboptimal merge results).  Some of the
+# testcases are similar to ones from past sections, but have been simplified
+# to highlight error messages that refer to a "modified" path (modified due
+# to the directory rename).  Are users okay with these?
+#
+# In my opinion, any testcases from this section that are difficult to
+# understand are difficult because of the testcase setup itself rather than
+# because of the directory renaming (similar to how t6042 and t6036 have
+# difficult resolutions due to the problem setup itself being complex).  And
+# I don't think the error messages are a problem.
+#
+# On the other hand, the testcases in section 8 worry me slightly more...
+###########################################################################
+
+# Testcase 7a, rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: w/b, x/c, z/d
+#   Expected: y/d, CONFLICT(rename/rename for both z/b and z/c)
+#   NOTE: z/ is renamed here; y/ received the most of its renamed paths, so z/d -> y/d.
+
+test_expect_success '7a-setup: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
+       test_create_repo 7a &&
+       (
+               cd 7a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir w &&
+               mkdir x &&
+               git mv z/b w/ &&
+               git mv z/c x/ &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7a-check: rename-dir vs. rename-dir (NOT split evenly) PLUS add-other-file' '
+       (
+               cd 7a &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*z/b.*y/b.*w/b" out &&
+               test_i18ngrep "CONFLICT (rename/rename).*z/c.*y/c.*x/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:x/c :0:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/b   w/b   y/c   x/c &&
+               git rev-parse >expect \
+                       O:z/b O:z/b O:z/c O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7b, rename/rename(2to1), but only due to transitive rename
+#   (Related to testcase 1d)
+#   Commit O: z/{b,c},     x/d_1, w/d_2
+#   Commit A: y/{b,c,d_2}, x/d_1
+#   Commit B: z/{b,c,d_1},        w/d_2
+#   Expected: y/{b,c}, CONFLICT(rename/rename(2to1): x/d_1, w/d_2 -> y_d)
+
+test_expect_success '7b-setup: rename/rename(2to1), but only due to transitive rename' '
+       test_create_repo 7b &&
+       (
+               cd 7b &&
+
+               mkdir z &&
+               mkdir x &&
+               mkdir w &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d1 >x/d &&
+               echo d2 >w/d &&
+               git add z x w &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv w/d y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7b-check: rename/rename(2to1), but only due to transitive rename' '
+       (
+               cd 7b &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :2:y/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:w/d  O:x/d &&
+               test_cmp expect actual &&
+
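+               # The rename/rename(2to1) collision means no y/d is written;
+               # each colliding version is left on disk with a ~HEAD or
+               # ~B^0 suffix instead.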
+               test_path_is_missing y/d &&
+               test_path_is_file y/d~HEAD &&
+               test_path_is_file y/d~B^0 &&
+
+               git hash-object >actual \
+                       y/d~HEAD y/d~B^0 &&
+               git rev-parse >expect \
+                       O:w/d    O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7c, rename/rename(1to...2or3); transitive rename may add complexity
+#   (Related to testcases 3b and 5c)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: y/{b,c}, w/d
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c}, CONFLICT(x/d -> w/d vs. y/d)
+#   NOTE: z/ was renamed to y/, so we want to report
+#         neither CONFLICT(x/d -> w/d vs. z/d)
+#         nor CONFLICT(x/d -> w/d vs. y/d vs. z/d)
+
+test_expect_success '7c-setup: rename/rename(1to...2or3); transitive rename may add complexity' '
+       test_create_repo 7c &&
+       (
+               cd 7c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv x w &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7c-check: rename/rename(1to...2or3); transitive rename may add complexity' '
+       (
+               cd 7c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/rename).*x/d.*w/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :1:x/d :2:w/d :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:x/d  O:x/d  O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7d, transitive rename involved in rename/delete; how is it reported?
+#   (Related somewhat to testcases 5b and 8d)
+#   Commit O: z/{b,c}, x/d
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d}
+#   Expected: y/{b,c}, CONFLICT(delete x/d vs rename to y/d)
+#   NOTE: z->y so NOT CONFLICT(delete x/d vs rename to z/d)
+
+test_expect_success '7d-setup: transitive rename involved in rename/delete; how is it reported?' '
+       test_create_repo 7d &&
+       (
+               cd 7d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git rm -rf x &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7d-check: transitive rename involved in rename/delete; how is it reported?' '
+       (
+               cd 7d &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :3:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 7e, transitive rename in rename/delete AND dirs in the way
+#   (Very similar to 'both rename source and destination involved in D/F conflict' from t6022-merge-rename.sh)
+#   (Also related to testcases 9c and 9d)
+#   Commit O: z/{b,c},     x/d_1
+#   Commit A: y/{b,c,d/g}, x/d/f
+#   Commit B: z/{b,c,d_1}
+#   Expected: rename/delete(x/d_1->y/d_1 vs. None) + D/F conflict on y/d
+#             y/{b,c,d/g}, y/d_1~B^0, x/d/f
+
+#   NOTE: The main path of interest here is d_1 and where it ends up, but
+#         this is actually a case that has two potential directory renames
+#         involved and D/F conflict(s), so it makes sense to walk through
+#         each step.
+#
+#         Commit A renames z/ -> y/.  Thus everything that B adds to z/
+#         should be instead moved to y/.  This gives us the D/F conflict on
+#         y/d because x/d_1 -> z/d_1 -> y/d_1 conflicts with y/d/g.
+#
+#         Further, commit B renames x/ -> z/, thus everything A adds to x/
+#         should instead be moved to z/... BUT commit A renamed z/ away to
+#         y/, so maybe everything should move not from x/ to z/, but from
+#         x/ to z/ to y/.  Doing so might make sense from the logic so far,
+#         but note that commit A had both an x/ and a y/; it did the
+#         renaming of z/ to y/ and created x/d/f, clearly keeping the two
+#         separate, so it doesn't make much sense to push these
+#         together.  Doing so is what I'd call a doubly transitive rename;
+#         see testcases 9c and 9d for further discussion of this issue and
+#         how it's resolved.
+
+test_expect_success '7e-setup: transitive rename in rename/delete AND dirs in the way' '
+       test_create_repo 7e &&
+       (
+               cd 7e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d1 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git rm x/d &&
+               mkdir -p x/d &&
+               mkdir -p y/d &&
+               echo f >x/d/f &&
+               echo g >y/d/g &&
+               git add x/d/f y/d/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/ &&
+               rmdir x &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '7e-check: transitive rename in rename/delete AND dirs in the way' '
+       (
+               cd 7e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (rename/delete).*x/d.*y/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:x/d/f :0:y/d/g :0:y/b :0:y/c :3:y/d &&
+               git rev-parse >expect \
+                        A:x/d/f  A:y/d/g  O:z/b  O:z/c  O:x/d &&
+               test_cmp expect actual &&
+
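+               # The leftover y/d~B^0 file should carry the original
+               # content of x/d.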
+               git hash-object y/d~B^0 >actual &&
+               git rev-parse O:x/d >expect &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# SECTION 8: Suboptimal merges
+#
+# As alluded to in the last section, the ruleset we have built up for
+# detecting directory renames unfortunately has some special cases where it
+# results in slightly suboptimal or non-intuitive behavior.  This section
+# explores these cases.
+#
+# To be fair, we already had non-intuitive or suboptimal behavior for most
+# of these cases in git before introducing implicit directory rename
+# detection, but it would be nice if there were a modified ruleset out there
+# that handled these cases a bit better.
+###########################################################################
+
+# Testcase 8a, Dual-directory rename, one into the other's way
+#   Commit O: x/{a,b},   y/{c,d}
+#   Commit A: x/{a,b,e}, y/{c,d,f}
+#   Commit B: y/{a,b},   z/{c,d}
+#
+# Possible Resolutions:
+#   w/o dir-rename detection: y/{a,b,f},   z/{c,d},   x/e
+#   Currently expected:       y/{a,b,e,f}, z/{c,d}
+#   Optimal:                  y/{a,b,e},   z/{c,d,f}
+#
+# Note: Both x and y were renamed, and it would be nice to detect both.  We
+# do better with directory rename detection than git did without it, but the
+# simple rule from section 5 prevents me from handling this as optimally as
+# we potentially could.
+
+test_expect_success '8a-setup: Dual-directory rename, one into the others way' '
+       test_create_repo 8a &&
+       (
+               cd 8a &&
+
+               mkdir x &&
+               mkdir y &&
+               echo a >x/a &&
+               echo b >x/b &&
+               echo c >y/c &&
+               echo d >y/d &&
+               git add x y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e >x/e &&
+               echo f >y/f &&
+               git add x/e y/f &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y z &&
+               git mv x y &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8a-check: Dual-directory rename, one into the others way' '
+       (
+               cd 8a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/a HEAD:y/b HEAD:y/e HEAD:y/f HEAD:z/c HEAD:z/d &&
+               git rev-parse >expect \
+                       O:x/a    O:x/b    A:x/e    A:y/f    O:y/c    O:y/d &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8b, Dual-directory rename, one into the other's way, with conflicting filenames
+#   Commit O: x/{a_1,b_1},     y/{a_2,b_2}
+#   Commit A: x/{a_1,b_1,e_1}, y/{a_2,b_2,e_2}
+#   Commit B: y/{a_1,b_1},     z/{a_2,b_2}
+#
+#   w/o dir-rename detection: y/{a_1,b_1,e_2}, z/{a_2,b_2}, x/e_1
+#   Currently expected:       <same>
+#   Scary:                    y/{a_1,b_1},     z/{a_2,b_2}, CONFLICT(add/add, e_1 vs. e_2)
+#   Optimal:                  y/{a_1,b_1,e_1}, z/{a_2,b_2,e_2}
+#
+# Note: Very similar to 8a, except instead of 'e' and 'f' in directories x and
+# y, both are named 'e'.  Without directory rename detection, neither file
+# moves directories.  Implement directory rename detection suboptimally, and
+# you get an add/add conflict, but both files were added in commit A, so this
+# is an add/add conflict where one side of history added both files --
+# something we can't represent in the index.  Obviously, we'd prefer the
+# Optimal resolution, but our previous rules are too coarse to allow it.
+# Using both the rules from section 4 and section 5 saves us from the Scary
+# resolution, making us fall back to pre-directory-rename-detection behavior
+# for both e_1 and e_2.
+
+test_expect_success '8b-setup: Dual-directory rename, one into the others way, with conflicting filenames' '
+       test_create_repo 8b &&
+       (
+               cd 8b &&
+
+               mkdir x &&
+               mkdir y &&
+               echo a1 >x/a &&
+               echo b1 >x/b &&
+               echo a2 >y/a &&
+               echo b2 >y/b &&
+               git add x y &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo e1 >x/e &&
+               echo e2 >y/e &&
+               git add x/e y/e &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv y z &&
+               git mv x y &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8b-check: Dual-directory rename, one into the others way, with conflicting filenames' '
+       (
+               cd 8b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/a HEAD:y/b HEAD:z/a HEAD:z/b HEAD:x/e HEAD:y/e &&
+               git rev-parse >expect \
+                       O:x/a    O:x/b    O:y/a    O:y/b    A:x/e    A:y/e &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8c, modify/delete or rename+modify/delete?
+#   (Related to testcases 5b, 8d, and 9h)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d_modified,e}
+#   Expected: y/{b,c,e}, CONFLICT(modify/delete: on z/d)
+#
+#   Note: It could easily be argued that the correct resolution here is
+#         y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d vs deleted)
+#         and that the modified version of d should be present in y/ after
+#         the merge, just marked as conflicted.  Indeed, I previously did
+#         argue that.  But applying directory renames to the side of
+#         history where a file is merely modified results in spurious
+#         rename/rename(1to2) conflicts -- see testcase 9h.  See also
+#         notes in 8d.
+
+test_expect_success '8c-setup: modify/delete or rename+modify/delete?' '
+       test_create_repo 8c &&
+       (
+               cd 8c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               test_seq 1 10 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 11 >z/d &&
+               test_chmod +x z/d &&
+               echo e >z/e &&
+               git add z/d z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8c-check: modify/delete or rename+modify/delete?' '
+       (
+               cd 8c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               test_i18ngrep "CONFLICT (modify/delete).* z/d" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 5 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :0:y/c :0:y/e :1:z/d :3:z/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/c  B:z/e  O:z/d  B:z/d &&
+               test_cmp expect actual &&
+
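+               # No stage-2 entry should exist (side A deleted z/d), and the
+               # executable bit B gave the file (mode 100755) should still be
+               # recorded for it.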
+               test_must_fail git rev-parse :2:z/d &&
+               git ls-files -s z/d | grep ^100755 &&
+               test_path_is_file z/d &&
+               test_path_is_missing y/d
+       )
+'
+
+# Testcase 8d, rename/delete...or not?
+#   (Related to testcase 5b; these may appear slightly inconsistent to users;
+#    Also related to testcases 7d and 7e)
+#   Commit O: z/{b,c,d}
+#   Commit A: y/{b,c}
+#   Commit B: z/{b,c,d,e}
+#   Expected: y/{b,c,e}
+#
+#   Note: It would also be somewhat reasonable to resolve this as
+#             y/{b,c,e}, CONFLICT(rename/delete: z/d -> y/d or deleted)
+#
+#   In this case, I'm leaning towards: commit A was the one that deleted z/d
+#   and it did the rename of z to y, so the two "conflicts" (rename vs.
+#   delete) are both coming from commit A, which is illogical.  Conflicts
+#   during merging are supposed to be about opposite sides doing things
+#   differently.
+
+test_expect_success '8d-setup: rename/delete...or not?' '
+       test_create_repo 8d &&
+       (
+               cd 8d &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               test_seq 1 10 >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/d &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8d-check: rename/delete...or not?' '
+       (
+               cd 8d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/e &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/e &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 8e, Both sides rename, one side adds to original directory
+#   Commit O: z/{b,c}
+#   Commit A: y/{b,c}
+#   Commit B: w/{b,c}, z/d
+#
+# Possible Resolutions:
+#   w/o dir-rename detection: z/d, CONFLICT(z/b -> y/b vs. w/b),
+#                                  CONFLICT(z/c -> y/c vs. w/c)
+#   Currently expected:       y/d, CONFLICT(z/b -> y/b vs. w/b),
+#                                  CONFLICT(z/c -> y/c vs. w/c)
+#   Optimal:                  ??
+#
+# Notes: In commit A, directory z got renamed to y.  In commit B, directory z
+#        did NOT get renamed; the directory is still present; instead it is
+#        considered to have just renamed a subset of paths in directory z
+#        elsewhere.  Therefore, the directory rename done in commit A to z/
+#        applies to z/d and maps it to y/d.
+#
+#        It's possible that users would get confused about this, but what
+#        should we do instead?  Silently leaving the file at z/d seems just
+#        as bad or maybe even worse.  Perhaps we could print a big warning
+#        about z/d and how we're moving it to y/d in this case, but when I
+#        started thinking about the ramifications of doing that, I couldn't
+#        rule out that it would open up other weird edge and corner cases,
+#        so I just punted.
+
+test_expect_success '8e-setup: Both sides rename, one side adds to original directory' '
+       test_create_repo 8e &&
+       (
+               cd 8e &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z w &&
+               mkdir z &&
+               echo d >z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '8e-check: Both sides rename, one side adds to original directory' '
+       (
+               cd 8e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/c.*y/c.*w/c out &&
+               test_i18ngrep CONFLICT.*rename/rename.*z/b.*y/b.*w/b out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :1:z/b :2:y/b :3:w/b :1:z/c :2:y/c :3:w/c :0:y/d &&
+               git rev-parse >expect \
+                        O:z/b  O:z/b  O:z/b  O:z/c  O:z/c  O:z/c  B:z/d &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/b   w/b   y/c   w/c &&
+               git rev-parse >expect \
+                       O:z/b O:z/b O:z/c O:z/c &&
+               test_cmp expect actual &&
+
+               test_path_is_missing z/b &&
+               test_path_is_missing z/c
+       )
+'
+
+###########################################################################
+# SECTION 9: Other testcases
+#
+# This section consists of miscellaneous testcases I thought of during
+# the implementation which round out the testing.
+###########################################################################
+
+# Testcase 9a, Inner renamed directory within outer renamed directory
+#   (Related to testcase 1f)
+#   Commit O: z/{b,c,d/{e,f,g}}
+#   Commit A: y/{b,c}, x/w/{e,f,g}
+#   Commit B: z/{b,c,d/{e,f,g,h},i}
+#   Expected: y/{b,c,i}, x/w/{e,f,g,h}
+#   NOTE: The only reason this one is interesting is that when a directory
+#         is split into multiple other directories, we pick the rename
+#         destination by weight, i.e. by which destination had the most
+#         paths going to it.  A naive implementation of that could send the
+#         new file in commit B at z/i to x/w/i or x/i.
+
+test_expect_success '9a-setup: Inner renamed directory within outer renamed directory' '
+       test_create_repo 9a &&
+       (
+               cd 9a &&
+
+               mkdir -p z/d &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo e >z/d/e &&
+               echo f >z/d/f &&
+               echo g >z/d/g &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir x &&
+               git mv z/d x/w &&
+               git mv z y &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo h >z/d/h &&
+               echo i >z/i &&
+               git add z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9a-check: Inner renamed directory within outer renamed directory' '
+       (
+               cd 9a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/i &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    B:z/i &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       HEAD:x/w/e HEAD:x/w/f HEAD:x/w/g HEAD:x/w/h &&
+               git rev-parse >expect \
+                       O:z/d/e    O:z/d/f    O:z/d/g    B:z/d/h &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9b, Transitive rename with content merge
+#   (Related to testcase 1c)
+#   Commit O: z/{b,c},   x/d_1
+#   Commit A: y/{b,c},   x/d_2
+#   Commit B: z/{b,c,d_3}
+#   Expected: y/{b,c,d_merged}
+
+test_expect_success '9b-setup: Transitive rename with content merge' '
+       test_create_repo 9b &&
+       (
+               cd 9b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               test_seq 1 10 >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               test_seq 1 11 >x/d &&
+               git add x/d &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               test_seq 0 10 >x/d &&
+               git mv x/d z/d &&
+               git add z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9b-check: Transitive rename with content merge' '
+       (
+               cd 9b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               test_seq 0 11 >expected &&
+               test_cmp expected y/d &&
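+               # Stage the expected file so its blob can be named below as
+               # :0:expected.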
+               git add expected &&
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    :0:expected &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:x/d &&
+               test_must_fail git rev-parse HEAD:z/d &&
+               test_path_is_missing z/d &&
+
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse O:x/d) &&
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse A:x/d) &&
+               test $(git rev-parse HEAD:y/d) != $(git rev-parse B:z/d)
+       )
+'
+
+# Testcase 9c, Doubly transitive rename?
+#   (Related to testcase 1c, 7e, and 9d)
+#   Commit O: z/{b,c},     x/{d,e},    w/f
+#   Commit A: y/{b,c},     x/{d,e,f,g}
+#   Commit B: z/{b,c,d,e},             w/f
+#   Expected: y/{b,c,d,e}, x/{f,g}
+#
+#   NOTE: x/f and x/g may be slightly confusing here.  The rename from w/f to
+#         x/f is clear.  Let's look beyond that.  Here's the logic:
+#            Commit B renamed x/ -> z/
+#            Commit A renamed z/ -> y/
+#         So, we could possibly further rename x/f to z/f to y/f, a doubly
+#         transitive rename.  However, where does it end?  We can chain these
+#         indefinitely (see testcase 9d).  What if there is a D/F conflict
+#         at z/f/ or y/f/?  Or just another file conflict at one of those
+#         paths?  In the case of an N-long chain of transitive renamings,
+#         where do we "abort" the rename?  Can the user make sense of
+#         the resulting conflict and resolve it?
+#
+#         To avoid this confusion I use the simple rule that if the other side
+#         of history did a directory rename to a path that your side renamed
+#         away, then ignore that particular rename from the other side of
+#         history for any implicit directory renames.
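+#
+#         (In this testcase that means ignoring the x/ -> z/ rename from
+#         commit B for the paths commit A put into x/ (x/f and x/g); the
+#         merge announces this with "Avoiding applying x -> z rename to
+#         x/f", which the check below greps for.)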
+
+test_expect_success '9c-setup: Doubly transitive rename?' '
+       test_create_repo 9c &&
+       (
+               cd 9c &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               mkdir x &&
+               echo d >x/d &&
+               echo e >x/e &&
+               mkdir w &&
+               echo f >w/f &&
+               git add z x w &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z y &&
+               git mv w/f x/ &&
+               echo g >x/g &&
+               git add x/g &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/d &&
+               git mv x/e z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9c-check: Doubly transitive rename?' '
+       (
+               cd 9c &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+               test_i18ngrep "WARNING: Avoiding applying x -> z rename to x/f" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:y/d HEAD:y/e HEAD:x/f HEAD:x/g &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    O:x/d    O:x/e    O:w/f    A:x/g &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9d, N-fold transitive rename?
+#   (Related to testcase 9c...and 1c and 7e)
+#   Commit O: z/a, y/b, x/c, w/d, v/e, u/f
+#   Commit A:  y/{a,b},  w/{c,d},  u/{e,f}
+#   Commit B: z/{a,t}, x/{b,c}, v/{d,e}, u/f
+#   Expected: <see NOTE first>
+#
+#   NOTE: z/ -> y/ (in commit A)
+#         y/ -> x/ (in commit B)
+#         x/ -> w/ (in commit A)
+#         w/ -> v/ (in commit B)
+#         v/ -> u/ (in commit A)
+#         So, if we add a file to z, say z/t, where should it end up?  In u?
+#         What if there's another file or directory named 't' in one of the
+#         intervening directories and/or in u itself?  Also, shouldn't the
+#         same logic that places 't' in u/ also move ALL other files to u/?
+#         What if there are file or directory conflicts in any of them?  If
+#         we attempted to do N-way (N-fold? N-ary? N-uple?) transitive renames
+#         like this, would the user have any hope of understanding any
+#         conflicts or how their working tree ended up?  I think not, so I'm
+#         ruling out N-ary transitive renames for N>1.
+#
+#   Therefore our expected result is:
+#     z/t, y/a, x/b, w/c, u/d, u/e, u/f
+#   The reason that v/d DOES get transitively renamed to u/d is that u/ itself
+#   isn't renamed anywhere.  A slightly sub-optimal result, but it uses fairly
+#   simple rules that are consistent with what we need for all the other
+#   testcases and simplifies things for the user.
+
+test_expect_success '9d-setup: N-way transitive rename?' '
+       test_create_repo 9d &&
+       (
+               cd 9d &&
+
+               mkdir z y x w v u &&
+               echo a >z/a &&
+               echo b >y/b &&
+               echo c >x/c &&
+               echo d >w/d &&
+               echo e >v/e &&
+               echo f >u/f &&
+               git add z y x w v u &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/a y/ &&
+               git mv x/c w/ &&
+               git mv v/e u/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo t >z/t &&
+               git mv y/b x/ &&
+               git mv w/d v/ &&
+               git add z/t &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9d-check: N-way transitive rename?' '
+       (
+               cd 9d &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 >out &&
+               test_i18ngrep "WARNING: Avoiding applying z -> y rename to z/t" out &&
+               test_i18ngrep "WARNING: Avoiding applying y -> x rename to y/a" out &&
+               test_i18ngrep "WARNING: Avoiding applying x -> w rename to x/b" out &&
+               test_i18ngrep "WARNING: Avoiding applying w -> v rename to w/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -o >out &&
+               test_line_count = 1 out &&
+
+               git rev-parse >actual \
+                       HEAD:z/t \
+                       HEAD:y/a HEAD:x/b HEAD:w/c \
+                       HEAD:u/d HEAD:u/e HEAD:u/f &&
+               git rev-parse >expect \
+                       B:z/t    \
+                       O:z/a    O:y/b    O:x/c    \
+                       O:w/d    O:v/e    A:u/f &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9e, N-to-1 whammo
+#   (Related to testcase 9c...and 1c and 7e)
+#   Commit O: dir1/{a,b}, dir2/{d,e}, dir3/{g,h}, dirN/{j,k}
+#   Commit A: dir1/{a,b,c,yo}, dir2/{d,e,f,yo}, dir3/{g,h,i,yo}, dirN/{j,k,l,yo}
+#   Commit B: combined/{a,b,d,e,g,h,j,k}
+#   Expected: combined/{a,b,c,d,e,f,g,h,i,j,k,l}, CONFLICT(Nto1) warnings,
+#             dir1/yo, dir2/yo, dir3/yo, dirN/yo
+
+test_expect_success '9e-setup: N-to-1 whammo' '
+       test_create_repo 9e &&
+       (
+               cd 9e &&
+
+               mkdir dir1 dir2 dir3 dirN &&
+               echo a >dir1/a &&
+               echo b >dir1/b &&
+               echo d >dir2/d &&
+               echo e >dir2/e &&
+               echo g >dir3/g &&
+               echo h >dir3/h &&
+               echo j >dirN/j &&
+               echo k >dirN/k &&
+               git add dir* &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               echo c  >dir1/c &&
+               echo yo >dir1/yo &&
+               echo f  >dir2/f &&
+               echo yo >dir2/yo &&
+               echo i  >dir3/i &&
+               echo yo >dir3/yo &&
+               echo l  >dirN/l &&
+               echo yo >dirN/yo &&
+               git add dir* &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv dir1 combined &&
+               git mv dir2/* combined/ &&
+               git mv dir3/* combined/ &&
+               git mv dirN/* combined/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success C_LOCALE_OUTPUT '9e-check: N-to-1 whammo' '
+       (
+               cd 9e &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 >out &&
+               grep "CONFLICT (implicit dir rename): Cannot map more than one path to combined/yo" out >error_line &&
+               grep -q dir1/yo error_line &&
+               grep -q dir2/yo error_line &&
+               grep -q dir3/yo error_line &&
+               grep -q dirN/yo error_line &&
+
+               git ls-files -s >out &&
+               test_line_count = 16 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 2 out &&
+
+               git rev-parse >actual \
+                       :0:combined/a :0:combined/b :0:combined/c \
+                       :0:combined/d :0:combined/e :0:combined/f \
+                       :0:combined/g :0:combined/h :0:combined/i \
+                       :0:combined/j :0:combined/k :0:combined/l &&
+               git rev-parse >expect \
+                        O:dir1/a      O:dir1/b      A:dir1/c \
+                        O:dir2/d      O:dir2/e      A:dir2/f \
+                        O:dir3/g      O:dir3/h      A:dir3/i \
+                        O:dirN/j      O:dirN/k      A:dirN/l &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       :0:dir1/yo :0:dir2/yo :0:dir3/yo :0:dirN/yo &&
+               git rev-parse >expect \
+                        A:dir1/yo  A:dir2/yo  A:dir3/yo  A:dirN/yo &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 9f, Renamed directory that only contained immediate subdirs
+#   (Related to testcases 1e & 9g)
+#   Commit O: goal/{a,b}/$more_files
+#   Commit A: priority/{a,b}/$more_files
+#   Commit B: goal/{a,b}/$more_files, goal/c
+#   Expected: priority/{a,b}/$more_files, priority/c
+
+test_expect_success '9f-setup: Renamed directory that only contained immediate subdirs' '
+       test_create_repo 9f &&
+       (
+               cd 9f &&
+
+               mkdir -p goal/a &&
+               mkdir -p goal/b &&
+               echo foo >goal/a/foo &&
+               echo bar >goal/b/bar &&
+               echo baz >goal/b/baz &&
+               git add goal &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv goal/ priority &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >goal/c &&
+               git add goal/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9f-check: Renamed directory that only contained immediate subdirs' '
+       (
+               cd 9f &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:priority/a/foo \
+                       HEAD:priority/b/bar \
+                       HEAD:priority/b/baz \
+                       HEAD:priority/c &&
+               git rev-parse >expect \
+                       O:goal/a/foo \
+                       O:goal/b/bar \
+                       O:goal/b/baz \
+                       B:goal/c &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:goal/c
+       )
+'
+
+# Testcase 9g, Renamed directory that only contained immediate subdirs, immediate subdirs renamed
+#   (Related to testcases 1e & 9f)
+#   Commit O: goal/{a,b}/$more_files
+#   Commit A: priority/{alpha,beta}/$more_files
+#   Commit B: goal/{a,b}/$more_files, goal/c
+#   Expected: priority/{alpha,beta}/$more_files, priority/c
+
+test_expect_success '9g-setup: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' '
+       test_create_repo 9g &&
+       (
+               cd 9g &&
+
+               mkdir -p goal/a &&
+               mkdir -p goal/b &&
+               echo foo >goal/a/foo &&
+               echo bar >goal/b/bar &&
+               echo baz >goal/b/baz &&
+               git add goal &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir priority &&
+               git mv goal/a/ priority/alpha &&
+               git mv goal/b/ priority/beta &&
+               rmdir goal/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >goal/c &&
+               git add goal/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_failure '9g-check: Renamed directory that only contained immediate subdirs, immediate subdirs renamed' '
+       (
+               cd 9g &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:priority/alpha/foo \
+                       HEAD:priority/beta/bar  \
+                       HEAD:priority/beta/baz  \
+                       HEAD:priority/c &&
+               git rev-parse >expect \
+                       O:goal/a/foo \
+                       O:goal/b/bar \
+                       O:goal/b/baz \
+                       B:goal/c &&
+               test_cmp expect actual &&
+               test_must_fail git rev-parse HEAD:goal/c
+       )
+'
+
+# Testcase 9h, Avoid implicit rename if involved as source on other side
+#   (Extremely closely related to testcase 3a)
+#   Commit O: z/{b,c,d_1}
+#   Commit A: z/{b,c,d_2}
+#   Commit B: y/{b,c}, x/d_1
+#   Expected: y/{b,c}, x/d_2
+#   NOTE: If we applied the z/ -> y/ rename to z/d, then we'd end up with
+#         a rename/rename(1to2) conflict (z/d -> y/d vs. x/d)
+test_expect_success '9h-setup: Avoid dir rename on merely modified path' '
+       test_create_repo 9h &&
+       (
+               cd 9h &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nd\n" >z/d &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               test_tick &&
+               echo more >>z/d &&
+               git add z/d &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y &&
+               mkdir x &&
+               git mv z/b y/ &&
+               git mv z/c y/ &&
+               git mv z/d x/ &&
+               rmdir z &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '9h-check: Avoid dir rename on merely modified path' '
+       (
+               cd 9h &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       HEAD:y/b HEAD:y/c HEAD:x/d &&
+               git rev-parse >expect \
+                       O:z/b    O:z/c    A:z/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# Rules suggested by section 9:
+#
+#   If the other side of history did a directory rename to a path that your
+#   side renamed away, then ignore that particular rename from the other
+#   side of history for any implicit directory renames.
+###########################################################################
+
+###########################################################################
+# SECTION 10: Handling untracked files
+#
+# unpack_trees(), upon which the recursive merge algorithm is based, aborts
+# the operation if untracked or dirty files would be deleted or overwritten
+# by the merge.  Unfortunately, unpack_trees() does not understand renames,
+# and if it doesn't abort, then it muddies up the working directory before
+# we even get to the point of detecting renames, so we need some special
+# handling, at least in the case of directory renames.
+###########################################################################
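+
+# (Illustrative only; with no renames involved, the basic refusal looks
+# roughly like this --
+#
+#    git checkout A &&
+#    echo precious >some/path &&   # untracked, but the merge needs to write
+#    git merge B                   # some/path, so it aborts with "untracked
+#                                  # working tree files would be overwritten
+#                                  # by merge"
+#
+# -- where "some/path" is a placeholder; testcase 10a below checks for this
+# message, and the later testcases add directory renames into the mix.)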
+
+# Testcase 10a, Overwrite untracked: normal rename/delete
+#   Commit O: z/{b,c_1}
+#   Commit A: z/b + untracked z/c + untracked z/d
+#   Commit B: z/{b,d_1}
+#   Expected: Aborted Merge +
+#       ERROR_MSG(untracked working tree files would be overwritten by merge)
+
+test_expect_success '10a-setup: Overwrite untracked with normal rename/delete' '
+       test_create_repo 10a &&
+       (
+               cd 10a &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/c z/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10a-check: Overwrite untracked with normal rename/delete' '
+       (
+               cd 10a &&
+
+               git checkout A^0 &&
+               echo very >z/c &&
+               echo important >z/d &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "The following untracked working tree files would be overwritten by merge" err &&
+
+               git ls-files -s >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               echo very >expect &&
+               test_cmp expect z/c &&
+
+               echo important >expect &&
+               test_cmp expect z/d &&
+
+               git rev-parse HEAD:z/b >actual &&
+               git rev-parse O:z/b >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 10b, Overwrite untracked: dir rename + delete
+#   Commit O: z/{b,c_1}
+#   Commit A: y/b + untracked y/{c,d,e}
+#   Commit B: z/{b,d_1,e}
+#   Expected: Failed Merge; y/b + untracked y/c + untracked y/d on disk +
+#             z/c_1 -> z/d_1 rename recorded at stage 3 for y/d +
+#       ERROR_MSG(refusing to lose untracked file at 'y/d')
+
+test_expect_success '10b-setup: Overwrite untracked with dir rename + delete' '
+       test_create_repo 10b &&
+       (
+               cd 10b &&
+
+               mkdir z &&
+               echo b >z/b &&
+               echo c >z/c &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git rm z/c &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z/c z/d &&
+               echo e >z/e &&
+               git add z/e &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10b-check: Overwrite untracked with dir rename + delete' '
+       (
+               cd 10b &&
+
+               git checkout A^0 &&
+               echo very >y/c &&
+               echo important >y/d &&
+               echo contents >y/e &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/delete).*Version B\^0 of y/d left in tree at y/d~B\^0" out &&
+               test_i18ngrep "Error: Refusing to lose untracked file at y/e; writing to y/e~B\^0 instead" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 5 out &&
+
+               git rev-parse >actual \
+                       :0:y/b :3:y/d :3:y/e &&
+               git rev-parse >expect \
+                       O:z/b  O:z/c  B:z/e &&
+               test_cmp expect actual &&
+
+               echo very >expect &&
+               test_cmp expect y/c &&
+
+               echo important >expect &&
+               test_cmp expect y/d &&
+
+               echo contents >expect &&
+               test_cmp expect y/e
+       )
+'
+
+# Testcase 10c, Overwrite untracked: dir rename/rename(1to2)
+#   Commit O: z/{a,b}, x/{c,d}
+#   Commit A: y/{a,b}, w/c, x/d + different untracked y/c
+#   Commit B: z/{a,b,c}, x/d
+#   Expected: Failed Merge; y/{a,b} + x/d + untracked y/c +
+#             CONFLICT(rename/rename) x/c -> w/c vs y/c +
+#             y/c~B^0 +
+#             ERROR_MSG(Refusing to lose untracked file at y/c)
+
+test_expect_success '10c-setup: Overwrite untracked with dir rename/rename(1to2)' '
+       test_create_repo 10c &&
+       (
+               cd 10c &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               mkdir w &&
+               git mv x/c w/c &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/c z/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10c-check: Overwrite untracked with dir rename/rename(1to2)' '
+       (
+               cd 10c &&
+
+               git checkout A^0 &&
+               echo important >y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose untracked file at y/c; adding as y/c~B\^0 instead" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 3 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:x/d :1:x/c :2:w/c :3:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/c  O:x/c  O:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c~B^0 >actual &&
+               git rev-parse O:x/c >expect &&
+               test_cmp expect actual &&
+
+               echo important >expect &&
+               test_cmp expect y/c
+       )
+'
+
+# Testcase 10d, Delete untracked w/ dir rename/rename(2to1)
+#   Commit O: z/{a,b,c_1},        x/{d,e,f_2}
+#   Commit A: y/{a,b},            x/{d,e,f_2,wham_1} + untracked y/wham
+#   Commit B: z/{a,b,c_1,wham_2}, y/{d,e}
+#   Expected: Failed Merge; y/{a,b,d,e} + untracked y/{wham,wham~B^0,wham~HEAD} +
+#             CONFLICT(rename/rename) z/c_1 vs x/f_2 -> y/wham
+#             ERROR_MSG(Refusing to lose untracked file at y/wham)
+
+test_expect_success '10d-setup: Delete untracked with dir rename/rename(2to1)' '
+       test_create_repo 10d &&
+       (
+               cd 10d &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >z/c &&
+               echo d >x/d &&
+               echo e >x/e &&
+               echo f >x/f &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/c x/wham &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/f z/wham &&
+               git mv x/ y/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '10d-check: Delete untracked with dir rename/rename(2to1)' '
+       (
+               cd 10d &&
+
+               git checkout A^0 &&
+               echo important >y/wham &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose untracked file at y/wham" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:y/d :0:y/e :2:y/wham :3:y/wham &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/e  O:z/c     O:x/f &&
+               test_cmp expect actual &&
+
+               test_must_fail git rev-parse :1:y/wham &&
+
+               echo important >expect &&
+               test_cmp expect y/wham &&
+
+               git hash-object >actual \
+                       y/wham~B^0 y/wham~HEAD &&
+               git rev-parse >expect \
+                       O:x/f      O:z/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 10e, Does git complain about untracked file that's not in the way?
+#   Commit O: z/{a,b}
+#   Commit A: y/{a,b} + untracked z/c
+#   Commit B: z/{a,b,c}
+#   Expected: y/{a,b,c} + untracked z/c
+
+test_expect_success '10e-setup: Does git complain about untracked file that is not really in the way?' '
+       test_create_repo 10e &&
+       (
+               cd 10e &&
+
+               mkdir z &&
+               echo a >z/a &&
+               echo b >z/b &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo c >z/c &&
+               git add z/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_failure '10e-check: Does git complain about untracked file that is not really in the way?' '
+       (
+               cd 10e &&
+
+               git checkout A^0 &&
+               mkdir z &&
+               echo random >z/c &&
+
+               git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep ! "following untracked working tree files would be overwritten by merge" err &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  B:z/c &&
+               test_cmp expect actual &&
+
+               echo random >expect &&
+               test_cmp expect z/c
+       )
+'
+
+###########################################################################
+# SECTION 11: Handling dirty (not up-to-date) files
+#
+# unpack_trees(), upon which the recursive merge algorithm is based, aborts
+# the operation if untracked or dirty files would be deleted or overwritten
+# by the merge.  Unfortunately, unpack_trees() does not understand renames,
+# and if it doesn't abort, then it muddies up the working directory before
+# we even get to the point of detecting renames, so we need some special
+# handling.  This was true even of normal renames, but there are additional
+# codepaths that need special handling with directory renames.  Add
+# testcases for both renamed-by-directory-rename-detection and standard
+# rename cases.
+###########################################################################
+
+# Testcase 11a, Avoid losing dirty contents with simple rename
+#   Commit O: z/{a,b_v1},
+#   Commit A: z/{a,c_v1}, and z/c_v1 has uncommitted mods
+#   Commit B: z/{a,b_v2}
+#   Expected: ERROR_MSG(Refusing to lose dirty file at z/c) +
+#             z/a, staged version of z/c has sha1sum matching B:z/b_v2,
+#             z/c~HEAD with contents of B:z/b_v2,
+#             z/c with uncommitted mods on top of A:z/c_v1
+
+test_expect_success '11a-setup: Avoid losing dirty contents with simple rename' '
+       test_create_repo 11a &&
+       (
+               cd 11a &&
+
+               mkdir z &&
+               echo a >z/a &&
+               test_seq 1 10 >z/b &&
+               git add z &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/b z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo 11 >>z/b &&
+               git add z/b &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11a-check: Avoid losing dirty contents with simple rename' '
+       (
+               cd 11a &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 2 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:z/a :2:z/c &&
+               git rev-parse >expect \
+                        O:z/a  B:z/b &&
+               test_cmp expect actual &&
+
+               git hash-object z/c~HEAD >actual &&
+               git rev-parse B:z/b >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11b, Avoid losing dirty file involved in directory rename
+#   Commit O: z/a,         x/{b,c_v1}
+#   Commit A: z/{a,c_v1},  x/b,       and z/c_v1 has uncommitted mods
+#   Commit B: y/a,         x/{b,c_v2}
+#   Expected: y/{a,c_v2}, x/b, z/c_v1 with uncommitted mods untracked,
+#             ERROR_MSG(Refusing to lose dirty file at z/c)
+
+test_expect_success '11b-setup: Avoid losing dirty file involved in directory rename' '
+       test_create_repo 11b &&
+       (
+               cd 11b &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               echo 11 >>x/c &&
+               git add x/c &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11b-check: Avoid losing dirty file involved in directory rename' '
+       (
+               cd 11b &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               grep -q stuff z/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -m >out &&
+               test_line_count = 0 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:y/a :0:y/c &&
+               git rev-parse >expect \
+                        O:x/b  O:z/a  B:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c >actual &&
+               git rev-parse B:x/c >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11c, Avoid losing not-up-to-date with rename + D/F conflict
+#   Commit O: y/a,         x/{b,c_v1}
+#   Commit A: y/{a,c_v1},  x/b,       and y/c_v1 has uncommitted mods
+#   Commit B: y/{a,c/d},   x/{b,c_v2}
+#   Expected: Abort_msg("following files would be overwritten by merge") +
+#             y/c left untouched (still has uncommitted mods)
+
+test_expect_success '11c-setup: Avoid losing not-uptodate with rename + D/F conflict' '
+       test_create_repo 11c &&
+       (
+               cd 11c &&
+
+               mkdir y x &&
+               echo a >y/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add y x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c y/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               mkdir y/c &&
+               echo d >y/c/d &&
+               echo 11 >>x/c &&
+               git add x/c y/c/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11c-check: Avoid losing not-uptodate with rename + D/F conflict' '
+       (
+               cd 11c &&
+
+               git checkout A^0 &&
+               echo stuff >>y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "following files would be overwritten by merge" err &&
+
+               grep -q stuff y/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected y/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 3 out &&
+               git ls-files -u >out &&
+               test_line_count = 0 out &&
+               git ls-files -m >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 3 out
+       )
+'
+
+# Testcase 11d, Avoid losing not-up-to-date with rename + D/F conflict
+#   Commit O: z/a,         x/{b,c_v1}
+#   Commit A: z/{a,c_v1},  x/b,       and z/c_v1 has uncommitted mods
+#   Commit B: y/{a,c/d},   x/{b,c_v2}
+#   Expected: D/F: (y/c_v2 vs y/c/d) +
+#             Warning_Msg("Refusing to lose dirty file at z/c") +
+#             y/{a,c~HEAD,c/d}, x/b, now-untracked z/c_v1 with uncommitted mods
+
+test_expect_success '11d-setup: Avoid losing not-uptodate with rename + D/F conflict' '
+       test_create_repo 11d &&
+       (
+               cd 11d &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >x/b &&
+               test_seq 1 10 >x/c &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv x/c z/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv z y &&
+               mkdir y/c &&
+               echo d >y/c/d &&
+               echo 11 >>x/c &&
+               git add x/c y/c/d &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11d-check: Avoid losing not-uptodate with rename + D/F conflict' '
+       (
+               cd 11d &&
+
+               git checkout A^0 &&
+               echo stuff >>z/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "Refusing to lose dirty file at z/c" out &&
+
+               grep -q stuff z/c &&
+               test_seq 1 10 >expected &&
+               echo stuff >>expected &&
+               test_cmp expected z/c &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 1 out &&
+               git ls-files -o >out &&
+               test_line_count = 5 out &&
+
+               git rev-parse >actual \
+                       :0:x/b :0:y/a :0:y/c/d :3:y/c &&
+               git rev-parse >expect \
+                        O:x/b  O:z/a  B:y/c/d  B:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object y/c~HEAD >actual &&
+               git rev-parse B:x/c >expect &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11e, Avoid deleting not-up-to-date with dir rename/rename(1to2)/add
+#   Commit O: z/{a,b},      x/{c_1,d}
+#   Commit A: y/{a,b,c_2},  x/d, w/c_1, and y/c_2 has uncommitted mods
+#   Commit B: z/{a,b,c_1},  x/d
+#   Expected: Failed Merge; y/{a,b} + x/d +
+#             CONFLICT(rename/rename) x/c_1 -> w/c_1 vs y/c_1 +
+#             ERROR_MSG(Refusing to lose dirty file at y/c)
+#             y/c~B^0 has O:x/c_1 contents
+#             y/c~HEAD has A:y/c_2 contents
+#             y/c has dirty file from before merge
+
+test_expect_success '11e-setup: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' '
+       test_create_repo 11e &&
+       (
+               cd 11e &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               echo c >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               echo different >y/c &&
+               mkdir w &&
+               git mv x/c w/ &&
+               git add y/c &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/c z/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11e-check: Avoid deleting not-uptodate with dir rename/rename(1to2)/add' '
+       (
+               cd 11e &&
+
+               git checkout A^0 &&
+               echo mods >>y/c &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose dirty file at y/c" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 7 out &&
+               git ls-files -u >out &&
+               test_line_count = 4 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               echo different >expected &&
+               echo mods >>expected &&
+               test_cmp expected y/c &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :0:x/d :1:x/c :2:w/c :2:y/c :3:y/c &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/d  O:x/c  O:x/c  A:y/c  O:x/c &&
+               test_cmp expect actual &&
+
+               git hash-object >actual \
+                       y/c~B^0 y/c~HEAD &&
+               git rev-parse >expect \
+                       O:x/c   A:y/c &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 11f, Avoid deleting not-up-to-date w/ dir rename/rename(2to1)
+#   Commit O: z/{a,b},        x/{c_1,d_2}
+#   Commit A: y/{a,b,wham_1}, x/d_2, except y/wham has uncommitted mods
+#   Commit B: z/{a,b,wham_2}, x/c_1
+#   Expected: Failed Merge; y/{a,b} + untracked y/{wham~B^0,wham~HEAD} +
+#             y/wham with dirty changes from before merge +
+#             CONFLICT(rename/rename) x/c vs x/d -> y/wham
+#             ERROR_MSG(Refusing to lose dirty file at y/wham)
+
+test_expect_success '11f-setup: Avoid deleting not-uptodate with dir rename/rename(2to1)' '
+       test_create_repo 11f &&
+       (
+               cd 11f &&
+
+               mkdir z x &&
+               echo a >z/a &&
+               echo b >z/b &&
+               test_seq 1 10 >x/c &&
+               echo d >x/d &&
+               git add z x &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv z/ y/ &&
+               git mv x/c y/wham &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv x/d z/wham &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '11f-check: Avoid deleting not-uptodate with dir rename/rename(2to1)' '
+       (
+               cd 11f &&
+
+               git checkout A^0 &&
+               echo important >>y/wham &&
+
+               test_must_fail git merge -s recursive B^0 >out 2>err &&
+               test_i18ngrep "CONFLICT (rename/rename)" out &&
+               test_i18ngrep "Refusing to lose dirty file at y/wham" out &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+               git ls-files -u >out &&
+               test_line_count = 2 out &&
+               git ls-files -o >out &&
+               test_line_count = 4 out &&
+
+               test_seq 1 10 >expected &&
+               echo important >>expected &&
+               test_cmp expected y/wham &&
+
+               test_must_fail git rev-parse :1:y/wham &&
+               git hash-object >actual \
+                       y/wham~B^0 y/wham~HEAD &&
+               git rev-parse >expect \
+                       O:x/d      O:x/c &&
+               test_cmp expect actual &&
+
+               git rev-parse >actual \
+                       :0:y/a :0:y/b :2:y/wham :3:y/wham &&
+               git rev-parse >expect \
+                        O:z/a  O:z/b  O:x/c     O:x/d &&
+               test_cmp expect actual
+       )
+'
+
+###########################################################################
+# SECTION 12: Everything else
+#
+# Tests suggested by others.  Tests added after implementation completed
+# and submitted.  Grab bag.
+###########################################################################
+
+# Testcase 12a, Moving one directory hierarchy into another
+#   (Related to testcase 9a)
+#   Commit O: node1/{leaf1,leaf2}, node2/{leaf3,leaf4}
+#   Commit A: node1/{leaf1,leaf2,node2/{leaf3,leaf4}}
+#   Commit B: node1/{leaf1,leaf2,leaf5}, node2/{leaf3,leaf4,leaf6}
+#   Expected: node1/{leaf1,leaf2,leaf5,node2/{leaf3,leaf4,leaf6}}
+
+test_expect_success '12a-setup: Moving one directory hierarchy into another' '
+       test_create_repo 12a &&
+       (
+               cd 12a &&
+
+               mkdir -p node1 node2 &&
+               echo leaf1 >node1/leaf1 &&
+               echo leaf2 >node1/leaf2 &&
+               echo leaf3 >node2/leaf3 &&
+               echo leaf4 >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               echo leaf5 >node1/leaf5 &&
+               echo leaf6 >node2/leaf6 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12a-check: Moving one directory hierarchy into another' '
+       (
+               cd 12a &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 6 out &&
+
+               git rev-parse >actual \
+                       HEAD:node1/leaf1 HEAD:node1/leaf2 HEAD:node1/leaf5 \
+                       HEAD:node1/node2/leaf3 \
+                       HEAD:node1/node2/leaf4 \
+                       HEAD:node1/node2/leaf6 &&
+               git rev-parse >expect \
+                       O:node1/leaf1    O:node1/leaf2    B:node1/leaf5 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 \
+                       B:node2/leaf6 &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 12b, Moving two directory hierarchies into each other
+#   (Related to testcases 1c and 12c)
+#   Commit O: node1/{leaf1, leaf2}, node2/{leaf3, leaf4}
+#   Commit A: node1/{leaf1, leaf2, node2/{leaf3, leaf4}}
+#   Commit B: node2/{leaf3, leaf4, node1/{leaf1, leaf2}}
+#   Expected: node1/node2/node1/{leaf1, leaf2},
+#             node2/node1/node2/{leaf3, leaf4}
+#   NOTE: Without directory renames, we would expect
+#                   node2/node1/{leaf1, leaf2},
+#                   node1/node2/{leaf3, leaf4}
+#         with directory rename detection, we note that
+#             commit A renames node2/ -> node1/node2/
+#             commit B renames node1/ -> node2/node1/
+#         therefore, applying those directory renames to the initial result
+#         (making all four paths experience a transitive renaming), yields
+#         the expected result.
+#
+#         You may ask, is it weird to have two directories rename each other?
+#         To which, I can do no more than shrug my shoulders and say that
+#         even simple rules give weird results when given weird inputs.
+
+test_expect_success '12b-setup: Moving two directory hierarchies into each other' '
+       test_create_repo 12b &&
+       (
+               cd 12b &&
+
+               mkdir -p node1 node2 &&
+               echo leaf1 >node1/leaf1 &&
+               echo leaf2 >node1/leaf2 &&
+               echo leaf3 >node2/leaf3 &&
+               echo leaf4 >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv node1/ node2/ &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12b-check: Moving two directory hierarchies into each other' '
+       (
+               cd 12b &&
+
+               git checkout A^0 &&
+
+               git merge -s recursive B^0 &&
+
+               git ls-files -s >out &&
+               test_line_count = 4 out &&
+
+               git rev-parse >actual \
+                       HEAD:node1/node2/node1/leaf1 \
+                       HEAD:node1/node2/node1/leaf2 \
+                       HEAD:node2/node1/node2/leaf3 \
+                       HEAD:node2/node1/node2/leaf4 &&
+               git rev-parse >expect \
+                       O:node1/leaf1 \
+                       O:node1/leaf2 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 &&
+               test_cmp expect actual
+       )
+'
+
+# Testcase 12c, Moving two directory hierarchies into each other w/ content merge
+#   (Related to testcase 12b)
+#   Commit O: node1/{       leaf1_1, leaf2_1}, node2/{leaf3_1, leaf4_1}
+#   Commit A: node1/{       leaf1_2, leaf2_2,  node2/{leaf3_2, leaf4_2}}
+#   Commit B: node2/{node1/{leaf1_3, leaf2_3},        leaf3_3, leaf4_3}
+#   Expected: Content merge conflicts for each of:
+#               node1/node2/node1/{leaf1, leaf2},
+#               node2/node1/node2/{leaf3, leaf4}
+#   NOTE: This is *exactly* like 12b, except that every path is modified on
+#         each side of the merge.
+
+test_expect_success '12c-setup: Moving two directory hierarchies into each other w/ content merge' '
+       test_create_repo 12c &&
+       (
+               cd 12c &&
+
+               mkdir -p node1 node2 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf1\n" >node1/leaf1 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf2\n" >node1/leaf2 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf3\n" >node2/leaf3 &&
+               printf "1\n2\n3\n4\n5\n6\n7\n8\nleaf4\n" >node2/leaf4 &&
+               git add node1 node2 &&
+               test_tick &&
+               git commit -m "O" &&
+
+               git branch O &&
+               git branch A &&
+               git branch B &&
+
+               git checkout A &&
+               git mv node2/ node1/ &&
+               for i in $(git ls-files); do echo side A >>$i; done &&
+               git add -u &&
+               test_tick &&
+               git commit -m "A" &&
+
+               git checkout B &&
+               git mv node1/ node2/ &&
+               for i in $(git ls-files); do echo side B >>$i; done &&
+               git add -u &&
+               test_tick &&
+               git commit -m "B"
+       )
+'
+
+test_expect_success '12c-check: Moving two directory hierarchies into each other w/ content merge' '
+       (
+               cd 12c &&
+
+               git checkout A^0 &&
+
+               test_must_fail git merge -s recursive B^0 &&
+
+               git ls-files -u >out &&
+               test_line_count = 12 out &&
+
+               git rev-parse >actual \
+                       :1:node1/node2/node1/leaf1 \
+                       :1:node1/node2/node1/leaf2 \
+                       :1:node2/node1/node2/leaf3 \
+                       :1:node2/node1/node2/leaf4 \
+                       :2:node1/node2/node1/leaf1 \
+                       :2:node1/node2/node1/leaf2 \
+                       :2:node2/node1/node2/leaf3 \
+                       :2:node2/node1/node2/leaf4 \
+                       :3:node1/node2/node1/leaf1 \
+                       :3:node1/node2/node1/leaf2 \
+                       :3:node2/node1/node2/leaf3 \
+                       :3:node2/node1/node2/leaf4 &&
+               git rev-parse >expect \
+                       O:node1/leaf1 \
+                       O:node1/leaf2 \
+                       O:node2/leaf3 \
+                       O:node2/leaf4 \
+                       A:node1/leaf1 \
+                       A:node1/leaf2 \
+                       A:node1/node2/leaf3 \
+                       A:node1/node2/leaf4 \
+                       B:node2/node1/leaf1 \
+                       B:node2/node1/leaf2 \
+                       B:node2/leaf3 \
+                       B:node2/leaf4 &&
+               test_cmp expect actual
+       )
+'
+
+test_done
index a5d901502414f25616a474152ee0f5816465bd37..bae78c4e89e2402d5ab37e3b7cd094dde41d69b3 100755 (executable)
@@ -378,4 +378,12 @@ check_describe tags/A --all A
 check_describe tags/c --all c
 check_describe heads/branch_A --all --match='branch_*' branch_A
 
+test_expect_success 'describe complains about tree object' '
+       test_must_fail git describe HEAD^{tree}
+'
+
+test_expect_success 'describe complains about missing object' '
+       test_must_fail git describe $_z40
+'
+
 test_done
index 2e2fb0e9572f3d570311470aebf47da37974b8f8..a54a52aaa4e680bdbc97750c4ae4855a45dcdb80 100755 (executable)
@@ -512,7 +512,7 @@ test_expect_success 'merge-msg with "merging" an annotated tag' '
 
        test_when_finished "git reset --hard" &&
        annote=$(git rev-parse annote) &&
-       git merge --no-commit $annote &&
+       git merge --no-commit --no-ff $annote &&
        {
                cat <<-EOF
                Merge tag '\''$annote'\''
index c128dfc5790790de9edf1b4d2cfa8b028c1036bc..295d1475bde0151df65e0098e6e4a169193906ed 100755 (executable)
@@ -373,11 +373,8 @@ test_expect_success 'Quoting style: tcl' '
 
 for i in "--perl --shell" "-s --python" "--python --tcl" "--tcl --perl"; do
        test_expect_success "more than one quoting style: $i" "
-               git for-each-ref $i 2>&1 | (read line &&
-               case \$line in
-               \"error: more than one quoting style\"*) : happy;;
-               *) false
-               esac)
+               test_must_fail git for-each-ref $i 2>err &&
+               grep '^error: more than one quoting style' err
        "
 done
 
index a9af2de9960b345878ac0f85c33b1efd3e038d28..2aac77af701989dc16980268155d6e40500354bb 100755 (executable)
@@ -452,6 +452,21 @@ test_expect_success \
        test_cmp expect actual
 '
 
+get_tag_header annotated-tag-edit $commit commit $time >expect
+echo "An edited message" >>expect
+test_expect_success 'set up editor' '
+       write_script fakeeditor <<-\EOF
+       sed -e "s/A message/An edited message/g" <"$1" >"$1-"
+       mv "$1-" "$1"
+       EOF
+'
+test_expect_success \
+       'creating an annotated tag with -m message --edit should succeed' '
+       GIT_EDITOR=./fakeeditor git tag -m "A message" --edit annotated-tag-edit &&
+       get_tag_msg annotated-tag-edit >actual &&
+       test_cmp expect actual
+'
+
 cat >msgfile <<EOF
 Another message
 in a file.
@@ -465,6 +480,21 @@ test_expect_success \
        test_cmp expect actual
 '
 
+get_tag_header file-annotated-tag-edit $commit commit $time >expect
+sed -e "s/Another message/Another edited message/g" msgfile >>expect
+test_expect_success 'set up editor' '
+       write_script fakeeditor <<-\EOF
+       sed -e "s/Another message/Another edited message/g" <"$1" >"$1-"
+       mv "$1-" "$1"
+       EOF
+'
+test_expect_success \
+       'creating an annotated tag with -F messagefile --edit should succeed' '
+       GIT_EDITOR=./fakeeditor git tag -F msgfile --edit file-annotated-tag-edit &&
+       get_tag_msg file-annotated-tag-edit >actual &&
+       test_cmp expect actual
+'
+
 cat >inputmsg <<EOF
 A message from the
 standard input
index f5f46a95b4be9bca4ca30e70cbe75051a191c96a..7541ba5edbcae1f1555c367b7f8bfc795913ff60 100755 (executable)
@@ -110,13 +110,6 @@ test_expect_success TTY 'configuration can disable pager' '
        ! test -e paginated.out
 '
 
-test_expect_success TTY 'git config uses a pager if configured to' '
-       rm -f paginated.out &&
-       test_config pager.config true &&
-       test_terminal git config --list &&
-       test -e paginated.out
-'
-
 test_expect_success TTY 'configuration can enable pager (from subdir)' '
        rm -f paginated.out &&
        mkdir -p subdir &&
@@ -252,6 +245,48 @@ test_expect_success TTY 'git branch --set-upstream-to ignores pager.branch' '
        ! test -e paginated.out
 '
 
+test_expect_success TTY 'git config ignores pager.config when setting' '
+       rm -f paginated.out &&
+       test_terminal git -c pager.config config foo.bar bar &&
+       ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --edit ignores pager.config' '
+       rm -f paginated.out editor.used &&
+       write_script editor <<-\EOF &&
+               touch editor.used
+       EOF
+       EDITOR=./editor test_terminal git -c pager.config config --edit &&
+       ! test -e paginated.out &&
+       test -e editor.used
+'
+
+test_expect_success TTY 'git config --get ignores pager.config' '
+       rm -f paginated.out &&
+       test_terminal git -c pager.config config --get foo.bar &&
+       ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --get-urlmatch defaults to paging' '
+       rm -f paginated.out &&
+       test_terminal git -c http."https://foo.com/".bar=foo \
+                         config --get-urlmatch http https://foo.com &&
+       test -e paginated.out
+'
+
+test_expect_success TTY 'git config --get-all respects pager.config' '
+       rm -f paginated.out &&
+       test_terminal git -c pager.config=false config --get-all foo.bar &&
+       ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --list defaults to paging' '
+       rm -f paginated.out &&
+       test_terminal git config --list &&
+       test -e paginated.out
+'
+
 # A colored commit log will begin with an appropriate ANSI escape
 # for the first color; the text "commit" comes later.
 colorful() {
index e5fb892f9575fda4baf0b2a0e6b31cf13a0d6c0b..c61e304e97376b09de51299a797dfb388335a365 100755 (executable)
@@ -14,6 +14,9 @@ test_description='test untracked cache'
 # See <20160803174522.5571-1-pclouds@gmail.com> if you want to know
 # more.
 
+GIT_FORCE_UNTRACKED_CACHE=true
+export GIT_FORCE_UNTRACKED_CACHE
+
 sync_mtime () {
        find . -type d -ls >/dev/null
 }
@@ -22,6 +25,12 @@ avoid_racy() {
        sleep 1
 }
 
+status_is_clean() {
+       >../status.expect &&
+       git status --porcelain >../status.actual &&
+       test_cmp ../status.expect ../status.actual
+}
+
 test_lazy_prereq UNTRACKED_CACHE '
        { git update-index --test-untracked-cache; ret=$?; } &&
        test $ret -ne 1
@@ -683,4 +692,85 @@ test_expect_success 'untracked cache survives a commit' '
        test_cmp ../before ../after
 '
 
+test_expect_success 'teardown worktree' '
+       cd ..
+'
+
+test_expect_success SYMLINKS 'setup worktree for symlink test' '
+       git init worktree-symlink &&
+       cd worktree-symlink &&
+       git config core.untrackedCache true &&
+       mkdir one two &&
+       touch one/file two/file &&
+       git add one/file two/file &&
+       git commit -m"first commit" &&
+       git rm -rf one &&
+       ln -s two one &&
+       git add one &&
+       git commit -m"second commit"
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=true' '
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       status_is_clean
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=false' '
+       git config core.untrackedCache false &&
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       status_is_clean
+'
+
+test_expect_success 'setup worktree for non-symlink test' '
+       git init worktree-non-symlink &&
+       cd worktree-non-symlink &&
+       git config core.untrackedCache true &&
+       mkdir one two &&
+       touch one/file two/file &&
+       git add one/file two/file &&
+       git commit -m"first commit" &&
+       git rm -rf one &&
+       cp two/file one &&
+       git add one &&
+       git commit -m"second commit"
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=true' '
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       test-dump-untracked-cache >../actual &&
+       grep -F "recurse valid" ../actual >../actual.grep &&
+       cat >../expect.grep <<EOF &&
+/ 0000000000000000000000000000000000000000 recurse valid
+/two/ 0000000000000000000000000000000000000000 recurse valid
+EOF
+       status_is_clean &&
+       test_cmp ../expect.grep ../actual.grep
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=false' '
+       git config core.untrackedCache false &&
+       git checkout HEAD~ &&
+       status_is_clean &&
+       status_is_clean &&
+       git checkout master &&
+       avoid_racy &&
+       status_is_clean &&
+       status_is_clean
+'
+
 test_done
index e319fa2e8470791340a479989a81d934b79405da..8f795327a00f6c1b751b82e7e395c8387543d388 100755 (executable)
@@ -390,6 +390,68 @@ test_expect_success 'verify upstream fields in branch header' '
        )
 '
 
+test_expect_success 'verify --[no-]ahead-behind with V2 format' '
+       git checkout master &&
+       test_when_finished "rm -rf sub_repo" &&
+       git clone . sub_repo &&
+       (
+               ## Confirm local master tracks remote master.
+               cd sub_repo &&
+               HUF=$(git rev-parse HEAD) &&
+
+               # Confirm --no-ahead-behind reports traditional branch.ab with 0/0 for equal branches.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +0 -0
+               EOF
+
+               git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual &&
+
+               # Confirm --ahead-behind reports traditional branch.ab with 0/0.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +0 -0
+               EOF
+
+               git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual &&
+
+               ## Test non-equal ahead/behind.
+               echo xyz >file_xyz &&
+               git add file_xyz &&
+               git commit -m xyz &&
+
+               HUF=$(git rev-parse HEAD) &&
+
+               # Confirm --no-ahead-behind reports branch.ab with ?/? for non-equal branches.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +? -?
+               EOF
+
+               git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual &&
+
+               # Confirm --ahead-behind reports traditional branch.ab with 1/0.
+               cat >expect <<-EOF &&
+               # branch.oid $HUF
+               # branch.head master
+               # branch.upstream origin/master
+               # branch.ab +1 -0
+               EOF
+
+               git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+               test_cmp expect actual
+       )
+'
+
 test_expect_success 'create and add submodule, submodule appears clean (A. S...)' '
        git checkout master &&
        git clone . sub_repo &&
index b13f72975ecce17887c4c8275c6935d78d4b09a0..1f43b3cd4cd34ec6f4c3de4cfe8a26291e4e480f 100755 (executable)
@@ -4,6 +4,38 @@ test_description='prepare-commit-msg hook'
 
 . ./test-lib.sh
 
+test_expect_success 'set up commits for rebasing' '
+       test_commit root &&
+       test_commit a a a &&
+       test_commit b b b &&
+       git checkout -b rebase-me root &&
+       test_commit rebase-a a aa &&
+       test_commit rebase-b b bb &&
+       for i in $(test_seq 1 13)
+       do
+               test_commit rebase-$i c $i
+       done &&
+       git checkout master &&
+
+       cat >rebase-todo <<-EOF
+       pick $(git rev-parse rebase-a)
+       pick $(git rev-parse rebase-b)
+       fixup $(git rev-parse rebase-1)
+       fixup $(git rev-parse rebase-2)
+       pick $(git rev-parse rebase-3)
+       fixup $(git rev-parse rebase-4)
+       squash $(git rev-parse rebase-5)
+       reword $(git rev-parse rebase-6)
+       squash $(git rev-parse rebase-7)
+       fixup $(git rev-parse rebase-8)
+       fixup $(git rev-parse rebase-9)
+       edit $(git rev-parse rebase-10)
+       squash $(git rev-parse rebase-11)
+       squash $(git rev-parse rebase-12)
+       edit $(git rev-parse rebase-13)
+       EOF
+'
+
 test_expect_success 'with no hook' '
 
        echo "foo" > file &&
@@ -31,17 +63,41 @@ mkdir -p "$HOOKDIR"
 echo "#!$SHELL_PATH" > "$HOOK"
 cat >> "$HOOK" <<'EOF'
 
-if test "$2" = commit; then
-  source=$(git rev-parse "$3")
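+# Detect whether this hook is being run from an interactive rebase by
+# checking for the sequencer's state directory.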
+GIT_DIR=$(git rev-parse --git-dir)
+if test -d "$GIT_DIR/rebase-merge"
+then
+       rebasing=1
 else
-  source=${2-default}
+       rebasing=0
 fi
-if test "$GIT_EDITOR" = :; then
-  sed -e "1s/.*/$source (no editor)/" "$1" > msg.tmp
+
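+# get_last_cmd: report the most recently executed rebase command by reading
+# the final line of the sequencer's "done" file and formatting it as
+# "[<cmd> <commit subject>]".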
+get_last_cmd () {
+       tail -n1 "$GIT_DIR/rebase-merge/done" | {
+               read cmd id _
+               git log --pretty="[$cmd %s]" -n1 $id
+       }
+}
+
+if test "$2" = commit
+then
+       if test $rebasing = 1
+       then
+               source="$3"
+       else
+               source=$(git rev-parse "$3")
+       fi
 else
-  sed -e "1s/.*/$source/" "$1" > msg.tmp
+       source=${2-default}
+fi
+test "$GIT_EDITOR" = : && source="$source (no editor)"
+
+if test $rebasing = 1
+then
+       echo "$source $(get_last_cmd)" >"$1"
+else
+       sed -e "1s/.*/$source/" "$1" >msg.tmp
+       mv msg.tmp "$1"
 fi
-mv msg.tmp "$1"
 exit 0
 EOF
 chmod +x "$HOOK"
@@ -156,6 +212,63 @@ test_expect_success 'with hook and editor (merge)' '
        test "$(git log -1 --pretty=format:%s)" = "merge"
 '
 
+test_rebase () {
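+       # test_rebase <expect> <mode>: drive a scripted rebase (fed by the
+       # rebase-todo file above), resolve the conflicts it stops at, and
+       # compare the commit subjects recorded in the reflog against
+       # t7505/expected-rebase<mode>.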
+       expect=$1 &&
+       mode=$2 &&
+       test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" '
+               test_when_finished "\
+                       git rebase --abort
+                       git checkout -f master
+                       git branch -D tmp" &&
+               git checkout -b tmp rebase-me &&
+               GIT_SEQUENCE_EDITOR="cp rebase-todo" &&
+               GIT_EDITOR="\"$FAKE_EDITOR\"" &&
+               (
+                       export GIT_SEQUENCE_EDITOR GIT_EDITOR &&
+                       test_must_fail git rebase $mode b &&
+                       echo x >a &&
+                       git add a &&
+                       test_must_fail git rebase --continue &&
+                       echo x >b &&
+                       git add b &&
+                       git commit &&
+                       git rebase --continue &&
+                       echo y >a &&
+                       git add a &&
+                       git commit &&
+                       git rebase --continue &&
+                       echo y >b &&
+                       git add b &&
+                       git rebase --continue
+               ) &&
+               if test $mode = -p # reword amended after pick
+               then
+                       n=18
+               else
+                       n=17
+               fi &&
+               git log --pretty=%s -g -n$n HEAD@{1} >actual &&
+               test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual
+       '
+}
+
+test_rebase success -i
+test_rebase success -p
+
+test_expect_success 'with hook (cherry-pick)' '
+       test_when_finished "git checkout -f master" &&
+       git checkout -B other b &&
+       git cherry-pick rebase-1 &&
+       test "$(git log -1 --pretty=format:%s)" = "message (no editor)"
+'
+
+test_expect_success 'with hook and editor (cherry-pick)' '
+       test_when_finished "git checkout -f master" &&
+       git checkout -B other b &&
+       git cherry-pick -e rebase-1 &&
+       test "$(git log -1 --pretty=format:%s)" = merge
+'
+
 cat > "$HOOK" <<'EOF'
 #!/bin/sh
 exit 1
@@ -197,4 +310,11 @@ test_expect_success 'with failing hook (merge)' '
 
 '
 
+test_expect_success C_LOCALE_OUTPUT 'with failing hook (cherry-pick)' '
+       test_when_finished "git checkout -f master" &&
+       git checkout -B other b &&
+       test_must_fail git cherry-pick rebase-1 2>actual &&
+       test $(grep -c prepare-commit-msg actual) = 1
+'
+
 test_done
diff --git a/t/t7505/expected-rebase-i b/t/t7505/expected-rebase-i
new file mode 100644 (file)
index 0000000..c514bdb
--- /dev/null
@@ -0,0 +1,17 @@
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+message [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
diff --git a/t/t7505/expected-rebase-p b/t/t7505/expected-rebase-p
new file mode 100644 (file)
index 0000000..93bada5
--- /dev/null
@@ -0,0 +1,18 @@
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+HEAD [reword rebase-6]
+message (no editor) [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
index eb2d13bbcf8abefd7af2be6f9cb3bf97e389ab15..756beb0d8eb466d78b235af363b6a36dde37c79e 100755 (executable)
@@ -314,4 +314,43 @@ test_expect_success 'splitting the index results in the same state' '
        test_cmp expect actual
 '
 
+test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' '
+       test_create_repo dot-git &&
+       (
+               cd dot-git &&
+               mkdir -p .git/hooks &&
+               : >tracked &&
+               : >modified &&
+               mkdir dir1 &&
+               : >dir1/tracked &&
+               : >dir1/modified &&
+               mkdir dir2 &&
+               : >dir2/tracked &&
+               : >dir2/modified &&
+               write_integration_script &&
+               git config core.fsmonitor .git/hooks/fsmonitor-test &&
+               git update-index --untracked-cache &&
+               git update-index --fsmonitor &&
+               GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \
+               git status &&
+               test-dump-untracked-cache >../before
+       ) &&
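+       # Teach the fake fsmonitor hook to report changes under .git; such
+       # paths must not invalidate the untracked cache.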
+       cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF &&
+       printf ".git\0"
+       printf ".git/index\0"
+       printf "dir1/.git\0"
+       printf "dir1/.git/index\0"
+       EOF
+       (
+               cd dot-git &&
+               GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \
+               git status &&
+               test-dump-untracked-cache >../after
+       ) &&
+       grep "directory invalidation" trace-before >>before &&
+       grep "directory invalidation" trace-after >>after &&
+       # UNTR extension unchanged, dir invalidation count unchanged
+       test_cmp before after
+'
+
 test_done
index dfde6a675a8cd28297324627923a861bc7410ff2..6736d8d13139c946b07165d5bbad6c8d14617cbd 100755 (executable)
@@ -700,6 +700,42 @@ test_expect_success 'merge --no-ff --edit' '
        test_cmp expected actual
 '
 
+test_expect_success 'merge annotated/signed tag w/o tracking' '
+       test_when_finished "rm -rf dst; git tag -d anno1" &&
+       git tag -a -m "anno c1" anno1 c1 &&
+       git init dst &&
+       git rev-parse c1 >dst/expect &&
+       (
+               # c0 fast-forwards to c1 but because this repository
+               # is not a "downstream" whose refs/tags follows along
+               # tag from the "upstream", this pull defaults to --no-ff
+               cd dst &&
+               git pull .. c0 &&
+               git pull .. anno1 &&
+               git rev-parse HEAD^2 >actual &&
+               test_cmp expect actual
+       )
+'
+
+test_expect_success 'merge annotated/signed tag w/ tracking' '
+       test_when_finished "rm -rf dst; git tag -d anno1" &&
+       git tag -a -m "anno c1" anno1 c1 &&
+       git init dst &&
+       git rev-parse c1 >dst/expect &&
+       (
+               # c0 fast-forwards to c1 and because this repository
+               # is a "downstream" whose refs/tags follows along
+               # tag from the "upstream", this pull defaults to --ff
+               cd dst &&
+               git remote add origin .. &&
+               git pull origin c0 &&
+               git fetch origin &&
+               git merge anno1 &&
+               git rev-parse HEAD >actual &&
+               test_cmp expect actual
+       )
+'
+
 test_expect_success GPG 'merge --ff-only tag' '
        git reset --hard c0 &&
        git commit --allow-empty -m "A newer commit" &&
@@ -718,7 +754,7 @@ test_expect_success GPG 'merge --no-edit tag should skip editor' '
        git tag -f -s -m "A newer commit" signed &&
        git reset --hard c0 &&
 
-       EDITOR=false git merge --no-edit signed &&
+       EDITOR=false git merge --no-edit --no-ff signed &&
        git rev-parse signed^0 >expect &&
        git rev-parse HEAD^2 >actual &&
        test_cmp expect actual
index 9444d6a9b9026748028178c4b66ee8bbbc1d89fa..dd8ab7ede182fc3c3da840fee083bf23a94ce13c 100755 (executable)
@@ -92,12 +92,15 @@ test_expect_success 'will not overwrite removed file with staged changes' '
        test_cmp important c1.c
 '
 
-test_expect_failure 'will not overwrite unstaged changes in renamed file' '
+test_expect_success 'will not overwrite unstaged changes in renamed file' '
        git reset --hard c1 &&
        git mv c1.c other.c &&
        git commit -m rename &&
        cp important other.c &&
-       git merge c1a &&
+       test_must_fail git merge c1a >out &&
+       test_i18ngrep "Refusing to lose dirty file at other.c" out &&
+       test_path_is_file other.c~HEAD &&
+       test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) &&
        test_cmp important other.c
 '
 
diff --git a/t/t9000-addresses.sh b/t/t9000-addresses.sh
deleted file mode 100755 (executable)
index a1ebef6..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-test_description='compare address parsing with and without Mail::Address'
-. ./test-lib.sh
-
-if ! test_have_prereq PERL; then
-       skip_all='skipping perl interface tests, perl not available'
-       test_done
-fi
-
-perl -MTest::More -e 0 2>/dev/null || {
-       skip_all="Perl Test::More unavailable, skipping test"
-       test_done
-}
-
-perl -MMail::Address -e 0 2>/dev/null || {
-       skip_all="Perl Mail::Address unavailable, skipping test"
-       test_done
-}
-
-test_external_has_tap=1
-
-test_external_without_stderr \
-       'Perl address parsing function' \
-       perl "$TEST_DIRECTORY"/t9000/test.pl
-
-test_done
diff --git a/t/t9000/test.pl b/t/t9000/test.pl
deleted file mode 100755 (executable)
index dfeaa9c..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/perl
-use lib (split(/:/, $ENV{GITPERLLIB}));
-
-use 5.008;
-use warnings;
-use strict;
-
-use Test::More qw(no_plan);
-use Mail::Address;
-
-BEGIN { use_ok('Git') }
-
-my @success_list = (q[Jane],
-       q[jdoe@example.com],
-       q[<jdoe@example.com>],
-       q[Jane <jdoe@example.com>],
-       q[Jane Doe <jdoe@example.com>],
-       q["Jane" <jdoe@example.com>],
-       q["Doe, Jane" <jdoe@example.com>],
-       q["Jane@:;\>.,()<Doe" <jdoe@example.com>],
-       q[Jane!#$%&'*+-/=?^_{|}~Doe' <jdoe@example.com>],
-       q["<jdoe@example.com>"],
-       q["Jane jdoe@example.com"],
-       q[Jane Doe <jdoe    @   example.com  >],
-       q[Jane       Doe <  jdoe@example.com  >],
-       q[Jane @ Doe @ Jane @ Doe],
-       q["Jane, 'Doe'" <jdoe@example.com>],
-       q['Doe, "Jane' <jdoe@example.com>],
-       q["Jane" "Do"e <jdoe@example.com>],
-       q["Jane' Doe" <jdoe@example.com>],
-       q["Jane Doe <jdoe@example.com>" <jdoe@example.com>],
-       q["Jane\" Doe" <jdoe@example.com>],
-       q[Doe, jane <jdoe@example.com>],
-       q["Jane Doe <jdoe@example.com>],
-       q['Jane 'Doe' <jdoe@example.com>],
-       q[Jane@:;\.,()<>Doe <jdoe@example.com>],
-       q[Jane <jdoe@example.com> Doe],
-       q[<jdoe@example.com> Jane Doe]);
-
-my @known_failure_list = (q[Jane\ Doe <jdoe@example.com>],
-       q["Doe, Ja"ne <jdoe@example.com>],
-       q["Doe, Katarina" Jane <jdoe@example.com>],
-       q[Jane jdoe@example.com],
-       q["Jane "Kat"a" ri"na" ",Doe" <jdoe@example.com>],
-       q[Jane Doe],
-       q[Jane "Doe <jdoe@example.com>"],
-       q[\"Jane Doe <jdoe@example.com>],
-       q[Jane\"\" Doe <jdoe@example.com>],
-       q['Jane "Katarina\" \' Doe' <jdoe@example.com>]);
-
-foreach my $str (@success_list) {
-       my @expected = map { $_->format } Mail::Address->parse("$str");
-       my @actual = Git::parse_mailboxes("$str");
-       is_deeply(\@expected, \@actual, qq[same output : $str]);
-}
-
-TODO: {
-       local $TODO = "known breakage";
-       foreach my $str (@known_failure_list) {
-               my @expected = map { $_->format } Mail::Address->parse("$str");
-               my @actual = Git::parse_mailboxes("$str");
-               is_deeply(\@expected, \@actual, qq[same output : $str]);
-       }
-}
-
-my $is_passing = eval { Test::More->is_passing };
-exit($is_passing ? 0 : 1) unless $@ =~ /Can't locate object method/;
index 81869d89133a55b8e2d1fe676ff217636466f66f..e80eacbb1b81a319232c0b5f28aab3c82d795a93 100755 (executable)
@@ -178,6 +178,25 @@ test_expect_success $PREREQ 'cc trailer with various syntax' '
        test_cmp expected-cc commandline1
 '
 
+test_expect_success $PREREQ 'setup fake get_maintainer.pl script for cc trailer' "
+       write_script expected-cc-script.sh <<-EOF
+       echo 'One Person <one@example.com> (supporter:THIS (FOO/bar))'
+       echo 'Two Person <two@example.com> (maintainer:THIS THING)'
+       echo 'Third List <three@example.com> (moderated list:THIS THING (FOO/bar))'
+       echo '<four@example.com> (moderated list:FOR THING)'
+       echo 'five@example.com (open list:FOR THING (FOO/bar))'
+       echo 'six@example.com (open list)'
+       EOF
+"
+
+test_expect_success $PREREQ 'cc trailer with get_maintainer.pl output' '
+       clean_fake_sendmail &&
+       git send-email -1 --to=recipient@example.com \
+               --cc-cmd=./expected-cc-script.sh \
+               --smtp-server="$(pwd)/fake.sendmail" &&
+       test_cmp expected-cc commandline1
+'
+
 test_expect_success $PREREQ 'setup expect' "
 cat >expected-show-all-headers <<\EOF
 0001-Second.patch
@@ -205,6 +224,7 @@ Message-Id: MESSAGE-ID-STRING
 X-Mailer: X-MAILER-STRING
 In-Reply-To: <unique-message-id@example.com>
 References: <unique-message-id@example.com>
+Reply-To: Reply <reply@example.com>
 
 Result: OK
 EOF
@@ -297,6 +317,7 @@ test_expect_success $PREREQ 'Show all headers' '
                --dry-run \
                --suppress-cc=sob \
                --from="Example <from@example.com>" \
+               --reply-to="Reply <reply@example.com>" \
                --to=to@example.com \
                --cc=cc@example.com \
                --bcc=bcc@example.com \
index c30660d60626c886dfa5993acddaebf2d3364de9..06742748e99fda241ec629e34c0700ae4050692e 100755 (executable)
@@ -447,12 +447,10 @@ test_expect_success 'cvs update (-p)' '
     git push gitcvs.git >/dev/null &&
     cd cvswork &&
     GIT_CONFIG="$git_config" cvs update &&
-    rm -f failures &&
     for i in merge no-lf empty really-empty; do
-        GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out
-       test_cmp $i.out ../$i >>failures 2>&1
-    done &&
-    test -z "$(cat failures)"
+       GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out &&
+       test_cmp $i.out ../$i || return 1
+    done
 '
 
 cd "$WORKDIR"
index 6d2d3c8739cbd6978d823e7947d9f95c758ff241..cf31ace66763741a7f53e3b9a1cb3c0019255f56 100755 (executable)
@@ -455,20 +455,20 @@ test_expect_success 'cvs up -r $(git rev-parse v1)' '
 '
 
 test_expect_success 'cvs diff -r v1 -u' '
-       ( cd cvswork && cvs -f diff -r v1 -u ) >cvsDiff.out 2>cvs.log &&
+       ( cd cvswork && cvs -f diff -r v1 -u >../cvsDiff.out 2>../cvs.log ) &&
        test_must_be_empty cvsDiff.out &&
        test_must_be_empty cvs.log
 '
 
 test_expect_success 'cvs diff -N -r v2 -u' '
-       ( cd cvswork && ! cvs -f diff -N -r v2 -u ) >cvsDiff.out 2>cvs.log &&
+       ( cd cvswork && ! cvs -f diff -N -r v2 -u >../cvsDiff.out 2>../cvs.log ) &&
        test_must_be_empty cvs.log &&
        test -s cvsDiff.out &&
        check_diff cvsDiff.out v2 v1 >check_diff.out 2>&1
 '
 
 test_expect_success 'cvs diff -N -r v2 -r v1.2' '
-       ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u ) >cvsDiff.out 2>cvs.log &&
+       ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u >../cvsDiff.out 2>../cvs.log ) &&
        test_must_be_empty cvs.log &&
        test -s cvsDiff.out &&
        check_diff cvsDiff.out v2 v1.2 >check_diff.out 2>&1
@@ -487,7 +487,7 @@ test_expect_success 'apply early [cvswork3] diff to b3' '
 '
 
 test_expect_success 'check [cvswork3] diff' '
-       ( cd cvswork3 && ! cvs -f diff -N -u ) >"$WORKDIR/cvsDiff.out" 2>cvs.log &&
+       ( cd cvswork3 && ! cvs -f diff -N -u >"$WORKDIR/cvsDiff.out" 2>../cvs.log ) &&
        test_must_be_empty cvs.log &&
        test -s cvsDiff.out &&
        test $(grep Index: cvsDiff.out | wc -l) = 3 &&
index fc614dcbfa74c39e120ff2fc80cdec07ea16a338..b7f5b1e632fb27a0448239361d6b4207be4b9908 100755 (executable)
@@ -1237,17 +1237,19 @@ test_expect_success 'double dash "git" itself' '
 test_expect_success 'double dash "git checkout"' '
        test_completion "git checkout --" <<-\EOF
        --quiet Z
+       --detach Z
+       --track Z
+       --orphan=Z
        --ours Z
        --theirs Z
-       --track Z
-       --no-track Z
        --merge Z
-       --conflict=
-       --orphan Z
+       --conflict=Z
        --patch Z
-       --detach Z
        --ignore-skip-worktree-bits Z
+       --ignore-other-worktrees Z
        --recurse-submodules Z
+       --progress Z
+       --no-track Z
        --no-recurse-submodules Z
        EOF
 '
@@ -1495,4 +1497,35 @@ do
        '
 done
 
+test_expect_success 'sourcing the completion script clears cached commands' '
+       __git_compute_all_commands &&
+       verbose test -n "$__git_all_commands" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__git_all_commands"
+'
+
+test_expect_success 'sourcing the completion script clears cached porcelain commands' '
+       __git_compute_porcelain_commands &&
+       verbose test -n "$__git_porcelain_commands" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__git_porcelain_commands"
+'
+
+test_expect_success !GETTEXT_POISON 'sourcing the completion script clears cached merge strategies' '
+       __git_compute_merge_strategies &&
+       verbose test -n "$__git_merge_strategies" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__git_merge_strategies"
+'
+
+test_expect_success 'sourcing the completion script clears cached --options' '
+       __gitcomp_builtin checkout &&
+       verbose test -n "$__gitcomp_builtin_checkout" &&
+       __gitcomp_builtin notes_edit &&
+       verbose test -n "$__gitcomp_builtin_notes_edit" &&
+       . "$GIT_BUILD_DIR/contrib/completion/git-completion.bash" &&
+       verbose test -z "$__gitcomp_builtin_checkout" &&
+       verbose test -z "$__gitcomp_builtin_notes_edit"
+'
+
 test_done
index 97c9b32c2ecfa608f1e9c37c1c6402a1772e4187..8f5c811dd7d627ea5a0611be5161cea98deedb95 100755 (executable)
@@ -735,22 +735,12 @@ test_expect_success 'prompt - hide if pwd ignored - env var set, config unset, p
        test_cmp expected "$actual"
 '
 
-test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stdout)' '
+test_expect_success 'prompt - hide if pwd ignored - inside gitdir' '
        printf " (GIT_DIR!)" >expected &&
        (
                GIT_PS1_HIDE_IF_PWD_IGNORED=y &&
                cd .git &&
-               __git_ps1 >"$actual" 2>/dev/null
-       ) &&
-       test_cmp expected "$actual"
-'
-
-test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stderr)' '
-       printf "" >expected &&
-       (
-               GIT_PS1_HIDE_IF_PWD_IGNORED=y &&
-               cd .git &&
-               __git_ps1 >/dev/null 2>"$actual"
+               __git_ps1 >"$actual"
        ) &&
        test_cmp expected "$actual"
 '
index 8a8a9329eeaa322121ecdbb3cd1f5fa6c9ecc94c..b895366feef6027ac00bf47271b2530e8b7e7162 100644 (file)
@@ -629,30 +629,30 @@ test_must_fail () {
                _test_ok=
                ;;
        esac
-       "$@"
+       "$@" 2>&7
        exit_code=$?
        if test $exit_code -eq 0 && ! list_contains "$_test_ok" success
        then
-               echo >&2 "test_must_fail: command succeeded: $*"
+               echo >&4 "test_must_fail: command succeeded: $*"
                return 1
        elif test_match_signal 13 $exit_code && list_contains "$_test_ok" sigpipe
        then
                return 0
        elif test $exit_code -gt 129 && test $exit_code -le 192
        then
-               echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*"
+               echo >&4 "test_must_fail: died by signal $(($exit_code - 128)): $*"
                return 1
        elif test $exit_code -eq 127
        then
-               echo >&2 "test_must_fail: command not found: $*"
+               echo >&4 "test_must_fail: command not found: $*"
                return 1
        elif test $exit_code -eq 126
        then
-               echo >&2 "test_must_fail: valgrind error: $*"
+               echo >&4 "test_must_fail: valgrind error: $*"
                return 1
        fi
        return 0
-}
+} 7>&2 2>&4
 
 # Similar to test_must_fail, but tolerates success, too.  This is
 # meant to be used in contexts like:
@@ -668,8 +668,8 @@ test_must_fail () {
 # Accepts the same options as test_must_fail.
 
 test_might_fail () {
-       test_must_fail ok=success "$@"
-}
+       test_must_fail ok=success "$@" 2>&7
+} 7>&2 2>&4
 
 # Similar to test_must_fail and test_might_fail, but check that a
 # given command exited with a given exit code. Meant to be used as:
@@ -681,16 +681,16 @@ test_might_fail () {
 test_expect_code () {
        want_code=$1
        shift
-       "$@"
+       "$@" 2>&7
        exit_code=$?
        if test $exit_code = $want_code
        then
                return 0
        fi
 
-       echo >&2 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
+       echo >&4 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
        return 1
-}
+} 7>&2 2>&4
 
 # test_cmp is a helper function to compare actual and expected output.
 # You can use it like:
@@ -752,18 +752,18 @@ test_i18ngrep () {
                shift
                ! grep "$@" && return 0
 
-               echo >&2 "error: '! grep $@' did find a match in:"
+               echo >&4 "error: '! grep $@' did find a match in:"
        else
                grep "$@" && return 0
 
-               echo >&2 "error: 'grep $@' didn't find a match in:"
+               echo >&4 "error: 'grep $@' didn't find a match in:"
        fi
 
        if test -s "$last_arg"
        then
-               cat >&2 "$last_arg"
+               cat >&4 "$last_arg"
        else
-               echo >&2 "<File '$last_arg' is empty>"
+               echo >&4 "<File '$last_arg' is empty>"
        fi
 
        return 1
@@ -774,7 +774,7 @@ test_i18ngrep () {
 # not output anything when they fail.
 verbose () {
        "$@" && return 0
-       echo >&2 "command failed: $(git rev-parse --sq-quote "$@")"
+       echo >&4 "command failed: $(git rev-parse --sq-quote "$@")"
        return 1
 }
 
@@ -782,7 +782,11 @@ verbose () {
 # otherwise.
 
 test_must_be_empty () {
-       if test -s "$1"
+       if ! test -f "$1"
+       then
+               echo "'$1' is missing"
+               return 1
+       elif test -s "$1"
        then
                echo "'$1' is not empty, it contains:"
                cat "$1"
@@ -892,8 +896,8 @@ test_write_lines () {
 }
 
 perl () {
-       command "$PERL_PATH" "$@"
-}
+       command "$PERL_PATH" "$@" 2>&7
+} 7>&2 2>&4
 
 # Is the value one of the various ways to spell a boolean true/false?
 test_normalize_bool () {
@@ -1033,13 +1037,13 @@ test_env () {
                                shift
                                ;;
                        *)
-                               "$@"
+                               "$@" 2>&7
                                exit
                                ;;
                        esac
                done
        )
-}
+} 7>&2 2>&4
 
 # Returns true if the numeric exit code in "$2" represents the expected signal
 # in "$1". Signals should be given numerically.
@@ -1081,9 +1085,9 @@ nongit () {
                GIT_CEILING_DIRECTORIES=$(pwd) &&
                export GIT_CEILING_DIRECTORIES &&
                cd non-repo &&
-               "$@"
+               "$@" 2>&7
        )
-}
+} 7>&2 2>&4
 
 # convert stdin to pktline representation; note that empty input becomes an
 # empty packet, not a flush packet (for that you can just print 0000 yourself).
index 816e6923911cb6d594f3508e8540d680ecd983e9..7740d511d289f44bb1313308fe49d5894f64b3c2 100644 (file)
@@ -264,7 +264,24 @@ do
                GIT_TEST_CHAIN_LINT=0
                shift ;;
        -x)
-               trace=t
+               # Some test scripts can't be reliably traced with '-x',
+               # unless the test is run with a Bash version supporting
+               # BASH_XTRACEFD (introduced in Bash v4.1).  Check whether
+               # this test is marked as such, and ignore '-x' if it
+               # isn't executed with a suitable Bash version.
+               if test -z "$test_untraceable" || {
+                    test -n "$BASH_VERSION" && {
+                      test ${BASH_VERSINFO[0]} -gt 4 || {
+                        test ${BASH_VERSINFO[0]} -eq 4 &&
+                        test ${BASH_VERSINFO[1]} -ge 1
+                      }
+                    }
+                  }
+               then
+                       trace=t
+               else
+                       echo >&2 "warning: ignoring -x; '$0' is untraceable without BASH_XTRACEFD"
+               fi
                shift ;;
        --verbose-log)
                verbose_log=t
@@ -940,7 +957,7 @@ then
        fi
 fi
 
-GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git
+GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib
 export GITPERLLIB
 test -d "$GIT_BUILD_DIR"/templates/blt || {
        error "You haven't built things yet, have you?"
@@ -1107,6 +1124,10 @@ test_lazy_prereq EXPENSIVE '
        test -n "$GIT_TEST_LONG"
 '
 
+test_lazy_prereq EXPENSIVE_ON_WINDOWS '
+       test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN
+'
+
 test_lazy_prereq USR_BIN_TIME '
        test -x /usr/bin/time
 '
diff --git a/tag.c b/tag.c
index fcbe012f7a2203e198dac9eac03aa9dab999f334..86b1dcbb8270e944ac643a39835d490bbaf35b20 100644 (file)
--- a/tag.c
+++ b/tag.c
@@ -41,20 +41,20 @@ int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
        unsigned long size;
        int ret;
 
-       type = sha1_object_info(oid->hash, NULL);
+       type = oid_object_info(oid, NULL);
        if (type != OBJ_TAG)
                return error("%s: cannot verify a non-tag object of type %s.",
                                name_to_report ?
                                name_to_report :
-                               find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
-                               typename(type));
+                               find_unique_abbrev(oid, DEFAULT_ABBREV),
+                               type_name(type));
 
-       buf = read_sha1_file(oid->hash, &type, &size);
+       buf = read_object_file(oid, &type, &size);
        if (!buf)
                return error("%s: unable to read file.",
                                name_to_report ?
                                name_to_report :
-                               find_unique_abbrev(oid->hash, DEFAULT_ABBREV));
+                               find_unique_abbrev(oid, DEFAULT_ABBREV));
 
        ret = run_gpg_verify(buf, size, flags);
 
@@ -182,7 +182,7 @@ int parse_tag(struct tag *item)
 
        if (item->object.parsed)
                return 0;
-       data = read_sha1_file(item->object.oid.hash, &type, &size);
+       data = read_object_file(&item->object.oid, &type, &size);
        if (!data)
                return error("Could not read %s",
                             oid_to_hex(&item->object.oid));
index 5fdafdd2d2d72390ee9fe3c2afd501ad222fac8e..139ecd97f8eb88b597aab50c2eb2b171a11ef3ef 100644 (file)
@@ -165,11 +165,11 @@ struct tempfile *register_tempfile(const char *path)
        return tempfile;
 }
 
-struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode)
+struct tempfile *mks_tempfile_sm(const char *filename_template, int suffixlen, int mode)
 {
        struct tempfile *tempfile = new_tempfile();
 
-       strbuf_add_absolute_path(&tempfile->filename, template);
+       strbuf_add_absolute_path(&tempfile->filename, filename_template);
        tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode);
        if (tempfile->fd < 0) {
                deactivate_tempfile(tempfile);
@@ -179,7 +179,7 @@ struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode)
        return tempfile;
 }
 
-struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
+struct tempfile *mks_tempfile_tsm(const char *filename_template, int suffixlen, int mode)
 {
        struct tempfile *tempfile = new_tempfile();
        const char *tmpdir;
@@ -188,7 +188,7 @@ struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
        if (!tmpdir)
                tmpdir = "/tmp";
 
-       strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, template);
+       strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, filename_template);
        tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode);
        if (tempfile->fd < 0) {
                deactivate_tempfile(tempfile);
@@ -198,12 +198,12 @@ struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
        return tempfile;
 }
 
-struct tempfile *xmks_tempfile_m(const char *template, int mode)
+struct tempfile *xmks_tempfile_m(const char *filename_template, int mode)
 {
        struct tempfile *tempfile;
        struct strbuf full_template = STRBUF_INIT;
 
-       strbuf_add_absolute_path(&full_template, template);
+       strbuf_add_absolute_path(&full_template, filename_template);
        tempfile = mks_tempfile_m(full_template.buf, mode);
        if (!tempfile)
                die_errno("Unable to create temporary file '%s'",
index 450908b2e0bc4a94bc23a862abf04a56366aedd7..8959c5f1b5761dc34f1742a96941f67efd6429a7 100644 (file)
@@ -135,58 +135,58 @@ extern struct tempfile *register_tempfile(const char *path);
  */
 
 /* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_sm(const char *template,
+extern struct tempfile *mks_tempfile_sm(const char *filename_template,
                                        int suffixlen, int mode);
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_s(const char *template,
+static inline struct tempfile *mks_tempfile_s(const char *filename_template,
                                              int suffixlen)
 {
-       return mks_tempfile_sm(template, suffixlen, 0600);
+       return mks_tempfile_sm(filename_template, suffixlen, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_m(const char *template, int mode)
+static inline struct tempfile *mks_tempfile_m(const char *filename_template, int mode)
 {
-       return mks_tempfile_sm(template, 0, mode);
+       return mks_tempfile_sm(filename_template, 0, mode);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile(const char *template)
+static inline struct tempfile *mks_tempfile(const char *filename_template)
 {
-       return mks_tempfile_sm(template, 0, 0600);
+       return mks_tempfile_sm(filename_template, 0, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_tsm(const char *template,
+extern struct tempfile *mks_tempfile_tsm(const char *filename_template,
                                         int suffixlen, int mode);
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_ts(const char *template,
+static inline struct tempfile *mks_tempfile_ts(const char *filename_template,
                                               int suffixlen)
 {
-       return mks_tempfile_tsm(template, suffixlen, 0600);
+       return mks_tempfile_tsm(filename_template, suffixlen, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_tm(const char *template, int mode)
+static inline struct tempfile *mks_tempfile_tm(const char *filename_template, int mode)
 {
-       return mks_tempfile_tsm(template, 0, mode);
+       return mks_tempfile_tsm(filename_template, 0, mode);
 }
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_t(const char *template)
+static inline struct tempfile *mks_tempfile_t(const char *filename_template)
 {
-       return mks_tempfile_tsm(template, 0, 0600);
+       return mks_tempfile_tsm(filename_template, 0, 0600);
 }
 
 /* See "mks_tempfile functions" above. */
-extern struct tempfile *xmks_tempfile_m(const char *template, int mode);
+extern struct tempfile *xmks_tempfile_m(const char *filename_template, int mode);
 
 /* See "mks_tempfile functions" above. */
-static inline struct tempfile *xmks_tempfile(const char *template)
+static inline struct tempfile *xmks_tempfile(const char *filename_template)
 {
-       return xmks_tempfile_m(template, 0600);
+       return xmks_tempfile_m(filename_template, 0600);
 }
 
 /*
diff --git a/trace.c b/trace.c
index b7530b51a9e4d20825d42fcfd2a5dd389ad3b44e..7f3b08e148044c6c94cbef03ae265e134391357a 100644 (file)
--- a/trace.c
+++ b/trace.c
@@ -131,7 +131,6 @@ static void print_trace_line(struct trace_key *key, struct strbuf *buf)
 {
        strbuf_complete_line(buf);
        trace_write(key, buf->buf, buf->len);
-       strbuf_release(buf);
 }
 
 static void trace_vprintf_fl(const char *file, int line, struct trace_key *key,
@@ -144,6 +143,7 @@ static void trace_vprintf_fl(const char *file, int line, struct trace_key *key,
 
        strbuf_vaddf(&buf, format, ap);
        print_trace_line(key, &buf);
+       strbuf_release(&buf);
 }
 
 static void trace_argv_vprintf_fl(const char *file, int line,
@@ -157,8 +157,9 @@ static void trace_argv_vprintf_fl(const char *file, int line,
 
        strbuf_vaddf(&buf, format, ap);
 
-       sq_quote_argv(&buf, argv, 0);
+       sq_quote_argv_pretty(&buf, argv);
        print_trace_line(&trace_default_key, &buf);
+       strbuf_release(&buf);
 }
 
 void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
@@ -171,6 +172,7 @@ void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
 
        strbuf_addbuf(&buf, data);
        print_trace_line(key, &buf);
+       strbuf_release(&buf);
 }
 
 static void trace_performance_vprintf_fl(const char *file, int line,
@@ -190,6 +192,7 @@ static void trace_performance_vprintf_fl(const char *file, int line,
        }
 
        print_trace_line(&trace_perf_key, &buf);
+       strbuf_release(&buf);
 }
 
 #ifndef HAVE_VARIADIC_MACROS
@@ -426,6 +429,6 @@ void trace_command_performance(const char **argv)
                atexit(print_command_performance_atexit);
 
        strbuf_reset(&command_line);
-       sq_quote_argv(&command_line, argv, 0);
+       sq_quote_argv_pretty(&command_line, argv);
        command_start_time = getnanotime();
 }
index 3ba157ed0d6157281f0a9e91f7b7f208652adfcc..c508c9b7521b48c18f1b373ada9b9f2e5b02681d 100644 (file)
--- a/trailer.c
+++ b/trailer.c
@@ -174,12 +174,12 @@ static void print_all(FILE *outfile, struct list_head *head,
 
 static struct trailer_item *trailer_from_arg(struct arg_item *arg_tok)
 {
-       struct trailer_item *new = xcalloc(sizeof(*new), 1);
-       new->token = arg_tok->token;
-       new->value = arg_tok->value;
+       struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1);
+       new_item->token = arg_tok->token;
+       new_item->value = arg_tok->value;
        arg_tok->token = arg_tok->value = NULL;
        free_arg_item(arg_tok);
-       return new;
+       return new_item;
 }
 
 static void add_arg_to_input_list(struct trailer_item *on_tok,
@@ -666,30 +666,30 @@ static void parse_trailer(struct strbuf *tok, struct strbuf *val,
 static struct trailer_item *add_trailer_item(struct list_head *head, char *tok,
                                             char *val)
 {
-       struct trailer_item *new = xcalloc(sizeof(*new), 1);
-       new->token = tok;
-       new->value = val;
-       list_add_tail(&new->list, head);
-       return new;
+       struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1);
+       new_item->token = tok;
+       new_item->value = val;
+       list_add_tail(&new_item->list, head);
+       return new_item;
 }
 
 static void add_arg_item(struct list_head *arg_head, char *tok, char *val,
                         const struct conf_info *conf,
                         const struct new_trailer_item *new_trailer_item)
 {
-       struct arg_item *new = xcalloc(sizeof(*new), 1);
-       new->token = tok;
-       new->value = val;
-       duplicate_conf(&new->conf, conf);
+       struct arg_item *new_item = xcalloc(sizeof(*new_item), 1);
+       new_item->token = tok;
+       new_item->value = val;
+       duplicate_conf(&new_item->conf, conf);
        if (new_trailer_item) {
                if (new_trailer_item->where != WHERE_DEFAULT)
-                       new->conf.where = new_trailer_item->where;
+                       new_item->conf.where = new_trailer_item->where;
                if (new_trailer_item->if_exists != EXISTS_DEFAULT)
-                       new->conf.if_exists = new_trailer_item->if_exists;
+                       new_item->conf.if_exists = new_trailer_item->if_exists;
                if (new_trailer_item->if_missing != MISSING_DEFAULT)
-                       new->conf.if_missing = new_trailer_item->if_missing;
+                       new_item->conf.if_missing = new_trailer_item->if_missing;
        }
-       list_add_tail(&new->list, arg_head);
+       list_add_tail(&new_item->list, arg_head);
 }
 
 static void process_command_line_args(struct list_head *arg_head,
@@ -1000,7 +1000,7 @@ static struct tempfile *trailers_tempfile;
 static FILE *create_in_place_tempfile(const char *file)
 {
        struct stat st;
-       struct strbuf template = STRBUF_INIT;
+       struct strbuf filename_template = STRBUF_INIT;
        const char *tail;
        FILE *outfile;
 
@@ -1014,11 +1014,11 @@ static FILE *create_in_place_tempfile(const char *file)
        /* Create temporary file in the same directory as the original */
        tail = strrchr(file, '/');
        if (tail != NULL)
-               strbuf_add(&template, file, tail - file + 1);
-       strbuf_addstr(&template, "git-interpret-trailers-XXXXXX");
+               strbuf_add(&filename_template, file, tail - file + 1);
+       strbuf_addstr(&filename_template, "git-interpret-trailers-XXXXXX");
 
-       trailers_tempfile = xmks_tempfile_m(template.buf, st.st_mode);
-       strbuf_release(&template);
+       trailers_tempfile = xmks_tempfile_m(filename_template.buf, st.st_mode);
+       strbuf_release(&filename_template);
        outfile = fdopen_tempfile(trailers_tempfile, "w");
        if (!outfile)
                die_errno(_("could not open temporary file"));
index 508015023176290ec0f8d78043d852cdd37d5ce6..3f380d87d99eab317d5ac567b43e3cea05885145 100644 (file)
@@ -672,6 +672,11 @@ static int fetch(struct transport *transport,
        if (data->transport_options.update_shallow)
                set_helper_option(transport, "update-shallow", "true");
 
+       if (data->transport_options.filter_options.choice)
+               set_helper_option(
+                       transport, "filter",
+                       data->transport_options.filter_options.filter_spec);
+
        if (data->fetch)
                return fetch_with_fetch(transport, nr_heads, to_fetch);
 
index fc802260f61c7496f4250b15ef6f5ab8dd316898..b9dfa11bd2a1f849f2c2d33aa4943a1ea9cb2d8b 100644 (file)
@@ -161,6 +161,15 @@ static int set_git_option(struct git_transport_options *opts,
        } else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) {
                opts->deepen_relative = !!value;
                return 0;
+       } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) {
+               opts->from_promisor = !!value;
+               return 0;
+       } else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) {
+               opts->no_dependents = !!value;
+               return 0;
+       } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) {
+               parse_list_objects_filter(&opts->filter_options, value);
+               return 0;
        }
        return 1;
 }
@@ -229,6 +238,9 @@ static int fetch_refs_via_pack(struct transport *transport,
                data->options.check_self_contained_and_connected;
        args.cloning = transport->cloning;
        args.update_shallow = data->options.update_shallow;
+       args.from_promisor = data->options.from_promisor;
+       args.no_dependents = data->options.no_dependents;
+       args.filter_options = data->options.filter_options;
 
        if (!data->got_remote_heads) {
                connect_setup(transport, 0);
@@ -355,7 +367,7 @@ static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_widt
                char type;
                const char *msg;
 
-               strbuf_add_unique_abbrev(&quickref, ref->old_oid.hash,
+               strbuf_add_unique_abbrev(&quickref, &ref->old_oid,
                                         DEFAULT_ABBREV);
                if (ref->forced_update) {
                        strbuf_addstr(&quickref, "...");
@@ -366,7 +378,7 @@ static void print_ok_ref_status(struct ref *ref, int porcelain, int summary_widt
                        type = ' ';
                        msg = NULL;
                }
-               strbuf_add_unique_abbrev(&quickref, ref->new_oid.hash,
+               strbuf_add_unique_abbrev(&quickref, &ref->new_oid,
                                         DEFAULT_ABBREV);
 
                print_ref_status(type, quickref.buf, ref, ref->peer_ref, msg,
@@ -449,7 +461,7 @@ static int print_one_push_status(struct ref *ref, const char *dest, int count,
 static int measure_abbrev(const struct object_id *oid, int sofar)
 {
        char hex[GIT_MAX_HEXSZ + 1];
-       int w = find_unique_abbrev_r(hex, oid->hash, DEFAULT_ABBREV);
+       int w = find_unique_abbrev_r(hex, oid, DEFAULT_ABBREV);
 
        return (w < sofar) ? sofar : w;
 }
index 731c78b6795740f8e9bfb80741b0170671b37dd3..3c68d73b215bbabc81a75a810b1083697bfd6329 100644 (file)
@@ -4,6 +4,7 @@
 #include "cache.h"
 #include "run-command.h"
 #include "remote.h"
+#include "list-objects-filter-options.h"
 
 struct string_list;
 
@@ -15,12 +16,15 @@ struct git_transport_options {
        unsigned self_contained_and_connected : 1;
        unsigned update_shallow : 1;
        unsigned deepen_relative : 1;
+       unsigned from_promisor : 1;
+       unsigned no_dependents : 1;
        int depth;
        const char *deepen_since;
        const struct string_list *deepen_not;
        const char *uploadpack;
        const char *receivepack;
        struct push_cas_option *cas;
+       struct list_objects_filter_options filter_options;
 };
 
 enum transport_family {
@@ -159,6 +163,18 @@ void transport_check_allowed(const char *type);
 /* Send push certificates */
 #define TRANS_OPT_PUSH_CERT "pushcert"
 
+/* Indicate that these objects are being fetched by a promisor */
+#define TRANS_OPT_FROM_PROMISOR "from-promisor"
+
+/*
+ * Indicate that only the wanted objects need to be fetched, not their
+ * dependents.
+ */
+#define TRANS_OPT_NO_DEPENDENTS "no-dependents"
+
+/* Filter objects for partial clone and fetch */
+#define TRANS_OPT_LIST_OBJECTS_FILTER "filter"
+
 /**
  * Returns 0 if the option was used, non-zero otherwise. Prints a
  * message to stderr if the option is not used.
index 63a87ed666bbb10cb3c2bd0e27117ac696e7d1b3..e11b3063afa610239162dc45c24528dd144c4759 100644 (file)
@@ -84,8 +84,7 @@ void *fill_tree_descriptor(struct tree_desc *desc, const struct object_id *oid)
        void *buf = NULL;
 
        if (oid) {
-               buf = read_object_with_reference(oid->hash, tree_type, &size,
-                                                NULL);
+               buf = read_object_with_reference(oid, tree_type, &size, NULL);
                if (!buf)
                        die("unable to read tree %s", oid_to_hex(oid));
        }
@@ -492,7 +491,7 @@ struct dir_state {
        unsigned char sha1[20];
 };
 
-static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char *result, unsigned *mode)
+static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned *mode)
 {
        int namelen = strlen(name);
        while (t->size) {
@@ -511,7 +510,7 @@ static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char
                if (cmp < 0)
                        break;
                if (entrylen == namelen) {
-                       hashcpy(result, oid->hash);
+                       oidcpy(result, oid);
                        return 0;
                }
                if (name[entrylen] != '/')
@@ -519,27 +518,27 @@ static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char
                if (!S_ISDIR(*mode))
                        break;
                if (++entrylen == namelen) {
-                       hashcpy(result, oid->hash);
+                       oidcpy(result, oid);
                        return 0;
                }
-               return get_tree_entry(oid->hash, name + entrylen, result, mode);
+               return get_tree_entry(oid, name + entrylen, result, mode);
        }
        return -1;
 }
 
-int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned char *sha1, unsigned *mode)
+int get_tree_entry(const struct object_id *tree_oid, const char *name, struct object_id *oid, unsigned *mode)
 {
        int retval;
        void *tree;
        unsigned long size;
-       unsigned char root[20];
+       struct object_id root;
 
-       tree = read_object_with_reference(tree_sha1, tree_type, &size, root);
+       tree = read_object_with_reference(tree_oid, tree_type, &size, &root);
        if (!tree)
                return -1;
 
        if (name[0] == '\0') {
-               hashcpy(sha1, root);
+               oidcpy(oid, &root);
                free(tree);
                return 0;
        }
@@ -549,7 +548,7 @@ int get_tree_entry(const unsigned char *tree_sha1, const char *name, unsigned ch
        } else {
                struct tree_desc t;
                init_tree_desc(&t, tree, size);
-               retval = find_tree_entry(&t, name, sha1, mode);
+               retval = find_tree_entry(&t, name, oid, mode);
        }
        free(tree);
        return retval;
@@ -583,14 +582,14 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
        struct dir_state *parents = NULL;
        size_t parents_alloc = 0;
        size_t i, parents_nr = 0;
-       unsigned char current_tree_sha1[20];
+       struct object_id current_tree_oid;
        struct strbuf namebuf = STRBUF_INIT;
        struct tree_desc t;
        int follows_remaining = GET_TREE_ENTRY_FOLLOW_SYMLINKS_MAX_LINKS;
 
        init_tree_desc(&t, NULL, 0UL);
        strbuf_addstr(&namebuf, name);
-       hashcpy(current_tree_sha1, tree_sha1);
+       hashcpy(current_tree_oid.hash, tree_sha1);
 
        while (1) {
                int find_result;
@@ -599,22 +598,22 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
 
                if (!t.buffer) {
                        void *tree;
-                       unsigned char root[20];
+                       struct object_id root;
                        unsigned long size;
-                       tree = read_object_with_reference(current_tree_sha1,
+                       tree = read_object_with_reference(&current_tree_oid,
                                                          tree_type, &size,
-                                                         root);
+                                                         &root);
                        if (!tree)
                                goto done;
 
                        ALLOC_GROW(parents, parents_nr + 1, parents_alloc);
                        parents[parents_nr].tree = tree;
                        parents[parents_nr].size = size;
-                       hashcpy(parents[parents_nr].sha1, root);
+                       hashcpy(parents[parents_nr].sha1, root.hash);
                        parents_nr++;
 
                        if (namebuf.buf[0] == '\0') {
-                               hashcpy(result, root);
+                               hashcpy(result, root.hash);
                                retval = FOUND;
                                goto done;
                        }
@@ -671,14 +670,14 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
 
                /* Look up the first (or only) path component in the tree. */
                find_result = find_tree_entry(&t, namebuf.buf,
-                                             current_tree_sha1, mode);
+                                             &current_tree_oid, mode);
                if (find_result) {
                        goto done;
                }
 
                if (S_ISDIR(*mode)) {
                        if (!remainder) {
-                               hashcpy(result, current_tree_sha1);
+                               hashcpy(result, current_tree_oid.hash);
                                retval = FOUND;
                                goto done;
                        }
@@ -688,7 +687,7 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
                                      1 + first_slash - namebuf.buf);
                } else if (S_ISREG(*mode)) {
                        if (!remainder) {
-                               hashcpy(result, current_tree_sha1);
+                               hashcpy(result, current_tree_oid.hash);
                                retval = FOUND;
                        } else {
                                retval = NOT_DIR;
@@ -714,8 +713,8 @@ enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_s
                         */
                        retval = DANGLING_SYMLINK;
 
-                       contents = read_sha1_file(current_tree_sha1, &type,
-                                                 &link_len);
+                       contents = read_object_file(&current_tree_oid, &type,
+                                                   &link_len);
 
                        if (!contents)
                                goto done;
index b6bd1b4ccfbb8bb69c464ea687c63a2058a424b8..4617deeb0e09e71c7ba7231192ece055eccb2dde 100644 (file)
@@ -79,7 +79,7 @@ struct traverse_info {
        int show_all_errors;
 };
 
-int get_tree_entry(const unsigned char *, const char *, unsigned char *, unsigned *);
+int get_tree_entry(const struct object_id *, const char *, struct object_id *, unsigned *);
 extern char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
 extern void setup_traverse_info(struct traverse_info *info, const char *base);
 
diff --git a/tree.c b/tree.c
index b224115e0f4d61368560eba406a04f0259b7c4f0..1c68ea586bd30d3e3389efa1c83f25ed607d1d80 100644 (file)
--- a/tree.c
+++ b/tree.c
@@ -10,7 +10,7 @@
 const char *tree_type = "tree";
 
 static int read_one_entry_opt(struct index_state *istate,
-                             const unsigned char *sha1,
+                             const struct object_id *oid,
                              const char *base, int baselen,
                              const char *pathname,
                              unsigned mode, int stage, int opt)
@@ -31,16 +31,16 @@ static int read_one_entry_opt(struct index_state *istate,
        ce->ce_namelen = baselen + len;
        memcpy(ce->name, base, baselen);
        memcpy(ce->name + baselen, pathname, len+1);
-       hashcpy(ce->oid.hash, sha1);
+       oidcpy(&ce->oid, oid);
        return add_index_entry(istate, ce, opt);
 }
 
-static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
+static int read_one_entry(const struct object_id *oid, struct strbuf *base,
                          const char *pathname, unsigned mode, int stage,
                          void *context)
 {
        struct index_state *istate = context;
-       return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
+       return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
 }
@@ -49,12 +49,12 @@ static int read_one_entry(const unsigned char *sha1, struct strbuf *base,
  * This is used when the caller knows there is no existing entries at
  * the stage that will conflict with the entry being added.
  */
-static int read_one_entry_quick(const unsigned char *sha1, struct strbuf *base,
+static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
                                const char *pathname, unsigned mode, int stage,
                                void *context)
 {
        struct index_state *istate = context;
-       return read_one_entry_opt(istate, sha1, base->buf, base->len, pathname,
+       return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_JUST_APPEND);
 }
@@ -83,7 +83,7 @@ static int read_tree_1(struct tree *tree, struct strbuf *base,
                                continue;
                }
 
-               switch (fn(entry.oid->hash, base,
+               switch (fn(entry.oid, base,
                           entry.path, entry.mode, stage, context)) {
                case 0:
                        continue;
@@ -219,7 +219,7 @@ int parse_tree_gently(struct tree *item, int quiet_on_missing)
 
        if (item->object.parsed)
                return 0;
-       buffer = read_sha1_file(item->object.oid.hash, &type, &size);
+       buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
diff --git a/tree.h b/tree.h
index 744e6dc2ac883adfa0e079f5f84f45a45e22b59d..e2a80be4ef87e35d895e8591a0f8a75df347d347 100644 (file)
--- a/tree.h
+++ b/tree.h
@@ -27,7 +27,7 @@ void free_tree_buffer(struct tree *tree);
 struct tree *parse_tree_indirect(const struct object_id *oid);
 
 #define READ_TREE_RECURSIVE 1
-typedef int (*read_tree_fn_t)(const unsigned char *, struct strbuf *, const char *, unsigned int, int, void *);
+typedef int (*read_tree_fn_t)(const struct object_id *, struct strbuf *, const char *, unsigned int, int, void *);
 
 extern int read_tree_recursive(struct tree *tree,
                               const char *base, int baselen,
index 96c3327f19de4b1060ed43ef291a57a76ff2b8cc..31a2cf2d0b5422b6c046da1a5f7600557b75977c 100644 (file)
@@ -15,6 +15,7 @@
 #include "submodule.h"
 #include "submodule-config.h"
 #include "fsmonitor.h"
+#include "fetch-object.h"
 
 /*
  * Error messages expected by scripts out of plumbing commands such as
@@ -194,10 +195,10 @@ static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
 static struct cache_entry *dup_entry(const struct cache_entry *ce)
 {
        unsigned int size = ce_size(ce);
-       struct cache_entry *new = xmalloc(size);
+       struct cache_entry *new_entry = xmalloc(size);
 
-       memcpy(new, ce, size);
-       return new;
+       memcpy(new_entry, ce, size);
+       return new_entry;
 }
 
 static void add_entry(struct unpack_trees_options *o,
@@ -370,6 +371,27 @@ static int check_updates(struct unpack_trees_options *o)
                load_gitmodules_file(index, &state);
 
        enable_delayed_checkout(&state);
+       if (repository_format_partial_clone && o->update && !o->dry_run) {
+               /*
+                * Prefetch the objects that are to be checked out in the loop
+                * below.
+                */
+               struct oid_array to_fetch = OID_ARRAY_INIT;
+               int fetch_if_missing_store = fetch_if_missing;
+               fetch_if_missing = 0;
+               for (i = 0; i < index->cache_nr; i++) {
+                       struct cache_entry *ce = index->cache[i];
+                       if ((ce->ce_flags & CE_UPDATE) &&
+                           !S_ISGITLINK(ce->ce_mode)) {
+                               if (!has_object_file(&ce->oid))
+                                       oid_array_append(&to_fetch, &ce->oid);
+                       }
+               }
+               if (to_fetch.nr)
+                       fetch_objects(repository_format_partial_clone,
+                                     &to_fetch);
+               fetch_if_missing = fetch_if_missing_store;
+       }
        for (i = 0; i < index->cache_nr; i++) {
                struct cache_entry *ce = index->cache[i];
 
@@ -1486,8 +1508,8 @@ static int verify_uptodate_1(const struct cache_entry *ce,
                add_rejected_path(o, error_type, ce->name);
 }
 
-static int verify_uptodate(const struct cache_entry *ce,
-                          struct unpack_trees_options *o)
+int verify_uptodate(const struct cache_entry *ce,
+                   struct unpack_trees_options *o)
 {
        if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
                return 0;
@@ -1506,7 +1528,7 @@ static void invalidate_ce_path(const struct cache_entry *ce,
        if (!ce)
                return;
        cache_tree_invalidate_path(o->src_index, ce->name);
-       untracked_cache_invalidate_path(o->src_index, ce->name);
+       untracked_cache_invalidate_path(o->src_index, ce->name, 1);
 }
 
 /*
index 6c48117b845fbf7b983852be302e4472c5e6d651..41178ada94a4b7c5222cab7dd17d9eeb7a1956e4 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef UNPACK_TREES_H
 #define UNPACK_TREES_H
 
+#include "tree-walk.h"
 #include "string-list.h"
 
 #define MAX_UNPACK_TREES 8
@@ -78,6 +79,9 @@ struct unpack_trees_options {
 extern int unpack_trees(unsigned n, struct tree_desc *t,
                struct unpack_trees_options *options);
 
+int verify_uptodate(const struct cache_entry *ce,
+                   struct unpack_trees_options *o);
+
 int threeway_merge(const struct cache_entry * const *stages,
                   struct unpack_trees_options *o);
 int twoway_merge(const struct cache_entry * const *src,
index d5de18127c63a9d017578e0969743392d39526d4..f51b6cfca9435d6b3b3f5b44b593a7c5cd2b1d3e 100644 (file)
@@ -10,6 +10,8 @@
 #include "diff.h"
 #include "revision.h"
 #include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
 #include "run-command.h"
 #include "connect.h"
 #include "sigchain.h"
@@ -19,6 +21,7 @@
 #include "argv-array.h"
 #include "prio-queue.h"
 #include "protocol.h"
+#include "quote.h"
 
 static const char * const upload_pack_usage[] = {
        N_("git upload-pack [<options>] <dir>"),
@@ -65,6 +68,10 @@ static int advertise_refs;
 static int stateless_rpc;
 static const char *pack_objects_hook;
 
+static int filter_capability_requested;
+static int filter_advertise;
+static struct list_objects_filter_options filter_options;
+
 static void reset_timeout(void)
 {
        alarm(timeout);
@@ -132,6 +139,17 @@ static void create_pack_file(void)
                argv_array_push(&pack_objects.args, "--delta-base-offset");
        if (use_include_tag)
                argv_array_push(&pack_objects.args, "--include-tag");
+       if (filter_options.filter_spec) {
+               if (pack_objects.use_shell) {
+                       struct strbuf buf = STRBUF_INIT;
+                       sq_quote_buf(&buf, filter_options.filter_spec);
+                       argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf);
+                       strbuf_release(&buf);
+               } else {
+                       argv_array_pushf(&pack_objects.args, "--filter=%s",
+                                        filter_options.filter_spec);
+               }
+       }
 
        pack_objects.in = -1;
        pack_objects.out = -1;
@@ -795,6 +813,12 @@ static void receive_needs(void)
                        deepen_rev_list = 1;
                        continue;
                }
+               if (skip_prefix(line, "filter ", &arg)) {
+                       if (!filter_capability_requested)
+                               die("git upload-pack: filtering capability not negotiated");
+                       parse_list_objects_filter(&filter_options, arg);
+                       continue;
+               }
                if (!skip_prefix(line, "want ", &arg) ||
                    get_oid_hex(arg, &oid_buf))
                        die("git upload-pack: protocol error, "
@@ -822,6 +846,8 @@ static void receive_needs(void)
                        no_progress = 1;
                if (parse_feature_request(features, "include-tag"))
                        use_include_tag = 1;
+               if (parse_feature_request(features, "filter"))
+                       filter_capability_requested = 1;
 
                o = parse_object(&oid_buf);
                if (!o) {
@@ -941,7 +967,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
                struct strbuf symref_info = STRBUF_INIT;
 
                format_symref_info(&symref_info, cb_data);
-               packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n",
+               packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n",
                             oid_to_hex(oid), refname_nons,
                             0, capabilities,
                             (allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
@@ -950,6 +976,7 @@ static int send_ref(const char *refname, const struct object_id *oid,
                                     " allow-reachable-sha1-in-want" : "",
                             stateless_rpc ? " no-done" : "",
                             symref_info.buf,
+                            filter_advertise ? " filter" : "",
                             git_user_agent_sanitized());
                strbuf_release(&symref_info);
        } else {
@@ -1028,6 +1055,8 @@ static int upload_pack_config(const char *var, const char *value, void *unused)
        } else if (current_config_scope() != CONFIG_SCOPE_REPO) {
                if (!strcmp("uploadpack.packobjectshook", var))
                        return git_config_string(&pack_objects_hook, var, value);
+       } else if (!strcmp("uploadpack.allowfilter", var)) {
+               filter_advertise = git_config_bool(var, value);
        }
        return parse_hide_refs_config(var, value, "uploadpack");
 }
index dbfb4e13cddceaa44feee51016c9b837f7f4fce1..a69241b25ddaff5b61380aa8b451a6fcb833502c 100644 (file)
@@ -38,6 +38,15 @@ IPATTERN("fortran",
         "|//|\\*\\*|::|[/<>=]="),
 IPATTERN("fountain", "^((\\.[^.]|(int|ext|est|int\\.?/ext|i/e)[. ]).*)$",
         "[^ \t-]+"),
+PATTERNS("golang",
+        /* Functions */
+        "^[ \t]*(func[ \t]*.*(\\{[ \t]*)?)\n"
+        /* Structs and interfaces */
+        "^[ \t]*(type[ \t].*(struct|interface)[ \t]*(\\{[ \t]*)?)",
+        /* -- */
+        "[a-zA-Z_][a-zA-Z0-9_]*"
+        "|[-+0-9.eE]+i?|0[xX]?[0-9a-fA-F]+i?"
+        "|[-+*/<>%&^|=!:]=|--|\\+\\+|<<=?|>>=?|&\\^=?|&&|\\|\\||<-|\\.{3}"),
 PATTERNS("html", "^[ \t]*(<[Hh][1-6]([ \t].*)?>.*)$",
         "[^<>= \t]+"),
 PATTERNS("java",
@@ -138,7 +147,7 @@ PATTERNS("csharp",
         /* Keywords */
         "!^[ \t]*(do|while|for|if|else|instanceof|new|return|switch|case|throw|catch|using)\n"
         /* Methods and constructors */
-        "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
+        "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe|async)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
         /* Properties */
         "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[@._[:alnum:]]+)[ \t]*$\n"
         /* Type definitions */
index 5d4d3733f75648dfa8d6955a5ec1ed9f3dfeb77b..dffb9c8e37c220e71e108060dc5a81bc21f8370c 100644 (file)
--- a/walker.c
+++ b/walker.c
@@ -22,7 +22,7 @@ void walker_say(struct walker *walker, const char *fmt, ...)
 static void report_missing(const struct object *obj)
 {
        fprintf(stderr, "Cannot obtain needed %s %s\n",
-               obj->type ? typename(obj->type): "object",
+               obj->type ? type_name(obj->type): "object",
                oid_to_hex(&obj->oid));
        if (!is_null_oid(&current_commit_oid))
                fprintf(stderr, "while processing commit %s.\n",
@@ -134,7 +134,7 @@ static int process_object(struct walker *walker, struct object *obj)
        }
        return error("Unable to determine requirements "
                     "of type %s for %s",
-                    typename(obj->type), oid_to_hex(&obj->oid));
+                    type_name(obj->type), oid_to_hex(&obj->oid));
 }
 
 static int process(struct walker *walker, struct object *obj)
index f5da7d286d537fa99a1bd2dd5180068b9d85da2f..28989cf06ef4bd15b3b97f86ae9fd32c4b78401f 100644 (file)
@@ -254,6 +254,102 @@ const char *is_worktree_locked(struct worktree *wt)
        return wt->lock_reason;
 }
 
+/* convenient wrapper to deal with NULL strbuf */
+static void strbuf_addf_gently(struct strbuf *buf, const char *fmt, ...)
+{
+       va_list params;
+
+       if (!buf)
+               return;
+
+       va_start(params, fmt);
+       strbuf_vaddf(buf, fmt, params);
+       va_end(params);
+}
+
+int validate_worktree(const struct worktree *wt, struct strbuf *errmsg,
+                     unsigned flags)
+{
+       struct strbuf wt_path = STRBUF_INIT;
+       char *path = NULL;
+       int err, ret = -1;
+
+       strbuf_addf(&wt_path, "%s/.git", wt->path);
+
+       if (is_main_worktree(wt)) {
+               if (is_directory(wt_path.buf)) {
+                       ret = 0;
+                       goto done;
+               }
+               /*
+                * A main worktree that uses a .git file to point to the
+                * repository would make it impossible to know where the
+                * actual worktree is if this function is executed from
+                * another worktree. No .git file support for now.
+                */
+               strbuf_addf_gently(errmsg,
+                                  _("'%s' at main working tree is not the repository directory"),
+                                  wt_path.buf);
+               goto done;
+       }
+
+       /*
+        * Make sure "gitdir" file points to a real .git file and that
+        * file points back here.
+        */
+       if (!is_absolute_path(wt->path)) {
+               strbuf_addf_gently(errmsg,
+                                  _("'%s' file does not contain absolute path to the working tree location"),
+                                  git_common_path("worktrees/%s/gitdir", wt->id));
+               goto done;
+       }
+
+       if (flags & WT_VALIDATE_WORKTREE_MISSING_OK &&
+           !file_exists(wt->path)) {
+               ret = 0;
+               goto done;
+       }
+
+       if (!file_exists(wt_path.buf)) {
+               strbuf_addf_gently(errmsg, _("'%s' does not exist"), wt_path.buf);
+               goto done;
+       }
+
+       path = xstrdup_or_null(read_gitfile_gently(wt_path.buf, &err));
+       if (!path) {
+               strbuf_addf_gently(errmsg, _("'%s' is not a .git file, error code %d"),
+                                  wt_path.buf, err);
+               goto done;
+       }
+
+       ret = fspathcmp(path, real_path(git_common_path("worktrees/%s", wt->id)));
+
+       if (ret)
+               strbuf_addf_gently(errmsg, _("'%s' does not point back to '%s'"),
+                                  wt->path, git_common_path("worktrees/%s", wt->id));
+done:
+       free(path);
+       strbuf_release(&wt_path);
+       return ret;
+}
+
+void update_worktree_location(struct worktree *wt, const char *path_)
+{
+       struct strbuf path = STRBUF_INIT;
+
+       if (is_main_worktree(wt))
+               die("BUG: can't relocate main worktree");
+
+       strbuf_realpath(&path, path_, 1);
+       if (fspathcmp(wt->path, path.buf)) {
+               write_file(git_common_path("worktrees/%s/gitdir", wt->id),
+                          "%s/.git", path.buf);
+               free(wt->path);
+               wt->path = strbuf_detach(&path, NULL);
+       }
+       strbuf_release(&path);
+}
+
 int is_worktree_being_rebased(const struct worktree *wt,
                              const char *target)
 {
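As a quick illustration of how the new validation helper is meant to be called (this sketch is not part of the patch; only validate_worktree(), the existing get_worktrees()/free_worktrees() helpers and the strbuf API are taken from the tree, the wrapper function itself is hypothetical):

/*
 * Hedged sketch: iterate over all worktrees and report the ones that
 * validate_worktree() rejects, using the optional errmsg strbuf to
 * collect the reason.  A non-zero return value means "broken".
 */
static void report_broken_worktrees(void)
{
        struct worktree **worktrees = get_worktrees(0);
        int i;

        for (i = 0; worktrees[i]; i++) {
                struct strbuf reason = STRBUF_INIT;

                if (validate_worktree(worktrees[i], &reason, 0))
                        fprintf(stderr, "worktree '%s': %s\n",
                                worktrees[i]->path, reason.buf);
                strbuf_release(&reason);
        }
        free_worktrees(worktrees);
}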
index c28a880e1839ef26604d80de78eb9c663a6be381..fe38ce10c300ba456f406950bf960d19d5d79cce 100644 (file)
--- a/worktree.h
+++ b/worktree.h
@@ -3,6 +3,8 @@
 
 #include "refs.h"
 
+struct strbuf;
+
 struct worktree {
        char *path;
        char *id;
@@ -59,6 +61,22 @@ extern int is_main_worktree(const struct worktree *wt);
  */
 extern const char *is_worktree_locked(struct worktree *wt);
 
+#define WT_VALIDATE_WORKTREE_MISSING_OK (1 << 0)
+
+/*
+ * Return zero if the worktree is in good condition. If "errmsg" is not
+ * NULL, an error message describing the problem is appended to it.
+ */
+extern int validate_worktree(const struct worktree *wt,
+                            struct strbuf *errmsg,
+                            unsigned flags);
+
+/*
+ * Update worktrees/xxx/gitdir with the new path.
+ */
+extern void update_worktree_location(struct worktree *wt,
+                                    const char *path_);
+
 /*
  * Free up the memory for worktree(s)
  */
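The expected consumer of this pair is a worktree-move code path: first check that the source worktree's metadata is consistent (tolerating a missing directory when the caller opts in), then record the new location. A hedged sketch under that assumption; everything except the two new declarations above is hypothetical:

/*
 * Sketch only: "wt" must be a linked worktree (the main worktree is
 * rejected by update_worktree_location() with a BUG).
 */
static void move_worktree_metadata(struct worktree *wt, const char *dst)
{
        struct strbuf errmsg = STRBUF_INIT;

        if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
                die(_("cannot move worktree: %s"), errmsg.buf);
        strbuf_release(&errmsg);

        /* ... the worktree directory itself would be renamed to "dst" here ... */

        /* worktrees/<id>/gitdir now points at the new location. */
        update_worktree_location(wt, dst);
}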
index 22b6e4948fbfa97da33dd0299da2130944940a42..5842408817aa7e5c584f244a7625cf458882d568 100644 (file)
--- a/wrap-for-bin.sh
+++ b/wrap-for-bin.sh
@@ -14,7 +14,7 @@ else
        GIT_TEMPLATE_DIR='@@BUILD_DIR@@/templates/blt'
        export GIT_TEMPLATE_DIR
 fi
-GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}"
+GITPERLLIB='@@BUILD_DIR@@/perl/build/lib'"${GITPERLLIB:+:$GITPERLLIB}"
 GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale'
 PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH"
 
index d20356a776bc63d4a94e4b421582433fb915087a..1fd5e33ea8f5c8cb4e6720b0bec74acf09232e3c 100644 (file)
--- a/wrapper.c
+++ b/wrapper.c
@@ -445,21 +445,21 @@ FILE *fopen_or_warn(const char *path, const char *mode)
        return NULL;
 }
 
-int xmkstemp(char *template)
+int xmkstemp(char *filename_template)
 {
        int fd;
        char origtemplate[PATH_MAX];
-       strlcpy(origtemplate, template, sizeof(origtemplate));
+       strlcpy(origtemplate, filename_template, sizeof(origtemplate));
 
-       fd = mkstemp(template);
+       fd = mkstemp(filename_template);
        if (fd < 0) {
                int saved_errno = errno;
                const char *nonrelative_template;
 
-               if (strlen(template) != strlen(origtemplate))
-                       template = origtemplate;
+               if (strlen(filename_template) != strlen(origtemplate))
+                       filename_template = origtemplate;
 
-               nonrelative_template = absolute_path(template);
+               nonrelative_template = absolute_path(filename_template);
                errno = saved_errno;
                die_errno("Unable to create temporary file '%s'",
                        nonrelative_template);
@@ -481,7 +481,7 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
        static const int num_letters = 62;
        uint64_t value;
        struct timeval tv;
-       char *template;
+       char *filename_template;
        size_t len;
        int fd, count;
 
@@ -503,16 +503,16 @@ int git_mkstemps_mode(char *pattern, int suffix_len, int mode)
         */
        gettimeofday(&tv, NULL);
        value = ((size_t)(tv.tv_usec << 16)) ^ tv.tv_sec ^ getpid();
-       template = &pattern[len - 6 - suffix_len];
+       filename_template = &pattern[len - 6 - suffix_len];
        for (count = 0; count < TMP_MAX; ++count) {
                uint64_t v = value;
                /* Fill in the random bits. */
-               template[0] = letters[v % num_letters]; v /= num_letters;
-               template[1] = letters[v % num_letters]; v /= num_letters;
-               template[2] = letters[v % num_letters]; v /= num_letters;
-               template[3] = letters[v % num_letters]; v /= num_letters;
-               template[4] = letters[v % num_letters]; v /= num_letters;
-               template[5] = letters[v % num_letters]; v /= num_letters;
+               filename_template[0] = letters[v % num_letters]; v /= num_letters;
+               filename_template[1] = letters[v % num_letters]; v /= num_letters;
+               filename_template[2] = letters[v % num_letters]; v /= num_letters;
+               filename_template[3] = letters[v % num_letters]; v /= num_letters;
+               filename_template[4] = letters[v % num_letters]; v /= num_letters;
+               filename_template[5] = letters[v % num_letters]; v /= num_letters;
 
                fd = open(pattern, O_CREAT | O_EXCL | O_RDWR, mode);
                if (fd >= 0)
@@ -541,21 +541,21 @@ int git_mkstemp_mode(char *pattern, int mode)
        return git_mkstemps_mode(pattern, 0, mode);
 }
 
-int xmkstemp_mode(char *template, int mode)
+int xmkstemp_mode(char *filename_template, int mode)
 {
        int fd;
        char origtemplate[PATH_MAX];
-       strlcpy(origtemplate, template, sizeof(origtemplate));
+       strlcpy(origtemplate, filename_template, sizeof(origtemplate));
 
-       fd = git_mkstemp_mode(template, mode);
+       fd = git_mkstemp_mode(filename_template, mode);
        if (fd < 0) {
                int saved_errno = errno;
                const char *nonrelative_template;
 
-               if (!template[0])
-                       template = origtemplate;
+               if (!filename_template[0])
+                       filename_template = origtemplate;
 
-               nonrelative_template = absolute_path(template);
+               nonrelative_template = absolute_path(filename_template);
                errno = saved_errno;
                die_errno("Unable to create temporary file '%s'",
                        nonrelative_template);
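For reference, the renamed parameter keeps mkstemp(3) semantics: the trailing "XXXXXX" run (followed by an optional fixed suffix for git_mkstemps_mode()) is overwritten in place with random characters, so callers must pass a writable buffer. A small hypothetical usage sketch; the paths and the wrapper function are made up:

static void tmpfile_examples(void)
{
        /* No suffix: the last six characters form the template. */
        char obj_path[] = "objects/tmp_obj_XXXXXX";
        /* With a suffix: the "XXXXXX" run sits right before ".pack". */
        char pack_path[] = "objects/pack/tmp_pack_XXXXXX.pack";
        int fd, pack_fd;

        fd = xmkstemp_mode(obj_path, 0444);     /* dies on failure */
        pack_fd = git_mkstemps_mode(pack_path, strlen(".pack"), 0444);
        if (pack_fd < 0)
                die_errno("cannot create temporary packfile");

        close(fd);
        close(pack_fd);
}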
index f5debcd2b4f05c50d5e70efc95d10d95ca6372cd..50815e5faffba0e9df861dbc4881e9fbffd7a941 100644 (file)
--- a/wt-status.c
+++ b/wt-status.c
@@ -136,6 +136,7 @@ void wt_status_prepare(struct wt_status *s)
        s->ignored.strdup_strings = 1;
        s->show_branch = -1;  /* unspecified */
        s->show_stash = 0;
+       s->ahead_behind_flags = AHEAD_BEHIND_UNSPECIFIED;
        s->display_comment_prefix = 0;
 }
 
@@ -1032,7 +1033,7 @@ static void wt_longstatus_print_tracking(struct wt_status *s)
        if (!skip_prefix(s->branch, "refs/heads/", &branch_name))
                return;
        branch = branch_get(branch_name);
-       if (!format_tracking_info(branch, &sb))
+       if (!format_tracking_info(branch, &sb, s->ahead_behind_flags))
                return;
 
        i = 0;
@@ -1187,7 +1188,7 @@ static void abbrev_sha1_in_line(struct strbuf *line)
                strbuf_trim(split[1]);
                if (!get_oid(split[1]->buf, &oid)) {
                        strbuf_reset(split[1]);
-                       strbuf_add_unique_abbrev(split[1], oid.hash,
+                       strbuf_add_unique_abbrev(split[1], &oid,
                                                 DEFAULT_ABBREV);
                        strbuf_addch(split[1], ' ');
                        strbuf_reset(line);
@@ -1349,7 +1350,7 @@ static void show_cherry_pick_in_progress(struct wt_status *s,
                                        const char *color)
 {
        status_printf_ln(s, color, _("You are currently cherry-picking commit %s."),
-                       find_unique_abbrev(state->cherry_pick_head_sha1, DEFAULT_ABBREV));
+                       find_unique_abbrev(&state->cherry_pick_head_oid, DEFAULT_ABBREV));
        if (s->hints) {
                if (has_unmerged(s))
                        status_printf_ln(s, color,
@@ -1368,7 +1369,7 @@ static void show_revert_in_progress(struct wt_status *s,
                                        const char *color)
 {
        status_printf_ln(s, color, _("You are currently reverting commit %s."),
-                        find_unique_abbrev(state->revert_head_sha1, DEFAULT_ABBREV));
+                        find_unique_abbrev(&state->revert_head_oid, DEFAULT_ABBREV));
        if (s->hints) {
                if (has_unmerged(s))
                        status_printf_ln(s, color,
@@ -1421,7 +1422,7 @@ static char *get_branch(const struct worktree *wt, const char *path)
                ;
        else if (!get_oid_hex(sb.buf, &oid)) {
                strbuf_reset(&sb);
-               strbuf_add_unique_abbrev(&sb, oid.hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&sb, &oid, DEFAULT_ABBREV);
        } else if (!strcmp(sb.buf, "detached HEAD")) /* rebase */
                goto got_nothing;
        else                    /* bisect */
@@ -1458,7 +1459,7 @@ static int grab_1st_switch(struct object_id *ooid, struct object_id *noid,
        if (!strcmp(cb->buf.buf, "HEAD")) {
                /* HEAD is relative. Resolve it to the right reflog entry. */
                strbuf_reset(&cb->buf);
-               strbuf_add_unique_abbrev(&cb->buf, noid->hash, DEFAULT_ABBREV);
+               strbuf_add_unique_abbrev(&cb->buf, noid, DEFAULT_ABBREV);
        }
        return 1;
 }
@@ -1488,10 +1489,10 @@ static void wt_status_get_detached_from(struct wt_status_state *state)
                state->detached_from = xstrdup(from);
        } else
                state->detached_from =
-                       xstrdup(find_unique_abbrev(cb.noid.hash, DEFAULT_ABBREV));
-       hashcpy(state->detached_sha1, cb.noid.hash);
+                       xstrdup(find_unique_abbrev(&cb.noid, DEFAULT_ABBREV));
+       oidcpy(&state->detached_oid, &cb.noid);
        state->detached_at = !get_oid("HEAD", &oid) &&
-                            !hashcmp(oid.hash, state->detached_sha1);
+                            !oidcmp(&oid, &state->detached_oid);
 
        free(ref);
        strbuf_release(&cb.buf);
@@ -1550,13 +1551,13 @@ void wt_status_get_state(struct wt_status_state *state,
        } else if (!stat(git_path_cherry_pick_head(), &st) &&
                        !get_oid("CHERRY_PICK_HEAD", &oid)) {
                state->cherry_pick_in_progress = 1;
-               hashcpy(state->cherry_pick_head_sha1, oid.hash);
+               oidcpy(&state->cherry_pick_head_oid, &oid);
        }
        wt_status_check_bisect(NULL, state);
        if (!stat(git_path_revert_head(), &st) &&
            !get_oid("REVERT_HEAD", &oid)) {
                state->revert_in_progress = 1;
-               hashcpy(state->revert_head_sha1, oid.hash);
+               oidcpy(&state->revert_head_oid, &oid);
        }
 
        if (get_detached_from)
@@ -1793,7 +1794,7 @@ static void wt_shortstatus_print_tracking(struct wt_status *s)
        const char *base;
        char *short_base;
        const char *branch_name;
-       int num_ours, num_theirs;
+       int num_ours, num_theirs, sti;
        int upstream_is_gone = 0;
 
        color_fprintf(s->fp, color(WT_STATUS_HEADER, s), "## ");
@@ -1819,7 +1820,9 @@ static void wt_shortstatus_print_tracking(struct wt_status *s)
 
        color_fprintf(s->fp, branch_color_local, "%s", branch_name);
 
-       if (stat_tracking_info(branch, &num_ours, &num_theirs, &base) < 0) {
+       sti = stat_tracking_info(branch, &num_ours, &num_theirs, &base,
+                                s->ahead_behind_flags);
+       if (sti < 0) {
                if (!base)
                        goto conclude;
 
@@ -1831,12 +1834,14 @@ static void wt_shortstatus_print_tracking(struct wt_status *s)
        color_fprintf(s->fp, branch_color_remote, "%s", short_base);
        free(short_base);
 
-       if (!upstream_is_gone && !num_ours && !num_theirs)
+       if (!upstream_is_gone && !sti)
                goto conclude;
 
        color_fprintf(s->fp, header_color, " [");
        if (upstream_is_gone) {
                color_fprintf(s->fp, header_color, LABEL(N_("gone")));
+       } else if (s->ahead_behind_flags == AHEAD_BEHIND_QUICK) {
+               color_fprintf(s->fp, header_color, LABEL(N_("different")));
        } else if (!num_ours) {
                color_fprintf(s->fp, header_color, LABEL(N_("behind ")));
                color_fprintf(s->fp, branch_color_remote, "%d", num_theirs);
@@ -1905,18 +1910,19 @@ static void wt_porcelain_print(struct wt_status *s)
  *
  *    <upstream> ::= the upstream branch name, when set.
  *
- *       <ahead> ::= integer ahead value, when upstream set
- *                   and the commit is present (not gone).
- *
- *      <behind> ::= integer behind value, when upstream set
- *                   and commit is present.
+ *       <ahead> ::= integer ahead value or '?'.
  *
+ *      <behind> ::= integer behind value or '?'.
  *
  * The end-of-line is defined by the -z flag.
  *
  *                 <eol> ::= NUL when -z,
  *                           LF when NOT -z.
  *
+ * When an upstream is set and present, the 'branch.ab' line will
+ * be printed with the ahead/behind counts for the branch and the
+ * upstream.  When AHEAD_BEHIND_QUICK is requested and the branches
+ * are different, '?' will be substituted for the actual count.
  */
 static void wt_porcelain_v2_print_tracking(struct wt_status *s)
 {
@@ -1956,14 +1962,25 @@ static void wt_porcelain_v2_print_tracking(struct wt_status *s)
                /* Lookup stats on the upstream tracking branch, if set. */
                branch = branch_get(branch_name);
                base = NULL;
-               ab_info = (stat_tracking_info(branch, &nr_ahead, &nr_behind, &base) == 0);
+               ab_info = stat_tracking_info(branch, &nr_ahead, &nr_behind,
+                                            &base, s->ahead_behind_flags);
                if (base) {
                        base = shorten_unambiguous_ref(base, 0);
                        fprintf(s->fp, "# branch.upstream %s%c", base, eol);
                        free((char *)base);
 
-                       if (ab_info)
-                               fprintf(s->fp, "# branch.ab +%d -%d%c", nr_ahead, nr_behind, eol);
+                       if (ab_info > 0) {
+                               /* different */
+                               if (nr_ahead || nr_behind)
+                                       fprintf(s->fp, "# branch.ab +%d -%d%c",
+                                               nr_ahead, nr_behind, eol);
+                               else
+                                       fprintf(s->fp, "# branch.ab +? -?%c",
+                                               eol);
+                       } else if (!ab_info) {
+                               /* same */
+                               fprintf(s->fp, "# branch.ab +0 -0%c", eol);
+                       }
                }
        }
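To make the grammar above concrete, the tracking header block emitted by e.g. 'git status --porcelain=v2 --branch' contains lines such as the following (values are illustrative; only the formats printed by the code above are shown):

# branch.upstream origin/master
# branch.ab +2 -1     (full counts, the default)
# branch.ab +? -?     (AHEAD_BEHIND_QUICK requested and the branches differ)
# branch.ab +0 -0     (branch and upstream are identical)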
 
index 3f84d5c29ff270596a894e2d42843b03d7aa37c5..430770b854c41b38b95dae6e8fa8943629a2309b 100644 (file)
--- a/wt-status.h
+++ b/wt-status.h
@@ -5,6 +5,7 @@
 #include "string-list.h"
 #include "color.h"
 #include "pathspec.h"
+#include "remote.h"
 
 struct worktree;
 
@@ -87,6 +88,7 @@ struct wt_status {
        int show_branch;
        int show_stash;
        int hints;
+       enum ahead_behind_flags ahead_behind_flags;
 
        enum wt_status_format status_format;
        unsigned char sha1_commit[GIT_MAX_RAWSZ]; /* when not Initial */
@@ -116,9 +118,9 @@ struct wt_status_state {
        char *branch;
        char *onto;
        char *detached_from;
-       unsigned char detached_sha1[20];
-       unsigned char revert_head_sha1[20];
-       unsigned char cherry_pick_head_sha1[20];
+       struct object_id detached_oid;
+       struct object_id revert_head_oid;
+       struct object_id cherry_pick_head_oid;
 };
 
 size_t wt_status_locate_end(const char *s, size_t len);
index 770e1f7f8185e05f2618c261b70a5773041432fb..9315bc0ede11ba0377e27d711e37b6a0ae555c43 100644 (file)
--- a/xdiff-interface.c
+++ b/xdiff-interface.c
@@ -191,7 +191,7 @@ void read_mmblob(mmfile_t *ptr, const struct object_id *oid)
                return;
        }
 
-       ptr->ptr = read_sha1_file(oid->hash, &type, &size);
+       ptr->ptr = read_object_file(oid, &type, &size);
        if (!ptr->ptr || type != OBJ_BLOB)
                die("unable to read blob object %s", oid_to_hex(oid));
        ptr->size = size;